query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Return All Feed Sources | def get_all_feed_sources(request):
feed_sources = FeedSource.objects.all().order_by('-id')
return get_feed_sources_list(feed_sources) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listsources():\n\tmain_url = \" https://newsapi.org/v2/sources?apiKey=5f81b593f35d42a8980313250c03d7e7\"\n\n\t# fetching data in json format \n\topen_source = requests.get(main_url).json() \n\n\t# getting all articles in a string sources\n\tsource = open_source[\"sources\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n\tresults = [] \n\t\n\tfor k in source: \n results.append(k[\"id\"])\n \n \t\n\tfor w in results[0:4]:\n print(w)",
"def show_sources_all():\n response = requests.get(SOURCE_URL)\n json = response.json()\n for source in json['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\", source['id'], source['name']))",
"def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources:\")\n print(sources)\n return sources",
"def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources\")\n print(sources)\n return sources",
"def fetch_feeds(self):\n feed_list = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n rss_title = rss.get('title', '-')\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n feed_list.append({\n 'title':rss_title,\n 'href':rss_href,\n 'status': feed.get('status', 400),\n 'updated': feed.get('updated', None),\n 'updated_parsed': feed.get('updated_parsed', None),\n 'encoding': feed.get('encoding', None),\n 'bozo': feed.get('bozo', None),\n 'headers': feed.get('headers', {}),\n 'etag': feed.get('etag', None),\n 'version': feed.get('version', None),\n 'entries': feed.get('entries', []),\n 'namespaces': feed.get('namespaces', None)\n })\n\n return feed_list",
"def Sources():\n return _sources",
"def list_feed(self):\n entities = []\n entities_j = self._get('traversal/type=f')\n if entities_j:\n for entity_j in entities_j:\n entities.append(Feed(entity_j['id'], CanonicalPath(entity_j['path'])))\n return entities",
"def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries",
"def list_feed(self):\n entities = []\n entities_j = self._get('strings/tags/module:inventory,feed:*')\n if entities_j and entities_j['feed']:\n for entity_j in entities_j['feed']:\n entities.append(Feed(entity_j, CanonicalPath('/f;{}'.format(entity_j))))\n return entities",
"def source_list(self):\n return list(self._client.group.streams_by_name().keys())",
"def sources(self) -> Sequence[Any]:\n return pulumi.get(self, \"sources\")",
"def getSubscriptionList(self):\r\n return self.feeds",
"def get(self):\n\n return self.get_request_handler(request.headers).get_all_sources()",
"def Sources(self):\n return self._sources",
"def sources(self):\n return self._sources",
"def feed(self):\n feed_dict = feedparser.parse(self.URL)\n return [self.entry_dict(entry) for entry in feed_dict['entries']]",
"def source_list(self):\n return list(self._group.streams_by_name().keys())",
"def fetch_feed_list(self, **args):\n return self.fetch(\"/feedlist\", **args)",
"def get_rss(self):\r\n rssfiles = []\r\n \r\n rssfiles.append(feedparser.parse(self.url))\r\n return rssfiles",
"def get_urls():\r\n return []",
"def get_data_sources(self) -> [DataSource]:\n return []",
"def getURLs():",
"def sources(self):\n for source_name, source in self._sources.items():\n yield source_name, source",
"def get_results_from_aggregation_sources(self, context):\n sources = context.getContentSources()\n results = []\n for source in sources:\n sresults = source.queryCatalog()\n if not sresults:\n continue\n results.append({\n 'id': source.id,\n 'title': source.Title(),\n 'description': source.Description(),\n 'uid': source.UID(),\n 'portal_type': sresults[0].portal_type,\n 'brains': sresults,\n 'brains_count': len(sresults),\n })\n return results",
"def solr_sources(self):\n # conn = pysolr.Solr(settings.SOLR['SERVER'])\n q = {\n \"fq\": ['type:source', f'archive_i:{self.pk}'],\n \"fl\": [\"pk\",\n \"public_images_b\",\n 'display_name_s',\n 'cover_image_i',\n 'source_type_s',\n 'date_statement_s',\n 'surface_type_s'],\n \"rows\": 10000,\n \"sort\": [\"shelfmark_ans asc\"]\n }\n\n res = SolrConnection.search(\"*:*\", **q)\n if res.hits > 0:\n return res.docs\n else:\n return []",
"def list_feeds(self) -> FeedList:\n return FeedList(\n feeds=[\n FeedAPIRecord(\n name=\"grypedb\",\n description=\"grypedb feed\",\n access_tier=\"0\",\n )\n ]\n )",
"def get_feed(self):\n possible_endings = ('rss', 'rss/')\n if not self.url or not self.url.endswith(possible_endings):\n print('Please check URL(is RSS?) and Internet connection')\n sys.exit()\n try:\n data = feedparser.parse(self.url)\n except urllib.error.URLError:\n print('Please input correct URL')\n sys.exit()\n self.get_content(data)\n return self.items",
"def sources(self):\n return self._sources.keys()",
"def find_feeds(keyword):\n feeds = []\n\n for url in google_find_urls(keyword):\n feeds.extend(find_rss_feeds(url))\n\n return feeds",
"def urls(self) -> list[str]:\r\n ..."
] | [
"0.71941465",
"0.71610236",
"0.6967968",
"0.6939923",
"0.685093",
"0.6846315",
"0.6794876",
"0.6746038",
"0.67335147",
"0.6717489",
"0.66628605",
"0.6634832",
"0.66216797",
"0.6619418",
"0.6598849",
"0.6596755",
"0.6580428",
"0.6546504",
"0.64944905",
"0.6429307",
"0.6428021",
"0.63770586",
"0.63703257",
"0.6286627",
"0.62860435",
"0.62662375",
"0.6226479",
"0.6172821",
"0.6132459",
"0.61194193"
] | 0.84350914 | 0 |
Partial method to extract exception messages to list | def _get_errors(exc):
if hasattr(exc, 'message'):
errors = exc.messages
else:
errors = [str(exc)]
return errors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_error(self) -> List[str]:\n return []",
"def get_error(self) -> List[str]:\n return []",
"def GetLongLineExceptions(self):\n return []",
"def _render_extended_error_message_list(self, extended_error):\n messages = []\n if isinstance(extended_error, dict):\n if ('Type' in extended_error and\n extended_error['Type'].startswith('ExtendedError.')):\n for msg in extended_error['Messages']:\n message_id = msg['MessageID']\n x = message_id.split('.')\n registry = x[0]\n msgkey = x[len(x) - 1]\n\n # if the correct message registry is loaded,\n # do string resolution\n if (registry in self.message_registries and msgkey in\n self.message_registries[registry]['Messages']):\n rmsgs = self.message_registries[registry]['Messages']\n msg_dict = rmsgs[msgkey]\n msg_str = message_id + ': ' + msg_dict['Message']\n\n for argn in range(0, msg_dict['NumberOfArgs']):\n subst = '%' + str(argn+1)\n m = str(msg['MessageArgs'][argn])\n msg_str = msg_str.replace(subst, m)\n\n if ('Resolution' in msg_dict and\n msg_dict['Resolution'] != 'None'):\n msg_str += ' ' + msg_dict['Resolution']\n\n messages.append(msg_str)\n else:\n # no message registry, simply return the msg object\n # in string form\n messages.append(str(message_id))\n\n return messages",
"def karma_exceptions(self, server, channel, nick, params):\n return pretty_list(self.exceptions)",
"def _parse_message(self, exc):\n return '%s: %s' % (exc.__class__.__name__, str(exc))",
"def error_wrapper(x):\n errors = list()\n for error_key, error_list in list(x.items()):\n for error in error_list:\n if error_key == 'non_field_errors':\n errors.append(error)\n else:\n errors.append(\"%s: %s\" % (error_key, error))\n return errors",
"def getExceptions(self):\n return self.getOrDefault(\"exceptions\")",
"def errors(self) -> List[Error]:",
"def error_messages(self) -> List[str]:\n spatial_msgs = []\n temporal_msgs = []\n if self.spatial:\n spatial_msgs = [m for v, m in self.spatial_validations if not v(self.spatial)]\n if self.temporal:\n temporal_msgs = [m for v, m in self.temporal_validations if not v(self.temporal)]\n\n return spatial_msgs + temporal_msgs",
"def parse(self, errors, explicit_ignore):\n\n error_list = []\n if errors is None:\n return error_list\n\n errors.sort(key=linter.cmp_to_key(lambda a, b: a.lineno < b.lineno))\n for error in errors:\n error_level = 'W' if not hasattr(error, 'level') else error.level\n message = error.message.capitalize()\n\n error_data = {\n 'underline_range': False,\n 'level': error_level,\n 'lineno': error.lineno,\n 'message': message,\n 'raw_error': str(error)\n }\n if hasattr(error, 'offset'):\n error_data['offset'] = error.offset\n elif hasattr(error, 'col'):\n error_data['offset'] = error.col\n\n if (isinstance(error, (linter.OffsetError))):\n error_data['underline_range'] = True\n error_list.append(error_data)\n elif (isinstance(\n error, (\n pyflakes.messages.RedefinedWhileUnused,\n pyflakes.messages.RedefinedInListComp,\n pyflakes.messages.UndefinedName,\n pyflakes.messages.UndefinedExport,\n pyflakes.messages.UndefinedLocal,\n pyflakes.messages.UnusedVariable)) and\n error.__class__.__name__ not in explicit_ignore):\n\n error_data['len'] = len(error.message_args[0])\n error_data['regex'] = (\n r'((and|or|not|if|elif|while|in)\\s+|[+\\-*^%%<>=\\(\\{{])*\\s'\n '*(?P<underline>[\\w\\.]*{0}[\\w]*)'.format(re.escape(\n error.message_args[0]\n ))\n )\n error_list.append(error_data)\n elif isinstance(error, pyflakes.messages.ImportShadowedByLoopVar):\n regex = 'for\\s+(?P<underline>[\\w]*{0}[\\w*])'.format(\n re.escape(error.message_args[0])\n )\n error_data['regex'] = regex\n error_list.append(error_data)\n elif (isinstance(\n error, (\n pyflakes.messages.UnusedImport,\n pyflakes.messages.ImportStarUsed)) and\n error.__class__.__name__ not in explicit_ignore):\n if isinstance(error, pyflakes.messages.ImportStarUsed):\n word = '*'\n else:\n word = error.message_args[0]\n\n linematch = '(from\\s+[\\w_\\.]+\\s+)?import\\s+(?P<match>[^#;]+)'\n r = '(^|\\s+|,\\s*|as\\s+)(?P<underline>[\\w]*{0}[\\w]*)'.format(\n re.escape(word)\n )\n error_data['regex'] = r\n error_data['linematch'] = linematch\n error_list.append(error_data)\n elif (isinstance(error, pyflakes.messages.DuplicateArgument) and\n error.__class__.__name__ not in explicit_ignore):\n regex = 'def [\\w_]+\\(.*?(?P<underline>[\\w]*{0}[\\w]*)'.format(\n re.escape(error.message_args[0])\n )\n error_data['regex'] = regex\n error_list.append(error_data)\n elif isinstance(error, pyflakes.messages.LateFutureImport):\n pass\n elif isinstance(error, linter.PythonError):\n print(error)\n else:\n print(\n 'Ooops, we missed an error type for pyflakes', type(error)\n )\n\n return error_list",
"def warnings(self) -> List[Error]:",
"def parse_last_exception(message):\n for pattern, response in patterns:\n items_found = re.findall(pattern, repr(message))\n if items_found:\n #print(\"FOUND\", items_found)\n print_exception_message(response, items_found[0])\n break\n else:\n unrecognised_exception(message)",
"def retrieve_error_messages(self):\n return self.errors_seen[:]",
"def _preprocess(self, msg, args, kwargs):\n if \"exc_info\" in kwargs:\n f = failure.Failure()\n kwargs['failure'] = f\n kwargs['isError'] = 1\n del kwargs['exc_info']\n return (msg, args, kwargs)",
"def errors_fatal(self) -> List[Error]:",
"def _exceptions_formatter(field):\n heads = ['throws']\n types = _or_types(field)\n if types:\n heads.append(types)\n tail = field.get('description', '')\n return heads, tail",
"def formatErrors(self):\n errorlist = []\n xepsWithErrors = sorted(\n set(self.getParseErrors() + self.getBuildErrors()),\n key=lambda x: str(x))\n if self.getErrors() or xepsWithErrors:\n if self.getErrors():\n errorlist.append(\"********** Read errors **********\")\n for error in self.getErrors():\n errorlist.append(error)\n for xep in xepsWithErrors:\n errorlist.append(\n \"********** Error report for {} **********\".format(str(xep)))\n if xep.parseErrors:\n errorlist.append(\"********** Parsing Errors **********\")\n errors = list(set(xep.parseErrors))\n for error in errors:\n errorlist.append(error)\n if xep.buildErrors:\n errorlist.append(\"********** Build Errors **********\")\n for error in xep.buildErrors:\n if len(error.splitlines()) > 4:\n error = ''.join(error.splitlines()[:4])\n errorlist.append(error)\n return '\\n'.join(errorlist)\n else:\n return None",
"def get_er_exceptions():\n express_route_exceptions_lst = []\n try:\n for i in get_data():\n if i['expressRoute'] is False:\n express_route_exceptions_lst.append(i)\n express_route_exceptions_dic = {'expressRoutesExceptions': express_route_exceptions_lst}\n return get_json(express_route_exceptions_dic)\n except ValueError as e:\n print(e)",
"def exception_stacktrace(self):\n # type: () -> list[string_types]\n return self._exception_stacktrace",
"def get_errors(self, obj):\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True, context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None",
"def addExceptionMessage(self, q, inst, traceback):\n self.fail('FAIL: Exception raised: %s' % inst)\n self.addMessage('')\n for line in traceback.format_exc().split('\\n'):\n self.addMessage(line)",
"def ignored(*exceptions):\n import logging\n import pprint\n try:\n yield\n except exceptions:\n logging.warning(pprint.pformat(exceptions[0]))\n pass",
"def service_exception_to_warning():\n try:\n yield\n except Exception: # pylint: disable=broad-except\n LOG.warning(\"Experiment service operation failed: %s\", traceback.format_exc())",
"def handle_exception_24011(msgs):\n\n cp_class_name = \"short message and notification transfer on CM\"\n rp_class_name = \"short message and notification transfer on CM-RP messages\"\n types = {\n # 'cp-data' [1, cp_Class_name] is embedding below messages\n \"rp-data\": [0, rp_class_name],\n \"rp-data\": [1, rp_class_name],\n \"rp-ack\": [2, rp_class_name],\n \"rp-ack\": [3, rp_class_name],\n \"rp-error\": [4, rp_class_name],\n \"rp-error\": [5, rp_class_name],\n \"rp-smma\": [6, rp_class_name],\n \"cp-ack\": [4, cp_class_name],\n \"cp-error\": [16, cp_class_name],\n }\n\n return msgs, types",
"def _get_retriable_errors(out: List[str]) -> List[str]:\n return [\n line for line in out\n if any(error in line for error in RETRIABLE_ERRORS)\n ]",
"def validation_errors_to_error_messages(validation_errors):\n error_messages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n error_messages.append(f\"{field}: {error}\")\n return error_messages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages"
] | [
"0.63805306",
"0.63805306",
"0.6372775",
"0.62631696",
"0.60500824",
"0.60415626",
"0.5934655",
"0.59270895",
"0.59237975",
"0.5885909",
"0.5848883",
"0.577784",
"0.5772229",
"0.57692844",
"0.5768745",
"0.5656789",
"0.56534594",
"0.56390977",
"0.56339467",
"0.5612228",
"0.5599838",
"0.55981404",
"0.55915225",
"0.558531",
"0.5574687",
"0.55693114",
"0.5562891",
"0.5562575",
"0.5562575",
"0.5562575"
] | 0.7112101 | 0 |
Displays the conflicts in a specific schedule given a requirements file. | def check_reqs(reqs_file, sched_file):
ras = scheduler.parse_file(reqs_file)
ras_dict = dict()
for ra in ras:
ras_dict[ra.name] = ra
sched = scheduler.parse_sched_file(sched_file)
count = 0
for curr in sched:
parts = curr.split('-')
curr_date = date(int(parts[0]), int(parts[1]), int(parts[2]))
ra = ras_dict[sched[curr]]
if curr_date in ra.unvirregular or curr_date.weekday() in ra.unv_regular:
print 'Conflict: %s - %s' % (curr, sched[curr])
count += 1
print '%d total conflicts' % (count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conflicts(request):\n\n form = ConflictsForm(request.GET)\n if form.is_valid():\n beg_date = form.cleaned_data['beg_date']\n end_date = form.cleaned_data['end_date']\n else:\n beg_date, end_date = get_week_range_by_date(datetime.datetime.today())\n\n terms = Term.prepare_conflict_dict(beg_date, end_date)\n title = 'Konflikty'\n return TemplateResponse(request, 'schedule/conflicts.html', locals())",
"def output_schedule(self) -> None:\n with open(\"Output.txt\", \"w\") as out_file:\n for sem in self.plan:\n out_file.write(sem.title.center(15 + 20 + 50 + 5) + \"\\n\\n\")\n for course in sem.required_courses:\n if course.special:\n out_file.write(\"*\" * 10 + \" \" * 5 + f\"{course.special_type}\\n\")\n elif course.grade != \"\":\n out_file.write(\n course.sem_taken.ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + course.grade.ljust(5)\n + \"\\n\"\n )\n else:\n out_file.write(\n \"AP/UNK\".ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + \"AP/UNK\".ljust(5)\n + \"\\n\"\n )\n out_file.write(\"\\n\\n\")",
"def print_schedule():\n clear_screen()\n print(\"====Current Schedule====\")\n days = ['sun', 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat']\n with open('current_courses.json', 'r') as current_file:\n schedule = json.load(current_file)\n for day in days:\n for val, val2 in schedule.items():\n if day in val2[0]:\n print(day, val, str(val2[1])+'-'+str(val2[2])+\" Presumed Grade: \"+ val2[3])\n return 0",
"def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"",
"def log_conflicts(clock, places):\n for place in places:\n place.log_conflicts(clock)",
"def output_schedule_brief(cout, courses_to_schedule_d, courses_to_mt_d):\n cout.writerow([\"CourseCode\",\"DayWeek\",\"Start\",\"End\",\"Campus\"])\n\n # first write out the courses we just scheduled\n for cn in sorted(courses_to_schedule_d.keys()):\n meeting_time = courses_to_mt_d[cn]\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n ct = ss.meeting_time_to_course_time(meeting_time)\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])\n\n # Now write out all the other courses\n for cn in sorted(sched_d.keys()):\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n cts = sched_d[cn]\n for ct in cts:\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])",
"def baron_schedule(request):\n assert isinstance(request, HttpRequest)\n\n return render(\n request,\n 'AscensionESports_Baseline/schedule.html',\n {\n 'background': getBaronBackground(),\n 'color': getBaronColor(),\n 'title':'Baron League Schedule',\n 'query_results': Baron_Match_Report_Request(request),\n 'year': datetime.now().year,\n }\n )",
"def printSchedule():\r\n print(\"{0:^45}\".format(\"Your Schedule:\\n\"))\r\n print(\" Day Class Time\")\r\n if(len(classes) == 0):\r\n print(\"\\nThere are no classes\\n\")\r\n return\r\n for class_ in classes:\r\n print(class_.scheduleString())\r\n print()",
"def print_team_schedule(\n sch: Schedule,\n team: str,\n team_list: list[str],\n capt_list: list[str],\n outfile: typing.Union[str, TextIOWrapper] = \"print\",\n):\n if outfile == \"print\":\n\n def pline(txt):\n print(txt)\n\n else:\n\n def pline(txt):\n outfile.write(txt + \"\\n\")\n\n line = \"\"\n\n pline(\"\\nTeam: \" + team + \"\\n\")\n for rnd in range(sch.nrounds):\n _rnd = sch.rounds[rnd]\n line = f\"{_rnd.play_date}\"\n game_not_found = True\n match = 0\n while game_not_found and match < _rnd.nmatches:\n _match = _rnd.matches[match]\n if _match.home == team:\n _teamidx = team_list.index(_match.away)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.away:\n line = line + f\" vs. {_match.away} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n elif _match.away == team:\n _teamidx = team_list.index(_match.home)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.home:\n line = line + f\" @ {_match.home} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n else:\n match = match + 1\n if game_not_found:\n logging.warning(\"Bye week is not expected.\")\n line = line + \"Bye Week\"\n pline(line)",
"def solve_schedule_loop(conflicts_d, sched_d, courses_to_schedule_d, enroll_d, constraints = None, loop_count = None):\n # Create the solver\n solver = pywraplp.Solver('CourseSchedule',\n # pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n # pywraplp.Solver.BOP_INTEGER_PROGRAMMING)\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n\n courses = {}\n # Let each constraint create its variables and constraints\n for cname in courses_to_schedule_d:\n assert is_cross_list_canonical(cname)\n courses[cname] = Course(cname, courses_to_schedule_d[cname][0], courses_to_schedule_d[cname][1])\n courses[cname].createVarsAndConstraints(solver)\n\n # Now let each course put in its objective function, to avoid bad conflicts\n objective = solver.Objective()\n objective.SetMinimization()\n\n conflict_vars_d = {}\n \n for cname in courses:\n courses[cname].createObjective(solver, objective, conflict_vars_d, sched_d, conflicts_d, courses)\n\n\n add_area_constraints(solver, objective, courses)\n\n if SOLVER_VERSION == 1:\n add_student_schedule_constraints_v1(solver, objective, courses, enroll_d, sched_d)\n elif SOLVER_VERSION == 2:\n add_student_schedule_constraints_v2(solver, objective, courses, enroll_d, sched_d)\n else:\n assert SOLVER_VERSION == 3\n\n if constraints:\n for cs in constraints:\n # cs is a list of pairs (cn, mt) of canonical course name cn and meeting time mt\n # Add constraints to make sure that we can't have the conjunction of these.\n vars = [courses[cn].vars_meeting_time[mt] for (cn, mt) in cs]\n if vars:\n v = solver.IntVar(0, 1, \"Soln count %s constraint %s\"%(loop_count, cs))\n makeConjunction(solver, v, vars)\n cnst = solver.Constraint(0, 0)\n cnst.SetCoefficient(v, 1)\n\n \n if SOLVER_VERSION in [1,2]:\n print('Number of courses to schedule =', len(courses_to_schedule_d))\n print('Number of variables =', solver.NumVariables())\n print('Number of constraints =', solver.NumConstraints())\n print(\"Starting to solve....\")\n\n if SOLVER_VERSION == 3:\n solver.SetTimeLimit(10 * 1000) # 10 second time limit\n\n starttime = datetime.datetime.now()\n result_status = solver.Solve()\n endtime = datetime.datetime.now()\n\n # The problem has an optimal solution.\n if SOLVER_VERSION in [1,2,3]:\n print (\"Result status: %s\"%result_status)\n print (\"Time: %s seconds\"%((endtime-starttime).total_seconds()))\n\n if result_status == pywraplp.Solver.FEASIBLE:\n # we timed out!\n return None\n \n assert result_status == pywraplp.Solver.OPTIMAL\n\n \n # The solution looks legit (when using solvers other than\n # GLOP_LINEAR_PROGRAMMING, verifying the solution is highly recommended!).\n assert solver.VerifySolution(1e-7, True)\n\n # The objective value of the solution.\n if SOLVER_VERSION in [1,2]:\n print('Optimal objective value = ' + str(solver.Objective().Value()))\n print()\n\n\n # Print out the bad conflicts\n any_conflicts = False\n for cn1 in conflict_vars_d:\n for cn2 in conflict_vars_d[cn1]:\n v = conflict_vars_d[cn1][cn2]\n if v.solution_value():\n any_conflicts = True\n print(\"%s conflicts with %s (badness %s)\"%(cn1, cn2, conflicts_d[cn1][cn2]))\n if not any_conflicts:\n print(\"No bad conflicts!\")\n\n print()\n \n for cname in sorted(courses.keys()):\n c = courses[cname]\n s = c.solution_meeting_time()\n print(\"%-13s scheduled %s\"%(cname, ss.meeting_time_to_course_time(s)))\n\n return (solver, courses)",
"def nflschedule(self, irc, msg, args, optlist, optteam):\n \n fullSchedule = False\n for (option, arg) in optlist:\n if option == 'full':\n fullSchedule = True\n \n optteam = optteam.upper()\n \n if optteam not in self._validteams():\n irc.reply(\"Team not found. Must be one of: %s\" % self._validteams())\n return\n \n lookupteam = self._translateTeam('yahoo', 'team', optteam) # don't need a check for 0 here because we validate prior.\n \n if fullSchedule: # diff url/method.\n url = self._b64decode('aHR0cDovL3Nwb3J0cy55YWhvby5jb20vbmZsL3RlYW1z') + '/%s/schedule' % lookupteam\n\n try:\n request = urllib2.Request(url)\n html = (urllib2.urlopen(request)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'summary':'Regular Season Games'})\n \n if not table:\n irc.reply(\"ERROR: Failed to find schedule for: %s\") % optteam\n return\n \n tbody = table.find('tbody')\n rows = tbody.findAll('tr')\n\n append_list = []\n\n for row in rows:\n tds = row.findAll('td')\n week = tds[0]\n \n if row.find('td', attrs={'class':'title bye'}):\n date = \"BYE\"\n opp = \"\"\n score = \"\"\n appendString = \"W{0}-{1}\".format(ircutils.bold(week.getText()), ircutils.underline(\"BYE\"))\n else:\n date = tds[1].getText()\n dateSplit = date.split(',', 1) # take the date, dump the rest.\n date = dateSplit[1]\n opp = tds[2] # with how the Tag/string comes in, we need to extract one part and format the other.\n oppName = opp.find('span')\n if oppName:\n oppName.extract()\n oppTeam = opp.find('a').getText() \n #opp = tds[2].find('span').getText()\n #opp = self._translateTeam('team','full', opp) # use the db to make a full team small.\n score = tds[3].getText().replace('EDT','').replace('EST','').replace('pm','').replace('am','') # strip the garbage\n #score = score.replace('W', ircutils.mircColor('W', 'green')).replace('L', ircutils.mircColor('L', 'red'))\n appendString = \"W{0}-{1} {2} {3}\".format(ircutils.bold(week.getText()), date.strip(), oppTeam.strip(), score.strip())\n \n append_list.append(appendString)\n\n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} SCHED :: {1}\".format(ircutils.mircColor(optteam, 'red'), descstring)\n irc.reply(output)\n else:\n url = self._b64decode('aHR0cDovL3Nwb3J0cy55YWhvby5jb20vbmZsL3RlYW1z') + '/%s/calendar/rss.xml' % lookupteam\n \n try:\n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n html = response.read()\n except:\n irc.reply(\"Cannot open: %s\" % url)\n return\n\n # clean this stuff up\n html = html.replace('<![CDATA[','').replace(']]>','').replace('EDT','').replace('\\xc2\\xa0',' ')\n\n soup = BeautifulSoup(html)\n items = soup.find('channel').findAll('item')\n \n append_list = []\n\n for item in items:\n title = item.find('title').renderContents().strip() # title is good.\n day, date = title.split(',')\n desc = item.find('description') # everything in desc but its messy.\n desctext = desc.findAll(text=True) # get all text, first, but its in a list.\n descappend = (''.join(desctext).strip()) # list transform into a string.\n if not descappend.startswith('@'): # if something is @, it's before, but vs. otherwise.\n descappend = 'vs. ' + descappend\n descappend += \" [\" + date.strip() + \"]\"\n append_list.append(descappend) # put all into a list.\n\n \n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} {1}\".format(ircutils.bold(optteam), descstring)\n irc.reply(output)",
"def mainSchedule():\n\timport time\n\tc1 = Content(1,5,20)\n\tc2 = Content(2,6,30)\n\tc3 = Content(3,5,25)\n\tc1_ = Content(1,1,20)\n\tc5 = Content(5,3,29)\n\tc6 = Content(6,11,50)\n\tc7 = Content(7,7,34)\n\tc1__ = Content(1,3,20)\n\tc8 = Content(8,6,10)\n\ta1 = Area('a1',1.0)\n\ta2 = Area('a2',0.5)\n\ta3 = Area('a3',0.8)\n\tcontents = [c1,c2,c3,c1_,c5,c6,c7,c1__,c8]\n\tareas = [a1,a2,a3]\n\tsol_schedule = Schedule_solution()\n\tprint \"random sampling schedule:\\n\"\n\ttime_r = time.time()\n\tschedule_sols = sol_schedule.schedule_randomSampling(contents,areas)\n\tprint \"running time,\",time.time()-time_r\n\tprint \"local search schedule:\"\n\ttime_l = time.time()\n\tschedule_sols_local = sol_schedule.schedule_localSearch(contents,areas)\n\tprint \"running time,\",time.time()-time_l\n\tsol_selection = Selection_solution()\n\tsol_selection.select_bruteforce(4,*schedule_sols) #argument unpacking",
"def getConflictReport(self,srcInstaller,mode):\n data = self.data\n srcOrder = srcInstaller.order\n conflictsMode = (mode == 'OVER')\n if conflictsMode:\n #mismatched = srcInstaller.mismatchedFiles | srcInstaller.missingFiles\n mismatched = set(srcInstaller.data_sizeCrc)\n else:\n mismatched = srcInstaller.underrides\n showInactive = conflictsMode and settings['bash.installers.conflictsReport.showInactive']\n showLower = conflictsMode and settings['bash.installers.conflictsReport.showLower']\n if not mismatched: return ''\n src_sizeCrc = srcInstaller.data_sizeCrc\n packConflicts = []\n getArchiveOrder = lambda x: data[x].order\n for package in sorted(self.data,key=getArchiveOrder):\n installer = data[package]\n if installer.order == srcOrder: continue\n if not showInactive and not installer.isActive: continue\n if not showLower and installer.order < srcOrder: continue\n curConflicts = Installer.sortFiles([x.s for x,y in installer.data_sizeCrc.iteritems() \n if x in mismatched and y != src_sizeCrc[x]])\n if curConflicts: packConflicts.append((installer.order,package.s,curConflicts))\n #--Unknowns\n isHigher = -1\n buff = cStringIO.StringIO()\n for order,package,files in packConflicts:\n if showLower and (order > srcOrder) != isHigher:\n isHigher = (order > srcOrder)\n buff.write('= %s %s\\n' % ((_('Lower'),_('Higher'))[isHigher],'='*40))\n buff.write('==%d== %s\\n'% (order,package))\n for file in files:\n buff.write(file)\n buff.write('\\n')\n buff.write('\\n')\n report = buff.getvalue()\n if not conflictsMode and not report and not srcInstaller.isActive:\n report = _(\"No Underrides. Mod is not completely un-installed.\")\n return report",
"def add_timeslot(self, inds_con_route, inds_confict_groups, inds_merge_groups, prio,\n ids_con_tls, inds_cons_conflict, inds_cons_merge,\n phaseallocations, phaseconflicts, phaseblocks, phasepriorities,\n ids_fromlane_tls,\n ids_tolane_tls,\n are_enabeled):\n print '\\nadd_timeslot for', ids_con_tls[inds_con_route]\n # go through all connections used by this route and\n # signalize all conflicts\n # for id_con in ids_con_tls[inds_con_route]:\n # #inds_con_conflict = inds_cons_conflict[ids_con]\n # print ' check id_con',id_con\n\n slots, slots_blocked = self.init_slots(inds_con_route, # all used connections\n ids_con_tls,\n inds_cons_conflict, inds_cons_merge,\n ids_fromlane_tls,\n ids_tolane_tls,\n are_enabeled)\n n_slots = len(np.flatnonzero(slots))\n print ' n_slots', n_slots, 'n_phases', len(phaseconflicts)\n\n if n_slots == 0:\n print ' no conflicts detected'\n pass\n\n elif len(phaseconflicts) == 0:\n print ' append first phase'\n phaseallocations.append(inds_con_route)\n phaseconflicts.append(slots)\n phaseblocks.append(slots_blocked)\n phasepriorities.append(prio)\n else:\n print ' search phase with minimum signal difference n_phases=', len(phaseallocations)\n n_diff_min = 10**8\n i_phase = 0\n i_phase_min = -1\n for inds_phaseallocation, inds_phaseconflict in zip(phaseallocations, phaseconflicts):\n # check if slots overlap with allocated connections this phase\n print ' compare phase', i_phase\n print ' new allocations', ids_con_tls[inds_con_route]\n print ' phaseallocations', ids_con_tls[inds_phaseallocation]\n print ' new conflicts', ids_con_tls[slots == 2]\n print ' phase conflicts', ids_con_tls[inds_phaseconflict == 2]\n # print ' allocations',ids_con_tls[inds_phaseallocation]\n print ' n_diff =', np.sum(np.any((slots == 2) & inds_phaseallocation))\n\n if not np.any((slots == 2) & inds_phaseallocation):\n print ' no conflict in this phase go for a merge'\n i_phase_min = i_phase\n n_diff_min = -1 # indicate phase merge\n\n else:\n print ' there are conflicts with this phase...count'\n n_diff = np.sum(np.any((slots == 2) & inds_phaseallocation))\n #n_diff = np.sum(np.abs(slots - inds_phaseconflict)!=0)\n # print ' ',inds_phaseconflict,n_diff\n if n_diff < n_diff_min:\n n_diff_min = n_diff\n i_phase_min = i_phase\n\n i_phase += 1\n\n print ' finished comparing phases i_phase_min,n_diff_min', i_phase_min, n_diff_min\n\n if n_diff_min == 0:\n print ' already a phase with suitable signalling, nothing to do'\n pass\n\n elif n_diff_min == -1:\n print ' there are no phase conflicts, so merge'\n phaseallocations[i_phase_min] = phaseallocations[i_phase_min] | inds_con_route\n phaseconflicts[i_phase_min] = np.max([slots, phaseconflicts[i_phase_min]], 0)\n phaseblocks[i_phase_min] = phaseblocks[i_phase_min] | slots_blocked\n phasepriorities[i_phase_min] = max(prio, phasepriorities[i_phase_min])\n\n # elif n_diff_min>10**7:\n # # all existing phases are conflicting\n\n else:\n # get number of cons which are more restrictive signals\n n_diff_pos = np.sum((slots - phaseconflicts[i_phase_min]) > 0)\n\n # get number of cons which are less restrictive signals\n n_diff_neg = np.sum((slots - phaseconflicts[i_phase_min]) < 0)\n\n print ' n_diff_min', n_diff_min, 'n_diff_pos', n_diff_pos, 'n_diff_neg', n_diff_neg, 'i_phase_min', i_phase_min\n # print ' inds_phaseconflict_min',ids_con_tls[phaseconflicts[i_phase_min] >0]\n # print ' inds_phaseconflict',ids_con_tls[slots>0]\n #inds_diff = np.abs(slots - inds_phaseconflict)!=0\n # if (n_diff_pos>0) & (n_diff_neg == 0):\n if (n_diff_pos >= n_diff_neg) & 
(n_diff_pos <= 2):\n # only more restrictive\n print ' put new phase after the phase with minimum difference'\n phaseallocations.insert(i_phase_min+1, inds_con_route)\n phaseconflicts.insert(i_phase_min+1, slots)\n phaseblocks.insert(i_phase_min+1, slots_blocked)\n phasepriorities.insert(i_phase_min+1, prio)\n\n # elif (n_diff_pos==0) & (n_diff_neg > 0):\n if (n_diff_pos < n_diff_neg) & (n_diff_neg <= 2):\n # only less restrictive\n print ' put new phase before the phase with minimum difference'\n phaseallocations.insert(i_phase_min, inds_con_route)\n phaseconflicts.insert(i_phase_min, slots)\n phaseblocks.insert(i_phase_min, slots_blocked)\n phasepriorities.insert(i_phase_min, prio)\n\n else:\n # mixed changes\n print ' append en entirely new phase'\n phaseconflicts.append(slots)\n phaseblocks.append(slots_blocked)\n phasepriorities.append(prio)",
"def output_schedule_registrar(cout, schedule_d, courses_to_mt_d):\n\n schedule_score.output_course_schedule(cout, make_sched_d_from_solution(schedule_d, courses_to_mt_d))",
"def print_schedule(self):\n for entry in self.entries:\n print(entry.get_entry_string())",
"def show_cron_tab(argv):\n package_name = argv[1]\n package = nvwa.fccpm.get_current_version(package_name)\n if package == None:\n pcolor.red(\"package %s not exists\" % package_name)\n package_version = package[1]\n if len(argv)>1:\n lines = nvwa.fccpm.cron_content_of_package(package_name,package_version)\n for line in lines:\n print line,\":\",lines[line]\n else:\n print \"You should specific the package name and version\"\n return",
"def job_changes(self):\n cols = \"{:25}{:12.1f}\"\n cols2 = \"{:25}{:12.1f}{:12.1f}\"\n\n lines = [\"Benefit from job creation: \" + self.plant.name + \"\\n\"]\n\n row7 = self.farmer.labor()[1]\n row1 = self.farmer.labor_cost()[1]\n row8 = self.reseller.driving_work()[1]\n row2 = self.reseller.driving_wages()[1]\n row11 = self.reseller.loading_work()[1]\n row12 = self.reseller.loading_wages()[1]\n row9 = self.cofiring_plant.cofuel_om_work()[1]\n row3 = self.cofiring_plant.cofuel_om_wages()[1]\n row6 = -self.coal_work_lost[1]\n row5 = -self.coal_wages_lost[1]\n row10 = self.labor[1]\n row4 = self.wages[1]\n\n display_as(row6, \"FTE\")\n display_as(row7, \"FTE\")\n display_as(row8, \"FTE\")\n display_as(row9, \"FTE\")\n display_as(row10, \"FTE\")\n display_as(row11, \"FTE\")\n\n lines.append(cols2.format(\"Biomass collection\", row7, row1))\n lines.append(cols2.format(\"Biomass transportation\", row8, row2))\n lines.append(cols2.format(\"Biomass loading\", row11, row12))\n lines.append(cols2.format(\"O&M\", row9, row3))\n lines.append(cols2.format(\"Mining\", row6, row5))\n lines.append(cols2.format(\"Total\", row10, row4))\n lines.append(\"\")\n lines.append(cols.format(\"Area collected\", self.supply_chain.area()))\n lines.append(\n cols.format(\"Collection radius\", self.supply_chain.collection_radius())\n )\n lines.append(\n cols.format(\"Maximum transport time\", self.reseller.max_trip_time())\n )\n lines.append(cols.format(\"Number of truck trips\", self.reseller.truck_trips[1]))\n lines.append(\"\")\n lines.append(\"Mining job lost from co-firing at \" + self.plant.name + \"\\n\")\n lines.append(cols.format(\"Coal saved\", self.coal_saved[1]))\n lines.append(\n cols.format(\"Productivity\", self.mining_parameter.productivity_underground)\n )\n lines.append(cols.format(\"Job lost\", self.coal_work_lost[1]))\n lines.append(cols.format(\"Job lost\", display_as(self.coal_work_lost[1], \"FTE\")))\n lines.append(\n cols.format(\"Wage\", display_as(self.mining_parameter.wage_mining, \"USD/hr\"))\n )\n lines.append(cols.format(\"Wage lost\", self.coal_wages_lost[1]))\n return \"\\n\".join(lines)",
"def schedule_text():",
"def display_schedule(schedule):\n\n def display_patches(patches_sequence, margin=8):\n \"\"\"\n Displays a sequence of MatPlotLib patches in a MatPlotLib window\n :param patches_sequence: the patches to display\n :param margin:\n :return:\n \"\"\"\n plt.rcdefaults()\n fig, ax = plt.subplots()\n for p in patches_sequence:\n ax.add_patch(p)\n max_machines = max(rect.get_y() for rect in patches_sequence) + 1\n max_jobs = max(rect.get_x() + margin for rect in patches_sequence)\n plt.axis([0, max_jobs, 0, max_machines])\n plt.show()\n\n patches = list()\n colors = [\"black\", \"darksalmon\", \"DarkKhaki\", \"DarkViolet\", \"red\", \"blue\", \"green\", \"cyan\", \"magenta\", \"yellow\",\n \"black\", \"IndianRed\", \"Pink\", \"Lavender\", \"DarkOrange\", \"GreenYellow\", \"Teal\", \"SteelBlue\",\n \"MidnightBlue\", \"Maroon\", \"DimGray\"]\n\n for i, prof in enumerate(schedule):\n prof = prof[\"Exams\"]\n for eleve, heure in prof.items():\n rekt = mpatches.Rectangle((heure, i), durations[i], 1, color=colors[eleve], ec=\"black\")\n patches.append(rekt)\n\n display_patches(patches)",
"def display_problems():\n\n res = choose_problems()\n\n cc_name1 = res[0][0]\n url_link1 = res[0][1]\n cc_name2 = res[1][0]\n url_link2 = res[1][1]\n cc_name3 = res[2][0]\n url_link3 = res[2][1]\n\n #TODO: implement datetime (i.e. \"11.07.21\")\n print('Weekly Wednesday Problems')\n print(f'Problem 1: {cc_name1} - {url_link1}')\n print(f'Problem 2: {cc_name2} - {url_link2}')\n print(f'Problem 3: {cc_name3} - {url_link3}')\n\n return cc_name1, url_link1, cc_name2, url_link2, cc_name3, url_link3",
"def _print_live_ranges(self, schedule):\n range_starts = {}\n range_ends = {}\n\n for num, node, impl in schedule:\n for output in node.outputs:\n range_starts[output] = num\n for input in node.inputs:\n if input in range_starts:\n range_ends[input] = num\n\n print(\"Live Ranges:\")\n for name in sorted(range_starts.keys()):\n print(\"{:^5}\".format(name), end=\"\")\n print()\n\n for num, _, _ in schedule:\n for name in sorted(range_starts.keys()):\n if num < range_starts[name]:\n print(\" \", end=\"\")\n elif num == range_starts[name]:\n print(\" s \", end=\"\")\n elif num < range_ends[name]:\n print(\" | \", end=\"\")\n elif num == range_ends[name]:\n print(\" e \", end=\"\")\n else:\n print(\" \", end=\"\")\n print()",
"def schedule(request):\r\n\r\n return render(request, 'editorial/schedule.html', {})",
"def get_conflicts(self):\n return []",
"def show_flight_schedule_of_employee(self, staff_ob):\n\n print(\"Continue to pick dates\")\n print(\"\\nB Back\\nC Continue\\n\")\n\n action_str = self.choose_action([\"b\", \"c\"])\n while action_str == False:\n action_str = self.choose_action([\"b\", \"c\"])\n\n if action_str == \"b\":\n return\n\n elif action_str == \"c\":\n \n valid_interval = False\n while valid_interval != True:\n date_from = self.get_date_from()\n while date_from == False:\n date_from = self.get_date_from()\n date_to = self.get_date_to()\n while date_to == False:\n date_to = self.get_date_to()\n valid_interval = self.get_valid_interval(date_from, date_to)\n\n flights_on_asked_time = self.llapi.get_employee_schedule_by_date(staff_ob, date_from, date_to)\n \n counter = 1\n if len(flights_on_asked_time) == 0:\n print(f\"\\n{staff_ob.name} has no flights on selected period\")\n\n else:\n print(self.LENGTH_STAR * \"*\")\n print(f\"{staff_ob.name.upper()}'S FLIGHT SCHEDULE\")\n \n for flight_ob in flights_on_asked_time:\n print(flight_ob.print_schedule(counter))\n counter += 1\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return",
"def check_conflicts(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\t# Now consider conflicts\n\t\tself.log('PHASE: conflicts', level=logging.DEBUG)\n\t\terrs = []\n\t\tself.pause_point('\\nNow checking for conflicts between modules', print_input=False, level=3)\n\t\tfor module_id in self.module_ids():\n\t\t\tif not cfg[module_id]['shutit.core.module.build']:\n\t\t\t\tcontinue\n\t\t\tconflicter = self.shutit_map[module_id]\n\t\t\tfor conflictee in conflicter.conflicts_with:\n\t\t\t\t# If the module id isn't there, there's no problem.\n\t\t\t\tconflictee_obj = self.shutit_map.get(conflictee)\n\t\t\t\tif conflictee_obj is None:\n\t\t\t\t\tcontinue\n\t\t\t\tif ((cfg[conflicter.module_id]['shutit.core.module.build'] or\n\t\t\t\t self.is_to_be_built_or_is_installed(conflicter)) and\n\t\t\t\t (cfg[conflictee_obj.module_id]['shutit.core.module.build'] or\n\t\t\t\t self.is_to_be_built_or_is_installed(conflictee_obj))):\n\t\t\t\t\terrs.append(('conflicter module id: ' + conflicter.module_id + ' is configured to be built or is already built but conflicts with module_id: ' + conflictee_obj.module_id,))\n\t\treturn errs",
"def edit_schedule():\n days_list = ['mon', 'tues', 'wed','thurs', 'fri', 'sat', 'sun']\n valid_grades= [\"A\", \"A-\",\"B+\",\"B\",\"B-\",\"C+\",\"C\",\"C-\",\"D\",\"F\",\"0\"]\n clear_screen()\n with open('full_courses.json', 'r') as f_file:\n full_courses = json.load(f_file)\n with open('current_courses.json', 'r') as s_file:\n current_courses = json.load(s_file)\n while True:\n try:\n print(\"====Course Editing Menu====\")\n menu = int(input(\"1.Edit Class Schedule\\n2.Close out current_classes\\n3.Add Class to current schedule\\n4.Remove courses\\n5.Exit\"))\n if menu == 1:\n edit_current_schedule(current_courses, full_courses)\n elif menu ==2:\n choice = input(\"Are you sure you want to close out your schedule? This will wipe out your current_courses file (Y/N) \")\n if choice.upper() == \"Y\":\n for val,val2 in current_courses.items():\n grade = input(\"Enter final letter grade for class: \"+val)\n full_courses[val][1] = grade\n full_courses[val][2] = \"C\"\n with open('full_courses.json', 'w') as fp:\n json.dump(full_courses, fp) \n fp = open('current_courses.json', 'w')\n fp.close()\n print(\"Current_courses file wiped\")\n continue\n elif choice.upper() == 'N':\n continue\n elif menu == 3:\n class_code = input(\"Input class code, i.e IT106 \")\n if class_code not in full_courses.keys():\n print(\"Class does not exist \")\n continue\n else:\n days = input(\"Using format mon, tues, wed, thurs, fri, sat, sun, input class days. Separate by comma\").split(',')\n for val in days:\n if val not in days_list:\n clear_screen()\n print(\"WARNING: Invalid option\")\n days = \"0\"\n continue\n \n start_time = int(input(\"Using format 2400, input start time: \"))\n end_time = int(input(\"Using format 2400, input end time: \"))\n grade = input(\"Input letter grade for this class. If no grade, input 0: \")\n if grade not in valid_grades:\n grade = \"0\"\n print(\"Invalid option\")\n continue\n else:\n current_courses[class_code.upper()] = [days,start_time,end_time,grade.upper()]\n with open('current_courses.json', 'w') as fp:\n json.dump(current_courses, fp)\n continue\n elif menu == 4:\n print(\"Here are the courses of your semester: \")\n for val in current_courses:\n print(val)\n course_code = input(\"Which class do you want to delete? \")\n if course_code not in current_courses.keys():\n print(\"Invalid Entry\")\n continue\n else:\n choice = input(\"Are you sure you want to delete: \" +course_code+\"?(Y/N) \")\n if choice.upper() == \"Y\":\n del current_courses[course_code]\n with open('current_courses.json', 'w')as fp:\n json.dump(current_courses, fp)\n continue\n else:\n continue\n elif menu == 5:\n break\n except ValueError:\n print(\"Invalid input, try again\")\n continue\n return 0",
"def main(to_be_scheduled):\n\n tasks = order_by_ftime(to_be_scheduled)\n print select_activity(tasks)",
"def resolve_conflicts(fpath, strat, force=False, verbose=True):\n import utool as ut\n import re\n top_pat = re.escape('<' * 7)\n mid_pat = re.escape('=' * 7)\n bot_pat = re.escape('>' * 7)\n flags = re.MULTILINE | re.DOTALL\n # Pattern to remove the top part\n theirs_pat1 = re.compile('^%s.*?%s.*?$\\n' % (top_pat, mid_pat), flags=flags)\n theirs_pat2 = re.compile('^%s.*?$\\n' % (bot_pat), flags=flags)\n # Pattern to remove the bottom part\n ours_pat1 = re.compile('^%s.*?%s.*?$\\n' % (mid_pat, bot_pat), flags=flags)\n ours_pat2 = re.compile('^%s.*?$\\n' % (top_pat), flags=flags)\n strat_pats = {\n 'theirs': [theirs_pat1, theirs_pat2],\n 'ours': [ours_pat1, ours_pat2],\n }\n\n text_in = ut.readfrom(fpath)\n text_out = text_in\n strat = 'ours'\n strat = 'theirs'\n for pat in strat_pats[strat]:\n text_out = pat.sub('', text_out)\n if verbose:\n ut.print_difftext(ut.difftext(text_in, text_out, num_context_lines=3))\n\n if force:\n ut.writeto(fpath, text_out)",
"def solve_problem(filename):\n if len(es.conflict_graph.edges()) == 0: # Checking if a problem is loaded\n print(\"No problem to solve!\") # If it is loaded then len must be > 0\n return()\n\n exams2 = nx.coloring.greedy_color(\n es.conflict_graph, strategy=nx.coloring.strategy_largest_first)\n\n es.optimize_exams = dict(exams2)\n # es.optimize_exams2 = dict(exams2)\n es.best = dict(exams2)\n\n \"\"\" EXPORT SOLUTIONS FILE\n ---------------------------------------------------------------------------\n 1. We itterate through the period_exams dictionary and export to the file\n two columns. The first column contains the subject and the other one\n contains the period that was assigned into.\n ---------------------------------------------------------------------------\n \"\"\"\n\n with open(filename[0:-4]+'.sol', 'w') as f:\n for k, v in exams2.items():\n f.write('{}\\t{}\\n'.format(k, v))\n\n \"\"\"\n In the next itteration of the exams2 dictionary we switch dictionary\n keys and now the period becomes they key and the lessons assigned to it\n the values. It is being saved in the period_exams dictionary.\n \"\"\"\n period_exams = {}\n for k, v in exams2.items():\n if v not in period_exams:\n period_exams[v] = [k]\n else:\n period_exams[v].append(k)\n cost(period_exams)"
] | [
"0.6083416",
"0.57799804",
"0.5715503",
"0.54279906",
"0.5343068",
"0.52080643",
"0.51846665",
"0.51749927",
"0.51412326",
"0.51344234",
"0.5128275",
"0.50786567",
"0.5051712",
"0.503836",
"0.50313216",
"0.49973714",
"0.49888486",
"0.49711156",
"0.49255145",
"0.49076065",
"0.48772293",
"0.48661393",
"0.48631072",
"0.48505124",
"0.47962883",
"0.4778302",
"0.4764537",
"0.4756254",
"0.47423515",
"0.47390997"
] | 0.66495013 | 0 |
Rebalance tree after node insertion or deletion. | def _rebalance(self, val=None):
node = self.search(val)
if node is None:
node = self.root
if self.root is None:
return
try:
self._rebalance(node.left.data)
except AttributeError:
pass
try:
self._rebalance(node.right.data)
except AttributeError:
pass
if self.balance(node) > 1:
if self.balance(node.right) == -1: # should this be > 0?
self.rotate(node.right.left.data)
self.rotate(node.right.data)
elif self.balance(node) < -1:
if self.balance(node.left) == 1:
self.rotate(node.left.right.data)
self.rotate(node.left.data)
# result = [x for x in self.breadth_first_traversal()]
# for num in result:
# print(num)
# print()
# print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _rebalance(self):\n prev_height = self._height # Save current height for comparison later\n self._height = self.height() # Update the height of the node\n if self._unbalanced():\n if self.full():\n if self._leftchild._height > self._rightchild._height:\n self._restructure_leftchild() # If left causes unbalance\n else:\n self._restructure_rightchild()\n elif self._leftchild and not self._rightchild:\n self._restructure_leftchild()\n else:\n self._restructure_rightchild()\n\n if self._parent:\n self._parent._rebalance() # Rebalance the node's parent\n elif self._height != prev_height and self._parent:\n self._parent._rebalance() # Rebalance parent if height changed",
"def rebalance(self, node):\n # perform rebalance until root\n while node != None:\n\n # compute node height\n node.updateHeight()\n\n # store node's parent\n parent = node.parent\n\n # node balance factor is 2: rotate it properly\n if node.balanceFactor == 2:\n\n # right child of node has positive BF: rotate node left\n if node.right.balanceFactor == 1:\n self.rotateLeft(node)\n\n # right child of node has negative BF: double rotate node left\n else:\n self.rotateRight(node.right)\n self.rotateLeft(node)\n\n # node balance factor is -2: rotate it properly\n elif node.balanceFactor == -2:\n\n # left child of node has negative BF: rotate node right\n if node.left.balanceFactor == -1:\n self.rotateRight(node)\n\n # left child of node has positive BF: double rotate node right\n else:\n self.rotateLeft(node.left)\n self.rotateRight(node)\n\n # ascend to parent and rebalance it\n node = parent",
"def re_balance(self):\n self.update_heights(recursive=False)\n self.update_balances(False)\n\n while self.balance < -1 or self.balance > 1:\n if self.balance > 1:\n if self.node.left.balance < 0:\n self.node.left.rotate_left()\n self.update_heights()\n self.update_balances()\n self.rotate_right()\n self.update_heights()\n self.update_balances()\n\n if self.balance < -1:\n if self.node.right.balance > 0:\n self.node.right.rotate_right()\n self.update_heights()\n self.update_balances()\n self.rotate_left()\n self.update_heights()\n self.update_balances()",
"def rebalance(self):\n\t\tif self.get_height(treenode=self.left) - self.get_height(treenode=self.right) > 1:\n\t\t\t# violation is in left subtree\n\t\t\tif self.get_height(treenode=self.left.left) > self.get_height(treenode=self.left.right):\n\t\t\t\t# we need to rotate right\n\t\t\t\tself = self.rotate_right()\n\t\t\telse:\n\t\t\t\t# we need to rotate left, then rotate right\n\t\t\t\tself = self.rotate_left_right()\n\t\telse:\n\t\t\t# violation is in right subtree\n\t\t\tif self.get_height(treenode=self.right.right) > self.get_height(treenode=self.right.left):\n\t\t\t\t# we need to rotate left\n\t\t\t\tself = self.rotate_left()\n\t\t\telse:\n\t\t\t\t# we need to rotate right, then rotate left\n\t\t\t\tself = self.rotate_right_left()",
"def rebalance(self, node):\n\n w = node\n\n # w.parent and w.parent.parent is for deletion rebalance exception for 2 nodes\n x, y, z = w, w.parent, w.parent and w.parent.parent\n\n # crawl to unbalanced node\n while z and abs(z.balance()) < 2:\n x, y, z = x.parent, y.parent, z.parent\n \n # not unbalanced \n if not z:\n return True\n \n if z.balance() < 0:\n if x < y:\n self.rotate_right(y)\n self.rotate_left(z)\n else:\n if x > y:\n self.rotate_left(y)\n self.rotate_right(z)",
"def rebalance(self):\r\n points = [p for p in self.tree]\r\n if points:\r\n self.tree = kd_factory.generate(points)\r\n self.paint()",
"def rebalance(self, root: AVLTreeNode) -> AVLTreeNode:\n if root.bf == 2:\n if root.left.bf < 0: # L-R\n root.left = self.rotate_left(root.left)\n return self.rotate_right(root)\n else: # L-L\n return self.rotate_right(root)\n elif root.bf == -2:\n if root.right.bf > 0: # R-L\n root.right = self.rotate_right(root.right)\n return self.rotate_left(root)\n else: # R-R\n return self.rotate_left(root)\n else:\n return root # no need to rebalance",
"def rebalance_children(self, node, current=None):\n print('Balancing children...')\n if current is not None:\n node = self.__getitem__(current)\n node['edges'] = sorted(node['edges'])\n self.rebalance_children(node, current=node)\n else:\n node = self.__getitem__(node)\n node['edges'] = sorted(node['edges'])",
"def test_tree_2_nodes_left_unbalanced(one_t):\n one_t.insert(9)\n assert one_t.balance() == 1",
"def test_tree_2_nodes_right_unbalanced(one_t):\n one_t.insert(11)\n assert one_t.balance() == -1",
"def delete(self, k):\n node = super(AVL, self).delete(k)\n ## node.parent is actually the old parent of the node,\n ## which is the first potentially out-of-balance node.\n self.rebalance(node.parent)",
"def _remove(self, node, root):\n if not root:\n return root\n # key is not found, do nothing\n if node.key < root.key:\n root.left = self._remove(node, root.left)\n # removed element from tree, rebalance it\n if root.right and root.right.height - root.left.height == 2:\n # tree is unbalanced, balance it\n right_height = root.right.right.height if root.right.right else 0\n left_height = root.right.left.height if root.right.left else 0\n if right_height >= left_height:\n root = self.rotate_with_left_child(root)\n else:\n root = self.double_with_right_child(root)\n elif node.key > root.key:\n root.right = self._remove(node, root.right)\n # removed element from tree, rebalance it\n if root.left and root.left.height - root.right.height == 2:\n # tree is unbalanced, balance it\n right_height = root.left.right.height if root.left.right else 0\n left_height = root.left.left.height if root.left.left else 0\n if left_height >= right_height:\n root = self.rotate_with_right_child(root)\n else:\n root = self.double_with_left_child(root)\n elif root.left:\n # node to be removed, pick largest one and move it to root\n max_node = self._find_max(root.left) # todo\n root.key = max_node.key\n root.value = max_node.value\n self._remove(max_node, root.left)\n # removed from left side, rebalance\n if root.right and root.right.height - root.left.height == 2:\n # tree in unbalanced, balance it\n right_height = root.right.right.height if root.right.right else 0\n left_height = root.right.left.height if root.right.left else 0\n if right_height >= left_height:\n root = self.rotate_with_left_child(root)\n else:\n root = self.double_with_right_child(root)\n else:\n root = root.left if root.left else root.right\n if root:\n root.height = max(root.left.height if root.left else -1, root.right.height if root.right else -1) + 1\n return root",
"def test_delete_right_leaf_no_rotation(bst_balanced):\n bst_balanced.delete(7)\n assert tuple(bst_balanced.in_order()) == (1, 2, 3, 5, 6)\n assert tuple(bst_balanced.breadth_first()) == (5, 2, 6, 1, 3)",
"def test_delete_left_leaf_no_rotation(bst_balanced):\n bst_balanced.delete(1)\n assert tuple(bst_balanced.in_order()) == (2, 3, 5, 6, 7)\n assert tuple(bst_balanced.breadth_first()) == (5, 2, 6, 3, 7)",
"def _correct_tree(self, current_element: Node):\r\n while True:\r\n if current_element == None or current_element.parent() == None:\r\n return None\r\n current_element = current_element.parent()\r\n b1 = current_element.balance()\r\n\r\n try:\r\n b2 = current_element.right_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.right_son().left_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if b1 in (-1, 0, 1):\r\n if current_element.parent() == None:\r\n break\r\n else:\r\n continue\r\n elif ((b1 == -2 and b2 == 1 and b3 == -1) or\r\n (b1 == -2 and b2 == 1 and b3 == 0 ) or\r\n (b1 == -2 and b2 == 1 and b3 == 1)):\r\n current_element.reset(*self._right_left(current_element))\r\n elif b1 == -2:\r\n current_element.reset(*self._right_right(current_element))\r\n break\r\n\r\n try:\r\n b2 = current_element.left_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.left_son().right_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if ((b1 == 2 and b2 == 2 and b3 == 2) or\r\n (b1 == -1 and b2 == -1 and b3 == -1) or\r\n (b1 == -1 and b2 == 0 and b3 == 1) or\r\n (b1 == 2 and b2 == -1 and b3 == 0)):\r\n current_element.reset(*self._left_right(current_element))\r\n elif b1 == 2:\r\n current_element.reset(*self._left_left(current_element))\r\n break\r\n \r\n if current_element.parent() == None:\r\n break",
"def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()",
"def test_rebalance(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n handler = self.new_handler()\n state = handler.get_gamestate()\n rebal_handler = AVLHandler.from_graph(state)\n ret = check_balance(rebal_handler.root)\n if ret and rebal_handler.balanced:\n successes += 1\n else:\n failures += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly rebalance deserialized tree! '\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated deserialization rebalancing in {successes} trees.{BColors.ENDC}\")",
"def test_delete_right_branch_no_rotation(bst_balanced):\n bst_balanced.delete(6)\n assert tuple(bst_balanced.in_order()) == (1, 2, 3, 5, 7)\n assert tuple(bst_balanced.breadth_first()) == (5, 2, 7, 1, 3)",
"def test_delete_left_branch_no_rotation(bst_balanced):\n bst_balanced.delete(2)\n assert tuple(bst_balanced.in_order()) == (1, 3, 5, 6, 7)\n assert tuple(bst_balanced.breadth_first()) == (5, 3, 6, 1, 7)",
"def update_balances(self, recursive=True):\n if self.node:\n if recursive:\n if self.node.left:\n self.node.left.update_balances()\n if self.node.right:\n self.node.right.update_balances()\n\n self.balance = self.node.left.height - self.node.right.height\n else:\n self.balance = 0",
"def test_delete_two_node_left_balanced_tree_01(bst_empty):\n bst_empty.insert(2)\n bst_empty.insert(1)\n bst_empty.delete(2)\n assert bst_empty._root.val == 1\n assert bst_empty._root.left is None",
"def test_deletion(basic_tree):\n tree = red_black_tree.RBTree()\n\n # 23, 4, 30, 11, 7, 34, 20, 24, 22, 15, 1\n for key, data in basic_tree:\n tree.insert(key=key, data=data)\n\n # No child\n tree.delete(15)\n assert [item for item in tree.inorder_traverse()] == [\n (1, \"1\"),\n (4, \"4\"),\n (7, \"7\"),\n (11, \"11\"),\n (20, \"20\"),\n (22, \"22\"),\n (23, \"23\"),\n (24, \"24\"),\n (30, \"30\"),\n (34, \"34\"),\n ]\n\n # One right child\n tree.delete(7)\n assert [item for item in tree.inorder_traverse()] == [\n (1, \"1\"),\n (4, \"4\"),\n (11, \"11\"),\n (20, \"20\"),\n (22, \"22\"),\n (23, \"23\"),\n (24, \"24\"),\n (30, \"30\"),\n (34, \"34\"),\n ]\n\n # One left child\n tree.insert(key=9, data=\"9\")\n tree.delete(11)\n assert [item for item in tree.inorder_traverse()] == [\n (1, \"1\"),\n (4, \"4\"),\n (9, \"9\"),\n (20, \"20\"),\n (22, \"22\"),\n (23, \"23\"),\n (24, \"24\"),\n (30, \"30\"),\n (34, \"34\"),\n ]\n\n # Two children\n tree.delete(23)\n assert [item for item in tree.inorder_traverse()] == [\n (1, \"1\"),\n (4, \"4\"),\n (9, \"9\"),\n (20, \"20\"),\n (22, \"22\"),\n (24, \"24\"),\n (30, \"30\"),\n (34, \"34\"),\n ]",
"def test_delete_two_node_right_balanced_tree_01(bst_empty):\n bst_empty.insert(1)\n bst_empty.insert(3)\n bst_empty.delete(1)\n assert bst_empty._root.val == 3\n assert bst_empty._root.left is None\n assert bst_empty._root.right is None",
"def test_deletion_no_child(basic_tree):\n tree = red_black_tree.RBTree()\n\n test_tree = [(23, \"23\"), (4, \"4\"), (30, \"30\"), (11, \"11\")]\n\n for key, data in test_tree:\n tree.insert(key=key, data=data)\n\n tree.delete(4)\n assert [item for item in tree.inorder_traverse()] == [\n (11, \"11\"),\n (23, \"23\"),\n (30, \"30\"),\n ]",
"def balance_if_needed(self,node):\n if node is None:\n return\n height = abs(self.height(node.left) - self.height(node.right))\n if height > 1:\n isRight = self.height(node.right) > self.height(node.left)\n biggerChild = node.right if isRight else node.left\n isChildRight = self.height(biggerChild.right) > self.height(biggerChild.left)\n if isRight != isChildRight:\n # Perform a rotation for child first\n newRoot = self.rotate(isChildRight, biggerChild)\n return self.rotate(isRight,node)\n\n else:\n # Already balanced\n return node",
"def __update_size_tree(self, node, delete=False):\r\n if not delete:\r\n node.size_tree += 1\r\n while node.parent:\r\n node = node.parent\r\n node.size_tree += 1\r\n else:\r\n node.size_tree -= 1\r\n while node.parent:\r\n node = node.parent\r\n node.size_tree -= 1",
"def check_balance(self):\n\t\tif math.abs(self.get_height(treenode=self.left) - self.get_height(treenode=self.right)) > 1:\n\t\t\tself.rebalance()",
"def test_deletion_two_children(basic_tree):\n tree = red_black_tree.RBTree()\n\n test_tree = [\n (23, \"23\"),\n (4, \"4\"),\n (30, \"30\"),\n (11, \"11\"),\n (7, \"7\"),\n (34, \"34\"),\n (9, \"9\"),\n (27, \"27\"),\n ]\n\n for key, data in test_tree:\n tree.insert(key=key, data=data)\n\n tree.delete(23)\n assert [item for item in tree.inorder_traverse()] == [\n (4, \"4\"),\n (7, \"7\"),\n (9, \"9\"),\n (11, \"11\"),\n (27, \"27\"),\n (30, \"30\"),\n (34, \"34\"),\n ]",
"def recoverTree(self, root: TreeNode) -> None:\n # base case\n if not root:\n return\n # a list to store node to be exchange\n change = []\n lst = self.inorder(root)\n for i in range(len(lst)-1):\n if lst[i+1].val < lst[i].val:\n # If we already found the first one i, the seconde one would be i+1\n # you can find that in the second example given by Leetcode\n if change:\n change.append(i+1)\n else:\n change.append(i)\n # exchange elements\n if len(change) == 1:\n lst[change[0]].val, lst[change[0]+1].val = lst[change[0]+1].val, lst[change[0]].val\n else:\n lst[change[0]].val, lst[change[1]].val = lst[change[1]].val, lst[change[0]].val",
"def recoverTree(self, root: TreeNode) -> None:\n self.inorder(root)\n self.first.val, self.second.val = self.second.val, self.first.val"
] | [
"0.75554925",
"0.7343056",
"0.7040202",
"0.7030208",
"0.6880421",
"0.68144614",
"0.6544322",
"0.6485529",
"0.6476007",
"0.6465227",
"0.6378443",
"0.6367502",
"0.6316389",
"0.6304218",
"0.6246138",
"0.623869",
"0.6230692",
"0.620086",
"0.61667925",
"0.6128051",
"0.6125295",
"0.61195636",
"0.6108",
"0.61047053",
"0.6099132",
"0.6098095",
"0.6085101",
"0.6082508",
"0.6080795",
"0.6039396"
] | 0.74186516 | 1 |
Helper to transfer the connections of node to nxt, replacing node in the tree. | def _replace_node(self, nxt, node):
nxt.left = node.left
nxt.right = node.right
nxt.parent = node.parent
if node is self.root:
self.root = nxt
if nxt.left:
nxt.left.parent = nxt
if nxt.right:
nxt.right.parent = nxt
if nxt.parent:
if nxt.parent.right is node:
nxt.parent.right = nxt
else:
nxt.parent.left = nxt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()",
"def switchNeighbors(head):\n if head == None or head.next == None or head.next.next == None:\n return\n\n rear = head\n mid = head.next\n pre = head.next.next\n\n while pre!= None:\n rear.next = pre\n mid.next = pre.next\n pre.next = mid\n temp = mid\n mid = pre\n pre = temp\n # go to the next pair of nodes\n if pre.next == None:\n return\n pre = pre.next.next\n mid = mid.next.next\n rear = rear.next.next",
"def _switch_nodes(self, walker):\n walker.prev.next = walker.next \n walker.next = walker.next.next \n walker.next.prev = walker\n walker.prev.next.prev = walker.prev\n walker.prev.next.next = walker \n walker.prev = walker.prev.next",
"def set_both_connections(self, new_node):\n distance_to_new = self.current_node.distance_between(new_node.location)\n self.current_node.set_adjacent_from_direction(distance_to_new, new_node)\n reverse_distance = new_node.distance_between(self.current_node.location)\n new_node.set_adjacent_from_direction(reverse_distance, self.current_node)",
"def insert_after(self,node,new_node):\n new_node.next = node.next\n node.next = new_node",
"def replace_node(old_node: Node, new_node: Node):\n assert old_node.graph is new_node.graph\n graph = old_node.graph\n # save output edges and reconnect them to new node\n for i in range(len(old_node.out_nodes())):\n graph.add_edge(new_node.id, old_node.out_node(i).id, **old_node.out_edge(i))\n # TODO Need to check if there are other users for this node\n graph.remove_node(old_node.id)",
"def swapPairs(head): #: ListNode) -> ListNode:\n # We could modify the node values, but instead let's rearrange the\n # connectivity of the nodes.\n \n # Null/terminal case\n if head == None:\n return None\n elif head.next == None:\n return head\n else:\n # perform the swap and recur:\n newhead = head.next\n head.next = swapPairs(head.next.next)\n newhead.next = head\n print(newhead.val)\n return newhead",
"def set_connection_between_nodes(self):\n\n for i, node in enumerate(self.list_empty_nodes):\n line = node.labyrinth_position[0]\n column = node.labyrinth_position[1]\n\n for j in range(i+1, len(self.list_empty_nodes)):\n line_j = self.list_empty_nodes[j].labyrinth_position[0]\n column_j = self.list_empty_nodes[j].labyrinth_position[1]\n \n if i != j and ((line == line_j and column == column_j - 1) \\\n or (line == line_j and column == column_j + 1) \\\n or (column == column_j and line == line_j - 1) \\\n or (column == column_j and line == line_j + 1)) \\\n and (not node in self.list_empty_nodes[j].connected_to) \\\n and (not self.list_empty_nodes[j] in node.connected_to):\n node.connected_to.append(self.list_empty_nodes[j])\n self.list_empty_nodes[j].connected_to.append(node)",
"def _autoplace(self, nodes):\n for node in nodes:\n node.autoplace()",
"def solution_alternate(head: ListNode) -> ListNode:\n DUMMY = ListNode(-1)\n curr = head\n while curr:\n if curr.next == DUMMY: # If the node is already pointing to the dummy, then it indicates start of cycle\n return curr\n\n next_copy = curr.next # Save link to the next node that is about to be re-pointed to the dummy\n curr.next = DUMMY # Re-point the next node to the dummy node\n curr = next_copy # Using the saved link, update curr\n return None",
"def cell_replace_node(self,c,n_old,n_new):\n for ni in range(self.max_sides):\n if self.cells['nodes'][c,ni] == n_old:\n self.cells['nodes'][c,ni] = n_new\n if self._node_to_cells is not None:\n self._node_to_cells[n_old].remove(c)\n self._node_to_cells[n_new].append(c)",
"def set_neighbours(self,knodes):\n self.neighbours = []\n for kn in knodes:\n # Make sure we don't have ourselves as a neighbour:\n if kn.ident == self.ident:\n continue\n # A neighbour has a path length 1:\n self.neighbours.append(\\\n kn._replace(path_len=1))\n\n\n # Update known nodes:\n self.add_known_nodes(0,self.neighbours)",
"def connect_backwards(self):\n\n for n in self.nodes:\n n.receives_from = []\n\n for n1 in self.nodes:\n for n2 in n1.sends_to:\n n2.receives_from.append(n1)",
"def insert_after(node, new_node):\n new_node.next = node.next\n node.next = new_node",
"def swapPairs(self, head):\r\n if not head or not head.next:\r\n return head\r\n \r\n # Dummy node\r\n dummy = ListNode(0)\r\n # Point the next of dummy node to the head\r\n dummy.next = head\r\n # This node will be used to traverse the list\r\n curr = dummy\r\n # Loop until we reach to the second last node\r\n while curr.next and curr.next.next:\r\n # First node of the pair\r\n first = curr.next\r\n # Second node of the pair\r\n second = curr.next.next\r\n # Point the next of first node to the node after second node\r\n first.next = second.next\r\n # Now the current node's next should be the second node\r\n curr.next = second\r\n # Linking the original second node to the first node\r\n curr.next.next = first\r\n # Move the pointer two nodes ahead\r\n curr = curr.next.next\r\n return dummy.next",
"def replace_node(self, network_node: Node, node: Node) -> None:\n index = self.network.index(network_node)\n self.network[index] = node",
"def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)",
"def rewire_pointers(linked_list):\n before = None\n node = linked_list\n after = node['next']\n if after != None:\n start = after\n\n while after != None:\n if before != None:\n before['next'] = after\n node['next'] = after['next']\n after['next'] = node\n\n previous = node\n node = node['next']\n if node['next']:\n after = node['next']['next']\n else:\n after = None\n\n return start",
"def rearange_nodes_links_old(idx, nodes, links):\n nodes = nodes[idx,:]\n for i in range(0, len(links)):\n links[i, 0] = idx.index(links[i, 0])\n links[i, 1] = idx.index(links[i, 1])\n for i in range (0, len(links)):\n links[i] = sorted(links[i])\n \n # Sort links according to the source.\n links = links[links[:,0].argsort()]\n idx = update_idx_links(links[:,0], links[:,1])\n links = links[idx]\n return nodes, links",
"def set_next(self, node):\r\n self.__next = node",
"def flip_two_ptr(lnk):\n def helper(lnk, prev):\n if lnk is not Link.empty and lnk.rest is not Link.empty:\n if prev:\n prev.rest = lnk.rest\n temp = lnk.rest\n lnk.rest, temp.rest = lnk.rest.rest, lnk\n helper(lnk.rest, lnk)\n helper(lnk, None)",
"def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')",
"def deleter_after(node):\n node.next = node.next.next",
"def postprocessing_steiner(self, extension: bool, solution_steiner: nx.Graph, link: dict):\n sol_tree = solution_steiner\n if not extension:\n return sol_tree\n connections_to_old = [n for n in sol_tree.neighbors('OldNetworkNode')]\n sol_tree.remove_node('OldNetworkNode')\n for node in connections_to_old:\n sol_tree.add_edge(node, link[node][0])\n old_Graph = nx.Graph(self.old_network_graph)\n sol_tree.add_edges_from(old_Graph.edges)\n return sol_tree",
"def reconnect_node(self, node_id: int,\n effectiveness: float = 0.95) -> None:\n node = self.g_.nodes[node_id]\n to_add = []\n leave = []\n for uv in node[\"_edges\"]:\n if self._random_state.binomial(1, effectiveness):\n to_add.append(uv)\n else:\n leave.append(uv)\n\n self.g_.add_edges_from(to_add)\n node[\"_edges\"] = leave\n if len(node[\"_edges\"]) == 0:\n node[\"isolated\"] = False",
"def remap_nodes(self, new_node_mapping):\n # because all nodes are SchemaNodeIDs (i.e. objects), we only need to reassign nodes one way\n # changes propagate to chains, chain root_nodes, and parents automatically\n for chain in self.chains:\n for edge in chain:\n head, tail = edge\n if head in new_node_mapping.keys():\n head.value = new_node_mapping[head]\n if tail in new_node_mapping.keys():\n tail.value = new_node_mapping[tail]",
"def _update_connections(self, oldVar, newVar):\n vars = [v for v in self.model.get_all_variables() if v.get_source_variable(True) is oldVar]\n # Remove old connections, including interfaces and types so creating the new connection works\n for v in vars:\n self.remove_connections(v)\n self.del_attr(v, u'public_interface')\n self.del_attr(v, u'private_interface')\n v.clear_dependency_info()\n # Create new connections\n for v in vars:\n self.connect_variables(newVar, v)",
"def set_next(self, node):\n self.__next = node",
"def insert(self, index, item):\n \n # Create a new node\n new_code = Node(item)\n \n # Go to node (index - 1)\n curr = self.first\n for i in range(index - 1):\n curr = curr.next\n \n old_next_node = curr.next \n # Update curr's next attribute\n curr.next = new_node\n \n # Update new node's next attribute\n new_node.next = old_next_node",
"def set_next(node, value):\n node['next'] = value"
] | [
"0.6306071",
"0.61568826",
"0.61550254",
"0.6123998",
"0.58381",
"0.5836165",
"0.5787948",
"0.5719936",
"0.5719666",
"0.5714374",
"0.55667543",
"0.55650383",
"0.5557494",
"0.55529934",
"0.55362725",
"0.55246353",
"0.5511303",
"0.5506245",
"0.5463353",
"0.5454586",
"0.5420207",
"0.541893",
"0.5411535",
"0.54058474",
"0.5405847",
"0.5380736",
"0.5368438",
"0.5348512",
"0.534837",
"0.53312653"
] | 0.68727446 | 0 |
Redirect from node1 to node2. | def _redirect(self, node1, node2):
if node1.parent.right is node1:
node1.parent.right = node2
else:
node1.parent.left = node2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect_both(node1, node2, weight):\n connect_one_way(node1, node2, weight)\n connect_one_way(node2, node1, weight)",
"def move_to_node(self,node):\n path=self.get_path(self.current_node,node)\n self.move_to(path)",
"def connect_one_way(node1, node2, weight):\n node1.add_or_update_neighbour(node2, weight)",
"def set_both_connections(self, new_node):\n distance_to_new = self.current_node.distance_between(new_node.location)\n self.current_node.set_adjacent_from_direction(distance_to_new, new_node)\n reverse_distance = new_node.distance_between(self.current_node.location)\n new_node.set_adjacent_from_direction(reverse_distance, self.current_node)",
"def __rshift__(self, other):\n # softly check if the \"other\" is a Node with inputs\n if hasattr(other, \"inputs\"):\n for iname, iplug in other.inputs.items():\n if iname == self.name:\n target = iplug\n else:\n target = other\n self.connect(target)",
"def connect(self, node1, node2):\n self.neighbour1 = node1\n self.neighbour2 = node2",
"def move_ant(self, node_to_visit):\n self.actual_node = node_to_visit\n self.remember_visited_node(node_to_visit)",
"def linkTo( self, node2, port1=None, port2=None ):\n node1 = self\n if port1 is None:\n port1 = node1.newPort()\n if port2 is None:\n port2 = node2.newPort()\n intf1 = node1.intfName( port1 )\n intf2 = node2.intfName( port2 )\n makeIntfPair( intf1, intf2 )\n node1.addIntf( intf1, port1 )\n node2.addIntf( intf2, port2 )\n node1.registerIntf( intf1, node2, intf2 )\n node2.registerIntf( intf2, node1, intf1 )\n return intf1, intf2",
"def p2pconnect():\n \n node1_address = request.form[\"node1_address\"]\n node2_address = request.form[\"node2_address\"]\n node2_address = \"http://\"+node2_address\n url = 'http://'+node1_address +'/register_with'\n header = {'Content-Type': 'application/json'}\n data = '{\"node_address\": \"%s\"}'%(node2_address)\n\n response = requests.post(url,headers=header,data=data)\n\n print(response.status_code)\n print(response.content)\n # if response.status_code == 200:\n # flash('Two nodes are connected')\n # else:\n # flash('Please check node address')\n \n return redirect('/')",
"def replace_node(old_node: Node, new_node: Node):\n assert old_node.graph is new_node.graph\n graph = old_node.graph\n # save output edges and reconnect them to new node\n for i in range(len(old_node.out_nodes())):\n graph.add_edge(new_node.id, old_node.out_node(i).id, **old_node.out_edge(i))\n # TODO Need to check if there are other users for this node\n graph.remove_node(old_node.id)",
"def forward_pass(self):",
"def other(self, node):\n if node == self.__node_a:\n return self.__node_b\n elif node == self.__node_b:\n return self.__node_a",
"def connect_node():\n node_address = request.form[\"nodeadress_port\"]\n # post_object = {\n # 'Node address': node_address\n # }\n global CONNECTED_NODE_ADDRESS\n CONNECTED_NODE_ADDRESS = \"http://\"+node_address\n print(connect_node)\n \n return redirect('/transaction')",
"def direct_edge(self, node_name1, node_name2):\n for t in range(self._start, self._end):\n self._edges[(node_name1, t)].add((node_name2, t + 1))",
"def _switch_nodes(self, walker):\n walker.prev.next = walker.next \n walker.next = walker.next.next \n walker.next.prev = walker\n walker.prev.next.prev = walker.prev\n walker.prev.next.next = walker \n walker.prev = walker.prev.next",
"def copy_links_after_socket_swap(socket1, socket2, was_socket1_enabled):\n node_tree = socket1.id_data\n if was_socket1_enabled == socket1.enabled:\n # Nothing changed\n pass\n elif was_socket1_enabled and not socket1.enabled:\n # socket1 was disabled while socket2 was enabled\n for link in socket1.links:\n node_tree.links.new(socket2, link.to_socket)\n else:\n # socket2 was disabled while socket1 was enabled\n for link in socket2.links:\n node_tree.links.new(socket1, link.to_socket)",
"def __remap(self, tree1: int, tree2: int) -> None:\n # if the branch was deleted in the same iteration skip\n if self.__trees.get(tree1) is None:\n return\n\n # re map all mazes to tree2\n self.__mappings[tree1] = tree2\n for key, value in self.__mappings.items():\n if value == tree1:\n self.__mappings[key] = tree2\n self.__trees[tree2] += self.__trees[tree1]\n del self.__trees[tree1]",
"def redirect(self, location):\n self.redirect_see_other(location)",
"def reroute_input(ts0, ts1, op1):\n for i, t in enumerate(op1.inputs):\n if t is ts1:\n op1._update_input(i, ts0) # pylint: disable=protected-access",
"def __swap_kv(self, node1, node2):\r\n node1.key, node2.key = node2.key, node1.key\r\n node1.value, node2.value = node2.value, node1.value",
"def _swap(self, node1, node2):\n arr = self._array\n arr[node1._index], arr[node2._index] = arr[node2._index], \\\n arr[node1._index]\n # Swap indices stored in nodes as well\n node1._index, node2._index = node2._index, node1._index",
"def add_bidirectional_edges( self, node1, node2, distance):\n n1, n2 = self.__create_node(node1.lower(), node2.lower())\n\n #Set neighbour between edges\n #Check if 2 edges oredi a neihbor exist\n if n1 in n2.getNeighbors() and n2 in n1.getNeighbors():\n print(n1.getId()+\" and \"+n2.getId()+\" already a neighbour.\")\n else:\n n1.setNeighbor(n2,distance) # n1 ----> n2\n n2.setNeighbor(n1,distance) # n2 ----> n1",
"def transition2(src_video1, src_video2, dst_file_name):\n # TODO - Add your code here\n pass",
"def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1",
"def connect_walker_ports(self, port1: Port, port2: Port) -> None:\n self.port_end.req_ports = port1\n self.port_end.req_ports = port2",
"def _add(self, node1, node2):\r\n\r\n self._graph[node1].add(node2)",
"def node_command(ctx, old, new):\n try:\n with ctx.obj[\"reader\"] as reader, ctx.obj[\"writer\"] as writer:\n writer.copy_schema(reader)\n writer.prepare_encode_cache()\n writer.rename_node(old.encode(\"utf-8\"), new.encode(\"utf-8\"))\n writer.write(reader)\n except Exception:\n click.secho(\"Failed!\", fg=\"red\", bold=True, err=True)\n raise\n else:\n click.secho(\"Done!\", fg=\"green\", err=True, bold=True)",
"def match_nodes(source_node, target_node):\n\n node_position = cmds.xform(source_node, q=True, ws=True, t=True)\n node_rotation = cmds.xform(source_node, q=True, ws=True, ro=True)\n cmds.xform(target_node, ws=True, t=node_position)\n cmds.xform(target_node, ws=True, ro=node_rotation)",
"def add_connection(self, n1: Node, n2: Node):\n if n2.node_id in n1.get_connections_ids() or n1.node_id in n2.get_connections_ids():\n return\n n1.add_child(n2)\n n2.add_child(n1)",
"def add_node_pairs(self, node_a,node_b):\r\n \r\n if node_b is not None : \r\n self.nodes[node_a].append(node_b)"
] | [
"0.60445195",
"0.58684015",
"0.58391327",
"0.5750028",
"0.5736963",
"0.57198584",
"0.56048125",
"0.55863553",
"0.5558446",
"0.54873115",
"0.5465547",
"0.54571635",
"0.5416378",
"0.54136115",
"0.54048353",
"0.5389015",
"0.53774303",
"0.5367003",
"0.5347666",
"0.5318217",
"0.52775204",
"0.527742",
"0.5256023",
"0.5250014",
"0.52214015",
"0.5212642",
"0.52068543",
"0.5200314",
"0.51736724",
"0.5153467"
] | 0.7867398 | 0 |
Add newlines to a logger object | def add_newlines(self: logging.Logger, num_newlines=1) -> None:
self.removeHandler(self.base_handler)
self.addHandler(self.newline_handler)
    # Emit the requested number of blank lines using the bare (empty-format) handler
for _ in range(num_newlines):
self.info('')
self.removeHandler(self.newline_handler)
self.addHandler(self.base_handler) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_line_in_log():\n logging.info(' ' + '-' * 60 + '\\n')",
"def logprint(self, message):\n print message\n self.log += message+\"\\n\"",
"def log(self, message, *args, newline):\n\n self.current_character.log(message, *args, newline=newline)",
"def log_msg(self, msg):\n self.log.append(msg + \"\\n\")\n self.log.setCaretPosition(self.log.getDocument().getLength())",
"def log(self, string, newline=True):\n if self.on:\n with open(self.log_path, 'a') as logf:\n logf.write(string)\n if newline: \n logf.write('\\n')\n\n sys.stdout.write(string)\n if newline: \n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def log(self, line_text, no_enter=False, overwrite_last=False):\n if self.logging:\n with open(os.path.join(self.network_path,'activity.log'), 'a+') as f:\n f.write(line_text + os.linesep)\n if overwrite_last:\n line_text = \"\\r\" + line_text\n if no_enter:\n print(line_text, end=\"\", flush=True)\n else:\n print(line_text)",
"def logline(msg):\n print msg",
"def newline(self) -> None:\n self._lines.append(\"\")",
"def append_line_to_log(line = '\\n'):\n with open(logPath, 'a') as f:\n f.write(line + '\\n')",
"def log(self, message, indent_amount=0):\n indent = \" \" * indent_amount\n text = \"{indent}{text}\\n\".format(indent=indent, text=message)\n sys.stdout.write(text)",
"def _newline(self):\n if prettyprint:\n return '\\n' + self._indent_spaces()\n else:\n return ''",
"def _Log(self, logf, s):\r\n if logf:\r\n logf(s + '\\n')",
"def writeLog(msg, addEndline=True):\n\n with open(LOG_FILE, \"a\") as f:\n f.write(\"\\n\")\n f.write(msg)\n \n if addEndline == True:\n f.write(\"\\n---------------------------------------------\\n\")",
"def log(message):\n if type(message) is not str:\n message = str(message)\n print(PREFIX + re.sub('\\n', '\\n' + PREFIX, message))",
"def append_log_message(self, text):\n self._new_logs.append(text)",
"def _log_append(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart,end = p.get_bounds()\n\t\tp.insert(end, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_end_iter(), 0.0)",
"def log(self, msg=\"\"):\n if len(msg):\n msg = \"[%.03fs] %s\" % (time.time()-self.timeStart, msg)\n print(msg)\n self.logLines.append(msg)",
"def log(obj):\n with open(\"logfile\", 'a') as file:\n file.write('\\n{}'.format(obj))",
"def _newLine(self, usePos = True):",
"def log(self, data):\n if isinstance(data, basestring):\n self._write(data + '\\n', 'a')",
"def write_log(self, msg, level = \"DEBUG\"):\r\n if len(self.parent)> 13:\r\n spacer = \"\\t\"\r\n elif len(self.parent) < 8:\r\n spacer = \"\\t\\t\\t\"\r\n else:\r\n spacer = \"\\t\\t\"\r\n \r\n log = level + \"\\t\" + self.parent +spacer +str(msg)\r\n print(log)",
"def log(self, text, stdout=False, indent=''):\n\n if stdout:\n print(text)\n\n for line in text.split(\"\\n\"):\n while len(self._log) > self.log_lines:\n self._log.pop(0)\n self._log.append(urwid.Text(indent + line))\n\n self._log.set_focus(len(self._log)-1)\n\n #if self._mainloop is not None:\n #self._mainloop.draw_screen()",
"def add_line(self, line):\n self._set_instance_data('body', self.indent + ' ' * 4 + line)",
"def log(self, message):\n self._log += \"%s\\n\" % message\n print message",
"def write_line(self,msg):\n self.body += str(msg)\n self.body.br()",
"def log_line(self, line):\n print '%s%s' % (LOG_LINE_PREFIX, line)",
"def log(self, message, *,\n color: List[ANSICode] or ANSICode or None = None,\n new_line=True):\n\n message = self.ansi_code(message, color)\n\n self.current_line.append(message)\n\n if new_line:\n end_char = '\\n'\n else:\n end_char = ''\n\n text = \"\".join(self.current_line)\n lim = self.__count_text(self.over_write_line,\n self.__count_text(text))\n\n if lim < len(self.over_write_line):\n text += self.over_write_line[lim:]\n self.over_write_line = text\n\n print(\"\\r\" + text, end=end_char, flush=True)\n if new_line:\n self.current_line = []\n self.over_write_line = \"\"",
"def log_no_newline(self, msg):\n self.print2file(self.logfile, False, False, msg)",
"def extra_log(self, string):\n if hasattr(self.parent, \"log\"):\n self.parent.log += \"\\r\\n[%s] \" % time.process_time()\n self.parent.log += string + \"\\r\\n\"",
"def add_row_to_logfile(output_dir, *args):\n with open(os.path.join(output_dir, LOGFILE), 'a') as f:\n args_as_strings = map(str, args)\n f.write('\\n' + ', '.join(args_as_strings))"
] | [
"0.6630974",
"0.62861097",
"0.6200761",
"0.61713684",
"0.6058086",
"0.60562426",
"0.60483265",
"0.5990074",
"0.5972366",
"0.5953456",
"0.59534085",
"0.5949501",
"0.5920461",
"0.5851288",
"0.5821413",
"0.5816214",
"0.5804447",
"0.57826835",
"0.5742061",
"0.5737293",
"0.5726202",
"0.5705706",
"0.5674471",
"0.56453294",
"0.5639815",
"0.5629389",
"0.5625185",
"0.5624588",
"0.5610381",
"0.5601425"
] | 0.70678204 | 0 |
Creates the logger for the current application | def create_logger(app_name: str) -> logging.Logger:
if not os.path.exists(os.path.join(os.getcwd(), 'logs')):
os.mkdir(os.path.join(os.getcwd(), 'logs'))
app_logfile = os.path.join(os.getcwd(), 'logs', f'{app_name}.log')
logger = logging.getLogger(f"{app_name}-logger")
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(filename=app_logfile, mode='a', maxBytes=20000, backupCount=10)
handler.setLevel(logging.DEBUG)
# Set the formatter
formatter = logging.Formatter("%(asctime)s | %(levelname)s | %(message)s", "%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
logger.addHandler(handler)
# Set it as the base handler
logger.base_handler = handler
# Also add a newline handler to switch to later
newline_handler = logging.FileHandler(filename=app_logfile, mode='a')
newline_handler.setLevel(logging.DEBUG)
newline_handler.setFormatter(logging.Formatter(fmt='')) # Must be an empty format
logger.newline_handler = newline_handler
# Also add the provision for a newline handler using a custom method attribute
logger.newline = types.MethodType(add_newlines, logger)
# Also add a StreamHandler for printing to stderr
console_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s | %(levelname)s | %(message)s", "%Y-%m-%d %H:%M:%S")
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
return logger | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log_setup(self):\n # Logger initialisation\n logger = logging.getLogger(self.app_name)\n logger.setLevel(logging.DEBUG)\n\n # Creating console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Creating formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n\n # Adding formatter to ch\n ch.setFormatter(formatter)\n\n # Adding ch to logger\n logger.addHandler(ch)\n\n # Setting the Logger Level (INFO)\n logger.setLevel(logging.INFO)\n\n return logger",
"def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n \r\n return logger",
"def create_logger():\n logging.basicConfig(level = logging.INFO, filename='logging', filemode='w')\n logger = logging.getLogger(\" \")\n admin_handler = logging.FileHandler('logging')\n admin_handler.setLevel(logging.INFO)\n logger.addHandler(admin_handler)\n logger.warning(f'{admin_handler} created a new logger')\n return logger",
"def initlogger(cls, app):\n global mylogapp\n global mylogger\n if uselogger == 0:\n return None\n # pylint: disable=E0601\n if mylogger == 0:\n name = \"Log_\" + time.strftime(\"%Y%m%d_%H_%M_%S\")\n logging.basicConfig(level=logging.DEBUG,\n format='%(name)-12s:%(asctime)s ' +\n '%(levelname)-8s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename=name,\n filemode='w')\n mylogger = 1\n if app not in mylogapp:\n logger = logging.getLogger(app)\n mylogapp[app] = logger\n return mylogapp[app]",
"def create_logger():\n global logger\n\n formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s')\n handler = TimedRotatingFileHandler(log_file, when=\"midnight\", interval=1)\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n handler.suffix = \"%Y-%m-%d\"\n logger = logging.getLogger(\"sacplus\")\n logger.setLevel(log_level)\n logger.addHandler(handler)",
"def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)",
"def build_logger(self):\n pass",
"def logger(self) -> logging.Logger:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(name)-15s - [%(levelname)-10s] %(message)s\"\n )\n return logging.getLogger(os.path.basename(__file__))",
"def config_logger( self, ):\r\n logger = logging.getLogger( self.logger_id )\r\n\r\n logger.handlers = []\r\n logger.setLevel( self.parameters.logging_level ) # DEBUG , INFO WARNING ERROR CRITICAL\r\n\r\n # create the logging file handler.....\r\n fh = logging.FileHandler( self.parameters.pylogging_fn )\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n fh.setFormatter( formatter )\r\n logger.addHandler( fh )\r\n\r\n msg = \"Done config_logger\"\r\n print( msg )\r\n logger.info( msg ) # .debug .info .warn .error\r\n AppGlobal.set_logger( logger )\r\n\r\n return logger",
"def setup_applevel_logger(logger_name = APP_LOGGER_NAME, file_name=None):\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(formatter)\n logger.handlers.clear()\n logger.addHandler(stream_handler)\n if file_name:\n if not os.path.exists(\"logs\"):\n os.makedirs(\"logs\")\n file_handler = logging.FileHandler(f\"./logs/{file_name}\", encoding=\"utf-8\")\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger",
"def _init_logger(self):\n # Create log directory, if it doesn't already exist.\n self._create_directory(directory=self._log_directory)\n log_filename = \"{0}/{1}.log\".format(self._log_directory, self._program)\n\n # Add the date to the log file names.\n logging.basicConfig(\n filename=log_filename,\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s|%(name)s|%(levelname)-5s| %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p')\n\n # define a Handler which writes LOG messages or higher to the sys.stderr\n console = logging.StreamHandler()\n #\n # Note: Anything above the logging level is displayed to stdout.\n #\n # Level Numeric value\n # CRITICAL\t50\n # ERROR \t40\n # WARNING\t30\n # LOG 25 (our log level)\n # INFO\t 20\n # DEBUG \t10\n # NOTSET\t0\n #\n # Add a logging level to always display to stderr.\n logging.addLevelName(self._LOG_LEVEL, self._LOG_NAME)\n if self._debug:\n console.setLevel(logging.DEBUG)\n else:\n console.setLevel(self._LOG_LEVEL)\n # Set a format which is simpler for console use.\n formatter = logging.Formatter('%(name)s|%(levelname)-5s| %(message)s')\n console.setFormatter(formatter)\n # Add the handler to the root logger.\n logging.getLogger('').addHandler(console)\n self._logger = logging.getLogger()",
"def start_logger(app_name, calling_function):\n # Create logs directory if not present\n Path('logs').mkdir(parents=True, exist_ok=True)\n\n # Start logger\n logger.start_logger(app_name)\n\n module_logger = logging.getLogger('{app_name}.{calling_function}'.\\\n format(app_name=app_name, calling_function=calling_function))\n\n return module_logger",
"def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!",
"def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())",
"def start_logging() -> logging.RootLogger:\r\n # Defines the format of the logged messages.\r\n log_format = \"%(levelname)s | %(asctime)s | %(message)s\"\r\n # Configures logging, logs all messages >= 20 (INFO).\r\n logging.basicConfig(filename=app.config[\"log_file_name\"],\r\n format=log_format,\r\n level=logging.INFO)\r\n # Handle on the logger.\r\n logger = logging.getLogger()\r\n return logger",
"def _init_logger(self):\n self.logger = logging.getLogger('WSClientAPILogger')\n self.logger.setLevel(logging.DEBUG)\n self.logger_handler = logging.FileHandler(self.__class__.__name__ + '.log')\n self.logger_handler.setLevel(logging.DEBUG)\n self.logger_formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d-%m %H:%M:%S')\n self.logger_handler.setFormatter(self.logger_formatter)\n self.logger.addHandler(self.logger_handler)",
"def setup_logger():\n\n global _logger\n global _has_logbook\n\n if _has_logbook:\n _logger = Logger('UoM_WIFI')\n try:\n log_path = join(sys.argv[1], '%s.log' % USERNAME)\n except IndexError:\n log_path = join(split(abspath(__file__))[0], '%s.log' % USERNAME)\n\n # because the log file is owned by root, if this program is ran by a\n # regular user, we need to prevent it from crashing by writing to a file\n # owned by root\n try:\n # create the handler\n log_handler = RotatingFileHandler(log_path)\n\n # push the context object to the application stack\n log_handler.push_application()\n except IOError:\n _has_logbook = False",
"def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log",
"def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)",
"def logger():\n return logging.getLogger(__name__)",
"def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n self.app.logger.setLevel(logging.DEBUG)\n elif self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % self.app.config['LOG'])",
"def init_logger(app:Flask) -> None:\n with app.app_context():\n file = app.config[\"LOG_FILE\"]\n level = app.config[\"LOG_LEVEL\"]\n maxBytes = app.config[\"MAXBYTES\"]\n backupCount = app.config[\"BACKUPCOUNT\"]\n\n dictConfig({\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"incremental\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s\"\n }\n },\n \"handlers\": {\n \"file\": {\n \"level\": level,\n \"filename\": file,\n \"formatter\": \"default\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"maxBytes\": maxBytes,\n \"backupCount\": backupCount\n },\n \"console\": {\n \"level\": level,\n \"formatter\": \"default\",\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://sys.stdout\"\n }\n },\n \"loggers\": {\n \"werkzeug\": {\n \"handlers\": [\"file\"],\n \"level\": level,\n \"propagate\": True\n },\n \"sms\": {\n \"handlers\": [\"file\", \"console\"],\n \"level\": level,\n \"propagate\": True\n }\n }\n })",
"def get_logger(name):\n filename = \"file_sync.log\"\n _create_log_dir()\n filepath = os.path.join(FLASK_APP.config[\"LOG_DIR\"], filename)\n logger = logging.getLogger(name)\n handler = TimedRotatingFileHandler(filepath, when=\"midnight\")\n logger.setLevel(LOG_LEVELS[FLASK_APP.config[\"LOG_LEVEL\"]])\n handler.setLevel(LOG_LEVELS[FLASK_APP.config[\"LOG_LEVEL\"]])\n log_format = (\"%(asctime)s %(levelname)s %(pathname)s\"\n \":%(funcName)s: %(lineno)d - %(message)s\")\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger",
"def setup_logger():\n LOG_DIR = unicode( os.environ.get(u'usep_gh__LOG_DIR') )\n LOG_LEVEL = unicode( os.environ.get(u'usep_gh__LOG_LEVEL') )\n filename = u'%s/usep_gh_handler.log' % LOG_DIR\n formatter = logging.Formatter( u'[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s' )\n logger = logging.getLogger( __name__ )\n # logger = logging.getLogger( u'usep_gh_handler' )\n level_dict = { u'debug': logging.DEBUG, u'info':logging.INFO }\n logger.setLevel( level_dict[LOG_LEVEL] )\n file_handler = logging.FileHandler( filename )\n file_handler.setFormatter( formatter )\n logger.addHandler( file_handler )\n logger.debug( u'in utils.log_helper.setup_logger(); log initialized at %s' % unicode(datetime.datetime.now()) )\n return logger",
"def log():\n return logging.getLogger(__name__)",
"def create_logger(log_dir=None):\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir)\n log_format = '%(asctime)s %(process)d [%(levelname)s] %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_format)\n logger = logging.getLogger('es_on_gke')\n if log_dir:\n log_file = os.path.join(log_dir, 'log.txt')\n file_hdl = logging.FileHandler(log_file)\n formatter = logging.Formatter(fmt=log_format)\n file_hdl.setFormatter(formatter)\n logger.addHandler(file_hdl)\n return logger",
"def _configure_logging(self):\n logger = logging.getLogger('BatchAppsBlender')\n\n console_format = logging.Formatter(\n \"BatchApps: [%(levelname)s] %(message)s\")\n\n file_format = logging.Formatter(\n \"%(asctime)-15s [%(levelname)s] %(module)s: %(message)s\")\n\n console_logging = logging.StreamHandler()\n console_logging.setFormatter(console_format)\n logger.addHandler(console_logging)\n\n logfile = os.path.join(self.props.data_dir, \"batch_apps.log\")\n\n file_logging = logging.FileHandler(logfile)\n file_logging.setFormatter(file_format)\n logger.addHandler(file_logging)\n\n logger.setLevel(int(self.props.log_level))\n return logger",
"def make_logger(model_dir: str, log_file: str = \"train.log\") -> Logger:\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n fh = logging.FileHandler(\"{}/{}\".format(model_dir, log_file))\n fh.setLevel(level=logging.DEBUG)\n logger.addHandler(fh)\n formatter = logging.Formatter(\"%(asctime)s %(message)s\")\n fh.setFormatter(formatter)\n if platform == \"linux\":\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(sh)\n logger.info(\"Hello! This is Joey-NMT.\")\n return logger",
"def create_logger():\n log = logging.getLogger() # root logger\n log.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n cformat = '%(log_color)s' + format_str\n colors = {'DEBUG': 'reset',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(cformat, date_format,\n log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n log.addHandler(stream_handler)\n return logging.getLogger(__name__)",
"def logger_setup(self, logger_name):\n logger = logging.getLogger(logger_name)\n logger_path = \"/tmp/\" + logger.name\n logger_format = '%(asctime)s %(name)s %(levelname)s %(lineno)d %(message)s'\n\n # set up logging to file\n logging.basicConfig(\n level=logging.INFO,\n format=logger_format,\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=logger_path,\n filemode='w'\n )\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which for console use\n formatter = logging.Formatter(logger_format)\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n return logger"
] | [
"0.75154406",
"0.7406874",
"0.7310582",
"0.73023075",
"0.72598875",
"0.7259758",
"0.7158899",
"0.71581304",
"0.71384096",
"0.7136243",
"0.70920765",
"0.70719284",
"0.7055563",
"0.7045381",
"0.7019379",
"0.69999313",
"0.69985956",
"0.69716984",
"0.6966761",
"0.6965733",
"0.6959577",
"0.69222707",
"0.6917476",
"0.6907593",
"0.6904787",
"0.68838096",
"0.6847099",
"0.6837784",
"0.68325394",
"0.68237823"
] | 0.769293 | 0 |
Render the website's oauth page. | def oauth():
code = request.args.get('code')
if code:
params = deepcopy(settings)
url = "{host}/oauth2/access_token/".format(host=params.pop('host'))
params['code'] = code
params['client_id'] = params.pop('clientId')
params['redirect_uri'] = params.pop('redirectURI')
r = requests.post(url, data=params)
if r.status_code == 500:
f = open('error.html','w')
f.write(r.content)
f.close()
if r.status_code == 200:
data = json.loads(r.content)
resp = make_response(render_template('oauth.html', settings=settings, access_token=data.get('access_token')))
for k,v in data.items():
resp.set_cookie(k, v)
return resp
access_token = request.cookies.get("access_token")
return render_template('oauth.html',settings=settings, access_token=access_token) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def front_page():\n vars = dict(request.args)\n vars.setdefault('output', vars.get('format'))\n\n key = vars.get('auth_entity')\n if key:\n vars['entity'] = ndb.Key(urlsafe=key).get()\n if vars['entity']:\n vars.setdefault('site', vars['entity'].site_name().lower())\n\n vars.update({\n silo + '_html': module.Start.button_html(\n f'/{silo}/start_auth',\n image_prefix='/oauth_dropins_static/',\n outer_classes='col-lg-2 col-sm-4 col-xs-6',\n scopes=SCOPE_OVERRIDES.get(silo, ''),\n )\n for silo, module in OAUTHS.items()})\n\n return render_template('index.html', **vars)",
"def index():\n base_url = '%s/authorize' % SERVER_HOST\n oauth_state = str(random.randint(0, sys.maxint))\n args = {\n 'response_type': 'code',\n 'client_id': CLIENT_ID,\n 'redirect_uri': REDIRECT_URLS[0],\n 'scope': ' '.join(OAUTHIST_SCOPES),\n 'state': oauth_state,\n }\n link = '%s?%s' % (base_url, urllib.urlencode(args))\n session['oauth_state'] = oauth_state\n return render_template('client/index.html', link=link,\n server_host=SERVER_HOST)",
"def callback():\n # Grap the authentication verifier code from the incoming HTTP request (sent by Twitter)\n oauth_verifier = request.args.get('oauth_verifier')\n # Exchange the request token for authorized access token\n authenticator.get_access_token(oauth_verifier)\n # Now we have authenticated access to Twitter for the user\n\n # Render the web page to UI\n return render_template('callback.html', twitter=twitter_factory(authenticator)())",
"def get(self):\n self.render(\"login.html\")",
"def display():\n return render_template(\"signin.html\")",
"def start_oauth_view(request):\n url = get_oauth_url()\n return redirect(url)",
"def main():\n access_token = get_access_token()\n\n return render_template('index.html', ACCESS_TOKEN=access_token)",
"def main(request):\n try:\n canvas_course_id = request.session['LTI_LAUNCH']['custom_canvas_course_id']\n canvas_user_id = request.session['LTI_LAUNCH']['user_id']\n except KeyError:\n return http.HttpResponseBadRequest()\n\n edx_courses = EdxCourse.objects.all()\n try:\n canvas_auth = CanvasApiAuthorization.objects.get(lti_user_id=canvas_user_id)\n except CanvasApiAuthorization.DoesNotExist:\n return canvas_api.start_oauth(request, canvas_user_id)\n\n try:\n canvas_modules = canvas_api.get_module_list(canvas_auth, canvas_course_id)\n except CanvasAPIError as e:\n if e.status_code == 401:\n return canvas_api.start_oauth(request, canvas_user_id)\n raise\n return render(request, 'edx2canvas/index.html', {\n 'edx_courses': edx_courses,\n 'canvas_modules': json.dumps({'id': canvas_course_id, 'modules': canvas_modules})\n })",
"def login():\r\n return render_template(\r\n 'about.html',\r\n title='About',\r\n year=datetime.now().year,\r\n message='Your application description page.'\r\n )",
"def secure_page():\n return render_template('secure_page.html')",
"def auth_complete():\n logger.info(sys._getframe().f_code.co_name)\n\n app_url = request.params.get(\"app\")\n try:\n body = render_template('auth.html', **locals())\n except Exception as e:\n logger.error(\"Excepcion renderizando auth.html: %s\", e)\n return \"Error rendering auth.html\"\n\n return body",
"def login_page():\n text = '<a href=\"%s\">Authenticate with Okta</a>'\n return text % create_auth_url()",
"def main(request):\n\t# if we haven't authorised yet, direct to login page\n\tif check_key(request):\n\t\treturn HttpResponseRedirect(reverse('info'))\n\telse:\n\t\treturn render_to_response('twitter_auth/login.html')",
"def get_oauth():\n\n # initial app authorization request - not tied to specific user\n request_token, request_token_secret = goodreads.get_request_token(header_auth=True)\n\n # assign request tokens to session for future use\n session['request_token'] = request_token\n session['request_token_secret'] = request_token_secret\n\n # url takes user to Goodreads and presents them with option to authorize readerboard\n authorize_url = goodreads.get_authorize_url(request_token)\n\n # send user to goodreads\n return redirect(authorize_url)",
"def get(self, request):\n\t\treturn render(request, 'blog/login.html')",
"def get(self):\n return render_template(LOGIN_TEMPLATE)",
"def index(request):\n \n user = get_user(request)\n\n # single auth system?\n if len(ENABLED_AUTH_SYSTEMS) == 1 and not user:\n return HttpResponseRedirect(reverse(AUTH_START, args=[ENABLED_AUTH_SYSTEMS[0]])+ '?return_url=' + request.GET.get('return_url', ''))\n\n #if DEFAULT_AUTH_SYSTEM and not user:\n # return HttpResponseRedirect(reverse(start, args=[DEFAULT_AUTH_SYSTEM])+ '?return_url=' + request.GET.get('return_url', ''))\n \n default_auth_system_obj = None\n if DEFAULT_AUTH_SYSTEM:\n default_auth_system_obj = AUTH_SYSTEMS[DEFAULT_AUTH_SYSTEM]\n\n #form = password.LoginForm()\n\n return render_template(request, 'index', {'return_url' : request.GET.get('return_url', '/'),\n 'enabled_auth_systems' : ENABLED_AUTH_SYSTEMS,\n 'default_auth_system': DEFAULT_AUTH_SYSTEM,\n 'default_auth_system_obj': default_auth_system_obj})",
"def html_page():\n return render_template('Map_twitter.html')",
"def index(request):\n if request.user.is_authenticated:\n return redirect('/dashboard')\n else:\n context = {'client_id': settings.OPENHUMANS_CLIENT_ID,\n 'oh_proj_page': settings.OH_ACTIVITY_PAGE}\n\n return render(request, 'main/index.html', context=context)",
"def login():\n return render_template('auth/login.html')",
"def login():\n return render_template('auth/login.html')",
"def get(self):\n cont = self.request_string('continue', default=\"/\")\n direct = self.request_bool('direct', default=False)\n if App.facebook_app_secret is None:\n self.redirect(users.create_login_url(cont))\n return\n template_values = {\n 'continue': cont,\n 'direct': direct\n }\n self.render_jinja2_template('login.html', template_values)",
"def dashboard():\n logger.debug(\"In private\")\n #info = oidc.user_getinfo(['preferred_username', 'email', 'sub'])\n info = oidc.user_getinfo(['email', 'sub','name','cognito:groups', 'cognito:roles'])\n logger.debug(info)\n username = info.get('email')\n email = info.get('email')\n user_id = info.get('sub')\n greeting = \"Hello %s\" % username\n access_token = oidc.get_access_token()\n logger.debug('access_token=<%s>' % access_token)\n try:\n headers = {'Authorization': 'Bearer %s' % (access_token)}\n logger.debug(headers)\n greeting = requests.get('https://localhost:9999/token', headers=headers, verify=False).text \n except Exception as e:\n logger.debug(e)\n return (\"\"\"%s your email is %s and your user_id is %s!\n <ul>\n <li><a href=\"/\">Home</a></li>\n <li><a href=\"/setup\">API</a></li>\n <li><a href=\"/polls\">API</a></li>\n <li><a href=\"/vote?topic_id=1&option_id=1\">API</a></li>\n </ul>\"\"\" %\n (greeting, email, user_id))",
"def index():\n return redirect(auth_flow.get_authorization_url())",
"def main_page():\n pages=get_accounts()\n return render_template('disp.html',pages=pages)",
"def landing():\n return render_template('index.html', token=webview.token)",
"def landing():\n return render_template('index.html', token=webview.token)",
"async def oauth_show(self, ctx):\n whitelisted = self.bot.config[\"oauth_whitelist\"]\n\n users = []\n roles = []\n\n for id_ in whitelisted:\n user = self.bot.get_user(id_)\n if user:\n users.append(user)\n role = self.bot.modmail_guild.get_role(id_)\n if role:\n roles.append(role)\n\n embed = Embed(color=self.bot.main_color)\n embed.title = \"Oauth Whitelist\"\n\n embed.add_field(\n name=\"Users\", value=\" \".join(u.mention for u in users) or \"None\"\n )\n embed.add_field(\n name=\"Roles\", value=\" \".join(r.mention for r in roles) or \"None\"\n )\n\n await ctx.send(embed=embed)",
"def login_get():\n\treturn render_template('auth/login.html')",
"async def oauth(self, ctx):\n await ctx.send_help(ctx.command)"
] | [
"0.74304885",
"0.6749856",
"0.65433884",
"0.65398455",
"0.6504993",
"0.64355445",
"0.61797065",
"0.61615616",
"0.6106211",
"0.6069451",
"0.6052601",
"0.6035346",
"0.6025927",
"0.6012938",
"0.5994196",
"0.5946412",
"0.5881522",
"0.5872709",
"0.5847579",
"0.5839746",
"0.5839746",
"0.58330953",
"0.58121973",
"0.57809645",
"0.57680994",
"0.5759386",
"0.5759386",
"0.57577354",
"0.5756596",
"0.57508266"
] | 0.68202096 | 1 |
Download and read DWD XML Weather Forecast File of Type KML. | def read(self, url: str):
log.info(f"Downloading KMZ file {basename(url)}")
kml = self.fetch(url)
log.info("Parsing KML data")
self.iter_elems = iterparse(BytesIO(kml), events=("start", "end"), resolve_entities=False)
prod_items = {
"issuer": "Issuer",
"product_id": "ProductID",
"generating_process": "GeneratingProcess",
"issue_time": "IssueTime",
}
nsmap = None
# Get Basic Metadata
prod_definition = None
prod_definition_tag = None
for event, element in self.iter_elems:
if event == "start":
# get namespaces from root element
if nsmap is None:
nsmap = element.nsmap
prod_definition_tag = f"{{{nsmap['dwd']}}}ProductDefinition"
elif event == "end":
if element.tag == prod_definition_tag:
prod_definition = element
# stop processing after head
# leave forecast data for iteration
break
self.metadata = {k: prod_definition.find(f"{{{nsmap['dwd']}}}{v}").text for k, v in prod_items.items()}
self.metadata["issue_time"] = dt.datetime.fromisoformat(self.metadata["issue_time"])
# Get time steps.
timesteps = prod_definition.findall(
"dwd:ForecastTimeSteps",
nsmap,
)[0]
self.timesteps = [dt.datetime.fromisoformat(i.text) for i in timesteps.getchildren()]
# save namespace map for later iteration
self.nsmap = nsmap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_url_and_parse():\n \n # TODO: dynamic site_code\n #global site_code\n #\n #if not site_code:\n # site_code = get_site_code(city_name)\n\n urllib.request.urlretrieve(\n \"https://dd.weather.gc.ca/citypage_weather/xml/AB/s0000661_e.xml\", \"s0000661_e.xml\")\n tree = ET.parse(\"s0000661_e.xml\")\n return tree.getroot()",
"def fetch_wdi() -> None:\n\n log.info(\"Started fetching WDI.\")\n url = \"http://databank.worldbank.org/data/download/WDI_csv.zip\"\n common.fetch_source_simply(name=\"wdi\", url=url)\n log.info(\"Finished fetchign WDI.\")",
"def read_kml():\n global kmldata\n global CONFIG\n if type(kmldata) == type(None):\n if not os.path.exists(CONFIG[\"kmlfile\"]):\n fiona.drvsupport.supported_drivers['KML'] = 'rw'\n kmldata = geopandas.read_file(CONFIG[\"kmlrepo\"], driver=\"KML\")\n os.makedirs(CONFIG[\"cachedir\"],exist_ok=True)\n with open(CONFIG[\"kmlfile\"], \"wb\") as fh:\n pickle.dump(kmldata,fh)\n else:\n with open(CONFIG[\"kmlfile\"], \"rb\") as fh:\n kmldata = pickle.load(fh)\n return kmldata",
"def connect(self):\r\n zip = self.zip\r\n ccode = self.ccode\r\n apikey = self.apikey\r\n url = f\"https://api.openweathermap.org/data/2.5/weather?zip={zip},{ccode}&appid={apikey}\"\r\n\r\n weather_obj = self._download_url(url)\r\n if weather_obj is not None:\r\n return weather_obj",
"def download_dailydialog(daily_raw_fname: str, data_path: str):\n wget.download(daily_raw_fname, data_path)\n # Manually unzip the train/dev/test files",
"def get_forecast(url):\n r = requests.get(url)\n return r.json()",
"def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()",
"def downloadXML(url): \n request=NSURLRequest.requestWithURL_(NSURL.URLWithString_(url))\n (data, response, error)= NSURLConnection.sendSynchronousRequest_returningResponse_error_(request)\n return data",
"def kml(cls, user, logs, kml, kml_doc):\n # KML Compliant Datetime Formatter\n kml_datetime_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n icon = 'http://maps.google.com/mapfiles/kml/shapes/airports.png'\n threshold = 1 # Degrees\n\n kml_folder = kml.newfolder(name=user.username)\n\n flights = TakeoffOrLandingEvent.flights(user)\n if len(flights) == 0:\n return\n\n logs = filter(lambda log: cls._is_bad_position(log, threshold), logs)\n for i, flight in enumerate(flights):\n label = 'Flight {}'.format(i + 1) # Flights are one-indexed\n kml_flight = kml_folder.newfolder(name=label)\n\n flight_logs = filter(lambda x: flight.within(x.timestamp), logs)\n if len(flight_logs) < 2:\n continue\n\n coords = []\n angles = []\n when = []\n for entry in flight_logs:\n pos = entry.uas_position.gps_position\n # Spatial Coordinates\n coord = (pos.longitude, pos.latitude,\n units.feet_to_meters(entry.uas_position.altitude_msl))\n coords.append(coord)\n\n # Time Elements\n time = entry.timestamp.strftime(kml_datetime_format)\n when.append(time)\n\n # Degrees heading, tilt, and roll\n angle = (entry.uas_heading, 0.0, 0.0)\n angles.append(angle)\n\n # Create a new track in the folder\n trk = kml_flight.newgxtrack(name='Flight Path')\n trk.altitudemode = AltitudeMode.absolute\n\n # Append flight data\n trk.newwhen(when)\n trk.newgxcoord(coords)\n trk.newgxangle(angles)\n\n # Set styling\n trk.extrude = 1 # Extend path to ground\n trk.style.linestyle.width = 2\n trk.style.linestyle.color = Color.blue\n trk.iconstyle.icon.href = icon\n\n for obstacle in MovingObstacle.objects.all():\n obstacle.kml(path=flight_logs, kml=kml_flight, kml_doc=kml_doc)",
"def fetch_weather():\n\n # Fetch the current weather.\n response = requests.get(f'https://api.openweathermap.org/data/2.5/weather?q=Manchester,UK&units=metric&APPID={WEATHER_API_KEY}')\n\n # Return the data.\n return response.json()",
"def wunder(self, irc, msg, args, query):\n soup = self._fetch_xml(\"Forecast\", query)\n result = [\"Forecast for %s:\" % self._get_location_name(query)]\n days = soup.forecast.txt_forecast.findAll('forecastday')\n if len(days) == 0:\n result.append('No weather data available')\n else:\n for day in days:\n result.append(': '.join([self._extract_text(day.title), self._extract_text(day.fcttext)]).encode('utf-8'))\n irc.reply(' '.join(result))",
"def __getitem__(self, pathway):\n xmlpath = self.local_kgml_dir + pathway + '.xml'\n if exists(xmlpath):\n tree = ElementTree.parse(xmlpath)\n root = tree.getroot()\n else:\n try:\n r = requests.get(self.link_to_kgml.format(pathway), timeout=5, headers=self.headers)\n r.raise_for_status()\n root = ElementTree.fromstring(r.text)\n except requests.exceptions.HTTPError:\n self.logger.warning('Unable to download pathway xml: {}'.format(pathway))\n return None\n except requests.exceptions.ConnectTimeout:\n self.logger.warning('Unable to download pathway xml: {}'.format(pathway))\n return None\n except ElementTree.ParseError:\n self.logger.warning('Unable to parse pathway xml: {}'.format(pathway))\n return None\n except Exception:\n self.logger.warning('Unknown error getting pathway xml: {}'.format(pathway))\n return None\n\n if self.save_local:\n with open(xmlpath, 'w') as fo:\n fo.write(r.text)\n\n return self.parseKGML(root)",
"def pull_forecast(city, api_key):\n base_url = \"http://api.openweathermap.org/data/2.5/forecast?\"\n url = base_url + \"appid=\" + api_key + \"&q=\" + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data",
"def getting_user_weather_1(location_key):\n\n API_Key = \"zIGuOeUd0aE4O621Gj1KGDc6JiZ3PAGb\"\n http_request = f\"http://dataservice.accuweather.com/forecasts/v1/daily/1day/{location_key}?apikey={API_Key}&language=pt-br&metric=true\"\n\n accu_request = requests.get(http_request)\n\n if accu_request.status_code != 200:\n print(\"It was not possible to stablish connection with the metherological server. Please, try again later!\")\n exit()\n\n else:\n accu_response = accu_request.json()\n\n return accu_response",
"def get_weather_forecast(self, base_url):\n url = self._parsing_url(base_url)\n api_response = requests.get(url)\n if not api_response.ok:\n logging.error(\n \"\"\"Error occured while trying to get response from AccuWeather\n API\"\"\")\n weather_forcast = api_response.json()\n return weather_forcast",
"def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()",
"def modpricesetter_download_xml_from_nemweb(self):\r\n year, month, day = self._get_market_year_month_day_as_str()\r\n base_url = \"https://www.nemweb.com.au/Data_Archive/Wholesale_Electricity/NEMDE/{year}/NEMDE_{year}_{month}/\" + \\\r\n \"NEMDE_Market_Data/NEMDE_Files/NemPriceSetter_{year}{month}{day}_xml.zip\"\r\n url = base_url.format(year=year, month=month, day=day)\r\n r = requests.get(url)\r\n z = zipfile.ZipFile(io.BytesIO(r.content))\r\n z.extractall(self.cache_folder)",
"def get_weather(xml_data):\n\n import elementtree.ElementTree as ET\n \n page = ET.fromstring(unicode(xml_data, errors=\"ignore\"))\n\n weather = page.find( \"weather/current_conditions\" )\n\n return {\n 'f' : weather.find( \"temp_f\" ).get( \"data\" ),\n 'c' : weather.find( \"temp_c\" ).get( \"data\" ),\n 'humidity' : weather.find( \"humidity\" ).get( \"data\" ),\n 'wind' : weather.find( \"wind_condition\" ).get( \"data\" )\n }",
"def get_forecast(location_list):\n #Might need to munge location to get a query out of it\n location, human_location = location_list\n date = datetime.datetime.today()\n query = location\n url = \"http://api.wunderground.com/auto/wui/geo/ForecastXML/index.xml?query=%s\" % query\n f = urllib2.urlopen(url)\n xml = f.read()\n root = ET.XML(xml)\n \n forecast = {'location': location, 'human_location': human_location}\n #Find forecast\n simple = root.find('simpleforecast')\n for day in simple.findall('forecastday'):\n forecast['forecast_date'] = parser.parse(day.find('date').find('pretty').text)\n forecast['high_temp'] = day.find('high').find('fahrenheit').text\n forecast['low_temp'] = day.find('low').find('fahrenheit').text\n forecast['conditions'] = day.find('conditions').text\n forecast['icon'] = day.find('icon').text\n forecast['skyicon'] = day.find('skyicon').text\n try:\n f, created = ForecastDay.objects.get_or_create(**forecast)\n if created:\n f.save()\n except:\n logging.info(\"Long Range Forecast Data missing or already created\")\n \n \n #Find Moon\n moon = root.find('moon_phase')\n illuminated = moon.find('percentIlluminated')\n age = moon.find('ageOfMoon')\n sun_rise = datetime.datetime(date.year, date.month, date.day, **_hour_minute(moon.find('sunrise')))\n sun_set = datetime.datetime(date.year, date.month, date.day, **_hour_minute(moon.find('sunset'))) \n #It doesn't error, so it appears to be doing what it should.\n f = ForecastDay.objects.get(forecast_date=date)\n f.sun_rise = sun_rise\n f.sun_set = sun_set\n f.moon_illuminated = illuminated.text\n f.moon_age = age.text\n try:\n f.save()\n except:\n logging.info(\"Moon Data missing or no new data available\")",
"async def get_kml_network_link():\n logger.info('/c-haines/network-link')\n headers = {\"Content-Type\": kml_media_type,\n \"Content-Disposition\": \"inline;filename=c-haines-network-link.kml\"}\n\n return Response(headers=headers, media_type=kml_media_type, content=fetch_network_link_kml())",
"def main(config, model, stid, forecast_date):\n # Get the API key from the config\n try:\n api_key = config['Models'][model]['api_key']\n except KeyError:\n raise KeyError('wunderground.py: no api_key parameter defined for model %s in config!' % model)\n\n # Get forecast\n forecast = get_twc_forecast(stid, api_key, forecast_date)\n\n return forecast",
"def download_flight_data():\n response = requests.get(FlightAndRoutConst.URL_FLIGHTS_OF_SPRING_AIRLINE,\n headers={'Content-type': 'text/html; charset=utf-8'})\n output_path = build_file_name()\n # Using response.text instead of response.content for chinese string display\n IOUtils.write_to_file(str(response.text), output_path)\n return output_path",
"def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)",
"def make_forecastio_request():\n REQUEST = REQ_BASE + \"{0}/\".format(APIKEY)+\\\n \"{0},{1}\".format(LAT,LON)\n try:\n conn = httplib.HTTPSConnection(FORECASTIO_URL)\n conn.request(\"GET\", REQUEST)\n resp = conn.getresponse()\n data = resp.read()\n except:\n giveup()\n else:\n return data",
"def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()",
"def get_current(location_list):\n import re\n import feedparser\n location, human_location = location_list\n city, state = human_location.split(',')\n url = \"http://rss.wunderground.com/auto/rss_full/%s/%s.xml\" % (state.strip(), city.strip())\n feed = feedparser.parse(url)\n s = feed.entries[0].summary\n current = {'location': location, 'human_location': human_location}\n \n current['observation_time'] = parser.parse(feed.entries[0].updated)\n temperature = re.compile('Temperature: ([\\d\\.]+)')\n current['temperature'] = temperature.search(s).group(1)\n humidity = re.compile('Humidity: (\\d+)')\n current['humidity'] = humidity.search(s).group(1)\n conditions = re.compile('Conditions: ([\\w\\s]+)')\n current['conditions'] = conditions.search(s).group(1)\n windspeed = re.compile('Wind Speed: ([\\d\\.]+)')\n current['wind_speed'] = windspeed.search(s).group(1)\n winddirection = re.compile('Wind Direction: (\\w+)')\n current['wind_direction'] = winddirection.search(s).group(1)\n try:\n f = Forecast(**current)\n f.save()\n except:\n logging.info(\"Current Forecast Data missing or no new data available\")",
"def download_forecast_temp_old(contents):\n \"\"\"download temperature forecast temperature from accueilweather\"\"\"\n soup = BeautifulSoup(contents)\n datedivs = soup.findAll(\"h3\", {\"class\": \"date\"})\n ltempdivs = soup.findAll(\"span\", {\"class\": \"large-temp\"})\n stempdivs = soup.findAll(\"span\", {\"class\": \"small-temp\"})\n \n datelist, ltemplist, stemplist = [], [], []\n \n for div in datedivs:\n required = text_between(str(div),'<time>', '</time>')\n estimated_date = str(required) + \" \" + str(datetime.today().year)\n yourdate = dateutil.parser.parse(estimated_date)\n year = yourdate.year\n month = yourdate.month\n day = yourdate.day\n item_date = str(date(int(year), int(month), int(day)))\n datelist.append(item_date)\n \n for div in ltempdivs:\n required = text_between(str(div),'\"large-temp\">', '°</span>')\n try:\n ltemplist.append(float(required))\n except Exception:\n print(\"t/- Scraping of the forecast page going wrong\")\n \n for div in stempdivs:\n required = text_between(str(div),'\"small-temp\">/', '°</span>')\n stemplist.append(float(required))\n try:\n stemplist.append(float(required))\n except Exception:\n print(\"t/- Scraping of the forecast page going wrong\")\n \n \n if len(datedivs) == len(ltempdivs) and len(datedivs) == len(stempdivs):\n None\n else:\n print(\"t/-error in scraping of weather forecast download\")\n \n \n forecast_dict = {}\n for i in range(0,len(datelist)-1):\n forecast_dict[datelist[i]] = statistics.mean([ltemplist[i],stemplist[i]])\n \n return forecast_dict",
"def _download_epw_file(url):\n r = requests.get(url)\n if r.ok:\n # py2 and 3 compatible: binary write, encode text first\n log.debug(\" ... OK!\")\n return io.StringIO(r.text)\n else:\n log.error(\" connection error status code: %s\" % r.status_code)\n r.raise_for_status()",
"def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))",
"def download_dataset(self):\n dataset_name = ADE20K_URL.split(\"/\")[-1].split(\".\")[0]\n req = urllib.request.Request(ADE20K_URL, method=\"HEAD\")\n size_file = urllib.request.urlopen(req).headers[\"Content-Length\"]\n download = \"n\"\n while download != \"y\":\n if not self.yes_all:\n download = input(f\"You are about to download {dataset_name} ({size_file} bytes) to the temporary folder {self.tmp_path}. Do you want to continue? [y/n] \\n\")\n if self.yes_all or download == \"y\":\n logger.info(f\"Downloading dataset {dataset_name} at {ADE20K_URL} to temporary folder {self.tmp_path}...\")\n zip_path, hdrs = urllib.request.urlretrieve(ADE20K_URL, f\"{self.tmp_path}/{dataset_name}.zip\")\n logger.info(f\"Extracting {zip_path} to temporary folder {self.tmp_path}...\")\n with zipfile.ZipFile(f\"{zip_path}\", 'r') as z:\n z.extractall(f\"{self.tmp_path}\")\n self.input_data_path = zip_path[:-4]\n break\n elif download == \"n\":\n logger.error(f\"Cannot pursue without downloading the dataset.\")\n sys.exit()\n else:\n logger.error(\"Please enter a valid answer (y or n).\")"
] | [
"0.56716657",
"0.56129897",
"0.5438432",
"0.5406499",
"0.5384055",
"0.5358337",
"0.5321542",
"0.53106785",
"0.52844894",
"0.52812874",
"0.52606463",
"0.52481145",
"0.52425086",
"0.5232936",
"0.52064013",
"0.520311",
"0.5184435",
"0.51593626",
"0.51527905",
"0.51504207",
"0.51295614",
"0.5126584",
"0.51189363",
"0.5072747",
"0.5047131",
"0.50402075",
"0.5029449",
"0.50026995",
"0.5000884",
"0.4977681"
] | 0.72513247 | 0 |
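The read() document above walks the KML incrementally with lxml's iterparse, pulling the ProductDefinition header before touching the per-station forecasts. Below is a compact, self-contained sketch of just that header-extraction step; it assumes the KML bytes are already in memory and that the file declares a `dwd` namespace prefix, as in the original. The function name and return shape are illustrative, not the project's actual API.

```python
# Sketch only: header extraction from an in-memory DWD MOSMIX KML.
import datetime as dt
from io import BytesIO

from lxml.etree import iterparse


def read_mosmix_header(kml_bytes: bytes) -> dict:
    """Extract issuer metadata and forecast time steps from a DWD MOSMIX KML."""
    nsmap = None
    header = {}
    for event, element in iterparse(BytesIO(kml_bytes), events=("start", "end")):
        if event == "start" and nsmap is None:
            nsmap = element.nsmap  # the root element carries the kml/dwd prefixes
        elif event == "end" and element.tag == f"{{{nsmap['dwd']}}}ProductDefinition":
            dwd = nsmap["dwd"]
            for key, tag in (("issuer", "Issuer"),
                             ("product_id", "ProductID"),
                             ("issue_time", "IssueTime")):
                header[key] = element.find(f"{{{dwd}}}{tag}").text
            steps = element.find(f"{{{dwd}}}ForecastTimeSteps")
            header["timesteps"] = [dt.datetime.fromisoformat(t.text) for t in steps]
            break  # header parsed; the forecast placemarks stay in the stream
    # Mirrors the original read(): note that fromisoformat() only accepts a
    # trailing "Z" on Python 3.11+.
    header["issue_time"] = dt.datetime.fromisoformat(header["issue_time"])
    return header
```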
Returns a list of all the physical bumps on the Create 2 | def physical_bumps():
return [Bump.BUMP_L, Bump.BUMP_R] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pumps(self): \n return self._link_reg.pumps",
"def get_pump_list(self):\n return self.pump_array",
"def get_vendor_bills(self, count: int = 10) -> list:\n return list(\n itertools.islice(self.client.vendor_bills.get_all_generator(), count)\n )",
"def action_create_spare_bom(self):\r\n if 'active_id' not in self.env.context:\r\n return False\r\n if 'active_ids' not in self.env.context:\r\n return False\r\n product_type = self.env['product.product']\r\n active_ids = self.env.context.get('active_ids', [])\r\n for prod_ids in active_ids:\r\n prod_prod_obj = product_type.browse(prod_ids)\r\n if not prod_prod_obj:\r\n logging.warning('[action_create_spareBom] product_id {} not found'.format(prod_ids))\r\n continue\r\n obj_boms = self.env['mrp.bom'].search([('product_tmpl_id', '=', prod_prod_obj.product_tmpl_id.id),\r\n ('type', '=', 'spbom')])\r\n if obj_boms:\r\n raise osv.osv.except_osv(_('Creating a new Spare Bom Error.'),\r\n _(\"BoM for Part {} already exists.\".format(prod_prod_obj.name))\r\n )\r\n\r\n product_type.browse(active_ids).action_create_spare_bom_wf()\r\n return {'name': _('Bill of Materials'),\r\n 'view_type': 'form',\r\n \"view_mode\": 'tree,form',\r\n 'res_model': 'mrp.bom',\r\n 'type': 'ir.actions.act_window',\r\n 'domain': \"[('product_id','in', [\" + ','.join(map(str, active_ids)) + \"])]\",\r\n }",
"def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list",
"def bos(self):\n return self.BOS",
"def __sync_bulbs__() -> list:\n\n bulbs = list()\n\n try:\n discovered_bulbs = discover_bulbs(timeout=2)\n except Exception as e:\n raise Exception(str(e))\n\n for bulb in discovered_bulbs:\n ip = bulb['ip']\n port = bulb['port']\n model = bulb['capabilities']['model']\n name = bulb['capabilities']['name']\n name = name if name != '' else ip\n identifier = bulb['capabilities']['id']\n\n found_bulb = Bulb(\n ip=ip,\n port=port,\n model=model\n )\n\n found_bulb.set_name(name)\n properties = found_bulb.get_properties()\n\n bulbs.append({\n 'bulb': found_bulb,\n 'name': name,\n 'model': model,\n 'ip': ip,\n 'metadata':\n {\n 'id': identifier,\n 'ip': ip,\n 'name': name,\n 'model': model,\n 'properties': properties\n }\n })\n\n return bulbs",
"def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]",
"def get_bus_list():\n\n\tbuses = db.session.query(Bus.bus_name).all()\n\n \n\treturn buses",
"def getBrickList(self):\n return self._bricks",
"def bombs(self) -> List[Point]:\n\t\treturn self._bombs",
"def pump_names(self):\n return self._pumps",
"def generateBinItems(quantity=500, bpp1=True):\n if bpp1:\n return [i for i in range(1, quantity+1)]\n return [(i**2)/2 for i in range(1,quantity+1)]",
"def create_puck_shower(num_pucks, left_x, right_x, launch_angle):\n pucks = []\n if num_pucks > 1:\n x_spacing = (right_x - left_x) / (num_pucks - 1)\n else:\n x_spacing = 0\n\n x_coord = left_x - x_spacing\n y_coord = 0\n t = 0\n start_time = 0\n\n for p in range(num_pucks):\n pucks.append(geom.PuckData(PUCK_RADIUS, start_time, inputs.MAX_TIME, p))\n x_coord += x_spacing\n pucks[-1].append(t, x_coord, y_coord, inputs.VO, launch_angle, -1)\n\n return pucks",
"def light_bumps():\n return [Bump.LIGHT_BUMP_L, Bump.LIGHT_BUMP_FL, Bump.LIGHT_BUMP_CL,\n Bump.LIGHT_BUMP_CR, Bump.LIGHT_BUMP_FR, Bump.LIGHT_BUMP_R]",
"def plates(self):\n with sql_connection.TRN as TRN:\n sql = \"\"\"SELECT DISTINCT plate_id\n FROM qiita.container\n LEFT JOIN qiita.well USING (container_id)\n WHERE latest_upstream_process_id = %s\n ORDER BY plate_id\"\"\"\n TRN.add(sql, [self.process_id])\n plate_ids = TRN.execute_fetchflatten()\n return [plate_module.Plate(plate_id) for plate_id in plate_ids]",
"def possible_items(self):\n available_items = self.floors[self.lift_floor]\n single_items = [(i,) for i in available_items]\n double_items = [i for i in combinations(available_items, 2)]\n return single_items + double_items",
"def generate(self):\n return []",
"def power_pump_name_list(self):\n return list(self._link_reg.power_pump_names)",
"def createBinObjects(n):\n bins = []\n for i in range(n):\n \tbins.append(Bin())\n return bins",
"def get_mbeds(self):\n mbeds = []\n for mbed in self.get_mbed_devices():\n mountpoint = re.match('.*\\\\\\\\(.:)$', mbed[0]).group(1)\n logger.debug('Registry mountpoint %s', mountpoint)\n\n if self._mount_point_exists(mountpoint):\n # TargetID is a hex string with 10-48 chars\n m = re.search('[&#]([0-9A-Za-z]{10,48})[&#]', mbed[1])\n if not m:\n continue\n tid = m.group(1)\n mbeds += [(mountpoint, tid)]\n logger.debug(\"get_mbeds mount_point %s usb_id %s\", mountpoint, tid)\n return mbeds",
"def _calculate_initial_bought(self): #计算初始化的 买卖 方向 ,初始都是OUT\n bought = {}\n for s in self.symbol_list: \n bought[s] = 'OUT'\n return bought",
"def pump_name_list(self):\n return list(self._link_reg.pump_names)",
"def get_configs(self):\n batts = [450, 900, 1800]\n\n config_list = []\n\n indices_list = []\n\n # Get lists of index combinations\n for i in range(3):\n for j in range(3):\n for k in range(3):\n indices_list.append([i, j, k])\n\n # Make configuration of batteries until minimal of total capacity\n # needed is exceeded\n for index in indices_list:\n total_cap = 7500\n mini_list = []\n while total_cap > 0:\n for i in index:\n if total_cap <= 0:\n break\n total_cap -= batts[i]\n mini_list.append(batts[i])\n config_list.append(mini_list)\n\n # Sort list, so sorted weights correspond to the battery types\n sorted_list = []\n for i in config_list:\n sorted_list.append(sorted(i))\n\n # Return list of battery capacities\n return [list(item) for item in set(tuple(row) for row in sorted_list)]",
"def get_wall_products(self, wall_bp):\n products = []\n for child in wall_bp.children:\n props = props_closet.get_object_props(child)\n if props.is_closet or props.is_fixed_shelf_and_rod_product_bp:\n child.mv.comment = wall_bp.mv.name_object\n products.append(child)\n products.sort(key=lambda obj: obj.location.x, reverse=False)\n return products",
"def spawn_information_list(self) -> List[List[\"SpawnInformation\"]]:\n pass",
"def get_biases(self):\n return []",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe"
] | [
"0.5822396",
"0.5725565",
"0.5721032",
"0.57186806",
"0.5560406",
"0.55304605",
"0.5493543",
"0.5493474",
"0.53512853",
"0.5330781",
"0.5298371",
"0.52830404",
"0.52013123",
"0.5195306",
"0.5159302",
"0.51538",
"0.51515377",
"0.51443803",
"0.5105714",
"0.50573933",
"0.5050986",
"0.5034769",
"0.5033751",
"0.5033729",
"0.50333387",
"0.50113",
"0.49841937",
"0.49811235",
"0.49811235",
"0.49811235"
] | 0.70293695 | 0 |
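The two Bump members returned above suggest a simple grouping-and-polling pattern. Here is a self-contained sketch of how such constants could be declared and consumed; the IntEnum values and the `read_bump` callable are placeholders for illustration, not the Create 2 driver's real packet IDs or API.

```python
from enum import IntEnum


class Bump(IntEnum):
    # Values are placeholders; the real packet IDs live in the Create 2 spec.
    BUMP_L = 0  # left physical bumper
    BUMP_R = 1  # right physical bumper

    @staticmethod
    def physical_bumps():
        return [Bump.BUMP_L, Bump.BUMP_R]


def any_physical_bump(read_bump) -> bool:
    """True if the supplied per-sensor reader reports a hit on either bumper."""
    return any(read_bump(bump) for bump in Bump.physical_bumps())


if __name__ == "__main__":
    # Dummy reader: pretend only the left bumper is pressed.
    print(any_physical_bump(lambda bump: bump is Bump.BUMP_L))  # True
```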
Returns a list of all the light bumps on the Create 2. | def light_bumps():
return [Bump.LIGHT_BUMP_L, Bump.LIGHT_BUMP_FL, Bump.LIGHT_BUMP_CL,
Bump.LIGHT_BUMP_CR, Bump.LIGHT_BUMP_FR, Bump.LIGHT_BUMP_R] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lights(self):\n return list(self.GetLights())",
"async def Lights_Description() -> List[Dict[str, Any]]:\n result = []\n for index, light in enumerate(busylightapi.manager.lights):\n result.append(\n {\n \"light_id\": index,\n \"name\": light.name,\n \"info\": light.info,\n \"is_on\": light.is_on,\n \"color\": rgb_to_hex(*light.color),\n }\n )\n return result",
"def get_light_list(self):\n return self.light_array",
"def read_light_bumps(self):\n rtn = {}\n\n for light_bump in Bump.light_bumps():\n rtn[light_bump] = self.read_light_bump(light_bump)\n\n return rtn",
"def lights(self) -> List[dict]:\n return self.items_by_domain(\"light\")",
"def get_rgb_light():\n return list(light.rgb())",
"def physical_bumps():\n return [Bump.BUMP_L, Bump.BUMP_R]",
"def lights(self, mask, inplace=True):\n # TODO: Fields in this methods have to be given by the user and not be static\n # TODO: use BasicEdit.create_object()\n\n lights = list()\n\n zone_names = self.get_field(mask, 'Name')\n\n for zone in zone_names:\n lights.append(list())\n lights[-1].append(\"Lights\") # Object type\n lights[-1].append(\"Lights {}\".format(zone)) # Object name\n lights[-1].append(zone) # Zone\n lights[-1].append(\"Lights Schedule {}\".format(zone)) # Schedule name\n lights[-1].append(\"Watts/Area\") # Design Level Calculation Method\n lights[-1].append(\"\") # Lighting Level {W}\n lights[-1].append(\"2.9\") # Watts per Zone Floor Area {W/m2}\n lights[-1].append(\"\") # Watts per Person {W/person}\n lights[-1].append(\"\") # Return Air Fraction\n lights[-1].append(\"\") # Fraction Radiant\n lights[-1].append(\"\") # Fraction Visible\n lights[-1].append(\"\") # Fraction Replaceable\n\n return lights",
"async def lights(self, context):\n\n await random_image(context, 'lights')",
"def getBrickList(self):\n return self._bricks",
"def lights_on(self) -> list:\n return [\n entity for entity in self.all_lights if self.hass.get_state(entity) == \"on\"\n ]",
"def __sync_bulbs__() -> list:\n\n bulbs = list()\n\n try:\n discovered_bulbs = discover_bulbs(timeout=2)\n except Exception as e:\n raise Exception(str(e))\n\n for bulb in discovered_bulbs:\n ip = bulb['ip']\n port = bulb['port']\n model = bulb['capabilities']['model']\n name = bulb['capabilities']['name']\n name = name if name != '' else ip\n identifier = bulb['capabilities']['id']\n\n found_bulb = Bulb(\n ip=ip,\n port=port,\n model=model\n )\n\n found_bulb.set_name(name)\n properties = found_bulb.get_properties()\n\n bulbs.append({\n 'bulb': found_bulb,\n 'name': name,\n 'model': model,\n 'ip': ip,\n 'metadata':\n {\n 'id': identifier,\n 'ip': ip,\n 'name': name,\n 'model': model,\n 'properties': properties\n }\n })\n\n return bulbs",
"def info_materials_booster_get():\n materials = _material_by_group(712) # 712 == intermediate group\n return materials, 200",
"def build_light(self, item):\n\n # Validete NMS object.\n if \"ObjectID\" not in item:\n return\n\n # Get object id from item.\n object_id = item[\"ObjectID\"]\n # Find light data\n if object_id not in self.lights_dictionary:\n return\n\n # Build Lights\n light_information = self.lights_dictionary[object_id]\n for idx, light_values in enumerate(light_information.values()):\n # Get Light Properties.\n light_type = light_values[\"type\"]\n light_location = light_values[\"location\"]\n\n # Create light.\n light = bpy.ops.object.light_add(\n type=light_type.upper(),\n location=light_location\n )\n light = bpy.context.object\n light[\"NMS_LIGHT\"] = True\n light.name = \"{0}_light{1}\".format(item.name, idx)\n data_copy = deepcopy(light_values)\n\n # Remove invalid blender properties.\n data_copy.pop(\"type\")\n data_copy.pop(\"location\")\n\n # Apply all other properties to blender object.\n for key, value in data_copy.items():\n if isinstance(value, list):\n value = mathutils.Vector(tuple(value))\n setattr(light.data, key, value)\n\n # Parent to object.\n utils.parent(light, item)\n\n # Disable Selection.\n light.hide_viewport = True\n light.hide_select = True",
"async def Rainbow_Lights():\n busylightapi.manager.apply_effect_to_light(ALL_LIGHTS, rainbow)\n return {\n \"action\": \"effect\",\n \"name\": \"rainbow\",\n \"light_id\": \"all\",\n }",
"def bombs(self) -> List[Point]:\n\t\treturn self._bombs",
"def make_melon_types():\n\n all_melon_types = []\n \n musk = MelonType('Muskmelon', 'musk', 1998, 'green',\n True, True)\n musk.add_pairing('mint')\n all_melon_types.append(musk)\n\n cas = MelonType('Casaba', 'cas', 2003, 'orange',\n True, False)\n cas.add_pairing('strawberries')\n cas.add_pairing('mint')\n all_melon_types.append(cas)\n\n cren = MelonType('Crenshaw', 'cren', 1996, 'green',\n True, False)\n cren.add_pairing('proscuitto')\n all_melon_types.append(cren)\n\n yw = MelonType('Yellow Watermelon', 'yw', 2013, 'yellow',\n True, True)\n yw.add_pairing('ice cream')\n all_melon_types.append(yw)\n\n\n # Fill in the rest\n print(all_melon_types)\n return all_melon_types",
"def make_melons(melon_types):\n# Make a list that instantiates a bunch of melons\n# Make an empty list to populate\n\n melons = []\n\n melons_by_id = make_melon_type_lookup(melon_types)\n\n # Make a melon object:\n # (melon_type, shape, color_rating, harvest_from, harvest_by\n\n\n\n melon1 = Melon(\n melons_by_id['yw'],\n 8,\n 7,\n 2,\n 'Sheila', \n )\n\n melon2 = Melon(\n melons_by_id['yw'],\n 3,\n 4,\n 2,\n 'Sheilaa' \n )\n\n melon3 = Melon(\n melons_by_id['yw'],\n 9,\n 8,\n 3,\n 'Sheila'\n )\n\n\n melon4 = Melon(\n melons_by_id['yw'],\n 10,\n 6,\n 35,\n 'Sheila'\n )\n\n melon5 = Melon(\n melons_by_id['yw'],\n 8,\n 9,\n 35,\n 'Michael'\n )\n\n melon6 = Melon(\n melons_by_id['yw'],\n 8,\n 2,\n 35,\n 'Michael'\n )\n\n melon7 = Melon(\n melons_by_id['yw'],\n 2,\n 3,\n 4,\n 'Michael'\n )\n\n\n melon8 = Melon(\n melons_by_id['yw'],\n 6,\n 7,\n 4,\n 'Michael'\n )\n\n melon9 = Melon(\n melons_by_id['yw'],\n 7,\n 10,\n 3,\n 'Sheila'\n )\n\n melons = melons.extend([melon1, melon2, melon3, melon4, melon5, melon6, melon7, melon8, melon9])\n\n return melons",
"def exportLights(self):\n\t\t#TODO! REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\t\tif len(finalLi) == 1:\n\t\t\t\tlitsToExport.append( finalLi[0] )\n\t\t\telse:\n\t\t\t\tlitsToExport.append( finalLi[1] )\n\t\tif litsToExport:\n\t\t\tmc.select( litsToExport, r=1, ne=1 )\n\t\t\tmc.file( self.lightPath.path, op=\"v=0\", typ=\"mayaAscii\", pr=1, es=1 )\n\t\t\t#export Light Linking\n\t\t\tself.exportLightLinking()",
"def get_pump_list(self):\n return self.pump_array",
"def generate_mobs(self):\n mobs = []\n z = 0\n while z < 10: # 10 mobs per level for now\n c = 0\n while c == 0:\n x = random.randint(0, self.map.width - 1)\n y = random.randint(0, self.map.height - 1)\n if self.map.map[x][y].blocked == False:\n mobs.append(Monster.Monster(x=x, y=y))\n z += 1\n c += 1\n\n return mobs",
"def get_lights(bridge):\n\n target_names = [\n \"Console Lamp\",\n \"Bedroom Table Lamp\",\n \"Kitchen light\",\n ]\n\n targets = [light for light in bridge.lights if light.name in target_names]\n\n if len(targets) != len(target_names):\n print(\"%s: not found ... %s\" % (target_names, targets))\n exit(1)\n\n return targets",
"def pumps(self): \n return self._link_reg.pumps",
"def fan_list(self):\n return [\"low\", \"medium\", \"high\"]",
"def list():\n return [Cliff.CLIFF_L,\n Cliff.CLIFF_FL,\n Cliff.CLIFF_R,\n Cliff.CLIFF_FR,\n Cliff.VIRTUAL_WALL]",
"def make_glows():\n\n glow = [None] * 16\n for i in range(16):\n dim = 2 * i + 1\n glow[i] = array(\"b\", [0] * (dim**3))\n for x, y, z in product(xrange(dim), repeat=3):\n distance = abs(x - i) + abs(y - i) + abs(z - i)\n glow[i][(x * dim + y) * dim + z] = i + 1 - distance\n glow[i] = array(\"B\", [clamp(x, 0, 15) for x in glow[i]])\n return glow",
"def exportLightLinking(self):\n\t\tlights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a]\n\t\tallShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))]\n\t\tlitLinks = {}\n\t\tfor l in lights:\n\t\t\tlightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0)\n\t\t\tlitLinks[l]\t = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT\n\t\tpickle.dump( litLinks, open( self.lightLinkPath.path, \"wb\" ) )",
"def make_melon_types():\n\n all_melon_types = []\n\n all_melon_types.append(muskmelon.name)\n all_melon_types.append(casaba.name)\n all_melon_types.append(crenshaw.name)\n all_melon_types.append(yellow_watermelon.name)\n\n print(all_melon_types)",
"def make_melon_types():\n\n # list of objects of class MelonType\n all_melon_types = []\n\n # instantiating\n # instances = class MelonType (attributes)\n musk = MelonType(\"musk\",1998,\"green\",True,True, \"Muskmelon\")\n # call add_pairing method to add pairings to instance's pairing attribute\n musk.add_pairing(\"mint\")\n all_melon_types.append(musk)\n\n cas = MelonType(\"cas\",2003,\"orange\", True, False, \"Casaba\")\n cas.add_pairing(\"strawberries\")\n cas.add_pairing(\"mint\")\n all_melon_types.append(cas)\n\n cren = MelonType(\"cren\",1996,\"green\",True,False, \"Crenshaw\")\n cren.add_pairing(\"proscuitto\")\n all_melon_types.append(cren)\n\n yw = MelonType(\"yw\",2013,\"yellow\",True,True, \"Yellow Watermelon\")\n yw.add_pairing(\"ice cream\")\n all_melon_types.append(yw)\n\n # all_melons_types returns instance information\n return all_melon_types",
"def create_plasma(self) -> list:\n\n self.plasma = paramak.Plasma(\n major_radius=6.2e2,\n minor_radius=2e2,\n elongation=1.7,\n triangularity=0.33,\n vertical_displacement=5.7e1,\n configuration=\"single-null\",\n rotation_angle=self.rotation_angle,\n )\n\n return [self.plasma]"
] | [
"0.66449255",
"0.6407675",
"0.63921744",
"0.6349192",
"0.6301567",
"0.6216425",
"0.5929051",
"0.58499926",
"0.57615566",
"0.5751924",
"0.563449",
"0.562275",
"0.56202555",
"0.56143504",
"0.5554627",
"0.55225164",
"0.54759777",
"0.54197073",
"0.5348882",
"0.5308788",
"0.5306346",
"0.52832973",
"0.5278767",
"0.5277705",
"0.5265834",
"0.5259957",
"0.5253457",
"0.5242598",
"0.52394617",
"0.5203241"
] | 0.73039806 | 0 |
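The light-bump grouping above pairs naturally with the dict-building poll shown in the read_light_bumps() negative. A self-contained sketch of that pattern follows; the channel labels and the stand-in reader are both assumptions, not the robot library's actual identifiers.

```python
# Placeholder channel labels standing in for the six LIGHT_BUMP_* members.
LIGHT_BUMP_CHANNELS = ("L", "FL", "CL", "CR", "FR", "R")


def read_all_light_bumps(read_one):
    """Collect one reading per light-bump channel into a {channel: value} dict."""
    return {channel: read_one(channel) for channel in LIGHT_BUMP_CHANNELS}


if __name__ == "__main__":
    # Dummy reader that reports zero signal strength on every channel.
    print(read_all_light_bumps(lambda channel: 0))
```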
Determines if a message is an IR message from a dock | def is_dock_msg(msg):
return msg & 0xF0 == Dock.BASE | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_for_kill(comm, message):\n try:\n comm.messages.get(\"broadcast-kill\") # 12x cheaper than listl\n print(message)\n return True\n except comm.messages.client.exceptions.NoSuchKey:\n return False",
"def decompose(msg):\n rtn = {}\n\n if Dock.is_dock_msg(msg):\n rtn = {\n Dock.RED_BUOY: bool(msg & Dock.RED_BUOY),\n Dock.GREEN_BUOY: bool(msg & Dock.GREEN_BUOY),\n Dock.FORCE_FIELD: bool(msg & Dock.FORCE_FIELD)\n }\n\n return rtn",
"def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False",
"def isbimol(rxn_typ):\n return rxn_typ in BIMOL_REACTIONS",
"def isOK(ser):\n while 1:\n msg=ser.readline(300)\n if msg.find(\"<\")!=-1:\n break\n if msg.find(\"<OK\")!=-1:\n return True\n return False",
"def is_match(self, command_bytes):",
"def reaction_check(self, payload):\n if payload.message_id != self.message.id:\n return False\n if payload.user_id not in (*self.bot.owner_ids, self._author_id):\n return False\n return payload.emoji in self.buttons",
"def is_raw_read(command): \n if command.startswith('<READ') and command.endswith('>') and \\\n is_valid_raw(command):\n return True\n else: \n return False\n # end if",
"def check_message(self, msg):\n pass",
"def check_restart(self, data):\n for entry in data:\n if entry.find('MODEM:STARTUP')!=-1: \n #print 'restart detected'\n return True\n if entry.find('+PBREADY')!=-1:\n #print 'ready'\n return True\n return False",
"def isResp(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == '76270-8'",
"def check_ack_or_nak(message):\n value = message.body[-1]\n\n if value == 0x06:\n return\n elif value == 0x15:\n raise CommandFailure(command_code=message.command_code)\n else:\n raise RuntimeError(\"Unexpected ACK/NAK value (0x%02x)\" % value)",
"def is_ringing(self) -> bool:",
"def msg_found(self, msg, message):\n if msg in message:\n return True\n else:\n return False",
"def _msg_is_command(self, msg):\n return isinstance(msg, dict)",
"def check_message(msg):\n words_of_message = msg.split()\n find = False\n for key in gc_words:\n if words_of_message in gc_words[key]['groups']:\n getattr(neuron.general_conversations, key)()\n find = True\n break\n for key in fc_words:\n if words_of_message in fc_words[key]['groups']:\n getattr(neuron.forecast, key)()\n find = True\n break\n for key in twitter_words:\n if words_of_message in twitter_words[key]['groups']:\n getattr(neuron.twitter, key)()\n find = True\n break\n for key in pipo_words:\n if words_of_message in pipo_words[key]['groups']:\n getattr(neuron.pipotron, key)()\n find = True\n break\n if not find:\n neuron.general_conversations.undefined()",
"def checkrun(self):\n return self.relation.check(action='message')",
"def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None",
"def IsOk(self):\r\n\r\n return self.dock_direction != 0",
"def exec_cond(message, session):\n if message[\"text\"] == buttons[\"schedule\"]:\n return True\n elif message[\"text\"] in get_days():\n session[\"state\"] = states[\"schedule\"]\n return True\n else:\n return False",
"def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]",
"def is_lcm_message(obj):\n return '_get_packed_fingerprint' in dir(obj)",
"def _is_push_command(self):\n return self._match_memory_pattern(\"push\")",
"def which_pump (self):\n if self.msg == b'1': #left\n self.pump_it_80(1)\n # self.pump_it(1)\n # self.pump_it(1)\n # self.pump_it(1)\n # self.pump_it(1)\n elif self.msg == b'2': #right\n self.pump_it_20(2)\n # self.pump_it(2)\n # self.pump_it(2)\n # self.pump_it(2)\n # self.pump_it(2)",
"def listening_for(message):\n\n if Utilities.isNotEmpty(message['text']):\n cmds = ['!whois', '!geoloc', '!ping']\n return message['text'].split()[0] in cmds",
"def is_target(top_container):\n\tif '.' not in top_container.get('barcode', ''):\n\t\treturn True\n\telse:\n\t\treturn False",
"def is_switcher_originator(self) -> bool:\n return hexlify(self.message)[0:4].decode() == \"fef0\" and (\n len(self.message) == 165\n or len(self.message) == 168 # Switcher Breeze\n or len(self.message) == 159 # Switcher Runner and RunnerMini\n )",
"def is_ctrl_message(self):\n return self._id < 0",
"def isOpen(self):\n what = self.checkapf(\"WHATSOPN\").read()\n if \"DomeShutter\" in what or \"MirrorCover\" in what or \"Vents\" in what:\n return True, what\n else:\n return False, ''",
"def CanHandle(self, message):\n return (isinstance(message, messages.ChannelMessage)\n and message.content.startswith(TRIGGER))"
] | [
"0.5816875",
"0.5482376",
"0.5379433",
"0.5278253",
"0.52585906",
"0.52375406",
"0.5201998",
"0.51965576",
"0.51545644",
"0.5095728",
"0.50897115",
"0.5078164",
"0.507621",
"0.5074198",
"0.5051895",
"0.50485533",
"0.5042112",
"0.5038949",
"0.50294524",
"0.5026693",
"0.50156546",
"0.5009",
"0.5002232",
"0.49961212",
"0.49915066",
"0.4987142",
"0.4970514",
"0.49676788",
"0.49284506",
"0.49218452"
] | 0.6874699 | 0 |
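is_dock_msg() above is a pure bit test on the upper nibble, and the decompose() record that follows builds on it by reading the low flag bits. Below is a self-contained sketch of both steps; the DOCK_BASE value and the buoy/force-field flag bits are illustrative assumptions, not the documented Create 2 IR opcodes.

```python
# Assumed constants for illustration only.
DOCK_BASE = 0xA0    # high-nibble marker for dock-originated IR bytes
RED_BUOY = 0x08
GREEN_BUOY = 0x04
FORCE_FIELD = 0x01


def is_dock_msg(msg: int) -> bool:
    """True when the upper nibble of the IR byte matches the dock base."""
    return msg & 0xF0 == DOCK_BASE


def decompose(msg: int) -> dict:
    """Split a dock IR byte into per-beam booleans; empty dict if not from a dock."""
    if not is_dock_msg(msg):
        return {}
    return {
        "red_buoy": bool(msg & RED_BUOY),
        "green_buoy": bool(msg & GREEN_BUOY),
        "force_field": bool(msg & FORCE_FIELD),
    }


if __name__ == "__main__":
    print(decompose(DOCK_BASE | RED_BUOY | FORCE_FIELD))
    # -> {'red_buoy': True, 'green_buoy': False, 'force_field': True}
```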
Decomposes a dock IR message into the detected beams and force field. This method will check to see if the provided message is from a dock. | def decompose(msg):
rtn = {}
if Dock.is_dock_msg(msg):
rtn = {
Dock.RED_BUOY: bool(msg & Dock.RED_BUOY),
Dock.GREEN_BUOY: bool(msg & Dock.GREEN_BUOY),
Dock.FORCE_FIELD: bool(msg & Dock.FORCE_FIELD)
}
return rtn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decode_message(self, message):\r\n\r\n\t\tprint(\"Decoding message '{}'\".format(message))\r\n\r\n\t\tmessage_split = message[1:-1].split('||')\r\n\r\n\t\tif len(message_split) > 1: # Several messages are queued\r\n\t\t\tfor m in message_split:\r\n\t\t\t\tself.decode_message('|' + m + '|')\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tmessage = message_split[0]\r\n\r\n\t\tmessage_split = message.split('|')\r\n\r\n\t\tif message_split[0] == 'LA':\r\n\r\n\t\t\tlist_bars = message_split[1].split(',')\r\n\t\t\tself.send_bar_names.emit(list_bars) # Sending the list to the UI\r\n\r\n\t\telif message_split[0] == 'ME':\r\n\r\n\t\t\tprint(\"New message received : '{}'\".format(message))\r\n\r\n\t\t\tif len(message_split) == 3: # Author was found\r\n\t\t\t\tinfos = (message_split[2], message_split[1])\r\n\t\t\telif len(message_split) == 2: # No author\r\n\t\t\t\tinfos = (message_split[1],)\r\n\t\t\ttry:\r\n\t\t\t\tself.message_received.emit(infos)\r\n\t\t\texcept UnboundLocalError:\r\n\t\t\t\tself._window.open_dialog(\"Message de chat incompréhensible\",\r\n\t\t\t\t\t\t\t\t\t\t \"Le message de chat suivant n'a pas pu être décodé : {}\".format(message),\r\n\t\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\r\n\t\telif message_split[0] == 'LO': # Message is '|LO|' so just ignoring it\r\n\r\n\t\t\tself.name_set.emit() # Warning the UI about the name being set\r\n\r\n\t\telif message_split[0] == \"CH\":\r\n\r\n\t\t\tpass\r\n\t\t\r\n\t\telif message_split[0] == 'UR':\r\n\r\n\t\t\tprint(\"New message received : '{}'\".format(message))\r\n\r\n\t\t\tif len(message_split) == 3: # Author was found\r\n\t\t\t\tinfos = (message_split[2], message_split[1])\r\n\t\t\telif len(message_split) == 2: # No author\r\n\t\t\t\tinfos = (message_split[1],)\r\n\t\t\ttry:\r\n\t\t\t\tself.urgent_message_received.emit(infos)\r\n\t\t\texcept UnboundLocalError:\r\n\t\t\t\tself._window.open_dialog(\"Message de chat incompréhensible\",\r\n\t\t\t\t\t\t\t\t\t\t \"Le message de chat suivant n'a pas pu être décodé : {}\".format(message),\r\n\t\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\t\t\t\r\n\t\telif message_split[0] == \"LE\": # Getting the list of products\r\n\r\n\t\t\tif message_split[1]:\r\n\t\t\t\ttuples = message_split[1].split(',')\r\n\t\t\t\tfor t in tuples:\r\n\t\t\t\t\ti, f = t.split(':')\r\n\t\t\t\t\tself.__food[int(i)] = f\r\n\r\n\t\telif message_split[0] == \"RS\": # A new order for Restal\r\n\r\n\t\t\ttry:\r\n\t\t\t\tfood = self.__food[int(message_split[2])]\r\n\t\t\texcept KeyError:\r\n\t\t\t\tfood = \"Inconnue\"\r\n\t\t\t\tprint(\"Unable to get the name of food '{}'\".format(message_split[2]))\r\n\t\t\tprint(message_split[1],message_split[3],message_split[2])\r\n\t\t\tself.add_order.emit(message_split[1], food, int(message_split[3]))\r\n\r\n\t\telse:\r\n\t\t\tself._window.open_dialog(\"Message du serveur incompréhensible\",\r\n\t\t\t\t\t\t\t\t\t \"Le message suivant n'a pas pu être décodé : {}\".format(message), type=\"warning\")\r\n\t\t\tprint(\"Error : message '{}' could not be decoded\".format(message))",
"def DecodeCodedMessage(codedmessage):\n message = CODE.GetMessage(codedmessage)\n return message",
"def decode_message(self, buf, message_type=None):\n self.debugStack = 0\n value, typedef, _ = self._decode_message(\"\", buf, message_type)\n return value, typedef",
"def _decode_message(self, label: str, buf, typedef=None, pos=0, end=None, group=False):\n print(str(pos) + \" decode_message \" + label)\n if end is None:\n end = len(buf)\n\n if typedef is None:\n typedef = {}\n else:\n # Don't want to accidentally modify the original\n typedef = copy.deepcopy(typedef)\n output = {}\n\n while pos < end:\n oldpos = pos\n tag, pos = decoder._DecodeVarint(buf, pos)\n try:\n field_number, wire_type = wire_format.UnpackTag(tag)\n except Exception as exc:\n raise (ValueError,\n 'Could not read valid tag at pos %d. Ensure it is a valid protobuf message: %s'\n % (pos-len(tag), exc), sys.exc_info()[2])\n # Convert to str\n field_number = str(field_number)\n orig_field_number = field_number\n \n field_typedef = None\n if field_number in typedef:\n field_typedef = typedef[field_number]\n else:\n field_typedef = {}\n field_typedef['type'] = self.wire_type_defaults[wire_type]\n field_type = field_typedef['type']\n if self.debug:\n ft = field_type\n if ft == None:\n ft = \"None\"\n print(\"@\" + str(oldpos) + \"-\" + str(pos-1) + \":\" + label + \" field_number \" +\n str(field_number) +\n \" wire_type \" + str(wire_type) +\n \" field_type \" + str(ft))\n # If field_type is None, its either an unsupported wire type, length delim or group\n # length delim we have to try and decode first\n field_out = None\n if field_type == 'LD':\n field_out, pos = self.decode_message_LD(label, buf, pos, field_typedef)\n elif field_type == 'endGroup':\n # TODO Should probably match the field_number to START_GROUP\n if not group:\n raise ValueError(\"Found END_GROUP before START_GROUP\")\n # exit out\n return output, typedef, pos\n elif field_type == 'message':\n field_out, pos = self.decode_message_message(\n label, buf, pos, field_typedef, field_number)\n elif field_type == 'group':\n group_typedef = None\n # Check for a anonymous type\n if 'group_typedef' in field_typedef:\n group_typedef = field_typedef['group_typedef']\n field_out, group_typedef, pos = self.decode_group(\n label, buf, group_typedef, pos)\n # Save type definition\n field_typedef['group_typedef'] = group_typedef\n else:\n # Verify wiretype matches\n if self.wiretypes[field_type] != wire_type:\n raise ValueError(\"Invalid wiretype for field number %s. %s is not wiretype %s\"\n % (field_number, field_type, wire_type))\n # Simple type, just look up the decoder\n field_out, pos = self.decoders[field_type](buf, pos)\n field_typedef['type'] = field_type\n if 'name' not in field_typedef:\n field_typedef['name'] = ''\n field_key = field_number\n if '-' not in field_number and 'name' in field_typedef and field_typedef['name'] != '':\n field_key = field_typedef['name']\n # Deal with repeats\n if field_key in output:\n if isinstance(field_out, list):\n if isinstance(output[field_number], list):\n output[field_key] += field_out\n else:\n output[field_key] = field_out.append(output[field_key])\n else:\n if isinstance(output[field_number], list):\n output[field_key].append(field_out)\n else:\n output[field_key] = [output[field_key], field_out]\n else:\n output[field_key] = field_out\n typedef[orig_field_number] = field_typedef\n if self.debug:\n print(str(field_key) + \" field_out:\" + str(field_out))\n if pos > end:\n raise decoder._DecodeError(\"Invalid Message Length, pos=\" +\n str(pos) + \" end=\" + str(end))\n # Should never hit here as a group\n if group:\n raise ValueError(\"Got START_GROUP with no END_GROUP.\")\n print(\"decode_message finish \" + str(pos))\n return output, typedef, pos",
"def parse_message(message):\n temp = \"\"\n for i in message:\n if i == bitarray('10100011'):\n temp += 'ESC' + ' '\n elif i == bitarray('01111110'):\n temp += 'FLAG' + ' '\n else:\n temp += i.tobytes().decode('ascii') + ' '\n return temp.strip()",
"def is_dock_msg(msg):\n return msg & 0xF0 == Dock.BASE",
"def decode_can_message(self) -> dict:\n\n # the first two chars are '0b' from bin() conversion, so strip them out\n self.message = self.message[2:]\n\n # Start of Frame field, 1-bit\n self.can_data[\"sof\"] = self.read_bits_as_int(1)\n\n \"\"\"Arbitration Field is 12-bits or 32-bits long\n Assume we only have an 11-bit identifiers in this project for now,\n so a 12-bit arbitration field. The 32-bit long field also means the following\n IDE fiels moves out of the control field into arbitration field.\n The ID defines the ECU that sent this message.\"\"\"\n self.can_data[\"can_id\"] = self.read_bits_as_int(11)\n # RTR of 0 means this is a normal data frame\n # RTR of 1 means this is a remote frame, unlikely in our use case\n self.can_data[\"rtr\"] = self.read_bits_as_int(1)\n\n \"\"\"The control field is a 6-bit field that contains the length of the\n data in bytes, so read n bits where n is 8 * data_length_field.\n IDE of 0 uses 11-bit ID format, IDE of 1 uses 29-bit ID format.\n R0 is a reservered spacer field of 1-bit. The SRR field has the value of the\n RTR bit in the extended ID mode, and is not present in the standard\n ID mode.\"\"\"\n self.can_data[\"ide\"] = self.read_bits_as_int(1)\n if int(self.can_data[\"ide\"]) == 1: # 29-bit ID format, 32-bit arbitration field\n self.can_data[\"srr\"] = self.can_data[\"rtr\"]\n self.can_data[\"extended_can_id\"] = self.read_bits_as_int(18)\n self.can_data[\"rtr\"] = self.read_bits_as_int(1)\n else:\n self.can_data[\"srr\"] = None\n self.can_data[\"extended_can_id\"] = None\n self.can_data[\"r0\"] = self.read_bits_as_int(1)\n self.can_data[\"data_length_code\"] = self.read_bits_as_int(4)\n self.can_data[\"data_bin\"] = self.read_bits_as_bin(\n self.can_data[\"data_length_code\"] * 8\n )\n self.can_data[\"data\"] = int(self.can_data[\"data_bin\"], 2)\n\n \"\"\"For sensors providing multiple simultaneous values in one field, we will\n delimit at word length (16-bits) and store each word in our data dictionary.\n For sensors with single valued data, we just use the data field. See the\n views/can.py file for ORM declarations.\"\"\"\n for word in range(self.can_data[\"data_length_code\"] // 2):\n self.can_data[f\"data_word_{word}\"] = self.read_can_data_word(\n self.can_data[\"data_bin\"], word\n )\n\n \"\"\"CRC Field is 16-bits.\n The CRC segment is 15-bits in the field and contains the frame check sequence\n spanning from SOF through Arbitration Field, Control Field, and Data Field.\n The CRC Delimeter bit is always recessive (i.e. 1) following the CRC field.\"\"\"\n self.can_data[\"crc_segment\"] = self.read_bits_as_int(15)\n\n self.can_data[\"crc_delimiter\"] = self.read_bits_as_int(1)\n if self.can_data[\"crc_delimiter\"] == 0:\n raise InvalidBitException(\n self.can_data[\"crc_delimiter\"], \"CRC Delimiter\", self.original_message\n )\n\n # ACK Field is 2-bits\n # Delimiter is always recessive (1)\n self.can_data[\"ack_bit\"] = self.read_bits_as_int(1)\n self.can_data[\"ack_delimiter\"] = self.read_bits_as_int(1)\n if self.can_data[\"ack_delimiter\"] == 0:\n raise InvalidBitException(\n self.can_data[\"ack_delimiter\"], \"ACK Delimiter\", self.original_message\n )\n\n # EOF\n self.can_data[\"end_of_frame\"] = self.read_bits_as_int(7)\n\n # IFS\n self.can_data[\"interframe_space\"] = self.read_bits_as_int(3)\n return self.can_data",
"def unpack(self, raw_message):\n return self._msg_struct.unpack(raw_message)",
"def unwrap_recipe(header, message):\n if mangle_for_receiving:\n message = mangle_for_receiving(message)\n if header.get(\"workflows-recipe\") in {True, \"True\", \"true\", 1}:\n rw = RecipeWrapper(message=message, transport=transport_layer)\n if log_extender and rw.environment and rw.environment.get(\"ID\"):\n with log_extender(\"recipe_ID\", rw.environment[\"ID\"]):\n return callback(rw, header, message.get(\"payload\"))\n return callback(rw, header, message.get(\"payload\"))\n if allow_non_recipe_messages:\n return callback(None, header, message)\n # self.log.warning('Discarding non-recipe message:\\n' + \\\n # \"First 1000 characters of header:\\n%s\\n\" + \\\n # \"First 1000 characters of message:\\n%s\",\n # str(header)[:1000], str(message)[:1000])\n transport_layer.nack(header)",
"def process_received_message(self, msg: str):\n all = msg.split(\":\")\n if len(all) < 2:\n self.log.warning(\"Recv bad formated cmd\", msg)\n return\n cmd, all_param = all[:2]\n params = all_param.split(\";\")\n\n self.strip.setPixelColorRGB(0, 0, 0, 0)\n if cmd == \"\":\n pass\n elif cmd == \"SWU\": # switch update\n sw_id = params[0]\n sw_state = int(params[1])\n self.machine.switch_controller.process_switch_by_num(sw_id, state=sw_state, platform=self, logical=False)\n self.strip.setPixelColorRGB(0, 0, 0, 0xff) # blue\n elif cmd == \"DBG\": # debug message\n self.log.debug(\"RECV:%s\" % msg)\n elif cmd == \"INF\": # debug message\n self.log.info(\"RECV:%s\" % msg)\n elif cmd == \"WRN\": # warning message\n self.log.warning(\"RECV:%s\" % msg)\n self.strip.setPixelColorRGB(0, 0xff, 0xff, 0) # yellow\n elif cmd == \"ERR\": # warning message\n self.log.error(\"RECV:%s\" % msg)\n self.strip.setPixelColorRGB(0, 0xff, 0, 0) # red\n elif cmd == \"TCK\": # arduino is alive !\n self.log.debug(\"TCK ok:%d\" % int(params[0]))\n elif cmd == \"ACK\": # ack of frame\n self.communicator.ack_frame(int(params[0]), params[1] == \"OK\")\n self.strip.setPixelColorRGB(0, 0, 0xff, 0) # green\n else:\n self.log.warning(\"RECV:UNKNOWN FRAME: [%s]\" % msg)\n l = len(self.communicator.frames)\n #TODO: self.machine['frame_cnt'] = l\n self.strip.show()\n self.machine.events.post_async('raspberry_frame_count', frame_cnt=l, frames=self.communicator.frames)",
"def resolve_message(self, rq):\n\n if rq.command == u\"initialize\":\n self.next_seq += 1\n DAPInitializeResponse.create(self.next_seq, rq.seq, True, rq.command, body=DAPCapabilities.create(**features)).send(self._current_client)\n self.next_seq += 1\n DAPInitializedEvent.create(self.next_seq).send(self._current_client)\n elif rq.command == u\"setBreakpoints\":\n self.next_seq += 1\n bkps = self.create_breakpoints(**rq.get_arguments().as_current_kwargs())\n body = DAPSetBreakpointsResponseBody.create([b.serialize() for b in bkps])\n DAPSetBreakpointsResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"configurationDone\":\n self.next_seq += 1\n DAPConfigurationDoneResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n elif rq.command == u\"launch\":\n # no special noDebug\n self.next_seq += 1\n DAPLaunchResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n self._ready_for_events = True\n elif rq.command == u\"disconnect\":\n self.next_seq += 1\n DAPDisconnectResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n self._current_client.close()\n self._current_client = None\n return\n elif rq.command == u\"continue\":\n self.next_seq += 1\n body = DAPContinueResponseBody.create(all_threads_continued=True)\n DAPContinueResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n debugger.stepping = SteppingMode.STEP_NO_STEP\n debugger.continue_next()\n elif rq.command == u\"threads\":\n self.next_seq += 1\n body = DAPThreadsResponseBody.create([DAPThread.create(0, \"renpy_main\")])\n DAPThreadsResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"stackTrace\":\n self.next_seq += 1\n body = DAPStackTraceResponseBody.create(debugger.get_stack_frames(**rq.get_arguments().as_current_kwargs()))\n DAPStackTraceResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"scopes\":\n self.next_seq += 1\n body = DAPScopesResponseBody.create(debugger.get_scopes(int(rq.get_arguments().get_frame_id())))\n DAPScopesResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"variables\":\n self.next_seq += 1\n body = DAPVariablesResponseBody.create(debugger.format_variable(**rq.get_arguments().as_current_kwargs()))\n DAPVariablesResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"pause\":\n self.next_seq += 1\n DAPPauseResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.break_pause = True\n elif rq.command == u\"next\":\n print(\"STEP\")\n self.next_seq += 1\n DAPNextResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.store_frames()\n debugger.stepping = SteppingMode.STEP_NEXT\n debugger.continue_next()\n elif rq.command == u\"stepIn\":\n self.next_seq += 1\n DAPStepInResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.store_frames()\n debugger.stepping = SteppingMode.STEP_INTO\n debugger.continue_next()\n elif rq.command == u\"stepOut\":\n self.next_seq += 1\n DAPStepOutResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.store_frames()\n debugger.stepping = SteppingMode.STEP_OUT\n debugger.continue_next()\n else:\n self.next_seq += 1\n DAPErrorResponse.create(self.next_seq, rq.seq, False, message=\"NotImplemented\").send(self._current_client)",
"def decode_message(self, raw):\n return raw.decode('utf-8')",
"def dock_complex(pose):\n #PyMOL observer assuming the initial call was already made prior to this line.\n AddPyMolObserver_to_energies(pose, True)\n # defining scoring functions (DNA + specific to structures of interests)\n fa_score = get_fa_scorefxn()\n dna_score = create_score_function('dna')\n dna_score.set_weight(fa_elec, 1)\n # movemap minimization / fast relax\n mm = MoveMap()\n mm.set_bb_true_range(\"enter beginning region\", \"enter ending region\")#min in this motif only\n relax = FastRelax()\n relax.set_scorefxn(scorefxn)\n relax.apply(pose)\n # defining specific complex docking protocol\n docking = DockMCMProtocol()\n docking.set_scorefxn(dna_score)\n docking.set_scorefxn_pack(fa_score)\n docking.set_partners(\"B_ACD\")\n # scoring pre and post docking\n dna_init = dna_score(pose)\n fa_init = fa_score(pose)\n # dockng occurs here\n docking.apply(pose)\n # scoring post docking.\n dna_final = dna_score(pose)\n fa_final = fa_score(pose)\n return [fa_init, fa_final, dna_init, dna_final]\n #raise Exception(\"Complex docking not implemented\")",
"def try_read_message(self):\n return sirf.from_bytes(self._read_binary_sirf_msg())",
"def _decode_text(self):\n\n print(f\"Hex decode; received message is {self.message}\")\n return bytes.fromhex(self.message).decode('utf-8')",
"def decode(self, remove: str = None):\n\n self.create_map()\n\n # remove the trailing padding from the flattened binary tree\n if len(self.bit_string[self.bit_string_index:]) % 8 != 0:\n self.bit_string_index += len(self.bit_string[self.bit_string_index:]) % 8\n\n # extract message and write it to a file\n message = self.encode_message(remove)\n with open('decode.txt', 'w') as output:\n output.write(message)\n\n print('Message Decoded')",
"def decrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.decrypt(message)",
"def _decode(self, message):\n raise NotImplementedError(\"_decode needs to be implemented in {} subclass\".format(type(self).__name__))",
"def decode_command(self, msg):\n (x,) = struct.unpack(\"<B\", msg[0])\n if x & (2**7) == 0:\n raise ValueError(\"Expected a command message, got a data message instead\")\n if x == 0x81:\n return (x, msg[1:])\n elif x == 0x82:\n return (x, struct.unpack(\"<L\", msg[1:5]))\n elif x == 0x83:\n return (x,)\n elif x == 0x84:\n return (x,)\n elif x == 0x85:\n return (x,)\n elif x == 0xC2:\n return (x, list(struct.unpack(\"<90B\", msg[1:])))\n elif x == 0xE2:\n return (x, [msg[1:]])\n else:\n raise ValueError(\"Unknown command message with id=%#x\" % x)",
"def hide(self, img, message):\r\n encoded = img.copy()\r\n width, height = img.size\r\n index = 0\r\n\r\n message = message + '~~~'\r\n message_bits = \"\".join(tools.a2bits_list(message))\r\n\r\n npixels = width * height\r\n if len(message_bits) > npixels * 3:\r\n return \"\"\"Too long message (%s > %s).\"\"\" \"\"\"%\"\"\"\r\n (len(message_bits), npixels * 3)\r\n\r\n for row in range(height):\r\n for col in range(width):\r\n if index + 3 <= len(message_bits) :\r\n\r\n # Get the colour component.\r\n (r, g, b) = img.getpixel((col, row))\r\n\r\n # Change the Least Significant Bit of each colour component.\r\n r = tools.setlsb(r, message_bits[index])\r\n g = tools.setlsb(g, message_bits[index+1])\r\n b = tools.setlsb(b, message_bits[index+2])\r\n\r\n # Save the new pixel\r\n encoded.putpixel((col, row), (r, g , b))\r\n\r\n index += 3\r\n\r\n return encoded\r\n self.resultLbl.SetLabel(\"Message successfully encoded.\")",
"def _decode1(self, body, data):\r\n if \" \" in body:\r\n evtype,body = body.split(\" \",1)\r\n else:\r\n evtype,body = body,\"\"\r\n evtype = evtype.upper()\r\n if evtype == \"CIRC\":\r\n m = re.match(r\"(\\d+)\\s+(\\S+)(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"CIRC event misformatted.\")\r\n ident,status,path,purpose,reason,remote = m.groups()\r\n ident = int(ident)\r\n if path:\r\n if \"PURPOSE=\" in path:\r\n remote = reason\r\n reason = purpose\r\n purpose=path\r\n path=[]\r\n elif \"REASON=\" in path:\r\n remote = reason\r\n reason = path\r\n purpose = \"\"\r\n path=[]\r\n else:\r\n path_verb = path.strip().split(\",\")\r\n path = []\r\n for p in path_verb:\r\n path.append(p.replace(\"~\", \"=\").split(\"=\")[0])\r\n else:\r\n path = []\r\n\r\n if purpose and \"REASON=\" in purpose:\r\n remote=reason\r\n reason=purpose\r\n purpose=\"\"\r\n\r\n if purpose: purpose = purpose[9:]\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n event = CircuitEvent(evtype, ident, status, path, purpose, reason,\r\n remote, body)\r\n elif evtype == \"STREAM\":\r\n #plog(\"DEBUG\", \"STREAM: \"+body)\r\n m = re.match(r\"(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)?:(\\d+)(\\sREASON=\\S+)?(\\sREMOTE_REASON=\\S+)?(\\sSOURCE=\\S+)?(\\sSOURCE_ADDR=\\S+)?(\\s+PURPOSE=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM event misformatted.\")\r\n ident,status,circ,target_host,target_port,reason,remote,source,source_addr,purpose = m.groups()\r\n ident,circ = map(int, (ident,circ))\r\n if not target_host: # This can happen on SOCKS_PROTOCOL failures\r\n target_host = \"(none)\"\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n if source: source = source[8:]\r\n if source_addr: source_addr = source_addr[13:]\r\n if purpose:\r\n purpose = purpose.lstrip()\r\n purpose = purpose[8:]\r\n event = StreamEvent(evtype, ident, status, circ, target_host,\r\n int(target_port), reason, remote, source, source_addr,\r\n purpose, body)\r\n elif evtype == \"ORCONN\":\r\n m = re.match(r\"(\\S+)\\s+(\\S+)(\\sAGE=\\S+)?(\\sREAD=\\S+)?(\\sWRITTEN=\\S+)?(\\sREASON=\\S+)?(\\sNCIRCS=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"ORCONN event misformatted.\")\r\n target, status, age, read, wrote, reason, ncircs = m.groups()\r\n\r\n #plog(\"DEBUG\", \"ORCONN: \"+body)\r\n if ncircs: ncircs = int(ncircs[8:])\r\n else: ncircs = 0\r\n if reason: reason = reason[8:]\r\n if age: age = int(age[5:])\r\n else: age = 0\r\n if read: read = int(read[6:])\r\n else: read = 0\r\n if wrote: wrote = int(wrote[9:])\r\n else: wrote = 0\r\n event = ORConnEvent(evtype, status, target, age, read, wrote,\r\n reason, ncircs, body)\r\n elif evtype == \"STREAM_BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM_BW event misformatted.\")\r\n event = StreamBwEvent(evtype, body, *m.groups())\r\n elif evtype == \"BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"BANDWIDTH event misformatted.\")\r\n read, written = map(long, m.groups())\r\n event = BWEvent(evtype, read, written, body)\r\n elif evtype in (\"DEBUG\", \"INFO\", \"NOTICE\", \"WARN\", \"ERR\"):\r\n event = LogEvent(evtype, body)\r\n elif evtype == \"NEWDESC\":\r\n ids_verb = body.split(\" \")\r\n ids = []\r\n for i in ids_verb:\r\n ids.append(i.replace(\"~\", \"=\").split(\"=\")[0].replace(\"$\",\"\"))\r\n event = NewDescEvent(evtype, ids, body)\r\n elif evtype == \"ADDRMAP\":\r\n # TODO: Also parse 
errors and GMTExpiry\r\n m = re.match(r'(\\S+)\\s+(\\S+)\\s+(\\\"[^\"]+\\\"|\\w+)', body)\r\n if not m:\r\n raise ProtocolError(\"ADDRMAP event misformatted.\")\r\n fromaddr, toaddr, when = m.groups()\r\n if when.upper() == \"NEVER\": \r\n when = None\r\n else:\r\n when = time.strptime(when[1:-1], \"%Y-%m-%d %H:%M:%S\")\r\n event = AddrMapEvent(evtype, fromaddr, toaddr, when, body)\r\n elif evtype == \"NS\":\r\n event = NetworkStatusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == \"NEWCONSENSUS\":\r\n event = NewConsensusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == \"BUILDTIMEOUT_SET\":\r\n m = re.match(\r\n r\"(\\S+)\\sTOTAL_TIMES=(\\d+)\\sTIMEOUT_MS=(\\d+)\\sXM=(\\d+)\\sALPHA=(\\S+)\\sCUTOFF_QUANTILE=(\\S+)\",\r\n body)\r\n set_type, total_times, timeout_ms, xm, alpha, quantile = m.groups()\r\n event = BuildTimeoutSetEvent(evtype, set_type, int(total_times),\r\n int(timeout_ms), int(xm), float(alpha),\r\n float(quantile), body)\r\n elif evtype == \"GUARD\":\r\n m = re.match(r\"(\\S+)\\s(\\S+)\\s(\\S+)\", body)\r\n entry, guard, status = m.groups()\r\n event = GuardEvent(evtype, entry, guard, status, body)\r\n elif evtype == \"TORCTL_TIMER\":\r\n event = TimerEvent(evtype, data)\r\n else:\r\n event = UnknownEvent(evtype, body)\r\n\r\n return event",
"def read_message(self):\n\n while True:\n try:\n return sirf.from_bytes(self._read_binary_sirf_msg())\n except sirf.UnrecognizedMessageException:\n pass",
"def decode_message(self, message):\n\n message[\"pl\"] = json.loads(message[\"pl\"])\n if message[\"pl\"][\"~c\"] != \"0\":\n decoded = base64.b64decode(message[\"pl\"][\"pl\"])\n decoded = zlib.decompress(decoded)\n message[\"pl\"][\"pl\"] = json.loads(decoded)\n return message",
"def parse_message(self, message):\n pass",
"def _decode_frame(self):\n\n self._processed.eth_frame.log(level=logging_helper.INFO)\n\n # Parse IP packets, protocol=0x8\n if hex(self._processed.eth_frame.protocol) == u'0x8':\n self._processed.ip_frame = IPFrame(self._processed.eth_frame.payload)\n self._processed.ip_frame.log(level=logging_helper.INFO)\n\n if self._processed.ip_frame.payload is not None:\n self._processed.ip_frame.payload.log(level=logging_helper.INFO)\n\n else:\n logging.info(u'Not an IP payload')\n\n logging.info(self._processed)",
"def decode_text():\n print(f\"{YELLOW}[{MIDDLE_DOT}]{RESET} Enter message to decode: \", end=\"\")\n message = input()\n extract_encoded_message = message.split(LEFT_TO_RIGHT_MARK)[1]\n message = extract_encoded_message\n extract_encoded_message = message.split(RIGHT_TO_LEFT_MARK)[0]\n encoded = ''\n decoded = ''\n\n for message_char in message:\n if message_char in zero_space_symbols:\n encoded = encoded + str(zero_space_symbols.index(message_char))\n\n cur_encoded_char = ''\n\n for index, encoded_char in enumerate(encoded):\n cur_encoded_char = cur_encoded_char + encoded_char\n if index > 0 and (index + 1) % padding == 0:\n decoded = decoded + chr(int(cur_encoded_char, len(zero_space_symbols)))\n cur_encoded_char = ''\n\n return decoded",
"def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")",
"def decode_message(buf, typedef=None, pos=0, end=None, group=False, depth=0, path=None):\n if end is None:\n end = len(buf)\n\n if typedef is None:\n typedef = {}\n else:\n # Don't want to accidentally modify the original\n typedef = copy.deepcopy(typedef)\n\n if path is None:\n path = []\n\n output = {}\n\n while pos < end:\n # Read in a field\n try:\n if six.PY2:\n tag, pos = decoder._DecodeVarint(str(buf), pos)\n else:\n tag, pos = decoder._DecodeVarint(buf, pos)\n except (IndexError, decoder._DecodeError) as exc:\n six.raise_from(DecoderException(\n \"Error decoding length from buffer: %r...\" %\n (binascii.hexlify(buf[pos : pos+8]))), exc)\n\n field_number, wire_type = wire_format.UnpackTag(tag)\n\n # Convert to str\n field_number = str(field_number)\n orig_field_number = field_number\n\n field_path = path[:]\n field_path.append(field_number)\n\n if wire_type not in blackboxprotobuf.lib.types.wire_type_defaults:\n raise DecoderException('%d is not a valid wire type at pos %d.' % (wire_type, pos), field_path)\n\n field_typedef = None\n if field_number in typedef:\n field_typedef = typedef[field_number]\n else:\n field_typedef = {}\n field_typedef['type'] = blackboxprotobuf.lib.types.wire_type_defaults[wire_type]\n\n field_type = field_typedef['type']\n\n # If field_type is None, its either an unsupported wire type, length delim or group\n # length delim we have to try and decode first\n field_out = None\n if field_type is None:\n if wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:\n out, field_type = decode_guess(buf, pos, depth=depth, path=field_path)\n if field_type == 'message':\n field_out, message_typedef, pos = out\n field_typedef['message_typedef'] = message_typedef\n else:\n field_out, pos = out\n elif wire_type == wire_format.WIRETYPE_END_GROUP:\n # TODO Should probably match the field_number to START_GROUP\n if not group:\n raise DecoderException( \"Found END_GROUP before START_GROUP\", field_path)\n # exit out\n return output, typedef, pos\n else:\n raise DecoderException(\"Could not find default type for wiretype: %d\" % wire_type, field_path)\n else:\n if field_type == 'message':\n #TODO probably big enough to factor out\n message_typedef = None\n # Check for a anonymous type\n if 'message_typedef' in field_typedef:\n message_typedef = field_typedef['message_typedef']\n # Check for type defined by message type name\n elif 'message_type_name' in field_typedef:\n message_typedef = blackboxprotobuf.lib.known_messages[\n field_typedef['message_type_name']]\n\n try:\n field_out, message_typedef, pos = decode_lendelim_message(\n buf, message_typedef, pos, path=field_path)\n # Save type definition\n field_typedef['message_typedef'] = message_typedef\n except DecoderException as exc:\n # If this is the root message just fail\n if pos == 0:\n six.reraise(*sys.exc_info())\n logging.debug(\n (\"Encountered exception when decoding message at %s \"\n \"with known typdef. Trying alt typedefs and then \"\n \"anonymous. Exception: \\n%s\"),\n \"->\".join(map(str, field_path)), str(exc))\n\n if field_out is None and 'alt_typedefs' in field_typedef:\n # check for an alternative type definition\n for alt_field_number, alt_typedef in field_typedef['alt_typedefs'].items():\n try:\n field_out, message_typedef, pos = decode_lendelim_message(\n buf, alt_typedef, pos, path=field_path)\n except DecoderException as exc:\n logging.debug(\n (\"Encountered exception when decoding message at %s with alt_typedef %s. Trying anonymous decoding next. 
Exception:\\n%s\"),\n \"->\".join(map(str, field_path)),\n str(alt_field_number),\n str(exc))\n\n if field_out is not None:\n # Found working typedef\n field_typedef['alt_typedefs'][alt_field_number] = message_typedef\n field_number = field_number + \"-\" + alt_field_number\n break\n\n if field_out is None:\n # Still no typedef, try anonymous, and let the error propogate if it fails\n field_out, message_typedef, pos = \\\n decode_lendelim_message(buf, {}, pos, path=field_path)\n\n if 'alt_typedefs' in field_typedef:\n # get the next higher alt field number\n alt_field_number = str(\n max(map(int, field_typedef['alt_typedefs'].keys()))\n + 1)\n else:\n field_typedef['alt_typedefs'] = {}\n alt_field_number = '1'\n\n field_typedef['alt_typedefs'][alt_field_number] = message_typedef\n field_number = field_number + \"-\" + alt_field_number\n elif field_type == 'group':\n group_typedef = None\n # Check for a anonymous type\n if 'group_typedef' in field_typedef:\n group_typedef = field_typedef['group_typedef']\n field_out, group_typedef, pos = \\\n decode_group(buf, group_typedef, pos, depth=depth, path=field_path)\n # Save type definition\n field_typedef['group_typedef'] = group_typedef\n else:\n # Verify wiretype matches\n if blackboxprotobuf.lib.types.wiretypes[field_type] != wire_type:\n raise DecoderException(\n \"Invalid wiretype for field number %s. %s is not wiretype %s\"\n % (field_number, field_type, wire_type), field_path)\n\n # Simple type, just look up the decoder\n try:\n field_out, pos = blackboxprotobuf.lib.types.decoders[field_type](buf, pos)\n except DecoderException as exc:\n exc.set_path(field_path)\n six.reraise(*sys.exc_info())\n field_typedef['type'] = field_type\n if 'name' not in field_typedef:\n field_typedef['name'] = ''\n\n field_key = field_number\n if '-' not in field_number and 'name' in field_typedef and field_typedef['name'] != '':\n field_key = field_typedef['name']\n # Deal with repeats\n if field_key in output:\n if isinstance(field_out, list):\n if isinstance(output[field_key], list):\n output[field_key] += field_out\n else:\n output[field_key] = field_out.append(output[field_key])\n else:\n if isinstance(output[field_key], list):\n output[field_key].append(field_out)\n else:\n output[field_key] = [output[field_key], field_out]\n else:\n output[field_key] = field_out\n typedef[orig_field_number] = field_typedef\n if pos > end:\n raise DecoderException(\n \"Field sizes are greater than designated length. pos: %d end_pos: %d\" % (pos, end))\n # Should never hit here as a group\n if group:\n raise DecoderException(\"Got START_GROUP with no END_GROUP.\")\n return output, typedef, pos",
"def process(self, message):\n try:\n self.messages.remove(message)\n except ValueError:\n pass # nothing to see here, just a message that was already processed and is not on the list any more\n except Exception as e:\n print('error removing message from self.message:', e)\n \n try:\n if message['type'] in [\"ticker\"]:\n self.process_tickers(message)\n elif message['type'] in [\"snapshot\", \"l2update\"]:\n self.process_orderbook(message)\n elif message['type'] in [\"received\",\"open\",\"done\",\"match\",\"change\",\"activate\"] and 'user' in self.data:\n self.process_orders(message)\n except Exception as e:\n raise Exception(\"Process raised an error: {}\\n\\t{}\".format(e,message))",
"def rawMessageReceived( self, message ):\n pdu = None\n try:\n pdu = self.encoder.decode(StringIO.StringIO(message))\n except PDUCorruptError, e:\n self.log.exception(e)\n self.log.critical(\"Received corrupt PDU %s\" % _safelylogOutPdu(message))\n self.corruptDataRecvd(status=e.status)\n except PDUParseError, e:\n self.log.exception(e)\n self.log.critical(\"Received unparsable PDU %s\" % _safelylogOutPdu(message))\n header = self.getHeader(message)\n seqNum = header.get('sequence_number', None)\n commandId = header.get('command_id', None)\n self.sendPDU(getPDUClass(commandId).requireAck(seqNum=seqNum, status=e.status))\n else:\n self.PDUReceived(pdu)"
] | [
"0.5267118",
"0.49720767",
"0.4961222",
"0.49351683",
"0.49119562",
"0.48676527",
"0.48319846",
"0.48317653",
"0.4825341",
"0.47967404",
"0.4766257",
"0.4739263",
"0.47169322",
"0.46567076",
"0.46360305",
"0.4620986",
"0.46061063",
"0.45974302",
"0.45940566",
"0.45775744",
"0.45680958",
"0.45532554",
"0.4550225",
"0.4544361",
"0.45274726",
"0.45266202",
"0.45041028",
"0.449174",
"0.44912878",
"0.44895273"
] | 0.6553048 | 0 |
Creates a new instance of PIDController provided with a goal and the individual gains | def __init__(self, goal=0, kP=1, kI=1, kD=1, init_pt=0):
self._pid_lock = threading.Lock()
self.set_goal(goal)
self.reset(init_pt)
self.set_gains({
PIDController.KP_KEY: kP,
PIDController.KI_KEY: kI,
PIDController.KD_KEY: kD
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, kp, ki, kd, tolerance,\n saturation=None, max_integral=None, integral_fade=1.0):\n super().__init__()\n self.tolerance = tolerance\n self.controller = pidController(kp, ki, kd, saturation=saturation,\n max_integral=max_integral, integral_fade_rate=integral_fade)",
"def build_controller(self) -> None:\n self._controller = self._sim._sim.make_greedy_follower(\n 0,\n self._goal_radius,\n stop_key=HabitatSimActions.STOP,\n forward_key=HabitatSimActions.MOVE_FORWARD,\n left_key=HabitatSimActions.TURN_LEFT,\n right_key=HabitatSimActions.TURN_RIGHT,\n )",
"def create_controller() -> Controller:\n _controller = Controller()\n return _controller",
"def create_entity(self):\n \n if self.ORION_CB.get_entity(self.params['name']) is None:\n \n print('[INFO]: Create new PID entity')\n \n entity_dict = {\"id\":self.params['name'], \"type\":'PID_controller'}\n for attr in ['Kp', 'Ti', 'Td', 'lim_low', 'lim_high', 'setpoint']:\n entity_dict.update({attr:{'value':self.params[attr],'type':'Number'}})\n\n entity_dict.update({'reverse_act':{'value':self.params['reverse_act'],'type':'Text'}})\n \n entity = filip.orion.Entity(entity_dict)#, attrs)\n\n self.ORION_CB.post_entity(entity)\n \n else:\n print('Entity name already assigned')",
"def init_controller(self, mode, p=None, d=None, torque=None):\n self.set_mode(Mode[mode])\n\n if p is not None:\n self.kp = p\n elif Mode[mode] == Mode.JOINT_IMP_CTRL:\n self.kp = [32000, 32000, 32000, 32000, 32000, 32000, 32000]\n elif Mode[mode] == Mode.CART_IMP_CTRL:\n self.kp = [2000, 2000, 2000, 20, 20, 20, None]\n # self.kp = [0.0, 0.00, 0.00, 0.00, 0.00, 0.00, None]\n #self.kp = [2, 2, 0.9, 0.2, 0.2, 0.2] + [None]\n #self.kp = [0.2, 0.00, 0.00, 0.00, 0.00, 0.00, None]\n\n if d is not None:\n self.kd = d\n elif Mode[mode] == Mode.JOINT_IMP_CTRL:\n self.kd = [15, 15, 15, 15, 15, 15, 15]\n elif Mode[mode] == Mode.CART_IMP_CTRL:\n self.kd = [0.7, 0.7, 0.7, 0.7, 0.7, 0.7, None]\n #self.kd = [0.005]*6 + [None]\n\n if torque is not None:\n self.torque = torque\n else:\n self.torque = np.zeros(7)",
"def __init__(self, goal_pos=0.72, *args, **kwargs):\n super(Goalie, self).__init__(*args, **kwargs)\n self.goal=goal_pos",
"def __init__(self, objective):\n self.objective = objective\n\n # Initialize players\n # We use three dummy player for the target position\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n\n # Initialize the internal environment\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None",
"def get_controller(self) -> PIDController:\n return deepcopy(self._controller)",
"def goal(self, goal_id):\r\n return Goal(self, goal_id)",
"def __init__(self, translation_gains, rotation_gains):\n # Get PID gains.\n self.translation_pid = PIDController(translation_gains[0], translation_gains[1], translation_gains[2])\n self.rotation_pid = PIDController(rotation_gains[0], rotation_gains[1], rotation_gains[2])\n\n # Kinematic state variables.\n self.pose_curr = Pose2D(0, 0, 0)\n self.pose_prev = Pose2D(0, 0, 0)\n self.pose_diff = Pose2D(0, 0, 0)\n self.velocity_curr = Pose2D(0, 0, 0)\n self.velocity_prev = Pose2D(0, 0, 0)\n self.velocity_diff = Pose2D(0, 0, 0)\n\n # Kinematic target variables.\n self.pose_target = Pose2D(0, 0, 0)\n self.velocity_target = Pose2D(0, 0, 0)\n\n # Timestamps.\n self.timestamp_curr = 0.0\n self.timestamp_prev = 0.0\n self.timestamp_diff = 0.0\n\n # Error-related variables.\n self.error_integral = Pose2D(0, 0, 0)\n self.error_differential = Pose2D(0, 0, 0)",
"def createController(self, id):\n plugs = self._instancePlugins()\n new_controller = plugins.core.PluginController(id, plugs, CommandManager())\n self._controllers[new_controller.id] = new_controller\n return new_controller",
"def __init__(self, source: PIDSource, ffGains: Sequence[float], fbGains: Sequence[float]) -> None:\n self.inputs = ...\n self.outputs = ...\n self.inputGains = ...\n self.outputGains = ...",
"def create_controller(self, typ):\n return self.controller_objects[typ]()",
"def __init__(self, epsilon=0.05, gamma=0.95, alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n PQLearningAgent.__init__(self, **args)",
"def __init__(self,\n learning_rates=[0.1, 0.00025],\n state_sizes=[0, 0],\n constraints=None,\n num_constraints=0,\n num_primitive_actions=0,\n num_controllers=0,\n num_controllers_per_subtask=0,\n num_communication_turns=0,\n critic_fn=None,\n controller_subset_fn=None):\n self._meta_controller_state_size = state_sizes[0]\n\n self._num_controllers = num_controllers\n # Number of controllers that communicate to complete a subtask.\n self._num_controllers_per_subtask = num_controllers_per_subtask\n\n # A controller's state size is the input state size (the environment state)\n # + the ordering vector size (num_controllers_per_subtask)\n # + the communication vectors from the communication rounds and output round\n # (num_communication_turns * num_primitive_actions).\n self._controller_state_size = state_sizes[1] \n self._controller_state_size += self._num_controllers_per_subtask\n self._controller_state_size += num_communication_turns * num_primitive_actions\n\n self._meta_controller = DqnAgent(\n state_dims=state_sizes[0],\n num_actions=num_constraints,\n learning_rate=learning_rates[0],\n epsilon_end=0.01)\n\n self._controller = DqnAgent(\n learning_rate=learning_rates[1],\n num_actions=num_primitive_actions,\n state_dims=[self._controller_state_size],\n epsilon_end=0.01)\n\n self._constraints = constraints\n self._num_constraints = num_constraints\n self._num_primitive_actions = num_primitive_actions\n self._num_communication_turns = num_communication_turns\n self._critic_fn = critic_fn\n self._controller_subset_fn = controller_subset_fn\n\n self._intrinsic_time_step = 0\n self._episode = 0\n\n # Book-keeping variables.\n # Keeps track of the current meta-controller state.\n self._meta_controller_state = None\n # Keeps track of the current action selected by the meta-controller.\n self._curr_constraint = None\n # Keeps track of the meta-controller's reward for the current meta-controller time step.\n self._meta_controller_reward = 0\n\n # Keeps track of the constraints tried for current controller subset.\n self._tried_constraints = self.reset_tried_constraints()\n # Keeps track of controllers who have completed coordination in the current episode.\n self._done_controllers = []",
"def __new__(cls, *args, **kwargs):\n\n instance = super(PGM, cls).__new__(cls)\n instance.timer = Timer(['init', 'solve', 'solve_wo_func',\n 'solve_wo_rsdl', 'solve_wo_btrack'])\n instance.timer.start('init')\n return instance",
"def _PIDController__calculate(self): \n \n\t\tenabled = self.m_enabled\n\t\tpidInput = self.m_pidInput\n\t\tpidInput2 = self.source2\n\n\t\tif enabled:\n\n\t\t input = pidInput.PIDGet() - pidInput2.PIDGet()\n\n\t\t self.m_error = self.m_setpoint - input\n\t\t if self.m_continuous:\n\t\t \n\t\t if math.fabs(self.m_error) > (self.m_maximumInput - self.m_minimumInput) / 2:\n\t\t if self.m_error > 0:\n\t\t self.m_error = self.m_error - self.m_maximumInput + self.m_minimumInput\n\t\t else:\n\t\t self.m_error = self.m_error + self.m_maximumInput - self.m_minimumInput\n\n\t\t potentialIGain = (self.m_totalError + self.m_error) * self.m_I\n\t\t \n\t\t if potentialIGain < self.m_maximumOutput:\n\t\t if potentialIGain > self.m_minimumOutput:\n\t\t self.m_totalError += self.m_error\n\t\t else:\n\t\t self.m_totalError = self.m_minimumOutput / self.m_I\n\t\t else:\n\t\t self.m_totalError = self.m_maximumOutput / self.m_I\n\n\t\t self.m_result = self.m_P * self.m_error + self.m_I * self.m_totalError + self.m_D * (self.m_error - self.m_prevError)\n\t\t self.m_prevError = self.m_error\n\n\t\t if self.m_result > self.m_maximumOutput:\n\t\t self.m_result = self.m_maximumOutput\n\t\t elif self.m_result < self.m_minimumOutput:\n\t\t self.m_result = self.m_minimumOutput\n\n\t\t pidOutput = self.m_pidOutput\n\t\t result = self.m_result",
"def _PIDController__calculate(self): \n \n\t\tenabled = self.m_enabled\n\t\tpidInput = self.m_pidInput\n\n\t\tif enabled:\n\n\t\t input = pidInput.PIDGet()\n\n\t\t self.m_error = self.m_setpoint - input\n\t\t if self.m_continuous:\n\t\t \n\t\t if math.fabs(self.m_error) > (self.m_maximumInput - self.m_minimumInput) / 2:\n\t\t if self.m_error > 0:\n\t\t self.m_error = self.m_error - self.m_maximumInput + self.m_minimumInput\n\t\t else:\n\t\t self.m_error = self.m_error + self.m_maximumInput - self.m_minimumInput\n\n\t\t potentialIGain = (self.m_totalError + self.m_error) * self.m_I\n\t\t \n\t\t if potentialIGain < self.m_maximumOutput:\n\t\t if potentialIGain > self.m_minimumOutput:\n\t\t self.m_totalError += self.m_error\n\t\t else:\n\t\t self.m_totalError = self.m_minimumOutput / self.m_I\n\t\t else:\n\t\t self.m_totalError = self.m_maximumOutput / self.m_I\n\n\t\t self.m_result = self.m_P * self.m_error + self.m_I * self.m_totalError + self.m_D * (self.m_error - self.m_prevError)\n\t\t self.m_prevError = self.m_error\n\n\t\t if self.m_result > self.m_maximumOutput:\n\t\t self.m_result = self.m_maximumOutput\n\t\t elif self.m_result < self.m_minimumOutput:\n\t\t self.m_result = self.m_minimumOutput\n\n\t\t pidOutput = self.m_pidOutput\n\t\t result = self.m_result",
"def _setup_actor_critic_agent(self, ppo_cfg: Config) -> None:\n logger.add_filehandler(self.config.LOG_FILE)\n\n self.actor_critic = PointNavBaselinePolicy(\n observation_space=self.envs.observation_spaces[0],\n action_space=self.envs.action_spaces[0],\n hidden_size=ppo_cfg.hidden_size,\n goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,\n )\n self.actor_critic.to(self.device)\n\n self.agent = PPO(\n actor_critic=self.actor_critic,\n clip_param=ppo_cfg.clip_param,\n ppo_epoch=ppo_cfg.ppo_epoch,\n num_mini_batch=ppo_cfg.num_mini_batch,\n value_loss_coef=ppo_cfg.value_loss_coef,\n entropy_coef=ppo_cfg.entropy_coef,\n lr=ppo_cfg.lr,\n eps=ppo_cfg.eps,\n max_grad_norm=ppo_cfg.max_grad_norm,\n use_normalized_advantage=ppo_cfg.use_normalized_advantage,\n )",
"def build_perception(\n self,\n view_dist=None,\n directions=None,\n openings=None,\n num_neighbors=None,\n diff_threshold=None,\n ):\n self.perception = None\n if self.border is None:\n raise ValueError(\"Border is not initialized\")\n if (\n directions is None\n and openings is None\n and view_dist is None\n and num_neighbors is None\n and diff_threshold is None\n ):\n raise ValueError(\"All perception parameters are None\")\n\n if view_dist is not None:\n self.perception = Range(view_dist, self.border, self.perception)\n\n if not (directions is None and openings is None):\n for i in range(len(directions)):\n self.perception = BlindSpot(\n directions[i] / 180 * pi,\n openings[i] / 180 * pi,\n self.border,\n self.perception,\n )\n\n if num_neighbors is not None:\n self.perception = KNN(num_neighbors, self.border, self.perception)\n\n if diff_threshold is not None:\n self.perception = Outlier(diff_threshold, self.border, self.perception)",
"def __init__(self, initial, goal):\n self.initial = initial; self.goal = goal",
"def __init__(self, initial, goal=(3, 3, 0, 0, 0)):\n\n self.goal = goal\n Problem.__init__(self, initial, goal)",
"def __init__(self, controller):\n self._controller = controller",
"def __init__(self, initial, goal=None):\n self.initial = initial\n self.goal = goal",
"def __init__(self, goal_pos=7.5, *args, **kwargs):\n super(Attacker, self).__init__(*args, **kwargs)\n self.goal_p=goal_pos\n goal = (-1.5+3*np.random.rand(), self.goal_p)\n self.path, self.status = passPath(self.getRobotConf(self.bot), self.ballEngine.getBallPose(), goal, kick=True)",
"def make_decision_with_policy(self, policy_type, *args):\n if policy_type == 1: # ADP\n assert len(args) == 2, 'args should be exactly 2'\n cur_K = -self.K_im_traj[-1]\n distance_2_tan, radian_at_tan = args\n self.dis_sum += distance_2_tan\n pwm_l_new, pwm_r_new = policy.adp(distance_2_tan, radian_at_tan, self.dis_sum, cur_K)\n elif policy_type == 2: # pure pursuit\n l_d, sin_alpha = args\n amp = 150\n pwm_l_new, pwm_r_new = policy.pure_pursuit(l_d, sin_alpha, amp)\n elif policy_type == 3: # Car following with ADP\n assert len(args) == 3, 'args should be exactly 3'\n cur_K = -self.K_im_traj[-1]\n distance_2_tan, radian_at_tan, estimated_dis = args\n self.dis_sum += distance_2_tan\n if self.is_recording and self.counter % 100 == 0:\n np.save('./.out/record', self.record)\n pwm_l_new, pwm_r_new = policy.car_following_with_adp(distance_2_tan, radian_at_tan, self.dis_sum, cur_K, estimated_dis, self.record)\n print(self.counter)\n self.counter += 1\n elif policy_type == 4:\n K = 0.5\n dis2car, = args\n pwm_l_new, pwm_r_new = policy.car_following(dis2car, K)\n elif policy_type == 5:\n d_arc, d_curve, theta = args\n pwm_l_new, pwm_r_new = policy.adp_coupled_car_following(d_arc, d_curve, theta, self.z, self.K_coupled)\n else:\n pwm_l_new, pwm_r_new = 0, 0\n print('Policy Not Found')\n self.motor.motor_set_new_speed(pwm_l_new, pwm_r_new)",
"def pid_controller_calculator(pid_parameters, error, duration=0.17):\n k_p = pid_parameters[0]\n k_i = pid_parameters[1]\n k_d = pid_parameters[2]\n error = np.array(error)\n proportional_term = k_p * error[-1]\n integrational_term = k_i * duration * np.sum(error)\n differential_term = k_d * (error[-1] - error[-2]) / duration\n output = np.sqrt(\n (proportional_term + integrational_term + differential_term) * 22)\n return output",
"def __init__(self, upstream=None, downstream=None,\n name='', master = None, Kv = 0.0, verbose=0): \n global _pccount\n if name == '':\n name = 'PressureController_'+`_pccount`\n _pccount += 1\n FlowDevice.__init__(self,2,name,verbose)\n if upstream and downstream:\n self.install(upstream, downstream)\n self.setPressureCoeff(Kv)\n self.setMaster(master)",
"def __init__(self, goal):\n self.goal = None\n self.goal_state_value_dict = dict()\n self.num_goals_to_satisfy = 0\n self.set_goal(goal)",
"def __init__(self, pitch, color, our_side, video_port=0, comm_port='/dev/ttyACM0', penalty=False, comms=1):\n assert pitch in [0, 1]\n assert color in ['yellow', 'blue']\n assert our_side in ['left', 'right']\n\n self.pitch = pitch\n\n # Set up the Arduino communications\n self.arduino = Arduino(comm_port, 115200, 1, comms)\n\n # Set up camera for frames\n self.camera = Camera(port=video_port, pitch=self.pitch)\n frame = self.camera.get_frame()\n center_point = self.camera.get_adjusted_center(frame)\n\n # Set up vision\n self.calibration = tools.get_colors(pitch)\n self.vision = Vision(\n pitch=pitch, color=color, our_side=our_side,\n frame_shape=frame.shape, frame_center=center_point,\n calibration=self.calibration)\n\n # Set up postprocessing for vision\n self.postprocessing = Postprocessing(our_side)\n\n # Set up main planner\n self.planner = Planner(our_side=our_side, pitch_num=self.pitch, isPenalty=penalty)\n\n # Set up GUI\n self.GUI = GUI(calibration=self.calibration, arduino=self.arduino, pitch=self.pitch)\n\n self.color = color\n self.side = our_side\n\n self.preprocessing = Preprocessing()\n\n self.robot = Robot_Controller()"
] | [
"0.6518662",
"0.54904646",
"0.54119706",
"0.5255919",
"0.5183708",
"0.50222105",
"0.50092596",
"0.49834642",
"0.49521568",
"0.49350682",
"0.49029905",
"0.49026063",
"0.48764795",
"0.48754042",
"0.48738855",
"0.48700404",
"0.48634544",
"0.48603043",
"0.4852328",
"0.4851333",
"0.48383272",
"0.4837087",
"0.48085332",
"0.48078334",
"0.48008093",
"0.4799244",
"0.47986177",
"0.47924644",
"0.4769046",
"0.47663143"
] | 0.6670641 | 0 |
Resets the previous error time. | def reset_time(self):
self._prev_error_time = time.time() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_error(self):\n\t\t\n\t\tself.error = None",
"def reset(self):\n\n self.elapsed_time = 0",
"def reset_error_state(self):\n self.error_state = Error.none\n self.error_info = ''",
"def _reset(self) -> ts.TimeStep:",
"def reset(self):\n self._timestep = np.array([0])",
"def reset(self, init_pt=0):\n self._pid_lock.acquire() # Acquire Lock\n\n self._error_sum = 0\n self._delta_error = 0\n self._curr_err = init_pt - self._goal\n\n self._prev_error_time = time.time()\n\n self._pid_lock.release() # Release Lock",
"def reset(self):\n self._start_time = None\n self.time_left = None",
"def reset(self):\n self.error_p = 0.0\n self.error_i = 0.0\n self.error_d = 0.0\n self.errors = [ 0.0 ] * self.samples\n if callable(self.debug_callback):\n self.debug_callback(\"reset\")",
"def reset(self):\n self.cumtime = 0\n self.start_time = self.time()",
"def reset(self, time):\n for key in self.data['step']:\n self.data['step'][key] = None\n\n self.time = time",
"def reset(self):\n self.control_counter = 0\n self.last_position_error = np.zeros(3)\n self.integral_position_error = np.zeros(3)\n self.last_attitude_error = np.zeros(3)\n self.integral_attitude_error = np.zeros(3)",
"def clear(self) -> None:\n self._last_err = 0",
"def clear_error(self):\n self.got_error = False",
"def reset(self):\n self.logger.debug(\"Resetting...\")\n pass",
"def reset_timer(self):\r\n self.time_minutes = 0\r\n self.time_seconds = 0",
"def reset(self):\n super().reset()\n self.m_n = 1\n self.m_num_errors = 0\n self.m_d = 0\n self.m_lastd = 0\n self.m_mean = 0.0\n self.m_std_temp = 0.0\n self.m_m2s_max = 0.0\n self.estimation = 0.0",
"def reset_cause():",
"def reset_time_out(self):\n self.reconnect()\n self.reconnect_params()",
"def reset_sum(self):\n self._error_sum = 0",
"def reset(self):\n self.integral = 0.0\n self.previous_error = 0.0",
"def reset_error(self):\n self.dLdu = np.zeros(self.u.shape)\n self.dLdw = np.zeros(self.w.shape)\n self.dLdv = np.zeros(self.v.shape)\n self.dLdb = np.zeros(self.b.shape)",
"def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)",
"def reset(self):\n\t\tself._initial = None\n\t\tself._start = None\n\t\tself._time = 0\n\t\tself._total = 0\n\t\treturn self",
"def reset_average(self):\n self._total_time = 0\n self._average_time = 0\n self._calls = 0",
"def reset_timer():\n resetTimer = time.time()\n target_time.clear()\n target_time.append(resetTimer)",
"def reset():\r\n pass",
"def reset():",
"def reset():",
"def reset():",
"def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step"
] | [
"0.77852577",
"0.7318812",
"0.72486573",
"0.7211997",
"0.7106713",
"0.70843196",
"0.7006914",
"0.69474334",
"0.6890593",
"0.6766076",
"0.6753054",
"0.66990274",
"0.6608443",
"0.6580561",
"0.6562556",
"0.6502796",
"0.64796436",
"0.64674515",
"0.6453745",
"0.64373565",
"0.6433744",
"0.642772",
"0.63928527",
"0.6382339",
"0.63724905",
"0.6368453",
"0.6364214",
"0.6364214",
"0.6364214",
"0.63295573"
] | 0.9294501 | 0 |
Sets the goal for the PID controller. | def set_goal(self, goal):
self._pid_lock.acquire() # Acquire Lock
self._goal = goal
self._pid_lock.release() # Release Lock | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_goal(self, **kwargs):\n return self.env.set_goal(**kwargs)",
"def goal(self, goal):\n\n self._goal = goal",
"def set_goal(self, goal: GoalType) -> None:\n self.goal = goal",
"def set_goal(self, x):\n self.controllers[0].set_goal(x)\n self.controllers[1].set_goal(x)",
"def set_goal(self, goal):\r\n self.goal = goal\r\n self.start_time = self.get_current_time()",
"def set_goal(goal_loc):\n BoardPath._goal_loc = goal_loc",
"def set_pid(self, pid): # type: (int) -> None\n for i in range(len(self.__target_pids)):\n if self.__target_pids[i] == \"$$TBD\":\n self.__target_pids[i] = pid\n break",
"def setGoalNode(self, newGoal):\r\n\t\tself.goalNode = newGoal",
"def _set_task(self, goal):\n if goal.actionID == 'dh_change':\n self.dh_change(goal)\n elif goal.actionID == 'set_rcvel':\n self.set_rcvel(goal)\n elif goal.actionID == 'gate_pass':\n self.gate_pass(goal)\n elif goal.actionID == 'object_center':\n self.object_center(goal)\n elif goal.actionID == 'arm':\n self.arm(goal.arm)\n elif goal.actionID == 'rc_off':\n self.rc_off()\n else:\n rospy.loginfo('%s actionID not recognized'%goal.actionID)",
"def update_goal(self):\n pass",
"def assign_goal(self, goal_index):\n gearbox_index = int(np.floor(goal_index / self.cables_per_gearbox))\n cable_index = goal_index - gearbox_index * self.cables_per_gearbox\n # Activate the goal\n self.gearboxes[gearbox_index].hub_cable_goals[cable_index] = 1.",
"def def_pid(self,pid):\n self.pid=int(pid)",
"def _update_PID(self):\n self.pid = PID(p=self.paramP, i=self.paramI, d=self.paramD, setpoint=self.voltageSetpoint, memory=self.paramMemory)",
"def setPtr(self, newPtr):\n self.goalPtr = newPtr",
"def set_goal(self, robot_id, task, pub_msg): \n pub_names = self.goal_pubs.keys()\n pub_objs = self.goal_pubs.values()\n for i in range(len(pub_names)):\n if robot_id == int(pub_names[i]):\n Goal = MoveBaseActionGoal()\n Goal.header.stamp = rospy.Time.now()\n Goal.header.frame_id = ''\n Goal.goal_id.stamp = rospy.Time.now()\n Goal.goal_id.id = str(int(task[0]))\n Goal.goal.target_pose.header.stamp = rospy.Time.now()\n Goal.goal.target_pose.header.frame_id = 'map'\n Goal.goal.target_pose.pose.position.x = task[1]\n Goal.goal.target_pose.pose.position.y = task[2]\n z_rot_rad = task[3] * np.pi / 180\n q = quaternion_from_euler(0, 0, z_rot_rad)\n Goal.goal.target_pose.pose.orientation.z = q[2]\n Goal.goal.target_pose.pose.orientation.w = q[3]\n pub_obj = pub_objs[i]\n pub_obj.publish(Goal)\n print(\"Goal set for robot \" + str(robot_id) + \". Task id: \" + str(int(task[0])) + \".\")\n msg_str = \"Goal set for robot \" + str(robot_id) + \". Task id: \" + str(int(task[0])) + \". Time: %s\" % rospy.Time.now().to_sec()\n pub_msg.publish(msg_str)\n break\n else:\n pass",
"def pid(self, pid):\n\n self._pid = pid",
"def pid(self, pid):\n\n self._pid = pid",
"def set_goal(self,pos):\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.header.stamp = rospy.Time.now()\n mygoal = Pose(Point(pos[0],pos[1],0),Quaternion(0,0,0,1))\n goal.target_pose.pose = mygoal\n self.move_base.send_goal(goal)",
"def __init__(self, goal=0, kP=1, kI=1, kD=1, init_pt=0):\n self._pid_lock = threading.Lock()\n\n self.set_goal(goal)\n self.reset(init_pt)\n self.set_gains({\n PIDController.KP_KEY: kP,\n PIDController.KI_KEY: kI,\n PIDController.KD_KEY: kD\n })",
"def set_goal(self, traj_point, traj_point_sigma=1e-6):\n self.add_viapoint(1., traj_point, traj_point_sigma)",
"def set_goal_done(self):\n self.has_goal = False\n self.last_goal_wait = False",
"def set_pid(self, pid, value):\n if type(value) in (list, tuple):\n value = \",\".join(map(hex, value))\n cmd = \"ATSET {}={}\\r\".format(pid, value)\n self.sendCMD(cmd)",
"def setProgress(self, prog):\n\t\tself.progress = prog",
"def setGoalLength(self, length):\n assert isinstance(length, int)\n self.goal_length = length",
"async def goal(self, ctx, channel : discord.Channel, goal : int):\r\n \r\n server = ctx.message.server\r\n if server.id not in self.set:\r\n await self.bot.say(\":x: Uninitialized server!\")\r\n return\r\n if channel.id not in self.set[server.id][\"channels\"]:\r\n await self.bot.say(\":x: This is not a counting channel!\")\r\n return\r\n self.set[server.id][\"channels\"][channel.id][\"goal\"] = goal\r\n self.save()\r\n current_count = self.set[server.id][\"channels\"][channel.id][\"count\"]\r\n if goal > 0:\r\n await self.bot.edit_channel(channel,topic = \"Next message must start with {} | Reach {} to complete.\".format(current_count+1,goal))\r\n else:\r\n await self.bot.edit_channel(channel,topic = \"Next message must start with {}\".format(current_count+1))\r\n await self.bot.say(\"Channel goal set to {}!\".format(goal))",
"def set_setpoint():\n setpoint = request.params.get(\"setpoint\", 0, type=float)\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetSetpoint(pid, ctypes.c_float(setpoint))\n if retval != 0:\n LOG.error(\"Failed to set PID setpoint. Error code: %s\", ERROR_CODES[retval])\n LOG.info(\"setpoint: %f\", setpoint)\n LOG.info(\"PID: %d\", pid)",
"def set_parameter_and_step(self, pname, value, nstep=2, warning_action=\"default\"):\n setattr(self.p, pname, value)\n with warnings.catch_warnings():\n warnings.simplefilter(warning_action)\n for _ in range(nstep):\n self.step()",
"def set_pid(self, kp=None, ki=None, kd=None):\n if kp is not None:\n self.k_p = kp\n if ki is not None:\n self.k_i = ki\n if kd is not None:\n self.k_d = kd\n\n self.reset_sum()",
"def __init__(self, goal_pos=0.72, *args, **kwargs):\n super(Goalie, self).__init__(*args, **kwargs)\n self.goal=goal_pos",
"def set_curr_value(self, val):\n # only goal that is in progress can have it's current value changed\n if self._status != EGoalStatus.IN_PROGRESS:\n raise NotImplementedError('Cannot set value to finished or not started goal')\n # try cast to int - mainly for QuantifiedGoal representation\n val = self.fw.types.try_float_cast(val)\n # update both in the stages object and in raw data\n self._values[EStage.CURRENT] = self._data_process(val)\n self._skeleton.curr_value = val\n # use progressor to update the database\n self._progressor.dump_to_database(self)"
] | [
"0.72353566",
"0.6878171",
"0.6867699",
"0.686121",
"0.6811669",
"0.66538084",
"0.6361599",
"0.6180388",
"0.6142914",
"0.60614246",
"0.60093695",
"0.59347427",
"0.5907836",
"0.5900681",
"0.58977747",
"0.5873667",
"0.5873667",
"0.5847958",
"0.57549393",
"0.5752069",
"0.5594057",
"0.55318624",
"0.5454622",
"0.5418522",
"0.5382233",
"0.53742605",
"0.5362134",
"0.5328257",
"0.5301443",
"0.5296927"
] | 0.7567917 | 0 |
Retrieves the goal of the PID Controller. | def get_goal(self):
self._pid_lock.acquire() # Acquire Lock
rtn = self._goal
self._pid_lock.release() # Release Lock
return rtn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_goal(self):\n return self.get_observation(self.env._get_goal())",
"def goal(self):\n return self._build_goal",
"def goal(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"goal\")",
"def get_goal_msg(self):\n goal_handle = self._as.current_goal\n \n goal = goal_handle.get_goal()\n return goal",
"def getGoalNode(self):\r\n\t\treturn self.goalNode",
"def goal_pos(self) -> Pt:\n return self._goal",
"def getPath(self):\r\n\t\treturn self.pathToGoal",
"def get_goal(self) -> GoalType:\n return self.goal",
"def build_goal(self):\n return self._build_goal",
"def target(self):\n return self.problem.target",
"def target(self) -> Optional[int]:\n return pulumi.get(self, \"target\")",
"def get_PID(self):\n return self.PID",
"def find_goal(self, concl, goal_id):\n prf = self.prf\n try:\n for n in goal_id:\n for item in prf.items[:n]:\n if item.th is not None and item.th.can_prove(concl):\n return item.id\n prf = prf.items[n].subproof\n except (AttributeError, IndexError):\n raise TacticException()",
"def pidGet(self) -> float:\n ...",
"def pidGet(self) -> float:\n ...",
"def getPID(self):\r\n self._update('getPID')\r\n return self.supervisord.options.get_pid()",
"def get_target(self, ):\n return self.get_parameter('target')",
"def _get_controller(self):\n return self.__controller",
"def goal_pwm(self):\n return self._read(MX_GOAL_PWM)",
"def get_controller(self) -> PIDController:\n return deepcopy(self._controller)",
"def controller_status(self) -> Optional['outputs.CSIPowerMaxStatusControllerStatus']:\n return pulumi.get(self, \"controller_status\")",
"def primary_step(self) -> 'outputs.PrimaryStepResponse':\n return pulumi.get(self, \"primary_step\")",
"def controller(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"controller\")",
"def goal(self, goal_id):\r\n return goals.Goal(self, goal_id)",
"def get_controller1(self):\n return self.__controller1",
"def getDrone(self):\n return self._drone",
"def get_current_controller():\n controllers = parse_yaml_file(JUJU_CONTROLLERS_YAML)\n return controllers.get(\"current-controller\", \"\")",
"def getController(self):\n return self.__controller",
"def getAction(self, gameState):\n # finding the optimal move for pacman by calling max_value first using the current state\n val, move = self.max_value(state=gameState)\n return move",
"def _http_get_current_id(self):\n return self._http_request('').json()['currentplid']"
] | [
"0.6787776",
"0.6286115",
"0.61180484",
"0.6092645",
"0.58849204",
"0.5828273",
"0.5645308",
"0.5632437",
"0.5594528",
"0.5534305",
"0.54554325",
"0.541853",
"0.54180735",
"0.5350246",
"0.5350246",
"0.53072256",
"0.52519006",
"0.5225484",
"0.522036",
"0.52076226",
"0.51778346",
"0.51559114",
"0.51377344",
"0.5124274",
"0.51185775",
"0.51132303",
"0.51069915",
"0.5106169",
"0.5103072",
"0.50991625"
] | 0.6917194 | 0 |
Sets the warning song to the specified song number. This should be called before playing the warning song. | def set_warning_song(self, song_number):
self._warning_song_num = int(math.fabs(song_number)) % 5
# Song is in c major scale and is the 5th (G) to the 3rd (E).
cmd = "140 " + str(self._warning_song_num) + " 2 67 16 64 16"
self._serial_conn.send_command(cmd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def play_warning_song(self):\n if self._warning_song_num is None:\n self.set_warning_song(0)\n\n self._serial_conn.send_command(\"141 \" + str(self._warning_song_num))",
"def set_happy_song(self, song_number):\n self._happy_song_num = int(math.fabs(song_number)) % 5\n\n # Song is in c major scale and is the 5th (G) to the 3rd (E).\n cmd = \"140 \" + str(self._happy_song_num) + \" 2 64 16 67 16\"\n\n self._serial_conn.send_command(cmd)",
"def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()",
"def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"",
"def warning_count(self, warning_count):\n\n self._warning_count = warning_count",
"def warning_count(self, warning_count):\n\n self._warning_count = warning_count",
"def setMidiNumber(self, new_nbr):\n\n self.nbr = limiter(new_nbr)",
"def set_warning_message(msg):\n set_message(msg, TYPE_WARNING)",
"def play_happy_song(self):\n if self._happy_song_num is None:\n self.set_happy_song(1)\n\n self._serial_conn.send_command(\"141 \" + str(self._happy_song_num))",
"def set_mute(self, track, xclip, ident, value = None):\n if track in self.song().tracks + self.song().return_tracks:\n if value in KEYWORDS:\n track.mute = KEYWORDS[value]\n else:\n track.mute = not(track.mute)",
"def warning(self, warning):\n pass",
"def warning_spoilers(self, warning_spoilers):\n\n self._warning_spoilers = warning_spoilers",
"def notice(self, warning):\n pass",
"def set_track(self, number: int, count: int) -> None:\n if count and count > 0:\n self.track_count = count\n else:\n self.track_count = None\n if number and number > 0:\n if self.track_count and number > self.track_count:\n # The track number cannot be greater than the total track number.\n self.track_number = self.track_count\n self.track_number = number\n return\n self.track_number = None",
"def set_warning(warning):\n impl.set_warning(**locals())",
"def mute_track(self, track, muted):\n pass",
"def bottle_song_for(num):\n pass",
"def warning(self, msg):\n oscid = self.app.global_osc_id()\n print(\"WARNING : /Llia/%s : %s\" % (oscid, msg))",
"def change_music(self, track):\n try:\n if self.bg_volume != 0:\n self.current = self.music_lib[track]\n pygame.mixer.music.load(self.current)\n pygame.mixer.music.play(-1)\n self.current = track\n else:\n pygame.mixer.music.stop()\n except:\n print \"Couldn't load track '\", track + \"'!\"",
"def markfile(self, song_id):\n cur = self.conn.cursor()\n query = \"\"\"UPDATE caro_song SET score = -1000 WHERE id=%s\"\"\"\n cur.execute(query, (song_id, ))\n\n self.memcache.delete(\":1:song_%d\" % song_id)\n\n query = \"\"\"DELETE FROM caro_playlistentry WHERE song_id=%s\"\"\"\n cur.execute(query, (song_id, ))",
"def song_changed(self, song):\n if song == NOTPLAYING:\n print(\"Not playing\")\n else:\n print(\"Changed to: {} - {}\". format(song.get('artist', 'Unknown artist'), song.get('title', 'Unknown title')))\n self._publish({TAGS[tag]: value for (tag, value) in song.items() if tag in TAGS})",
"def load(self, song):\n self.currentSongName = song\n self.currentSong = pygame.mixer.music.load(song)",
"def set_nowplaying_metadata(self, track, album, artist):\n\n\t\tparts = [artist[:30], album[:30], track[:30]]\n\t\tself._send_message(\"MUSIC_CONTROL\", self._pack_message_data(16, parts))",
"def set_nowplaying_metadata(self, track, album, artist):\n\n\t\tparts = [artist[:30], album[:30], track[:30]]\n\t\tself._send_message(\"MUSIC_CONTROL\", self._pack_message_data(16, parts))",
"def warning(self, message, code=None):\n\n if code is None:\n code = ''\n self._add_message( message, self.WARNING, code=code )\n self.n_warnings += 1",
"def set_song_count(self, count: int) -> None:\n\n self.song_count = count\n self.overall_total = 100 * count\n\n if not self.simple_tui:\n if self.song_count > 4:\n self.overall_task_id = self.rich_progress_bar.add_task(\n description=\"Total\",\n message=(\n f\"{self.overall_completed_tasks}/{int(self.overall_total / 100)} \"\n \"complete\"\n ),\n total=self.overall_total,\n visible=(not self.quiet),\n )",
"def set_now_playing_title(self, title):\n self.now_playing.text = title",
"def set_volume_music(self, value):\n\t\tif self._setting.get(FIFE_MODULE, \"PlaySounds\"):\n\t\t\tself.emitter['bgsound'].setGain(value)",
"def set_warning(warningTxt):\r\n if not core.does_item_exist(\"Warning##Warning\"):\r\n with simple.collapsing_header(\"Warning##Warning\", parent=\"##GroupStats\",\r\n default_open=True,\r\n closable=False,\r\n bullet=True):\r\n core.add_text(\"Warning\", default_value=warningTxt, color=(255, 255, 0, 255))",
"async def set_playlist_play(self, playlist_id, index):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_PLAY_PLAYLIST.format(playlist_id, index), data=None)\n self._current_playlist_id = playlist_id\n time.sleep(0.2)\n await self.async_update()"
] | [
"0.8164014",
"0.6481732",
"0.6222161",
"0.59834564",
"0.58476883",
"0.58476883",
"0.5832589",
"0.56763095",
"0.55746937",
"0.556166",
"0.54363596",
"0.54155654",
"0.54040843",
"0.53658545",
"0.52933496",
"0.5284162",
"0.52797306",
"0.526646",
"0.52188104",
"0.51960605",
"0.5157057",
"0.51344347",
"0.5120526",
"0.5120526",
"0.5112517",
"0.50919896",
"0.50832415",
"0.5076127",
"0.5045296",
"0.5022348"
] | 0.82101125 | 0 |
Plays the warning song | def play_warning_song(self):
if self._warning_song_num is None:
self.set_warning_song(0)
self._serial_conn.send_command("141 " + str(self._warning_song_num)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _play(self):\n # Play unless explicitely ignored in config\n if not self.alarm_builder.config._get_debug_option(\"DO_NOT_PLAY_ALARM\"):\n self.alarm_builder.play(AlarmWorker.audio)",
"def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()",
"def track_04():\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title='Sunshine Live', force_radio=True)\n return \"Ok\"",
"def track_02():\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer', title='Radio PSR Live', force_radio=True)\n return \"Ok\"",
"def sons():\n if pygame.mixer and SONS:\n music = os.path.join(main_dir, 'src/sound', '')\n pygame.mixer.music.load(music)\n pygame.mixer.music.play()",
"async def skip(self):\n await self.play()",
"def play_music(self):\n song_index = -1\n if self.num_songs == 0:\n sys.stdout.write(\"No songs found\\n\")\n sys.exit(0)\n \n # FIXME: spacebar/pause is an mplayer-specific command\n sys.stdout.write(\"Press spacebar to pause songs\\n\")\n sys.stdout.write(\"Press ctrl+c once to skip a song\\n\")\n sys.stdout.write(\"Hold ctrl+c to exit\\n\")\n sys.stdout.write(\"%d files found.\\n\" % self.num_songs)\n while True:\n try:\n song_index = self._get_song_index(song_index)\n if song_index == None:\n sys.exit(0)\n song = self.songs[song_index]\n sys.stdout.write(\"%s\\n\" % song)\n \n # Disabled the following as it got pretty annoying seeing a \n # torrent of notifications for non-music files (mplayer \n # gracefully skips these). \n #try:\n # notify_cmd=\"notify-send -t 1000 '%s'\" % \\\n # song.split(\"/\")[-1]\n # subprocess.check_call(notify_cmd, shell=True)\n #except:\n # pass\n #FIXME: escape quotes in songs\n play_cmd = '\"%s\" \"%s\" > /dev/null 2>&1 ' % \\\n (self.music_client, song) \n subprocess.check_call(play_cmd, shell=True)\n except KeyboardInterrupt:\n try:\n # HACK to allow repeated ctrl+c to exit outright\n time.sleep(0.1) \n except KeyboardInterrupt:\n sys.stderr.write(\"\\nExiting...\\n\")\n sys.exit(0)",
"def play(self):\n print(\"Bientôt ! :)\")",
"def play(self):\n pass",
"def show_playing(self):\n\n print(\"show_playing needs implementation\")",
"def track_03():\n sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen', force_radio=True)\n return \"Ok\"",
"def play_sound(self, sound) -> None:\n pass",
"def play_music(lang):\n\tutils.speak(data_json[\"TLA_BOT_ASK_MUSIC_NAME\"], lang)\n\tans = utils.hear(lang)\n\t#print(const.TLA_YOU_RESP + ans)\n\tlogging.info(const.TLA_YOU_RESP + ans)\n\tutils.speak(data_json[\"TLA_BOT_PLAY_MUSIC\"], lang)\n\t#driver = webdriver.Chrome()\n\ttry:\n\t\tdriver = webdriver.Chrome(ChromeDriverManager().install())\n\t\tdriver.implicitly_wait(10)\n\t\tdriver.maximize_window()\t\t\n\t\tdriver.get(const.TLA_YOUTUBE_LINK_SEARCH + ans)\n\t\tdriver.find_element_by_id(const.TLA_YOUTUBE_FIRST_VIDEO).click()\n\t\ttime.sleep(10)\n\texcept:\n\t\tresult = data_json[\"TLA_BOT_YOUTUBE_NOT_FOUND\"]\n\t\tlogging.error(result)\n\t\tutils.speak(result, lang)",
"def set_warning_song(self, song_number):\n self._warning_song_num = int(math.fabs(song_number)) % 5\n\n # Song is in c major scale and is the 5th (G) to the 3rd (E).\n cmd = \"140 \" + str(self._warning_song_num) + \" 2 67 16 64 16\"\n\n self._serial_conn.send_command(cmd)",
"def play_sound(self):\n # http://soundbible.com/2103-1-Person-Cheering.html\n my_path = os.path.dirname(__file__)\n sound_path = os.path.join(my_path, 'yay.mp3')\n sound = SoundLoader.load(sound_path)\n sound.play()",
"def bad_singing():\n time.sleep(2)\n\n port = 'sim'\n hal = new_create.Create(port)\n hal.toSafeMode() # So you can try this out on a table if you want.\n\n for k in range(31, 128):\n hal.playNote(k, 16)\n # Try with the following commented-out, then not commented-out.\n # time.sleep(16 / 64)\n\n song = [(60, 32), (64, 32), (67, 32), (72, 32)] # a slower C chord\n hal.playSong(song)\n hal.playSong(song)\n hal.playSong(song)\n\n hal.shutdown()",
"def play(filename):\n SoundClient(blocking=True).playWave(filename)",
"def playMessage(msg):\n tts = gTTS(msg, lang=\"pt-br\")\n file = \"./audios/temp.mp3\"\n\n tts.save(file)\n player = MediaPlayer(file)\n player.play()\n sleep(10)\n os.remove(file)",
"def warning(self, msg):\n oscid = self.app.global_osc_id()\n print(\"WARNING : /Llia/%s : %s\" % (oscid, msg))",
"def start(self):\n\tglobal mode\n\tmode=\"./music/\"\n\tglobal message\n\tif message!=2:\n\t\tmessage=1\n\t\tbot.loop.create_task(play())",
"def play(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackPlay())",
"def play_audio(self):\n if not self.voice.get_busy():\n self.voice.play(self.sound)\n else:\n pass",
"def nothing_playing(self):\n self.device.responses['playing'] = PlayingResponse()",
"def play(self):\n\t\tprint(\"play args:\")\n\t\tprint(args)\n\t\tpyglet.clock.schedule_once( self.play_next,\n\t\t\t\t\t\t\t\t\tself._autonext_interval_msec)\n\t\t# instead of using interval schedules, it just callls the same\n\t\t# function repeated so if the system is backed up it won't create\n\t\t# additional problems\n\t\tself._playing = True",
"def play_feedback(corr):\r\n if not PRELOADED_FEEDBACK:\r\n global FEEDBACK_SOUNDS\r\n p = os.path.join(os.path.dirname(os.getcwd()), 'stimuli', 'audio')\r\n FEEDBACK_SOUNDS = [pygame.mixer.Sound(os.path.join(p, 'Wrong.wav')),\r\n pygame.mixer.Sound(os.path.join(p, 'Correct.wav'))]\r\n FEEDBACK_SOUNDS[corr].play()",
"def play_happy_song(self):\n if self._happy_song_num is None:\n self.set_happy_song(1)\n\n self._serial_conn.send_command(\"141 \" + str(self._happy_song_num))",
"def say_information_out_loud(song, station_frequency):\n message = \"{}, by {} is playing on {} .\".format(song['name'], song['artist'], station_frequency)\n if switches.is_active_time_remaining_switch():\n message += 'There are {} seconds left.'.format(song['seconds_remaining'])\n gTTS(text=message, lang='en', slow=False).save(helpers.get_notification_output_path())\n # subprocess.Popen(['mpg321', helpers.get_notification_output_path()])\n os.system(\"mpg321 --quiet {}\".format(helpers.get_notification_output_path()))",
"def play(sound):\n if SOUNDDIR != \"\":\n call([\"aplay\", SOUNDDIR + sound])",
"def play_wakeup_music(self):\n list_of_music_files = []\n for track in os.listdir(project_path + '/music'):\n if track.endswith(\".mp3\"):\n list_of_music_files.append(str(project_path + '/music/' + str(track)))\n\n # figure out random track of the found mp3 files\n random_track = randint(0, len(list_of_music_files)-1)\n\n self.play_mp3_file(list_of_music_files[random_track])",
"def playSound(self,sound):\n sound.play()"
] | [
"0.6465088",
"0.6407871",
"0.63904196",
"0.63702834",
"0.63601696",
"0.6282263",
"0.6277445",
"0.62654936",
"0.62424767",
"0.6151498",
"0.61472845",
"0.6144805",
"0.6138071",
"0.61313224",
"0.6120033",
"0.61154944",
"0.61094755",
"0.6102339",
"0.6099751",
"0.6089556",
"0.60835165",
"0.6065285",
"0.6051023",
"0.6044531",
"0.60268974",
"0.60263103",
"0.60142064",
"0.600326",
"0.600171",
"0.5996715"
] | 0.85360086 | 0 |
Sets the happy song to the specified song number. This should be called before playing the happy song. | def set_happy_song(self, song_number):
self._happy_song_num = int(math.fabs(song_number)) % 5
# Song is in c major scale and is the 5th (G) to the 3rd (E).
cmd = "140 " + str(self._happy_song_num) + " 2 64 16 67 16"
self._serial_conn.send_command(cmd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def play_happy_song(self):\n if self._happy_song_num is None:\n self.set_happy_song(1)\n\n self._serial_conn.send_command(\"141 \" + str(self._happy_song_num))",
"def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()",
"def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"",
"def set_warning_song(self, song_number):\n self._warning_song_num = int(math.fabs(song_number)) % 5\n\n # Song is in c major scale and is the 5th (G) to the 3rd (E).\n cmd = \"140 \" + str(self._warning_song_num) + \" 2 67 16 64 16\"\n\n self._serial_conn.send_command(cmd)",
"def bottle_song_for(num):\n pass",
"def play_warning_song(self):\n if self._warning_song_num is None:\n self.set_warning_song(0)\n\n self._serial_conn.send_command(\"141 \" + str(self._warning_song_num))",
"def bottle_song_while(num):\n pass",
"def load(self, song):\n self.currentSongName = song\n self.currentSong = pygame.mixer.music.load(song)",
"def set_playing(self, playing=None):\r\n self.add_play_move(PlayMove.SET_PLAYING, playing=playing)\r\n return self.player_control.set_playing(playing=playing)",
"def switch_player(self):\n first_is_playing = self.app.firstTrackFrame.playing\n self.app.firstTrackFrame.play(not first_is_playing)\n self.app.secondTrackFrame.play(first_is_playing)",
"def playMusic(self, music = None, index=0):\n if music == None:\n music = self.battleMusic\n if isinstance(music, list):\n self.songIndex = index\n while self.songIndex > len(music)-1:\n self.songIndex -= len(music)\n pygame.mixer.music.load(music[self.songIndex])\n else:\n self.songIndex = 0\n pygame.mixer.music.load(music)\n self.currentMusic = music\n pygame.mixer.music.play()\n pygame.mixer.music.set_volume(self.musicVolume)\n pygame.mixer.music.set_endevent(pygame.constants.USEREVENT)",
"def toggle_next(self):\n self.bot.loop.call_soon_threadsafe(self.play_next_song.set)",
"async def set_playing(self, value: bool):\n await self._pytheos.api.player.set_play_state(self.id, models.player.PlayState.Playing if value else models.player.PlayState.Stopped)",
"def set_songs(self, songs: List[Song]) -> None:\n\n self.songs = songs\n self.set_song_count(len(songs))",
"def change_music(self, track):\n try:\n if self.bg_volume != 0:\n self.current = self.music_lib[track]\n pygame.mixer.music.load(self.current)\n pygame.mixer.music.play(-1)\n self.current = track\n else:\n pygame.mixer.music.stop()\n except:\n print \"Couldn't load track '\", track + \"'!\"",
"def set_track(self, number: int, count: int) -> None:\n if count and count > 0:\n self.track_count = count\n else:\n self.track_count = None\n if number and number > 0:\n if self.track_count and number > self.track_count:\n # The track number cannot be greater than the total track number.\n self.track_number = self.track_count\n self.track_number = number\n return\n self.track_number = None",
"def currently_playing_change(song):\n socketio.emit('currently_playing_changed', song.to_dict())",
"def restart(self):\n self._song_idx = 0\n self._song_position = 0\n try:\n self._cur_song = self.songs[self._song_idx]\n except IndexError:\n self._cur_song = None",
"def songUpdate(song,cindex):\r\n if cindex == 0:\r\n song[MpMusic.SONGINDEX] = songGetAlbumIndex(song);\r\n return 0;",
"def set_song_count(self, count: int) -> None:\n\n self.song_count = count\n self.overall_total = 100 * count\n\n if not self.simple_tui:\n if self.song_count > 4:\n self.overall_task_id = self.rich_progress_bar.add_task(\n description=\"Total\",\n message=(\n f\"{self.overall_completed_tasks}/{int(self.overall_total / 100)} \"\n \"complete\"\n ),\n total=self.overall_total,\n visible=(not self.quiet),\n )",
"async def jump(self, ctx, song_index: int):\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n if not player.is_connected:\n # We can't disconnect, if we're not connected.\n return await ctx.send(embed=self.error_embed(f'Not playing. [{ctx.message.author.mention}]'))\n\n if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):\n # Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot\n # may not disconnect the bot.\n return await ctx.send(embed=self.error_embed(f'Not connected to the same voice channel. [{ctx.message.author.mention}]'))\n\n if song_index > len(player.queue) + 1:\n return await ctx.send(embed=self.error_embed(\"There is no such song in the queue.\"))\n\n for i in range(song_index - 1):\n player.queue.pop(0)\n await player.skip()\n await ctx.message.add_reaction(\"✅\")",
"def play(self, i_hor, i_ver, number):\n assert isinstance(i_hor, int)\n assert isinstance(i_ver, int)\n assert isinstance(number, int)\n assert 1 <= i_hor <= 9\n assert 1 <= i_ver <= 9\n assert 1 <= number <= 9\n\n self.state[i_hor-1, i_ver-1] = number",
"def pause_song(self):\r\n if self.isPlaying:\r\n self.playSong[0].pause()\r\n print(\"Song paused. To continue type Play.\")\r\n else:\r\n print(\"Play a song first...\")",
"def _get_song_index(self, song_index): \n if self.randomise:\n song_index = randint(1, self.num_songs) - 1\n else:\n if (song_index + 1) == self.num_songs:\n if self.loop_songs:\n song_index = 0\n else:\n return None\n else:\n song_index += 1\n return song_index",
"def start_soundtrack(self):\n sources = screens['Intro']['music']\n self.source = choice(sources)\n Logger.info('Chose \"{}\" as the intro music.'.format(self.source))\n try:\n SoundManager.music[self.source]\n except KeyError:\n SoundManager.add_music(self.source, self)\n SoundManager.play_music(self.source)",
"def start_soundtrack(self):\n sources = screens['Combat']['music']\n self.source = choice(sources)\n Logger.info(\n 'Application: Chose \"{}\" as the combat music.'.format(self.source)\n )\n try:\n SoundManager.music[self.source]\n except KeyError:\n SoundManager.add_music(self.source, self)\n SoundManager.play_music(self.source)",
"def song_changed(self, song):\n if song == NOTPLAYING:\n print(\"Not playing\")\n else:\n print(\"Changed to: {} - {}\". format(song.get('artist', 'Unknown artist'), song.get('title', 'Unknown title')))\n self._publish({TAGS[tag]: value for (tag, value) in song.items() if tag in TAGS})",
"def play_music(self):\n song_index = -1\n if self.num_songs == 0:\n sys.stdout.write(\"No songs found\\n\")\n sys.exit(0)\n \n # FIXME: spacebar/pause is an mplayer-specific command\n sys.stdout.write(\"Press spacebar to pause songs\\n\")\n sys.stdout.write(\"Press ctrl+c once to skip a song\\n\")\n sys.stdout.write(\"Hold ctrl+c to exit\\n\")\n sys.stdout.write(\"%d files found.\\n\" % self.num_songs)\n while True:\n try:\n song_index = self._get_song_index(song_index)\n if song_index == None:\n sys.exit(0)\n song = self.songs[song_index]\n sys.stdout.write(\"%s\\n\" % song)\n \n # Disabled the following as it got pretty annoying seeing a \n # torrent of notifications for non-music files (mplayer \n # gracefully skips these). \n #try:\n # notify_cmd=\"notify-send -t 1000 '%s'\" % \\\n # song.split(\"/\")[-1]\n # subprocess.check_call(notify_cmd, shell=True)\n #except:\n # pass\n #FIXME: escape quotes in songs\n play_cmd = '\"%s\" \"%s\" > /dev/null 2>&1 ' % \\\n (self.music_client, song) \n subprocess.check_call(play_cmd, shell=True)\n except KeyboardInterrupt:\n try:\n # HACK to allow repeated ctrl+c to exit outright\n time.sleep(0.1) \n except KeyboardInterrupt:\n sys.stderr.write(\"\\nExiting...\\n\")\n sys.exit(0)",
"def set_artist_song_entry(self, artist, song):\n self.artist_name.set_text(artist)\n self.song_name.set_text(song)",
"async def set_playlist_play(self, playlist_id, index):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_PLAY_PLAYLIST.format(playlist_id, index), data=None)\n self._current_playlist_id = playlist_id\n time.sleep(0.2)\n await self.async_update()"
] | [
"0.8054545",
"0.664417",
"0.6517853",
"0.6176182",
"0.61182725",
"0.60147935",
"0.5744937",
"0.56604385",
"0.5570946",
"0.5564194",
"0.5535582",
"0.55184954",
"0.55106753",
"0.54808784",
"0.5475834",
"0.5468992",
"0.546218",
"0.5436016",
"0.54331815",
"0.5356054",
"0.53106105",
"0.53066957",
"0.5295981",
"0.52857167",
"0.52854735",
"0.5257974",
"0.5232873",
"0.5232132",
"0.5231054",
"0.5230187"
] | 0.8327107 | 0 |
Plays the happy song | def play_happy_song(self):
if self._happy_song_num is None:
self.set_happy_song(1)
self._serial_conn.send_command("141 " + str(self._happy_song_num)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def play_sound(self):\n # http://soundbible.com/2103-1-Person-Cheering.html\n my_path = os.path.dirname(__file__)\n sound_path = os.path.join(my_path, 'yay.mp3')\n sound = SoundLoader.load(sound_path)\n sound.play()",
"def play(self):\n pass",
"def play(self):\n\t\tprint(\"play args:\")\n\t\tprint(args)\n\t\tpyglet.clock.schedule_once( self.play_next,\n\t\t\t\t\t\t\t\t\tself._autonext_interval_msec)\n\t\t# instead of using interval schedules, it just callls the same\n\t\t# function repeated so if the system is backed up it won't create\n\t\t# additional problems\n\t\tself._playing = True",
"def play_audio(self):\n if not self.voice.get_busy():\n self.voice.play(self.sound)\n else:\n pass",
"def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()",
"def play(self):\n self.playing = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?",
"def play_music(self):\n song_index = -1\n if self.num_songs == 0:\n sys.stdout.write(\"No songs found\\n\")\n sys.exit(0)\n \n # FIXME: spacebar/pause is an mplayer-specific command\n sys.stdout.write(\"Press spacebar to pause songs\\n\")\n sys.stdout.write(\"Press ctrl+c once to skip a song\\n\")\n sys.stdout.write(\"Hold ctrl+c to exit\\n\")\n sys.stdout.write(\"%d files found.\\n\" % self.num_songs)\n while True:\n try:\n song_index = self._get_song_index(song_index)\n if song_index == None:\n sys.exit(0)\n song = self.songs[song_index]\n sys.stdout.write(\"%s\\n\" % song)\n \n # Disabled the following as it got pretty annoying seeing a \n # torrent of notifications for non-music files (mplayer \n # gracefully skips these). \n #try:\n # notify_cmd=\"notify-send -t 1000 '%s'\" % \\\n # song.split(\"/\")[-1]\n # subprocess.check_call(notify_cmd, shell=True)\n #except:\n # pass\n #FIXME: escape quotes in songs\n play_cmd = '\"%s\" \"%s\" > /dev/null 2>&1 ' % \\\n (self.music_client, song) \n subprocess.check_call(play_cmd, shell=True)\n except KeyboardInterrupt:\n try:\n # HACK to allow repeated ctrl+c to exit outright\n time.sleep(0.1) \n except KeyboardInterrupt:\n sys.stderr.write(\"\\nExiting...\\n\")\n sys.exit(0)",
"async def play_mood(mood):\n names = sounds[mood]\n for n in names:\n print('playing:', mood, n)\n await api.audio.play(n, blocking=True)",
"def play(self):\n print('Playing game...')",
"def play(self):\n print(\"Bientôt ! :)\")",
"def sons():\n if pygame.mixer and SONS:\n music = os.path.join(main_dir, 'src/sound', '')\n pygame.mixer.music.load(music)\n pygame.mixer.music.play()",
"def play_game():\n pass",
"def play(self, loop):\n if self.playMusic:\n pygame.mixer.music.play(loop)",
"def playSound(self,sound):\n sound.play()",
"def play(self):\n if self.state == 'play':\n super().play()\n return\n if self.player is None:\n self.load()\n\n self.player.play()\n self.state = 'play'\n super().play()",
"def play(self):\n\n #Call the superclass play\n return super().play()",
"async def playing(self, ctx):\n\n player = self.player\n\n if player.current is None:\n await self.bot.say('Not playing anything.')\n else:\n await self.bot.say('Now playing {} :'.format(player.current))",
"def play(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackPlay())",
"def play(self, state,currentplayer):\n\t\tpass",
"def play_music(lang):\n\tutils.speak(data_json[\"TLA_BOT_ASK_MUSIC_NAME\"], lang)\n\tans = utils.hear(lang)\n\t#print(const.TLA_YOU_RESP + ans)\n\tlogging.info(const.TLA_YOU_RESP + ans)\n\tutils.speak(data_json[\"TLA_BOT_PLAY_MUSIC\"], lang)\n\t#driver = webdriver.Chrome()\n\ttry:\n\t\tdriver = webdriver.Chrome(ChromeDriverManager().install())\n\t\tdriver.implicitly_wait(10)\n\t\tdriver.maximize_window()\t\t\n\t\tdriver.get(const.TLA_YOUTUBE_LINK_SEARCH + ans)\n\t\tdriver.find_element_by_id(const.TLA_YOUTUBE_FIRST_VIDEO).click()\n\t\ttime.sleep(10)\n\texcept:\n\t\tresult = data_json[\"TLA_BOT_YOUTUBE_NOT_FOUND\"]\n\t\tlogging.error(result)\n\t\tutils.speak(result, lang)",
"def start_soundtrack(self):\n sources = screens['Intro']['music']\n self.source = choice(sources)\n Logger.info('Chose \"{}\" as the intro music.'.format(self.source))\n try:\n SoundManager.music[self.source]\n except KeyError:\n SoundManager.add_music(self.source, self)\n SoundManager.play_music(self.source)",
"async def audio_player_task(self):\n while True:\n self.play_next_song.clear()\n self.current = await self.songs.get()\n await self.bot.send_message(self.current.channel, 'Now playing ' + str(self.current))\n self.current.player.start()\n if self.current.jukebox:\n if not self.current.player.title == 'translate_tts':\n k_str = 'JUKEBOX FOR **' + self.current.player.title + '**\\n'\n juke_m = await self.bot.send_message(self.current.channel, k_str)\n juke_cells = [':red_circle:', ':large_blue_circle:', ':green_heart:', ':diamond_shape_with_a_dot_inside:']\n sq_dia = 9\n while not self.play_next_song.is_set(): # :red_circle: :large_blue_circle: :green_heart:\n lines = []\n for i in range(sq_dia):\n cells = []\n for i in range(sq_dia):\n cells.append(random.choice(sem_cells))\n lines.append(' '.join(cells))\n await self.bot.edit_message(juke_m, k_str + '\\n'.join(lines))\n await asyncio.sleep(1.05)\n await self.bot.edit_message(juke_m, juke_m.content + '\\nSorry, nothing here anymore!\\n**FINISHED PLAYING SONG!**')\n else:\n await self.play_next_song.wait()",
"def start_soundtrack(self):\n sources = screens['Combat']['music']\n self.source = choice(sources)\n Logger.info(\n 'Application: Chose \"{}\" as the combat music.'.format(self.source)\n )\n try:\n SoundManager.music[self.source]\n except KeyError:\n SoundManager.add_music(self.source, self)\n SoundManager.play_music(self.source)",
"def quick_play(self, index=0):\n self.play(self.download(self.results[index]))",
"def start(self):\n\tglobal mode\n\tmode=\"./music/\"\n\tglobal message\n\tif message!=2:\n\t\tmessage=1\n\t\tbot.loop.create_task(play())",
"def play(self):\n self.__run_csound()",
"def play(song):\n # Show the metadata\n if (verbose==True):\n for s in song.keys():\n print s, \":\", \n print song[s]\n else:\n print \"Title:\", song[\"title\"]\n print \"Artisit:\", song[\"artist\"]\n print \"Album:\", song[\"albumtitle\"]\n print \"Year\", song[\"public_time\"]\n print \"Company:\", song[\"company\"]\n print \"Length\", song[\"length\"]\n print \"Playing...\"\n mp3_url = song[\"url\"]\n song_length = song[\"length\"]\n p = subprocess.Popen([\"mplayer\", \"-msglevel\", \"all=0\", mp3_url])\n\n # At the same time, download the song:\n u = urllib2.urlopen(mp3_url)\n local_mp3 = open(song[\"title\"] + \"-\" + song[\"artist\"] + \".mp3\", \"w\")\n local_mp3.write(u.read())\n local_mp3.close()\n # time.sleep(song_length)\n i = 0\n while(True):\n time.sleep(1)\n i += 1\n if i == song_length:\n # Kill the process when the song is finished.\n p.terminate()\n print \"#\" * 80\n break",
"async def playnow(self, ctx):\n server = ctx.message.server\n author = ctx.message.author\n voice_channel = author.voice_channel\n\n # Checking already connected, will join if not\n\n if not self.voice_connected(server):\n try:\n self.has_connect_perm(author, server)\n except AuthorNotConnected:\n await self.bot.say(\"You must join a voice channel before I can\"\n \" play anything.\")\n return\n except UnauthorizedConnect:\n await self.bot.say(\"I don't have permissions to join your\"\n \" voice channel.\")\n return\n except UnauthorizedSpeak:\n await self.bot.say(\"I don't have permissions to speak in your\"\n \" voice channel.\")\n return\n else:\n await self._join_voice_channel(voice_channel)\n else: # We are connected but not to the right channel\n if self.voice_client(server).channel != voice_channel:\n pass # TODO: Perms\n\n # Checking if playing in current server\n\n if self.is_playing(server):\n await self.bot.say(\"I'm already playing a song on this server!\")\n return # TODO: Possibly execute queue?\n\n # If not playing, spawn a downloader if it doesn't exist and begin\n # downloading the next song\n\n if self.currently_downloading(server):\n await self.bot.say(\"I'm already downloading a file!\")\n return\n\n lists = self._list_local_playlists()\n\n if not any(map(lambda l: os.path.split(l)[1] == name, lists)):\n await self.bot.say(\"Local playlist not found.\")\n return\n\n self._play_local_playlist(server, name)",
"def play(sound):\n if SOUNDDIR != \"\":\n call([\"aplay\", SOUNDDIR + sound])",
"def play_auto(self):\n from .players import get_player\n\n while not self.is_game_over:\n next = self.next_player\n player = self.player_x if next == 'X' else self.player_o\n if player == 'human':\n return\n\n player_obj = get_player(player)\n self.play(player_obj.play(self))"
] | [
"0.73099303",
"0.7217488",
"0.7202953",
"0.71257263",
"0.7124428",
"0.71038634",
"0.7079971",
"0.7036819",
"0.6993867",
"0.6915133",
"0.6888346",
"0.68812245",
"0.6830361",
"0.6828884",
"0.6825244",
"0.6780092",
"0.6757588",
"0.67484957",
"0.67401594",
"0.6732784",
"0.67304546",
"0.6711789",
"0.67087823",
"0.6704925",
"0.66844785",
"0.6671767",
"0.66674656",
"0.66651857",
"0.66627616",
"0.66395354"
] | 0.8363741 | 0 |
Reads the provided button's bit from the Buttons packet. This method is available to a robot in the PASSIVE, SAFE, or FULL state. | def read_button(self, button):
data = self._read_packet(Button.PACKET_ID, Button.DATA_BYTES)
# Gets first byte
if len(data) == Button.DATA_BYTES:
byte = struct.unpack("B", data)[0]
return bool(byte & button)
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(self, button):\n\n return self.hardware_interfaces[self._gpio].read(self._b_names[button])",
"def ReadOne(self,button):\n return bool(self.Bus.Read_uInt8(self.Address, 0x40+button))",
"def read_buttons(self):\n data = self._read_packet(Button.PACKET_ID, Button.DATA_BYTES)\n\n # Gets first byte\n if len(data) == Button.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n return {\n Button.CLEAN: bool(byte & Button.CLEAN),\n Button.SPOT: bool(byte & Button.SPOT),\n Button.DOCK: bool(byte & Button.DOCK),\n Button.MINUTE: bool(byte & Button.MINUTE),\n Button.HOUR: bool(byte & Button.HOUR),\n Button.DAY: bool(byte & Button.DAY),\n Button.SCHEDULE: bool(byte & Button.SCHEDULE),\n Button.CLOCK: bool(byte & Button.CLOCK)\n }\n else:\n return {\n Button.CLEAN: False,\n Button.SPOT: False,\n Button.DOCK: False,\n Button.MINUTE: False,\n Button.HOUR: False,\n Button.DAY: False,\n Button.SCHEDULE: False,\n Button.CLOCK: False\n }",
"def ReadOneInv(self,button):\n return bool(self.Bus.Read_uInt8(self.Address, 0x20+button))",
"def ReadAll(self):\n v =self.Bus.Read_uInt8(self.Address,0x10)\n Buttons = []\n for i in range(0,self.PushButtons):\n Buttons.append(v & 2**i == 2**i)\n return Buttons",
"def test_01_ReadButtonData(self):\n l_button = Utility._read_base_device(self.m_xml.button, self.m_version)\n self.assertEqual(l_button.Name, TESTING_LIGHTING_BUTTON_NAME_0)\n self.assertEqual(l_button.Active, True)\n self.assertEqual(l_button.Comment, TESTING_DEVICE_COMMENT)\n self.assertEqual(l_button.DeviceFamily, TESTING_DEVICE_FAMILY_INSTEON)\n self.assertEqual(l_button.LightingType, 'Button', 'Bad Lighting Type')\n self.assertEqual(l_button.RoomName, TESTING_DEVICE_ROOM_NAME)",
"def button_b(self) -> bool:\n return bool(self.pressed & 0x4)",
"def readDriverButton(self, rawButtonId : int) -> bool:\n return self.driver.getRawButton(rawButtonId)",
"def Read_Byte(self):\r\n data = self.Port.read(1)\r\n return data",
"def get_button_state(self, button):\n\n\t\treturn self._interface.get_button_state(button)",
"def readKeyButton(self, keyBtn):\n if self._myKey.readKeyButton( keyBtn ) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton( keyBtn ) else 1\n return 0",
"def cb_button_bit(self, button, ident):\n if DEBUG: print(\"Buttons Bit Identity:\", ident)\n # Toggle 0 to 1 and 1 to 0. if a=0, a = abs(a-1), then a=1\n button.set_label(str(abs(int(button.get_label())-1)))\n self.update_frame_label()\n self.ieee754_breakdown()",
"def ReportPressed(self):\n v =self.Bus.Read_uInt8(self.Address,0x30)\n Buttons = []\n for i in range(0,self.PushButtons):\n Buttons.append(v & 2**i == 2**i)\n return Buttons",
"def get_touch_buttons(self):\n x=self.send_packet_check_response('\\x41')\n x=ord(x)\n return tuple(bool(x&(1<<i)) for i in range(2))",
"def get_button(self, button):\n return getattr(self.get_buttons_state(), button)",
"def process_spaceorb_buttondata(buf):\r\n return dict(buttons=buf[2] & 63,\r\n reset=(buf[2] & 64) != 0)",
"def test_02_ReadOneButtonXml(self):\n l_button = Utility._read_one_button_xml(self.m_pyhouse_obj, self.m_xml.button, self.m_version)\n self.assertEqual(l_button.Name, TESTING_LIGHTING_BUTTON_NAME_0)\n self.assertEqual(l_button.Active, True)\n self.assertEqual(l_button.Key, 0, 'Bad key')\n self.assertEqual(l_button.Name, TESTING_LIGHTING_BUTTON_NAME_0)\n self.assertEqual(l_button.DeviceFamily, 'Insteon', 'Bad Lighting family')\n self.assertEqual(l_button.LightingType, 'Button', 'Bad LightingType')\n self.assertEqual(l_button.InsteonAddress, conversions.dotted_hex2int(TESTING_INSTEON_ADDRESS_0))",
"def mouse_button_state():\n x, y = c_int(0), c_int(0)\n bmask = mouse.SDL_GetMouseState(byref(x), byref(y))\n return ButtonState(bmask)",
"def _read_byte(self):\n # Setup io pin as input mode\n self.gpio.setup(self._io_pin, GPIO.IN)\n\n byte = 0\n for i in range(8):\n # Read data on the falling edge of clk\n self.gpio.output(self._clk_pin, GPIO.HIGH)\n self._sleep()\n\n self.gpio.output(self._clk_pin, GPIO.LOW)\n self._sleep()\n\n bit = self.gpio.input(self._io_pin)\n byte |= ((2 ** i) * bit)\n\n return byte",
"def ReportPressedOnce(self):\n v =self.Bus.Read_uInt8(self.Address,0x31)\n Buttons = []\n for i in range(0,self.PushButtons):\n Buttons.append(v & 2**i == 2**i)\n return Buttons",
"def read_byte(self, opcode: int) -> int:\n\n # 0xff = 255\n\n return opcode & 0xFF",
"def get_bit(self):\n try:\n current_byte = self.contents[self.current_bit_position >> 3]\n except IndexError:\n raise EmptyStreamError(f\"Attempting read at bit position {self.current_bit_position} \"\n f\"(byte {self.current_bit_position >> 3})\")\n bit = min(1, current_byte & (1 << (7 - (self.current_bit_position % 8))))\n self.current_bit_position += 1\n return bit",
"def key_state(self, buttonCode):\r\n try:\r\n return self.buttons[buttonCode]\r\n except KeyError:\r\n return 0",
"def read_byte(self):\n return int.from_bytes(self.read(BitTypes.BYTE.value), byteorder='little')",
"def test_bit_get_int_accross_bytes(self):\n ops = [bitwise_operations.bit_get_int(self.test_bin_ones, 4, 8, False)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n\n expected_result = 16\n assert result[\"bitwise1\"] == expected_result",
"def test_bit_get(self):\n ops = [bitwise_operations.bit_get(self.five_255_bin, 0, 8)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n\n expected_result = bytearray([255] * 1)\n assert result[\"255\"] == expected_result",
"def test_bit_get_accross_bytes(self):\n ops = [bitwise_operations.bit_get(self.test_bin_ones, 4, 8)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n\n expected_result = bytearray([16] * 1)\n assert result[\"bitwise1\"] == expected_result",
"def button(channel, red, blue):\n return (pf_rc.CHANNEL[channel], OUTPUT[red], OUTPUT[blue])",
"def button_a(self) -> bool:\n return bool(self.pressed & 0x2)",
"def read_uint8(self):\n bytes = self.data[:1]\n value = struct.unpack('!B',bytes)[0]\n self.data = self.data[1:]\n return value"
] | [
"0.76606005",
"0.74095386",
"0.69239444",
"0.6785385",
"0.67204076",
"0.6530307",
"0.6455176",
"0.63392746",
"0.6320656",
"0.6304804",
"0.6298134",
"0.62084746",
"0.6171187",
"0.6151871",
"0.5996775",
"0.59908384",
"0.5934501",
"0.592418",
"0.59052503",
"0.5823691",
"0.5822068",
"0.581808",
"0.58173114",
"0.57685864",
"0.5755083",
"0.57193166",
"0.5679946",
"0.5677842",
"0.5642025",
"0.56335086"
] | 0.8228062 | 0 |
Reads the provided bump's bit from the Bump and wheel drop packet. This method is available to a robot in the PASSIVE, SAFE, or FULL state. | def read_bump(self, bump):
data = self._read_packet(Bump.PACKET_ID, Bump.DATA_BYTES)
if len(data) == Bump.DATA_BYTES:
byte = struct.unpack("B", data)[0]
return bool(byte & bump)
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_bump_wheel_drop(self):\n\n # Bump and Wheel drop packet information is interchangeable\n # in this case.\n data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)\n\n if len(data) == WheelDrop.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n Bump.BUMP_L: bool(byte & Bump.BUMP_L),\n Bump.BUMP_R: bool(byte & Bump.BUMP_L),\n WheelDrop.WHEEL_DROP_L: bool(byte & WheelDrop.WHEEL_DROP_L),\n WheelDrop.WHEEL_DROP_R: bool(byte & WheelDrop.WHEEL_DROP_R)\n }\n else:\n return {\n Bump.BUMP_L: False,\n Bump.BUMP_R: False,\n WheelDrop.WHEEL_DROP_L: False,\n WheelDrop.WHEEL_DROP_R: False\n }",
"def read_light_bump(self, light_bump):\n data = self._read_packet(light_bump, Bump.LIGHT_DATA_BYTES)\n\n if len(data) == Bump.LIGHT_DATA_BYTES:\n return struct.unpack(\">h\", data)[0]\n else:\n return 0",
"def read_bumps(self):\n data = self._read_packet(Bump.PACKET_ID, Bump.DATA_BYTES)\n\n if len(data) == Bump.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n Bump.BUMP_L: bool(byte & Bump.BUMP_L),\n Bump.BUMP_R: bool(byte & Bump.BUMP_R)\n }\n else:\n return {\n Bump.BUMP_L: False,\n Bump.BUMP_R: False\n }",
"def readBumper(self, msg):\n #\n # if(msg.state==1):#the bumper is pressed\n self.executeTrajectory()",
"def get_bump_sensors(self):\n x=self.send_packet_check_response('\\x40')\n x=ord(x)\n return tuple(bool(x&(1<<i)) for i in range(6))",
"def read_wheel_drop(self, wheel_drop):\n data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)\n\n if len(data) == WheelDrop.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n return bool(byte & wheel_drop)\n else:\n return False",
"def process_bump_sensing(self, data):\n if data.state == BumperEvent.PRESSED:\n self.bump = True\n self.bumpDir = data.bumper\n rospy.loginfo(\"Bumper Event :)\")\n rospy.loginfo(data.bumper)",
"def read_bits(self, num_bits):\n this_value = self.message[0:num_bits]\n self.message = self.message[num_bits:]\n return this_value",
"def read_dht11_dat():\n GPIO.setup(DHTPIN, GPIO.OUT)\n GPIO.output(DHTPIN, GPIO.HIGH)\n time.sleep(0.05)\n GPIO.output(DHTPIN, GPIO.LOW)\n time.sleep(0.02)\n GPIO.setup(DHTPIN, GPIO.IN, GPIO.PUD_UP)\n\n unchanged_count = 0\n last = -1\n data = []\n while True:\n current = GPIO.input(DHTPIN)\n data.append(current)\n if last != current:\n unchanged_count = 0\n last = current\n else:\n unchanged_count += 1\n if unchanged_count > MAX_UNCHANGE_COUNT:\n break\n\n state = STATE_INIT_PULL_DOWN\n\n lengths = []\n current_length = 0\n\n for current in data:\n current_length += 1\n\n if state == STATE_INIT_PULL_DOWN:\n if current == GPIO.LOW:\n state = STATE_INIT_PULL_UP\n else:\n continue\n if state == STATE_INIT_PULL_UP:\n if current == GPIO.HIGH:\n state = STATE_DATA_FIRST_PULL_DOWN\n else:\n continue\n if state == STATE_DATA_FIRST_PULL_DOWN:\n if current == GPIO.LOW:\n state = STATE_DATA_PULL_UP\n else:\n continue\n if state == STATE_DATA_PULL_UP:\n if current == GPIO.HIGH:\n current_length = 0\n state = STATE_DATA_PULL_DOWN\n else:\n continue\n if state == STATE_DATA_PULL_DOWN:\n if current == GPIO.LOW:\n lengths.append(current_length)\n state = STATE_DATA_PULL_UP\n else:\n continue\n if len(lengths) != 40:\n # print \"Data not good, skip\"\n return False\n\n shortest_pull_up = min(lengths)\n longest_pull_up = max(lengths)\n halfway = (longest_pull_up + shortest_pull_up) / 2\n bits = []\n the_bytes = []\n byte = 0\n\n for length in lengths:\n bit = 0\n if length > halfway:\n bit = 1\n bits.append(bit)\n # print \"bits: %s, length: %d\" % (bits, len(bits))\n for i in range(0, len(bits)):\n byte = byte << 1\n if (bits[i]):\n byte = byte | 1\n else:\n byte = byte | 0\n if ((i + 1) % 8 == 0):\n the_bytes.append(byte)\n byte = 0\n # print the_bytes\n checksum = (the_bytes[0] + the_bytes[1] + the_bytes[2] + the_bytes[3]) & 0xFF\n if the_bytes[4] != checksum:\n # print \"Data not good, skip\"\n return False\n\n return the_bytes[0], the_bytes[2]",
"def get_bit(self):\n try:\n current_byte = self.contents[self.current_bit_position >> 3]\n except IndexError:\n raise EmptyStreamError(f\"Attempting read at bit position {self.current_bit_position} \"\n f\"(byte {self.current_bit_position >> 3})\")\n bit = min(1, current_byte & (1 << (7 - (self.current_bit_position % 8))))\n self.current_bit_position += 1\n return bit",
"def get_pressurelsb(self):\n byte_list = self.i2c.readfrom_mem(\n self.device_address,\n self.REGISTER_PRESSURELSB,\n 1,\n addrsize=8\n )\n val = 0\n val = val << 8 | byte_list[0]\n return val",
"def readByte(self) :\n tag = ord(self.minfile[self.pos])\n self.pos += 1\n return tag",
"def read_uint8(self):\n bytes = self.data[:1]\n value = struct.unpack('!B',bytes)[0]\n self.data = self.data[1:]\n return value",
"def _read_byte(self, register):\r\n return self._read_register(register, 1)[0]",
"def get_bit(byte, bit_num):\n return (byte & (1 << bit_num)) >> bit_num",
"def read_lock_bits(self):\n self.writecmd(self.APP, self.WRITE3_READ1, 4, [0x54, 0x00, 0x00, 0x00])\n return [(ord(self.data[0]) >> x) & 1 for x in range(5)]",
"def read_pin(self, pin):\n value = 0\n pin = pin - 1\n if pin < 8:\n self.__port_a_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOA)\n value = self.__checkbit(self.__port_a_value, pin)\n else:\n pin = pin - 8\n self.__port_b_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOB)\n value = self.__checkbit(self.__port_b_value, pin)\n return value",
"def _read_byte(self):\n # Setup io pin as input mode\n self.gpio.setup(self._io_pin, GPIO.IN)\n\n byte = 0\n for i in range(8):\n # Read data on the falling edge of clk\n self.gpio.output(self._clk_pin, GPIO.HIGH)\n self._sleep()\n\n self.gpio.output(self._clk_pin, GPIO.LOW)\n self._sleep()\n\n bit = self.gpio.input(self._io_pin)\n byte |= ((2 ** i) * bit)\n\n return byte",
"def read_bytes(self, ctrl_pin):\n try:\n '''\n ctrl_pin1.value(0)\n time.sleep_ms(2)\n ctrl_pin1.value(1)\n time.sleep_ms(220)\n ctrl_pin1.value(0)\n temp = hspi.read(2)\n ctrl_pin1.value(1)\n '''\n pin_ = self.ctrl_pins[ctrl_pin]\n pin_.value(0)\n time.sleep_ms(2)\n pin_.value(1)\n time.sleep_ms(220)\n pin_.value(0)\n temp = self.hspi.read(2)\n pin_.value(1)\n except KeyError:\n print('requested pin not defined')\n temp = None\n return temp",
"def read_wheel_drops(self):\n data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)\n\n if len(data) == WheelDrop.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n WheelDrop.WHEEL_DROP_L: bool(byte & WheelDrop.WHEEL_DROP_L),\n WheelDrop.WHEEL_DROP_R: bool(byte & WheelDrop.WHEEL_DROP_R)\n }\n else:\n return {\n WheelDrop.WHEEL_DROP_L: False,\n WheelDrop.WHEEL_DROP_R: False\n }",
"def get_byte():\n GPIO.setup(data_pins, GPIO.IN)\n # read the data pins\n GPIO.output(chip_select, 0)\n GPIO.output(clock_pin, 1)\n GPIO.output(clock_pin, 0)\n value = 0\n for i in range(0, 8):\n value += GPIO.input(data_pins[i]) << i\n return value",
"def _get_bit(byte, ii):\n return (byte >> (7 - ii)) & 1",
"def cal_reward(self,bump,DLightBump,AnalogBump,Infrared):\r\n\r\n r= 0.0\r\n ir_om= Infrared[0]\r\n ir_l = Infrared[1]\r\n ir_r = Infrared[2]\r\n if ir_om or ir_l or ir_r:\r\n # received infrared signal : r = +5\r\n r += self.reward_tb[\"infrared\"]\r\n s= self.real_state\r\n bonus_pos = (int(s[0]//10*10),int(s[1]//10*10))\r\n if self.bonus_pos.count(bonus_pos)==0:\r\n self.bonus_pos.append(bonus_pos)\r\n else:\r\n # bump something: r =-1\r\n r += self.reward_tb[\"hit\"] if bump & 1 != 0 or bump & 2 != 0 else 0\r\n\r\n\r\n # wheel drop: r = -2\r\n r += self.reward_tb[\"drop\"] if bump & 8 != 0 or bump & 4 != 0 else 0\r\n\r\n # Detected something light bumper: 0~-1\r\n threshold = 200\r\n sig_sum = 0.0\r\n for s in AnalogBump:\r\n sig_sum += s if s >threshold else 0.0\r\n sig_sum /= len(AnalogBump)\r\n r += self.reward_tb[\"light\"]*(sig_sum/self.max_strength)\r\n\r\n return r",
"def Read_Byte(self):\r\n data = self.Port.read(1)\r\n return data",
"def read_bits(fd, reg, bitStart, length):\n b = read_byte(fd, reg)\n mask = ((1 << length) - 1) << (bitStart - length + 1)\n b &= mask;\n b >>= (bitStart - length + 1);\n return b",
"def getHack(self, pin, board=0):\n return self.callModule('hackp', board, 0, 'read', [int(pin)])",
"def lo_band(self):\n return self._read(0x13, 7, 0x80)",
"def pull(self, pull: Optional[int] = None) -> Optional[int]:\n ...",
"def read(self, button):\n\n return self.hardware_interfaces[self._gpio].read(self._b_names[button])",
"def read( self ) -> int:\n\n if self._slave is not None:\n return self._filter( self._slave.read() )\n\n else:\n result = 0\n for pin in self._pins[ :: -1 ]:\n result = result << 1\n if pin.read():\n result |= 0b1\n return result"
] | [
"0.7068343",
"0.6726274",
"0.64553845",
"0.61745536",
"0.57517564",
"0.57068944",
"0.5390857",
"0.5379472",
"0.5366276",
"0.52374977",
"0.5218928",
"0.51846385",
"0.5162526",
"0.5160921",
"0.5150776",
"0.51350546",
"0.5082591",
"0.5051435",
"0.5050488",
"0.504368",
"0.5042952",
"0.502738",
"0.5010296",
"0.5007891",
"0.4979869",
"0.4953775",
"0.49527743",
"0.49481684",
"0.4930292",
"0.49122158"
] | 0.7672072 | 0 |
Reads the specified light bump sensor. This method is available in PASSIVE, SAFE, or FULL state. Only values from the Bump class should be passed in to this method as the specified light bump value. | def read_light_bump(self, light_bump):
data = self._read_packet(light_bump, Bump.LIGHT_DATA_BYTES)
if len(data) == Bump.LIGHT_DATA_BYTES:
return struct.unpack(">h", data)[0]
else:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_bump(self, bump):\n data = self._read_packet(Bump.PACKET_ID, Bump.DATA_BYTES)\n\n if len(data) == Bump.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n return bool(byte & bump)\n else:\n return False",
"def read_light_bumps(self):\n rtn = {}\n\n for light_bump in Bump.light_bumps():\n rtn[light_bump] = self.read_light_bump(light_bump)\n\n return rtn",
"def illuminance_sensor():\n\n\tsensor_name = \"illuminance\"\n\treg_addr = 26\n\tdata_len = 1\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\tdata = rospy.wait_for_message(\"MediumSize/SensorHub/Illuminance\", Illuminance, 2)\n\tresult = data.illuminance\n\n\tdelete_sensor(sensor_name)\n\treturn result",
"def getLightSensor() -> int:\n pass",
"def get_brightness(self, channel=None):\n return float(self.getSensorData(\"ILLUMINATION\", channel))",
"def cal_reward(self,bump,DLightBump,AnalogBump,Infrared):\r\n\r\n r= 0.0\r\n ir_om= Infrared[0]\r\n ir_l = Infrared[1]\r\n ir_r = Infrared[2]\r\n if ir_om or ir_l or ir_r:\r\n # received infrared signal : r = +5\r\n r += self.reward_tb[\"infrared\"]\r\n s= self.real_state\r\n bonus_pos = (int(s[0]//10*10),int(s[1]//10*10))\r\n if self.bonus_pos.count(bonus_pos)==0:\r\n self.bonus_pos.append(bonus_pos)\r\n else:\r\n # bump something: r =-1\r\n r += self.reward_tb[\"hit\"] if bump & 1 != 0 or bump & 2 != 0 else 0\r\n\r\n\r\n # wheel drop: r = -2\r\n r += self.reward_tb[\"drop\"] if bump & 8 != 0 or bump & 4 != 0 else 0\r\n\r\n # Detected something light bumper: 0~-1\r\n threshold = 200\r\n sig_sum = 0.0\r\n for s in AnalogBump:\r\n sig_sum += s if s >threshold else 0.0\r\n sig_sum /= len(AnalogBump)\r\n r += self.reward_tb[\"light\"]*(sig_sum/self.max_strength)\r\n\r\n return r",
"def get_luminosity(name):\n all_data = mc.get('sensor_values')\n name = _lookup(name)\n try:\n return all_data[name][3]\n except KeyError:\n raise KeyError(\"No sensor with that name\")",
"def get_lux(self):\n\n svc = \"urn:micasaverde-com:serviceId:LightSensor1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"CurrentLevel\")",
"def readBumper(self, msg):\n #\n # if(msg.state==1):#the bumper is pressed\n self.executeTrajectory()",
"def track_moisture_level():\n try:\n normal_level_init = 470\n low_level_init = 560\n\n global LIMIT_FLAG\n sensor_read = sensorData.read_moisture()\n generate_json.define_structure(\"moisture\", sensor_read)\n\n if sensor_read > low_level_init:\n if LIMIT_FLAG != 3:\n # When it is dry (Moisture Level Low)\n LIMIT_FLAG = 3\n blynk.notify('Moisture Level Low! Irrigation Needed')\n blynk.email('[email protected]', 'Alert: Moisture Level Low',\n 'Moisture Level Low! Irrigation Needed')\n logging_write()\n elif normal_level_init <= sensor_read <= low_level_init:\n if LIMIT_FLAG != 2:\n LIMIT_FLAG = 2\n logging_write()\n else:\n if LIMIT_FLAG != 1:\n LIMIT_FLAG = 1\n logging_write()\n return sensor_read\n\n except Exception as e:\n logging_write(e)",
"def read_core_vbat(self) -> float:",
"def read_lumi_counter(device):\n return read(device, \"gt_mp7_frame.rb.tcm_status.luminosity_seg_nr\")",
"def read_humidity(self):\n hRaw = self._read_multiple_bytes_as_array(self.BME280_HUM_MSB, 2)\n\n return float(self._compensate_humidity((hRaw[0] << 8) + hRaw[1]))",
"def _get_brightness(self):\n result = self._client_cmd('backlight_tool --get_brightness')\n return int(result.stdout.rstrip())",
"def test_02_Light(self):\n l_xml = self.m_xml.light_sect[1]\n print(PrettyFormatAny.form(l_xml, 'C3-02-A - XML'))\n l_device = self.m_device_obj\n l_light = deviceXML.read_base_device_object_xml(l_device, l_xml)\n print(PrettyFormatAny.form(l_light, 'C3-02-B - Light'))\n self.assertEqual(l_light.Name, TESTING_LIGHT_NAME_1)\n self.assertEqual(l_device.RoomName, TESTING_LIGHT_ROOM_NAME_1)\n self.assertEqual(l_light.UPBAddress, convert.dotted_hex2int(TESTING_INSTEON_ADDRESS_0))",
"def light_bumps():\n return [Bump.LIGHT_BUMP_L, Bump.LIGHT_BUMP_FL, Bump.LIGHT_BUMP_CL,\n Bump.LIGHT_BUMP_CR, Bump.LIGHT_BUMP_FR, Bump.LIGHT_BUMP_R]",
"def get_brightness(self):\n response = self.parent.backlight.get_brightness()\n if response is not None:\n response = response[0]\n return response",
"def read_bumps(self):\n data = self._read_packet(Bump.PACKET_ID, Bump.DATA_BYTES)\n\n if len(data) == Bump.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n Bump.BUMP_L: bool(byte & Bump.BUMP_L),\n Bump.BUMP_R: bool(byte & Bump.BUMP_R)\n }\n else:\n return {\n Bump.BUMP_L: False,\n Bump.BUMP_R: False\n }",
"def luminance(self) -> float:\n use_option = 1\n\n if use_option == 1:\n # 1st option\n msb = 0\n msb_2nd = 1\n while msb != msb_2nd:\n msb = self.read_byte_data(Reg.luminance_msb)\n lsb = self.read_byte_data(Reg.luminance_lsb)\n msb_2nd = self.read_byte_data(Reg.luminance_msb)\n\n elif use_option == 2:\n # 2nd option, which does not work on rpi OSError: [Errno 95] Operation not supported\n wr_msb = i2c_msg.write(self.device_addr, [Reg.luminance_msb])\n rd_msb = i2c_msg.read(self.device_addr, 1)\n wr_lsb = i2c_msg.write(self.device_addr, [Reg.luminance_lsb])\n rd_lsb = i2c_msg.read(self.device_addr, 1)\n self.i2c_rdwr(wr_msb, rd_msb, wr_lsb, rd_lsb)\n msb = ord(rd_msb.data)\n lsb = ord(rd_lsb.data)\n\n # Convert the data to lux\n exponent = (msb & 0xF0) >> 4\n mantissa = ((msb & 0x0F) << 4) | (lsb & 0x0F)\n return 2.0 ** exponent * mantissa * 0.045",
"def read_humidity(self):\n self._force_read(False)\n\n humADC = (self._read_register_1ubyte(self.BME680_HUM_MSB) << 8) | (self._read_register_1ubyte(self.BME680_HUM_LSB))\n\n return float(self._compensate_humidity(humADC))",
"def get_humidity(self):\n\n svc = \"urn:micasaverde-com:serviceId:HumiditySensor1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"CurrentLevel\")",
"def get_brightness():\n file = open (\"/home/fblaise/.i3/scripts/backlight_p.out\",'r')\n return \"{}%\".format(file.readline().strip())",
"def test_light_sensor(self):\n with patch.dict(TYPES, {'LightSensor': self.mock_type}):\n state = State('sensor.light', '900',\n {ATTR_DEVICE_CLASS: 'illuminance'})\n get_accessory(None, state, 2, {})",
"def read(self):\n # One method of getting a resource is calling get_resource from the client instance. get_resource\n # takes the lwm2m uri string as a parameter. The uri is the object id, then the instance id, then\n # the resource id.\n max_resource = lwm2m_client.get_resource(\"3323/1/5602\")\n # Resources can also be accessed using the index operator from the client instance.\n min_resource = lwm2m_client[3323][1][5601]\n \n pressure = self.pressure_sensor.read_psi()\n \n max_resource.value = max(max_resource.value, pressure)\n min_resource.value = min(min_resource.value, pressure)\n logger.debug(\"PressureValue read called: pressure = {}, max = {}, min = {}\".format(pressure, max_resource.value, min_resource.value))\n return pressure",
"def get_brightness(self) -> int:\r\n if not self.backlight:\r\n return -1\r\n\r\n return self.backlight.brightness",
"def low(self) -> float:\n return self.current_candle[4]",
"def read_sensor_data():\n global light_scheme_set, current_timeout\n\n # prevents very rapid changes of the color scheme\n if current_timeout is not 0:\n current_timeout -= 1\n return\n else:\n # call the shared library's sensor code\n reading = dll.readSensor()\n scheme = None\n\n # check if the scheme needs to be changed\n if reading >= settings.get('threshold') and light_scheme_set is not True:\n scheme = settings.get('light_color_scheme')\n light_scheme_set = True\n\n elif reading < settings.get('threshold') and light_scheme_set is not False:\n scheme = settings.get('dark_color_scheme')\n light_scheme_set = False\n\n # change user settings\n if scheme is not None:\n global_settings = sublime.load_settings('Preferences.sublime-settings')\n if global_settings.get('color_scheme') != scheme:\n global_settings.set('color_scheme', scheme)\n sublime.save_settings('Preferences.sublime-settings')\n current_timeout = settings.get('cycle_timeout')",
"def handle_temperature_low_received(msg: ReceiveMessage) -> None:\n self.handle_climate_attribute_received(\n msg, CONF_TEMP_LOW_STATE_TEMPLATE, \"_attr_target_temperature_low\"\n )",
"def brightness(self):\n _LOGGER.error(\"inside brightness\")\n url = self.urlx + '/dimstate'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = int(int(json_data['dimState'])*1.5)\n\n # if int(self._dimmer) < 170:\n self._dimmer = state\n\n return self._dimmer",
"def low_batt(self, channel=None):\n return self.getAttributeData(\"LOW_BAT\", channel)"
] | [
"0.67314094",
"0.5991866",
"0.5798978",
"0.5798736",
"0.5619214",
"0.55873466",
"0.5552298",
"0.55446345",
"0.5469533",
"0.54468226",
"0.5320882",
"0.5320139",
"0.53098434",
"0.5296004",
"0.52721196",
"0.52486736",
"0.52097356",
"0.5209081",
"0.5206106",
"0.51948255",
"0.51740944",
"0.5162214",
"0.51391",
"0.51332706",
"0.51135397",
"0.511242",
"0.51049227",
"0.5080105",
"0.5067754",
"0.5066869"
] | 0.7984124 | 0 |
Reads all the light bump sensors on the iRobot Create 2. This method is available to a robot in the PASSIVE, SAFE, or FULL state. | def read_light_bumps(self):
rtn = {}
for light_bump in Bump.light_bumps():
rtn[light_bump] = self.read_light_bump(light_bump)
return rtn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_light_bump(self, light_bump):\n data = self._read_packet(light_bump, Bump.LIGHT_DATA_BYTES)\n\n if len(data) == Bump.LIGHT_DATA_BYTES:\n return struct.unpack(\">h\", data)[0]\n else:\n return 0",
"def get_bump_sensors(self):\n x=self.send_packet_check_response('\\x40')\n x=ord(x)\n return tuple(bool(x&(1<<i)) for i in range(6))",
"def test_02_Light(self):\n l_xml = self.m_xml.light_sect[1]\n print(PrettyFormatAny.form(l_xml, 'C3-02-A - XML'))\n l_device = self.m_device_obj\n l_light = deviceXML.read_base_device_object_xml(l_device, l_xml)\n print(PrettyFormatAny.form(l_light, 'C3-02-B - Light'))\n self.assertEqual(l_light.Name, TESTING_LIGHT_NAME_1)\n self.assertEqual(l_device.RoomName, TESTING_LIGHT_ROOM_NAME_1)\n self.assertEqual(l_light.UPBAddress, convert.dotted_hex2int(TESTING_INSTEON_ADDRESS_0))",
"def getLightSensor() -> int:\n pass",
"def light_bumps():\n return [Bump.LIGHT_BUMP_L, Bump.LIGHT_BUMP_FL, Bump.LIGHT_BUMP_CL,\n Bump.LIGHT_BUMP_CR, Bump.LIGHT_BUMP_FR, Bump.LIGHT_BUMP_R]",
"def getSensors( self ):\n resp = super().getSensors()\n self.virtualBumpers = tuple( resp[\"virtualBumpers\"] )",
"def test_light_sensor(self):\n with patch.dict(TYPES, {'LightSensor': self.mock_type}):\n state = State('sensor.light', '900',\n {ATTR_DEVICE_CLASS: 'illuminance'})\n get_accessory(None, state, 2, {})",
"def test_04_Light(self):\n l_xml = self.m_xml.light\n l_device = self.m_device_obj\n l_light = deviceXML.read_base_device_object_xml(l_device, l_xml)\n # print(PrettyFormatAny.form(l_light, 'C4-04-A - Light'))\n self.assertEqual(l_light.Name, TESTING_LIGHT_NAME_0)\n self.assertEqual(l_device.RoomName, TESTING_LIGHT_ROOM_NAME_0)",
"def get_light_sensors(self):\n x=self.send_packet_check_response('\\x50')\n LS=[]\n for i in range(8):\n a=bytearray(x[i*3:(i+1)*3])\n LS.append(a[0]|(a[1]&0xf)<<8)\n LS.append(a[1]>>4|a[2]<<4)\n return LS",
"def update(self):\n # Light sensor reading: 16-bit integer\n self.light = self.envirophat.light.light()\n if self.use_leds:\n self.envirophat.leds.on()\n # the three color values scaled against the overall light, 0-255\n self.light_red, self.light_green, self.light_blue = self.envirophat.light.rgb()\n if self.use_leds:\n self.envirophat.leds.off()\n\n # accelerometer readings in G\n (\n self.accelerometer_x,\n self.accelerometer_y,\n self.accelerometer_z,\n ) = self.envirophat.motion.accelerometer()\n\n # raw magnetometer reading\n (\n self.magnetometer_x,\n self.magnetometer_y,\n self.magnetometer_z,\n ) = self.envirophat.motion.magnetometer()\n\n # temperature resolution of BMP280 sensor: 0.01°C\n self.temperature = round(self.envirophat.weather.temperature(), 2)\n\n # pressure resolution of BMP280 sensor: 0.16 Pa, rounding to 0.1 Pa\n # with conversion to 100 Pa = 1 hPa\n self.pressure = round(self.envirophat.weather.pressure() / 100.0, 3)\n\n # Voltage sensor, reading between 0-3.3V\n (\n self.voltage_0,\n self.voltage_1,\n self.voltage_2,\n self.voltage_3,\n ) = self.envirophat.analog.read_all()",
"def read(self):\n \n #self.lego_bus.write_byte(self.address, 0xF5)\n #time.sleep(0.2)\n block=0\n rawhumidity=0\n counter=0\n toRead=0\n while counter < 10 and toRead != 3:\n self.lego_bus.write_byte(self.address, 0xFE)\n self.lego_bus.write_byte(self.address, 0xF5)\n sleep(1)\n \n try:\n block = self.lego_bus.read_i2c_block_data(self.address, 0, 3)\n print (block)\n rawhumidity = (( block[0] << 8) | block[1])\n print (rawhumidity)\n except:\n print(\"Error inesperado:\", sys.exc_info()[0])\n counter +=1\n \n\n rh=999 # if invalid checksum return error value\n if (self.check_crc(rawhumidity, block[2]) == 0): #Verify the checksum \n #rawhumidity &= 0xFFFC\n print (rawhumidity)\n tempRH = rawhumidity / 65536.0\n rh = -6.0 + (125.0 * tempRH)\n return rh",
"def track_moisture_level():\n try:\n normal_level_init = 470\n low_level_init = 560\n\n global LIMIT_FLAG\n sensor_read = sensorData.read_moisture()\n generate_json.define_structure(\"moisture\", sensor_read)\n\n if sensor_read > low_level_init:\n if LIMIT_FLAG != 3:\n # When it is dry (Moisture Level Low)\n LIMIT_FLAG = 3\n blynk.notify('Moisture Level Low! Irrigation Needed')\n blynk.email('[email protected]', 'Alert: Moisture Level Low',\n 'Moisture Level Low! Irrigation Needed')\n logging_write()\n elif normal_level_init <= sensor_read <= low_level_init:\n if LIMIT_FLAG != 2:\n LIMIT_FLAG = 2\n logging_write()\n else:\n if LIMIT_FLAG != 1:\n LIMIT_FLAG = 1\n logging_write()\n return sensor_read\n\n except Exception as e:\n logging_write(e)",
"def illuminance_sensor():\n\n\tsensor_name = \"illuminance\"\n\treg_addr = 26\n\tdata_len = 1\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\tdata = rospy.wait_for_message(\"MediumSize/SensorHub/Illuminance\", Illuminance, 2)\n\tresult = data.illuminance\n\n\tdelete_sensor(sensor_name)\n\treturn result",
"def lights(self):\n return list(self.GetLights())",
"def _read_all(self) -> list[float]:\n with self.lock:\n return [sensor.read() for sensor in self.sensors]",
"def read_sensor_data():\n global light_scheme_set, current_timeout\n\n # prevents very rapid changes of the color scheme\n if current_timeout is not 0:\n current_timeout -= 1\n return\n else:\n # call the shared library's sensor code\n reading = dll.readSensor()\n scheme = None\n\n # check if the scheme needs to be changed\n if reading >= settings.get('threshold') and light_scheme_set is not True:\n scheme = settings.get('light_color_scheme')\n light_scheme_set = True\n\n elif reading < settings.get('threshold') and light_scheme_set is not False:\n scheme = settings.get('dark_color_scheme')\n light_scheme_set = False\n\n # change user settings\n if scheme is not None:\n global_settings = sublime.load_settings('Preferences.sublime-settings')\n if global_settings.get('color_scheme') != scheme:\n global_settings.set('color_scheme', scheme)\n sublime.save_settings('Preferences.sublime-settings')\n current_timeout = settings.get('cycle_timeout')",
"def readBumper(self, msg):\n #\n # if(msg.state==1):#the bumper is pressed\n self.executeTrajectory()",
"def read_lumi_counter(device):\n return read(device, \"gt_mp7_frame.rb.tcm_status.luminosity_seg_nr\")",
"def cal_reward(self,bump,DLightBump,AnalogBump,Infrared):\r\n\r\n r= 0.0\r\n ir_om= Infrared[0]\r\n ir_l = Infrared[1]\r\n ir_r = Infrared[2]\r\n if ir_om or ir_l or ir_r:\r\n # received infrared signal : r = +5\r\n r += self.reward_tb[\"infrared\"]\r\n s= self.real_state\r\n bonus_pos = (int(s[0]//10*10),int(s[1]//10*10))\r\n if self.bonus_pos.count(bonus_pos)==0:\r\n self.bonus_pos.append(bonus_pos)\r\n else:\r\n # bump something: r =-1\r\n r += self.reward_tb[\"hit\"] if bump & 1 != 0 or bump & 2 != 0 else 0\r\n\r\n\r\n # wheel drop: r = -2\r\n r += self.reward_tb[\"drop\"] if bump & 8 != 0 or bump & 4 != 0 else 0\r\n\r\n # Detected something light bumper: 0~-1\r\n threshold = 200\r\n sig_sum = 0.0\r\n for s in AnalogBump:\r\n sig_sum += s if s >threshold else 0.0\r\n sig_sum /= len(AnalogBump)\r\n r += self.reward_tb[\"light\"]*(sig_sum/self.max_strength)\r\n\r\n return r",
"def test_set_and_get_led_brightness_level(self):",
"def achieve_data(self, selection='all'):\r\n Infra_Omi, Infra_L, Infra_R = None, None,None\r\n bump, L_cnt, R_cnt, DLightBump, AnalogBump = None,None,None,None,None\r\n if selection == 'b':\r\n # read bumper data only\r\n Infra_Omi,Infra_L, Infra_R, bump,DLightBump, L, FL, CL, CR, FR, R = self.Roomba.ReadQueryStream(17,52,53, 7,45, 46, 47,48, 49, 50, 51)\r\n AnalogBump = (L, FL, CL, CR, FR, R)\r\n elif selection == 'e':\r\n # read encoder data only\r\n L_cnt, R_cnt= self.Roomba.ReadQueryStream( 43, 44)\r\n else:\r\n # read all data\r\n Infra_Omi, Infra_L, Infra_R,bump, L_cnt,R_cnt, DLightBump, L,FL, CL,CR,FR,R =self.Roomba.ReadQueryStream(17,52,53,7, 43, 44, 45, 46,47,48,49,50,51 )\r\n AnalogBump = (L,FL, CL,CR,FR,R)\r\n\r\n return L_cnt,R_cnt, bump, DLightBump,AnalogBump,Infra_Omi, Infra_L, Infra_R",
"def read(self):\n interval = self._low_ticks + self._high_ticks\n\n if interval > 0:\n ratio = float(self._low_ticks)/float(interval)*100.0\n conc = 1.1*pow(ratio,3)-3.8*pow(ratio,2)+520*ratio+0.62;\n else:\n ratio = 0\n conc = 0.0\n\n self._start_tick = None\n self._last_tick = None\n self._low_ticks = 0\n self._high_ticks = 0\n\n return (self.gpio, ratio, conc)",
"def readUpdate(self):\n if self.trigger == 0:\n return self.update()\n else:\n #GPIO.setmode(GPIO.BCM) # No need to setup again\n triggered = GPIO.input(self.trigger) == GPIO.LOW\n if triggered:\n return self.update()\n else:\n return None, False, self.name",
"def read_sensors():\n previous_time = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n delta = now - previous_time\n if delta.seconds >= sample_frequency:\n previous_time = now\n \n # Read SGP30.\n eCO2_data = sgp30.eCO2\n tvoc_data = sgp30.TVOC\n\n # Read VEML6070 and VEML7700, sample ten times.\n for j in range(10):\n light_data = light.lux\n uv_raw = uv.uv_raw\n uv_data = uv.get_index(uv_raw)\n\n # Read BME280.\n temp_data = bme280.temperature\n # Convert temperature (C->F)\n temp_data = temp_data * 1.8 + 32\n humid_data = bme280.humidity\n pressure_data = bme280.pressure\n\n # Write to database\n conn = sqlite3.connect(db)\n curs = conn.cursor()\n curs.execute(\"INSERT INTO data values(?, ?, ?, ?, ?, ?, ?, ?)\",\n (now, temp_data, humid_data, pressure_data, eCO2_data, tvoc_data,\n light_data, uv_data))\n conn.commit()\n conn.close()",
"def read_bumps(self):\n data = self._read_packet(Bump.PACKET_ID, Bump.DATA_BYTES)\n\n if len(data) == Bump.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n Bump.BUMP_L: bool(byte & Bump.BUMP_L),\n Bump.BUMP_R: bool(byte & Bump.BUMP_R)\n }\n else:\n return {\n Bump.BUMP_L: False,\n Bump.BUMP_R: False\n }",
"def read(self):\n self.pi.write(self.gpio, pigpio.LOW)\n time.sleep(0.017) # 17 ms\n self.pi.set_mode(self.gpio, pigpio.INPUT)\n self.pi.set_watchdog(self.gpio, 200)\n time.sleep(0.2)",
"def read_core_vbat(self) -> float:",
"def get_humidity_latched(self):\n return self.__latched_states[4]",
"def get_lux(self):\n\n svc = \"urn:micasaverde-com:serviceId:LightSensor1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"CurrentLevel\")",
"def high_bri(self):\r\n for light in self.lights:\r\n bri = self.b.get_light(light,'bri')\r\n bri = bri + 50 \r\n if bri > 255:\r\n bri = 255 \r\n self.b.set_light(light,'bri',bri)"
] | [
"0.6413741",
"0.6008232",
"0.5942121",
"0.5925277",
"0.58272475",
"0.55617225",
"0.548089",
"0.547599",
"0.54566234",
"0.5400661",
"0.53792256",
"0.5349697",
"0.534868",
"0.52767146",
"0.5245811",
"0.52437663",
"0.523763",
"0.5191179",
"0.5162148",
"0.5152366",
"0.51242745",
"0.51235324",
"0.51104045",
"0.5101882",
"0.50913554",
"0.5091326",
"0.5075968",
"0.5075253",
"0.5064456",
"0.5045907"
] | 0.6591236 | 0 |
Reads the provided wheel drop's bit from the Bump and wheel drop packet. This method is available to a robot in the PASSIVE, SAFE, or FULL state. | def read_wheel_drop(self, wheel_drop):
data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)
if len(data) == WheelDrop.DATA_BYTES:
byte = struct.unpack("B", data)[0]
return bool(byte & wheel_drop)
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_bump_wheel_drop(self):\n\n # Bump and Wheel drop packet information is interchangeable\n # in this case.\n data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)\n\n if len(data) == WheelDrop.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n Bump.BUMP_L: bool(byte & Bump.BUMP_L),\n Bump.BUMP_R: bool(byte & Bump.BUMP_L),\n WheelDrop.WHEEL_DROP_L: bool(byte & WheelDrop.WHEEL_DROP_L),\n WheelDrop.WHEEL_DROP_R: bool(byte & WheelDrop.WHEEL_DROP_R)\n }\n else:\n return {\n Bump.BUMP_L: False,\n Bump.BUMP_R: False,\n WheelDrop.WHEEL_DROP_L: False,\n WheelDrop.WHEEL_DROP_R: False\n }",
"def read_wheel_drops(self):\n data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)\n\n if len(data) == WheelDrop.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n WheelDrop.WHEEL_DROP_L: bool(byte & WheelDrop.WHEEL_DROP_L),\n WheelDrop.WHEEL_DROP_R: bool(byte & WheelDrop.WHEEL_DROP_R)\n }\n else:\n return {\n WheelDrop.WHEEL_DROP_L: False,\n WheelDrop.WHEEL_DROP_R: False\n }",
"def check_wheel_move_during_closed_loop_bpod(data, wheel_gain=None, **_):\n # Get the Bpod extracted wheel data\n timestamps = data.get('wheel_timestamps_bpod', data['wheel_timestamps'])\n position = data.get('wheel_position_bpod', data['wheel_position'])\n\n return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=1)",
"def lo_band(self):\n return self._read(0x13, 7, 0x80)",
"def get_pressurelsb(self):\n byte_list = self.i2c.readfrom_mem(\n self.device_address,\n self.REGISTER_PRESSURELSB,\n 1,\n addrsize=8\n )\n val = 0\n val = val << 8 | byte_list[0]\n return val",
"def read_bits(fd, reg, bitStart, length):\n b = read_byte(fd, reg)\n mask = ((1 << length) - 1) << (bitStart - length + 1)\n b &= mask;\n b >>= (bitStart - length + 1);\n return b",
"def read_dht11_dat():\n GPIO.setup(DHTPIN, GPIO.OUT)\n GPIO.output(DHTPIN, GPIO.HIGH)\n time.sleep(0.05)\n GPIO.output(DHTPIN, GPIO.LOW)\n time.sleep(0.02)\n GPIO.setup(DHTPIN, GPIO.IN, GPIO.PUD_UP)\n\n unchanged_count = 0\n last = -1\n data = []\n while True:\n current = GPIO.input(DHTPIN)\n data.append(current)\n if last != current:\n unchanged_count = 0\n last = current\n else:\n unchanged_count += 1\n if unchanged_count > MAX_UNCHANGE_COUNT:\n break\n\n state = STATE_INIT_PULL_DOWN\n\n lengths = []\n current_length = 0\n\n for current in data:\n current_length += 1\n\n if state == STATE_INIT_PULL_DOWN:\n if current == GPIO.LOW:\n state = STATE_INIT_PULL_UP\n else:\n continue\n if state == STATE_INIT_PULL_UP:\n if current == GPIO.HIGH:\n state = STATE_DATA_FIRST_PULL_DOWN\n else:\n continue\n if state == STATE_DATA_FIRST_PULL_DOWN:\n if current == GPIO.LOW:\n state = STATE_DATA_PULL_UP\n else:\n continue\n if state == STATE_DATA_PULL_UP:\n if current == GPIO.HIGH:\n current_length = 0\n state = STATE_DATA_PULL_DOWN\n else:\n continue\n if state == STATE_DATA_PULL_DOWN:\n if current == GPIO.LOW:\n lengths.append(current_length)\n state = STATE_DATA_PULL_UP\n else:\n continue\n if len(lengths) != 40:\n # print \"Data not good, skip\"\n return False\n\n shortest_pull_up = min(lengths)\n longest_pull_up = max(lengths)\n halfway = (longest_pull_up + shortest_pull_up) / 2\n bits = []\n the_bytes = []\n byte = 0\n\n for length in lengths:\n bit = 0\n if length > halfway:\n bit = 1\n bits.append(bit)\n # print \"bits: %s, length: %d\" % (bits, len(bits))\n for i in range(0, len(bits)):\n byte = byte << 1\n if (bits[i]):\n byte = byte | 1\n else:\n byte = byte | 0\n if ((i + 1) % 8 == 0):\n the_bytes.append(byte)\n byte = 0\n # print the_bytes\n checksum = (the_bytes[0] + the_bytes[1] + the_bytes[2] + the_bytes[3]) & 0xFF\n if the_bytes[4] != checksum:\n # print \"Data not good, skip\"\n return False\n\n return the_bytes[0], the_bytes[2]",
"def read(self, reg):\n return self.bus.read_byte_data(self.address, reg)",
"def read_bits(self, num_bits):\n this_value = self.message[0:num_bits]\n self.message = self.message[num_bits:]\n return this_value",
"def Read_Byte(self):\r\n data = self.Port.read(1)\r\n return data",
"def _read_byte(self):\n # Setup io pin as input mode\n self.gpio.setup(self._io_pin, GPIO.IN)\n\n byte = 0\n for i in range(8):\n # Read data on the falling edge of clk\n self.gpio.output(self._clk_pin, GPIO.HIGH)\n self._sleep()\n\n self.gpio.output(self._clk_pin, GPIO.LOW)\n self._sleep()\n\n bit = self.gpio.input(self._io_pin)\n byte |= ((2 ** i) * bit)\n\n return byte",
"def read_lock_bits(self):\n self.writecmd(self.APP, self.WRITE3_READ1, 4, [0x54, 0x00, 0x00, 0x00])\n return [(ord(self.data[0]) >> x) & 1 for x in range(5)]",
"def read_bumps(self):\n data = self._read_packet(Bump.PACKET_ID, Bump.DATA_BYTES)\n\n if len(data) == Bump.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n Bump.BUMP_L: bool(byte & Bump.BUMP_L),\n Bump.BUMP_R: bool(byte & Bump.BUMP_R)\n }\n else:\n return {\n Bump.BUMP_L: False,\n Bump.BUMP_R: False\n }",
"def read_led(self, pin):\n value = 0 #Default to nowt\n if self.iface.connected:\n try:\n value = self.iface.get_PWM_dutycycle(pin)\n except (AttributeError, IOError, pigpio.error):\n logging.error(\" Cannot read PWM of pin #%s\" % (pin,))\n else:\n logging.error(\" Interface not connected. Cannot read PWM of pin #%s.\" % (pin,))\n return value",
"def read_bytes(self, ctrl_pin):\n try:\n '''\n ctrl_pin1.value(0)\n time.sleep_ms(2)\n ctrl_pin1.value(1)\n time.sleep_ms(220)\n ctrl_pin1.value(0)\n temp = hspi.read(2)\n ctrl_pin1.value(1)\n '''\n pin_ = self.ctrl_pins[ctrl_pin]\n pin_.value(0)\n time.sleep_ms(2)\n pin_.value(1)\n time.sleep_ms(220)\n pin_.value(0)\n temp = self.hspi.read(2)\n pin_.value(1)\n except KeyError:\n print('requested pin not defined')\n temp = None\n return temp",
"def _read_byte(self, register):\r\n return self._read_register(register, 1)[0]",
"def rd_output(self, port, bit = None):\n hw = self.device.peripherals[port]\n val = hw.ODR.rd()\n if bit is None:\n return val\n return (val >> (bit & 15)) & 1",
"def read(self, bits_per_code_word):\n remaining_bits = bits_per_code_word\n acquired_bits = 0\n res = 0\n # while we need the remainder of the current byte\n while remaining_bits >= 8 -self.data_bit_idx:\n val = self.data[self.data_byte_idx] >> self.data_bit_idx\n res = res + (val << acquired_bits)\n remaining_bits = remaining_bits - (8 - self.data_bit_idx)\n acquired_bits = acquired_bits + (8 - self.data_bit_idx)\n self.data_byte_idx = self.data_byte_idx + 1\n self.data_bit_idx = 0\n\n # less than 8 (possibly 0) bits remain from last byte\n if remaining_bits > 0:\n val = self.data[self.data_byte_idx] & ((1<<remaining_bits)-1)\n res = res + (val << acquired_bits)\n acquired_bits = acquired_bits + remaining_bits\n self.data_bit_idx = remaining_bits\n remaining_bits = 0\n return res",
"def read_bump(self, bump):\n data = self._read_packet(Bump.PACKET_ID, Bump.DATA_BYTES)\n\n if len(data) == Bump.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n return bool(byte & bump)\n else:\n return False",
"def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):\n # Get the Bpod extracted wheel data\n timestamps = data['wheel_timestamps']\n position = data['wheel_position']\n\n return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3)",
"def read_light_bump(self, light_bump):\n data = self._read_packet(light_bump, Bump.LIGHT_DATA_BYTES)\n\n if len(data) == Bump.LIGHT_DATA_BYTES:\n return struct.unpack(\">h\", data)[0]\n else:\n return 0",
"def digital_read(self, pin_number):\n command = (''.join(('RD', str(pin_number)))).encode()\n self.conn.write(command)\n line_received = self.conn.readline().decode().strip()\n header, value = line_received.split(':') # e.g. D13:1\n if header == ('D'+ str(pin_number)):\n # If header matches\n return int(value)",
"def read(self, pin, is_differential=False):\n\n self.cs.value(0) # select\n self._out_buf[1] = ((not is_differential) << 7) | (pin << 4)\n self._spi.write_readinto(self._out_buf, self._in_buf)\n self.cs.value(1) # turn off\n return ((self._in_buf[1] & 0x03) << 8) | self._in_buf[2]",
"def rd_input(self, port, bit = None):\n hw = self.device.peripherals[port]\n val = hw.IDR.rd()\n if bit is None:\n return val\n return (val >> (bit & 15)) & 1",
"def _read(self, register):\n\n addr, num_bytes = register\n data = response = error = None\n if num_bytes == 1:\n data, response, error = self.packet_handler.read1ByteTxRx(\n self.port_handler, self._id, addr\n )\n elif num_bytes == 2:\n data, response, error = self.packet_handler.read2ByteTxRx(\n self.port_handler, self._id, addr\n )\n else:\n data, response, error = self.packet_handler.read4ByteTxRx(\n self.port_handler, self._id, addr\n )\n\n # Check response\n self._error_handler(response, error)\n\n return data",
"def _read(self):\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None",
"def read_data(self):\r\n\t\tdata = bus.read_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT)\r\n\t\t\r\n\t\t# Convert the data to 4-bits\r\n\t\tdata = (data & 0x0F)\r\n\t\t\r\n\t\tif (data & (2 ** self.pin)) == 0 :",
"def _butter_bandpass(lowcut: float, highcut: float, fs: float, order: int = 5) -> tuple:\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype=\"band\")\n return b, a",
"def read(self, button):\n\n return self.hardware_interfaces[self._gpio].read(self._b_names[button])",
"def get_lsb(self, x, w=32):\n mask = (1 << w) - 1\n return int(x & mask)"
] | [
"0.73851025",
"0.69642377",
"0.5358084",
"0.5344035",
"0.5337506",
"0.5197816",
"0.5173082",
"0.5127063",
"0.51059866",
"0.5095983",
"0.5042041",
"0.5022783",
"0.50138235",
"0.50050724",
"0.49956858",
"0.49634555",
"0.49330726",
"0.49276993",
"0.4920853",
"0.48635933",
"0.4863102",
"0.4858868",
"0.4816624",
"0.48093578",
"0.48040494",
"0.47881016",
"0.47832295",
"0.47821975",
"0.47749987",
"0.47740775"
] | 0.78514016 | 0 |
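A minimal usage sketch for the read_wheel_drop method shown above. The `robot` instance and its connection setup are assumptions, not part of the original code; only the WheelDrop constants referenced in the method are taken from it.

# Sketch only: `robot` is an assumed, already-connected instance of the class
# defining read_wheel_drop(), with the robot in PASSIVE, SAFE, or FULL mode.
left_dropped = robot.read_wheel_drop(WheelDrop.WHEEL_DROP_L)
right_dropped = robot.read_wheel_drop(WheelDrop.WHEEL_DROP_R)
if left_dropped or right_dropped:
    print("A wheel has dropped; stop driving.")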
Reads all the wheel drops on the iRobot Create 2. This method is available to a robot in the PASSIVE, SAFE, or FULL state. | def read_wheel_drops(self):
data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)
if len(data) == WheelDrop.DATA_BYTES:
byte = struct.unpack("B", data)[0]
return {
WheelDrop.WHEEL_DROP_L: bool(byte & WheelDrop.WHEEL_DROP_L),
WheelDrop.WHEEL_DROP_R: bool(byte & WheelDrop.WHEEL_DROP_R)
}
else:
return {
WheelDrop.WHEEL_DROP_L: False,
WheelDrop.WHEEL_DROP_R: False
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_wheel_drop(self, wheel_drop):\n data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)\n\n if len(data) == WheelDrop.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n return bool(byte & wheel_drop)\n else:\n return False",
"def read_bump_wheel_drop(self):\n\n # Bump and Wheel drop packet information is interchangeable\n # in this case.\n data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)\n\n if len(data) == WheelDrop.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n Bump.BUMP_L: bool(byte & Bump.BUMP_L),\n Bump.BUMP_R: bool(byte & Bump.BUMP_L),\n WheelDrop.WHEEL_DROP_L: bool(byte & WheelDrop.WHEEL_DROP_L),\n WheelDrop.WHEEL_DROP_R: bool(byte & WheelDrop.WHEEL_DROP_R)\n }\n else:\n return {\n Bump.BUMP_L: False,\n Bump.BUMP_R: False,\n WheelDrop.WHEEL_DROP_L: False,\n WheelDrop.WHEEL_DROP_R: False\n }",
"def wheel():\n wheel_pos = read_npy_file('wheel.position.npy')\n wheel_timestamps = read_npy_file('wheel.timestamps.npy')\n wheel_rate = get_rate(wheel_timestamps)\n\n wheel_ts = TimeSeries(\n name='wheel_position',\n starting_time=wheel_timestamps[0, 1],\n rate=wheel_rate,\n data=np.ravel(wheel_pos),\n unit='mm',\n conversion=0.135,\n description='The position reading of the rotary encoder attached to '\n 'the rubber wheel that the mouse pushes left and right '\n 'with his forelimbs.',\n comments='The wheel has radius 31 mm and 1440 ticks per revolution, '\n 'so multiply by 2*pi*r/tpr=0.135 to convert to millimeters. '\n 'Positive velocity (increasing numbers) correspond to clockwise '\n 'turns (if looking at the wheel from behind the mouse), i.e. '\n 'turns that are in the correct direction for stimuli presented '\n 'to the left. Likewise negative velocity corresponds to right choices.'\n )\n nwb_file.add_acquisition(wheel_ts)",
"def wheel_attributes(self):\n wheel1 = self.wheel\n wheel2 = TranslatedShape(shape_in=wheel1,\n displacement=Vector(0.,\n 0.,\n self.positions[1][0]\n - self.positions[1][1]))\n wheel3 = MirroredShape(shape_in=wheel1,\n reference_point=translate(self.position,\n \"y\",\n self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n wheel4 = MirroredShape(shape_in=wheel2,\n reference_point=translate(self.position,\n \"y\",\n self.width_car/2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n return [wheel1, wheel2, wheel3, wheel4]",
"def Drive(PL):\n #RETURNS TUPLE (RIGHTWHEELSPEED,LEFTWHEELSPEED)\n SteerPoints = Steering(PL[0])\n SpeedPoints = Speed(PL[1])\n FinalControl = Combine(SpeedPoints[0], SpeedPoints[1], SteerPoints[0], SteerPoints[1])\n return FinalControl",
"def spinWheels(vel1, vel2, time):\n global pub\n\n r = wheel_rad\n b = wheel_base\n #compute wheel speeds\n u = (r / 2) * (vel1 + vel2)\n w = (r / b) * (vel2 - vel2)\n start = rospy.Time().now().secs\n #create movement and stop messages\n move_msg = Twist()\n move_msg.linear.x = u\n move_msg.angular.z = w\n stop_msg = Twist()\n stop_msg.linear.x = 0\n stop_msg.angular.z = 0\n #publish move message for desired time\n while(rospy.Time().now().secs - start < time and not rospy.is_shutdown()):\n pub.publish(move_msg)\n pub.publish(stop_msg)",
"def wheels_properties(self):\n height_wheels = 180.\n radius_wheels = 300.\n width_wheels = 80.\n return height_wheels, radius_wheels, width_wheels",
"def spinWheels(self, v_left, v_right, duration):\n print('spinwheels')\n diameter = 0.23 # based on wheel track from https://yujinrobot.github.io/kobuki/doxygen/enAppendixKobukiParameters.html\n\n\n driveStartTime = rospy.Time.now().secs\n\n w=(v_right-v_left)/diameter\n\n u=(v_left+v_right)/2\n\n move_msg=Twist()\n move_msg.linear.x=u\n move_msg.angular.z=w\n\n stop_msg =Twist()\n stop_msg.linear.x=0\n stop_msg.angular.z=0\n\n start=time.time()\n currentTime=start\n\n while(currentTime - start <duration and not rospy.is_shutdown()):\n currentTime=time.time()\n self._vel_pub.publish(move_msg)\n print('spinwheels: moving')\n print('\\n time: '+str(time)+'start: '+str(start)+'current: '+str(currentTime))\n self._vel_pub.publish(stop_msg)\n print('spinwheels: stoped')",
"def wheels(self, left, right):\n\t\twith self.data_lock:\n\t\t\tself.leftSpeed \t= left\t/500.0\t# leftSpeed and rightSpeed are in pixels per second\n\t\t\tself.rightSpeed = right\t/500.0",
"def main():\n\n #robot = S2Serial( \"/dev/ttyUSB0\" )\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n for i in range( 30 ):\n print( \"getIRLeft : \", robot.getIRLeft() )\n print( \"getIRRight: \", robot.getIRRight() )\n print( \"getAllIR : \", robot.getAllIR() )\n print( \"getIrEx(0): \", robot.getIrEx( 0, 128 ) )\n print( \"getIrEx(1): \", robot.getIrEx( 1, 128 ) )\n\n robot.close()",
"def two_wheel_drive(x, y, heading, speed, length, steering_angle, gas, brake, gas_to_acc=1, brake_to_acc=1):\n\n front_wheel_x = x + length / 2 * math.cos(heading)\n front_wheel_y = y + length / 2 * math.sin(heading)\n back_wheel_x = x - length / 2 * math.cos(heading)\n back_wheel_y = y - length / 2 * math.sin(heading)\n\n speed += (\n gas * gas_to_acc * sim_c.DT - (\n brake * brake_to_acc * sim_c.DT) - road_c.DRAG_COEF * speed * sim_c.DT)\n speed = speed if speed > 0 else 0\n\n # update wheel positions\n front_wheel_x += speed * c.DT * math.cos(heading + steering_angle)\n front_wheel_y += speed * c.DT * math.sin(heading + steering_angle)\n back_wheel_x += speed * c.DT * math.cos(heading)\n back_wheel_y += speed * c.DT * math.sin(heading)\n\n # update car position and heading\n x = (front_wheel_x + back_wheel_x) / 2\n y = (front_wheel_y + back_wheel_y) / 2\n heading = math.atan2((front_wheel_y - back_wheel_y), (front_wheel_x - back_wheel_x))\n\n return x, y, heading, speed",
"def _reset_wheel(self):\n [j.reset_dynamic_object() for j in self.wheels]\n\n p = [[-pi / 4, 0, 0], [pi / 4, 0, pi], [-pi / 4, 0, 0], [pi / 4, 0, pi]]\n\n for i in range(self.num_wheels):\n self.joints_slipping[i].set_position([0, 0, 0],\n relative_to=self.joints[i],\n reset_dynamics=False)\n self.joints_slipping[i].set_orientation(p[i],\n relative_to=self.joints[i],\n reset_dynamics=False)\n self.wheels[i].set_position([0, 0, 0], relative_to=self.joints[i],\n reset_dynamics=False)\n self.wheels[i].set_orientation([0, 0, 0],\n relative_to=self.joints[i],\n reset_dynamics=False)",
"def servo_read_all(self):\n msg = b'\\x25\\x00'\n parameter_len = 16\n ans = self.__bt.read(msg, parameter_len)\n if ans is not None:\n return [x for x in ans]\n return None",
"def getGyroReadings():\n\n gyro_readings = RoboCaller().call(\"getGyroReadings\", \"int\")\n for i in range(len(gyro_readings)):\n gyro_readings[i] = (gyro_readings[i] + 2**15) % 2**16 - 2**15\n return gyro_readings",
"def get_wheel_type(self):",
"def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):\n # Get the Bpod extracted wheel data\n timestamps = data['wheel_timestamps']\n position = data['wheel_position']\n\n return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3)",
"def check_wheel_move_during_closed_loop_bpod(data, wheel_gain=None, **_):\n # Get the Bpod extracted wheel data\n timestamps = data.get('wheel_timestamps_bpod', data['wheel_timestamps'])\n position = data.get('wheel_position_bpod', data['wheel_position'])\n\n return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=1)",
"def control_wheel(self, om_w, time_for_move, side):\n for i in range(1, len(time_for_move)):\n if side == 'left':\n print(\"OMEGA LEFT CONTROL\")\n self.set_speed(om_w[i - 1], 10)\n rospy.sleep(time_for_move[i] - time_for_move[i - 1])\n else:\n print(\"OMEGA RIGHT CONTROL\")\n self.set_speed(10, om_w[i - 1])\n rospy.sleep(time_for_move[i] - time_for_move[i - 1] - 0.5)\n self.flag = True\n self.set_speed(10, 10)",
"def init_devices(timeStep):\r\n\r\n robot = Robot()\r\n \r\n # Obtener dispositivos correspondientes a las ruedas.\r\n leftWheel = robot.getDevice('left wheel motor')\r\n rightWheel = robot.getDevice('right wheel motor')\r\n # Utilizamos velocidad, establecemos posición a infinito.\r\n leftWheel.setPosition(float('inf'))\r\n rightWheel.setPosition(float('inf')) \r\n leftWheel.setVelocity(0)\r\n rightWheel.setVelocity(0)\r\n\r\n # Obtener y activar el dispositivo de la cámara \r\n camera = robot.getDevice('camera')\r\n camera.enable(timeStep*10)\r\n\r\n\r\n #activar sensores ultrasónicos\r\n ultrasonic_sensors = []\r\n i = 0\r\n for sensor in ultrasonic_sensors_names: \r\n ultrasonic_sensors = ultrasonic_sensors + [robot.getDevice(sensor)]\r\n ultrasonic_sensors[i].enable(timeStep)\r\n i = i + 1\r\n print(ultrasonic_sensors_names)\r\n\r\n #activar sensores infrarojos\r\n infrared_sensors = []\r\n i = 0\r\n for sensor in infrared_sensors_names: \r\n infrared_sensors = infrared_sensors + [robot.getDevice(sensor)]\r\n infrared_sensors[i].enable(timeStep)\r\n i = i + 1\r\n \r\n return robot, camera, leftWheel, rightWheel , infrared_sensors, ultrasonic_sensors",
"def read(self):\n \n #self.lego_bus.write_byte(self.address, 0xF5)\n #time.sleep(0.2)\n block=0\n rawhumidity=0\n counter=0\n toRead=0\n while counter < 10 and toRead != 3:\n self.lego_bus.write_byte(self.address, 0xFE)\n self.lego_bus.write_byte(self.address, 0xF5)\n sleep(1)\n \n try:\n block = self.lego_bus.read_i2c_block_data(self.address, 0, 3)\n print (block)\n rawhumidity = (( block[0] << 8) | block[1])\n print (rawhumidity)\n except:\n print(\"Error inesperado:\", sys.exc_info()[0])\n counter +=1\n \n\n rh=999 # if invalid checksum return error value\n if (self.check_crc(rawhumidity, block[2]) == 0): #Verify the checksum \n #rawhumidity &= 0xFFFC\n print (rawhumidity)\n tempRH = rawhumidity / 65536.0\n rh = -6.0 + (125.0 * tempRH)\n return rh",
"def set_wheel_velocity(self, vel):\n assert len(vel) == 2, \"Expect velocity to be array of size two\"\n p.setJointMotorControlArray(self.pybullet_id, self.joint_ids, p.VELOCITY_CONTROL,\n targetVelocities=vel)",
"def get_motors():\n msg = envia(ser, 'GetMotors LeftWheel RightWheel').split('\\n')\n \n # For better understanding see the neato commands PDF.\n \n L = int(msg[4].split(',')[1])\n R = int(msg[8].split(',')[1])\n \n return (L, R)",
"def set_wheel_velocity(self, vel):\n assert len(vel) == 2, \"Expect velocity to be array of size two\"\n p.setJointMotorControlArray(self.pybullet_id, self.joint_ids, p.VELOCITY_CONTROL,\n targetVelocities=vel)",
"def set_wheel_velocity(self, vel):\n assert len(vel) == 2, \"Expect velocity to be array of size two\"\n p.setJointMotorControlArray(self.pybullet_id, self.joint_ids, p.VELOCITY_CONTROL,\n targetVelocities=vel)",
"def do_line_scan_shutter_closed(self):\n scan = self.scan\n laser = self.devices[scan['laser']['name']]\n shutter = self.scan['shutter']\n ni_daq = self.devices['NI-DAQ']\n ni_daq.driver.digital_output(shutter['port'], False)\n if not isinstance(shutter['delay'], Q_):\n delay = Q_(shutter['delay'])\n else:\n delay = shutter['delay']\n delay = delay.m_as('s')\n if delay > 0:\n time.sleep(delay)\n ni_daq.driver.digital_output(shutter['port'], True)\n print('Pumped Scan')\n laser.driver.execute_sweep()\n approx_time_to_scan = (laser.params['stop_wavelength'] - laser.params['start_wavelength']) / laser.params['wavelength_speed']*laser.params['wavelength_sweeps']\n\n while laser.driver.sweep_condition != 'Stop':\n sleep(approx_time_to_scan.m/config.monitor_read_scan)\n ni_daq.driver.digital_output(shutter['port'], False)\n laser.driver.wavelength = scan['laser']['params']['start_wavelength']\n \n # Repeat with shutter closed to get reference scan\n \n ni_daq.driver.digital_output(shutter['port'], False)\n if not isinstance(shutter['delay'], Q_):\n delay = Q_(shutter['delay'])\n else:\n delay = shutter['delay']\n delay = delay.m_as('s')\n if delay > 0:\n time.sleep(delay)\n print('Reference Scan')\n laser.driver.execute_sweep()\n approx_time_to_scan = (laser.params['stop_wavelength'] - laser.params['start_wavelength']) / laser.params['wavelength_speed']*laser.params['wavelength_sweeps']\n while laser.driver.sweep_condition != 'Stop':\n sleep(approx_time_to_scan.m/config.monitor_read_scan)\n laser.driver.wavelength = scan['laser']['params']['start_wavelength']\n \n return True",
"def wheel(ticks):\n m = PyMouse()\n m.scroll(ticks)",
"def main():\n\n #robot = S2Serial( \"/dev/ttyUSB0\" )\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n print( \"getMotorStats : \", robot.getMotorStats() )\n print( \"getEncoders : \", robot.getEncoders( 1 ) )\n print( \"getStall : \", robot.getStall() )\n print( \"setMotors 100, -100 : \", robot.setMotors( 100, -100) )\n time.sleep( 3.0 )\n print( \"setMotors -100, 100 : \", robot.setMotors( -100, 100) )\n time.sleep( 3.0 )\n print( \"setMotorsOff : \", robot.setMotorsOff() )\n\n robot.close()",
"def read_odometer(self):\r\n print(\"Este Carro Tem \" + str(self.odometer_reading) + \" Milhas Rodadas.\")",
"def getAccelReadings():\n\n accel_readings = RoboCaller().call(\"getAccelReadings\", \"int\")\n for i in range(len(accel_readings)):\n accel_readings[i] = ((accel_readings[i] + 2**15) % 2**16 - 2**15)\n return accel_readings",
"def read(self):\n self.pi.write(self.gpio, pigpio.LOW)\n time.sleep(0.017) # 17 ms\n self.pi.set_mode(self.gpio, pigpio.INPUT)\n self.pi.set_watchdog(self.gpio, 200)\n time.sleep(0.2)"
] | [
"0.56443477",
"0.553086",
"0.5527709",
"0.5387049",
"0.5382978",
"0.5253048",
"0.520016",
"0.5176538",
"0.51617205",
"0.51523834",
"0.5095421",
"0.50654536",
"0.50101584",
"0.49713272",
"0.4913931",
"0.4907659",
"0.49053866",
"0.49001467",
"0.48717758",
"0.48663768",
"0.47951072",
"0.47923222",
"0.47915727",
"0.47915727",
"0.47795177",
"0.47746608",
"0.47692353",
"0.47619534",
"0.47602144",
"0.4737774"
] | 0.6462726 | 0 |
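A short sketch of how the dictionary returned by read_wheel_drops might be consumed. As above, the `robot` instance is an assumed placeholder; the WheelDrop keys come from the method's return value.

# Sketch only: `robot` is an assumed instance of the class defining read_wheel_drops().
drops = robot.read_wheel_drops()
for side, dropped in drops.items():
    if dropped:
        print("Wheel drop detected:", side)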
Reads all the wheel drops and bumps on the iRobot Create 2 in the bump and wheel drop packet. This method is available to a robot in the PASSIVE, SAFE, or FULL state. | def read_bump_wheel_drop(self):
# Bump and Wheel drop packet information is interchangeable
# in this case.
data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)
if len(data) == WheelDrop.DATA_BYTES:
byte = struct.unpack("B", data)[0]
return {
Bump.BUMP_L: bool(byte & Bump.BUMP_L),
                Bump.BUMP_R: bool(byte & Bump.BUMP_R),
WheelDrop.WHEEL_DROP_L: bool(byte & WheelDrop.WHEEL_DROP_L),
WheelDrop.WHEEL_DROP_R: bool(byte & WheelDrop.WHEEL_DROP_R)
}
else:
return {
Bump.BUMP_L: False,
Bump.BUMP_R: False,
WheelDrop.WHEEL_DROP_L: False,
WheelDrop.WHEEL_DROP_R: False
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_wheel_drops(self):\n data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)\n\n if len(data) == WheelDrop.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n WheelDrop.WHEEL_DROP_L: bool(byte & WheelDrop.WHEEL_DROP_L),\n WheelDrop.WHEEL_DROP_R: bool(byte & WheelDrop.WHEEL_DROP_R)\n }\n else:\n return {\n WheelDrop.WHEEL_DROP_L: False,\n WheelDrop.WHEEL_DROP_R: False\n }",
"def read_wheel_drop(self, wheel_drop):\n data = self._read_packet(WheelDrop.PACKET_ID, WheelDrop.DATA_BYTES)\n\n if len(data) == WheelDrop.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n return bool(byte & wheel_drop)\n else:\n return False",
"def read(self):\n \n #self.lego_bus.write_byte(self.address, 0xF5)\n #time.sleep(0.2)\n block=0\n rawhumidity=0\n counter=0\n toRead=0\n while counter < 10 and toRead != 3:\n self.lego_bus.write_byte(self.address, 0xFE)\n self.lego_bus.write_byte(self.address, 0xF5)\n sleep(1)\n \n try:\n block = self.lego_bus.read_i2c_block_data(self.address, 0, 3)\n print (block)\n rawhumidity = (( block[0] << 8) | block[1])\n print (rawhumidity)\n except:\n print(\"Error inesperado:\", sys.exc_info()[0])\n counter +=1\n \n\n rh=999 # if invalid checksum return error value\n if (self.check_crc(rawhumidity, block[2]) == 0): #Verify the checksum \n #rawhumidity &= 0xFFFC\n print (rawhumidity)\n tempRH = rawhumidity / 65536.0\n rh = -6.0 + (125.0 * tempRH)\n return rh",
"def main():\n\n #robot = S2Serial( \"/dev/ttyUSB0\" )\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n for i in range( 30 ):\n print( \"getIRLeft : \", robot.getIRLeft() )\n print( \"getIRRight: \", robot.getIRRight() )\n print( \"getAllIR : \", robot.getAllIR() )\n print( \"getIrEx(0): \", robot.getIrEx( 0, 128 ) )\n print( \"getIrEx(1): \", robot.getIrEx( 1, 128 ) )\n\n robot.close()",
"def servo_read_all(self):\n msg = b'\\x25\\x00'\n parameter_len = 16\n ans = self.__bt.read(msg, parameter_len)\n if ans is not None:\n return [x for x in ans]\n return None",
"def read_bank_2():\n return _pigpio_command(_control, _PI_CMD_BR2, 0, 0)",
"def read_dht11_dat():\n GPIO.setup(DHTPIN, GPIO.OUT)\n GPIO.output(DHTPIN, GPIO.HIGH)\n time.sleep(0.05)\n GPIO.output(DHTPIN, GPIO.LOW)\n time.sleep(0.02)\n GPIO.setup(DHTPIN, GPIO.IN, GPIO.PUD_UP)\n\n unchanged_count = 0\n last = -1\n data = []\n while True:\n current = GPIO.input(DHTPIN)\n data.append(current)\n if last != current:\n unchanged_count = 0\n last = current\n else:\n unchanged_count += 1\n if unchanged_count > MAX_UNCHANGE_COUNT:\n break\n\n state = STATE_INIT_PULL_DOWN\n\n lengths = []\n current_length = 0\n\n for current in data:\n current_length += 1\n\n if state == STATE_INIT_PULL_DOWN:\n if current == GPIO.LOW:\n state = STATE_INIT_PULL_UP\n else:\n continue\n if state == STATE_INIT_PULL_UP:\n if current == GPIO.HIGH:\n state = STATE_DATA_FIRST_PULL_DOWN\n else:\n continue\n if state == STATE_DATA_FIRST_PULL_DOWN:\n if current == GPIO.LOW:\n state = STATE_DATA_PULL_UP\n else:\n continue\n if state == STATE_DATA_PULL_UP:\n if current == GPIO.HIGH:\n current_length = 0\n state = STATE_DATA_PULL_DOWN\n else:\n continue\n if state == STATE_DATA_PULL_DOWN:\n if current == GPIO.LOW:\n lengths.append(current_length)\n state = STATE_DATA_PULL_UP\n else:\n continue\n if len(lengths) != 40:\n # print \"Data not good, skip\"\n return False\n\n shortest_pull_up = min(lengths)\n longest_pull_up = max(lengths)\n halfway = (longest_pull_up + shortest_pull_up) / 2\n bits = []\n the_bytes = []\n byte = 0\n\n for length in lengths:\n bit = 0\n if length > halfway:\n bit = 1\n bits.append(bit)\n # print \"bits: %s, length: %d\" % (bits, len(bits))\n for i in range(0, len(bits)):\n byte = byte << 1\n if (bits[i]):\n byte = byte | 1\n else:\n byte = byte | 0\n if ((i + 1) % 8 == 0):\n the_bytes.append(byte)\n byte = 0\n # print the_bytes\n checksum = (the_bytes[0] + the_bytes[1] + the_bytes[2] + the_bytes[3]) & 0xFF\n if the_bytes[4] != checksum:\n # print \"Data not good, skip\"\n return False\n\n return the_bytes[0], the_bytes[2]",
"def readBumper(self, msg):\n #\n # if(msg.state==1):#the bumper is pressed\n self.executeTrajectory()",
"def get_bump_sensors(self):\n x=self.send_packet_check_response('\\x40')\n x=ord(x)\n return tuple(bool(x&(1<<i)) for i in range(6))",
"def receive(self):\r\n try:\r\n while True:\r\n while True:\r\n try:\r\n dat, (ip, _) = self.sock.recvfrom(packet.R2H_PACKET_SIZE)\r\n break\r\n except socket.error:\r\n continue\r\n tssrx_k = time.perf_counter()\r\n\r\n if self.robot_ip == '0.0.0.0':\r\n # set ip addr to the addr from which sensor data was received\r\n self.robot_ip = ip\r\n\r\n if len(dat) is packet.R2H_PACKET_SIZE:\r\n self.rx_cntr.inc()\r\n\r\n data = struct.unpack(packet.R2H_PACKET_FORMAT, dat)\r\n seq_number, gyro_rate, enc_l, enc_r,\\\r\n gyro_offset, motorVoltageApplied_left, motorVoltageApplied_right = self.read_control_measurements(data)\r\n\r\n self.gyro_rate_logger.timestamp(timestamp=tssrx_k, value=gyro_rate)\r\n self.motor_position_left_logger.timestamp(timestamp=tssrx_k, value=enc_l)\r\n self.motor_position_right_logger.timestamp(timestamp=tssrx_k, value=enc_r)\r\n self.gyro_offset_logger.timestamp(timestamp=tssrx_k, value=gyro_offset)\r\n self.motor_voltage_applied_left_logger.timestamp(timestamp=tssrx_k, value=motorVoltageApplied_left)\r\n self.motor_voltage_applied_right_logger.timestamp(timestamp=tssrx_k, value=motorVoltageApplied_right)\r\n\r\n # sequence number evaluation\r\n if seq_number == self.old_seq_number + 1:\r\n # everything is shiny proceed\r\n pass\r\n elif seq_number > self.old_seq_number + 1:\r\n # we are receiving newer packet than expected -> lost packets\r\n logging.debug(\"seqnr: %s, oldseqnr: %s\", seq_number, self.old_seq_number)\r\n self.lost_cntr.inc()\r\n else:\r\n # seq_number is smaller than already received one -> out of order\r\n self.reord_cntr.inc()\r\n # throw away out of order packets\r\n continue\r\n\r\n self.old_seq_number = seq_number\r\n\r\n # Timestamp extraction\r\n self.tssrx_ks[seq_number + 1] = tssrx_k\r\n if data[packet.R2H_PACKET_VARS.Tasrx_k] is not 0:\r\n self.tsr_ks[seq_number] = packet.time2float(data[packet.R2H_PACKET_VARS.Tsr_k])\r\n self.tsstx_ks[seq_number] = packet.time2float(data[packet.R2H_PACKET_VARS.Tsstx_k])\r\n self.tasrx_ks[seq_number] = packet.time2float(data[packet.R2H_PACKET_VARS.Tasrx_k])\r\n self.taw_ks[seq_number] = packet.time2float(data[packet.R2H_PACKET_VARS.Taw_k])\r\n\r\n x = [seq_number, gyro_rate, enc_l, enc_r,\r\n gyro_offset, motorVoltageApplied_left, motorVoltageApplied_right]\r\n return x\r\n\r\n else:\r\n logging.debug(\"wrong packet size\")\r\n\r\n except socket.timeout:\r\n logging.error('Rx timeout')\r\n except socket.error:\r\n logging.error('Rx error')\r\n except IndexError:\r\n logging.error('Index error')\r\n traceback.print_exc()\r\n except KeyboardInterrupt:\r\n self.stat.stop_logging()\r\n logging.error('Closing socket')\r\n self.sock.close()\r\n return",
"def read(self):\r\n\t\tself.mmio.write(iop_const.MAILBOX_OFFSET +\r\n\t\t\t\t\t\tiop_const.MAILBOX_PY2IOP_CMD_OFFSET, 0x3) \r\n\t\twhile (self.mmio.read(iop_const.MAILBOX_OFFSET +\r\n\t\t\t\t\t\t\t\tiop_const.MAILBOX_PY2IOP_CMD_OFFSET) == 0x3):\r\n\t\t\tpass\r\n\t\tmx = self._reg2int(self.mmio.read(iop_const.MAILBOX_OFFSET))\r\n\t\tmy = self._reg2int(self.mmio.read(iop_const.MAILBOX_OFFSET+4))\r\n\t\tmz = self._reg2int(self.mmio.read(iop_const.MAILBOX_OFFSET+8))\r\n\t\treturn [float(\"{0:.2f}\".format(mx*0.004)),\r\n\t\t\t\tfloat(\"{0:.2f}\".format(my*0.004)),\r\n\t\t\t\tfloat(\"{0:.2f}\".format(mz*0.004))]",
"def __init__(self, pads):\n mfioCommon.__init__(self, pads)\n\n mfio_width = len(pads)\n #\n mfio_o = Signal(mfio_width)\n mfio_oe = Signal(mfio_width)\n mfio_i = Signal(mfio_width)\n # create single pin tristate buffers\n for b in range(mfio_width):\n self.submodules += mfioSinglePin(pads, b, mfio_i[b], mfio_o[b], mfio_oe[b]) \t\n\n # Wishbone \n self.bus = bus = wishbone.Interface()\n\n #\n sel = Signal(mfio_width)\n inbit = Signal(1)\n\n # todo: dynamic address width calc to optimize the decode logic\n addr_width = 12\n # 1024 IO max\n seladr = Signal(10)\n\n # 10 bits of address = 1024 pins max\n self.comb += seladr.eq(self.bus.adr[:10]) \n \n # address decoder\n for b in range(mfio_width):\n self.comb += sel[b].eq(seladr == b)\n\n self.comb += inbit.eq( (mfio_i & sel) != 0 )\n\n # Read bit\n rdbus = Signal(32)\n self.comb += [\n rdbus[0].eq(inbit),\n bus.dat_r.eq(rdbus)\n ]\t\n\n # process output \n outbit = Signal(1)\n oebit = Signal(1)\n wren = Signal(1)\n\n # PINAPI 1.0 compatible: 0 = drive 0, 1 drive 1, 3 = HiZ\n self.comb += outbit.eq( bus.dat_w[0] )\n self.comb += oebit.eq ( ~bus.dat_w[1] )\n\n # write enable\n self.comb += wren.eq(self.bus.stb & self.bus.cyc & self.bus.we) \n\n for b in range(mfio_width):\n self.sync += If(wren & sel[b], mfio_o[b].eq(outbit), mfio_oe[b].eq(oebit) )\n\n seq = [\n (1, [bus.ack.eq(1)]), #\n (1, [bus.ack.eq(0)]), #\n (0, []),\n ]\n \n t, tseq = 0, []\n for dt, a in seq:\n tseq.append((t, a))\n t += dt\n\n self.sync += timeline(bus.cyc & bus.stb, tseq)",
"def main():\n\n #robot = S2Serial( \"/dev/ttyUSB0\" )\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n print( \"getMotorStats : \", robot.getMotorStats() )\n print( \"getEncoders : \", robot.getEncoders( 1 ) )\n print( \"getStall : \", robot.getStall() )\n print( \"setMotors 100, -100 : \", robot.setMotors( 100, -100) )\n time.sleep( 3.0 )\n print( \"setMotors -100, 100 : \", robot.setMotors( -100, 100) )\n time.sleep( 3.0 )\n print( \"setMotorsOff : \", robot.setMotorsOff() )\n\n robot.close()",
"def read(self):\n try:\n cmd = 'SAMP:COUN 1' \n self.handle.write(cmd) #one sample per trigger\n self.handle.write('TRIG:SOUR BUS') #triggered by command\n self.handle.write('TRIG:COUN 1') #one trigger to return to wait for trg\n self.handle.write('INIT:IMM') #DVM to \"wait for trigger\" \n self.handle.write('*TRG')\n startTime = time.time()\n while True: #wait until measuring flag goes to 0\n try:\n measured = self.handle.ask(\"DATA:POIN?\")\n measured = measured.strip() #remove CR \n measured = int(measured) #convert to number\n if measured == 1: #final number of samples achieved\n break;\n except Exception:\n print('Dvm34411:read() polling failed !')\n raise\n \n if time.time() - startTime > self.timeout:\n print('Dvm34411:read() timeout !')\n return False\n \n time.sleep(1) \n reading = self.handle.ask('R? 1;') #definite-Length block format\n except Exception:\n print('Dvm34411.read() failed !')\n raise\n if reading[0] != '#':\n print('Dvm34411.read() DLB format error - # expected !')\n return False\n digits = int(reading[1])\n reading = reading[2 + digits:]\n rdg = float(reading)\n return rdg",
"def read_control_measurements(measurement):\r\n seq_number = measurement[packet.R2H_PACKET_VARS.Seq_number]\r\n gyro_rate = packet.sensor2float(measurement[packet.R2H_PACKET_VARS.Gyro_rate])\r\n enc_l = packet.sensor2float(measurement[packet.R2H_PACKET_VARS.Motor_position_left])\r\n enc_r = packet.sensor2float(measurement[packet.R2H_PACKET_VARS.Motor_position_right])\r\n gyro_offset = packet.sensor2float(measurement[packet.R2H_PACKET_VARS.Gyro_offset])\r\n motorVoltageApplied_left = packet.sensor2float(measurement[packet.R2H_PACKET_VARS.Motor_voltage_applied_left])\r\n motorVoltageApplied_right = packet.sensor2float(measurement[packet.R2H_PACKET_VARS.Motor_voltage_applied_right])\r\n\r\n return seq_number, gyro_rate, enc_l, enc_r, gyro_offset, motorVoltageApplied_left, motorVoltageApplied_right",
"def read_bumps(self):\n data = self._read_packet(Bump.PACKET_ID, Bump.DATA_BYTES)\n\n if len(data) == Bump.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return {\n Bump.BUMP_L: bool(byte & Bump.BUMP_L),\n Bump.BUMP_R: bool(byte & Bump.BUMP_R)\n }\n else:\n return {\n Bump.BUMP_L: False,\n Bump.BUMP_R: False\n }",
"def usbp_wb(\r\n reset, # System reset \r\n ifclk, # IFCLK from FX2\r\n sys_clk, # Internal FPGA clk,\r\n \r\n # ---- FX2 FIFO Interface ----\r\n FLAGA, # EP2(OUT) Empty\r\n FLAGB, # EP4(OUT) Empty\r\n FLAGC, # EP6(IN) Full\r\n FLAGD, # EP8(IN) Full\r\n SLOE, # Output Enable, Slave FIFO\r\n SLRD, # Read Signal\r\n SLWR, # Write Signal\r\n FIFOADR, # Which of the 4 FIFO currently interfacing with. \r\n PKTEND, # Packet End, Tell FX2 to send data without FIFO Full\r\n FDI, # Fifo Data In\r\n FDO, # Fifo Data Out \r\n \r\n # ---- Wishbone Bus ----\r\n # Note clk_i signal has been excluded. Using sys_clk input\r\n wb_clk_o, # Sync clock == sys_clk\r\n wb_rst_o, # Wishbone Reset\r\n wb_dat_o, # Data bus out\r\n wb_dat_i, # Data bus in\r\n wb_adr_o, # Address bus out\r\n wb_cyc_o, # Bus cycle in process\r\n wb_ack_i, # Normal termination of bus cycle\r\n wb_err_i, # Bus cycle ended in error\r\n wb_lock_o, # Non interruptable bus cycle, == cyc_o\r\n wb_rty_i, # Retry bus cycle\r\n wb_sel_o, # Valid bytes, only byte bus\r\n wb_stb_o, # Strobe output\r\n wb_we_o, # Write Enable\r\n \r\n # Wishbone signals not used.\r\n # wb_tgd_o,wb_tdg_i,wb_tga_o,wb_tgc_o\r\n \r\n # ---- Async FIFO Interface ----\r\n fifo_rd, # Read Strobe\r\n fifo_do, # FIFO data output\r\n fifo_do_vld, # FIFO data output valid\r\n fifo_empty, # Empty control signal\r\n fifo_wr, # Write Strobe\r\n fifo_di, # FIFO data input \r\n fifo_full, # Full control signal\r\n fifo_hold, # Wait, enables complete packet etc.\r\n fifo_clr, # Reset the data fifo\r\n loopback, # Loopback, status to the top\r\n dbg = None, # Run-time debug signals\r\n\r\n # ---- Parameters ----\r\n C_REVISION = 0x12345678, # Revision 32 bit value\r\n C_WB_DAT_SZ = 8, # Wishbone data width\r\n C_WB_ADR_SZ = 16, # Wishbone address width\r\n C_BASE_ADDR = 0, # Base Wishbone Address for internal regs\r\n C_INC_ERR_STAT = True # Include error and status counters\r\n ): \r\n\r\n # Async FIFO signals from FX2 interface, FX2 perspective\r\n # \"di\" data-in to FX2, \"do\" data-out from FX2\r\n fx2_wb_di = Signal(intbv(0)[C_WB_DAT_SZ:])\r\n fx2_wb_di_vld = Signal(False)\r\n fx2_wb_do = Signal(intbv(0)[C_WB_DAT_SZ:])\r\n fx2_wb_full = Signal(False)\r\n fx2_wb_empty = Signal(True)\r\n fx2_wb_wr = Signal(False)\r\n fx2_wb_rd = Signal(False)\r\n \r\n fx2_fifo_di = Signal(intbv(0)[C_WB_DAT_SZ:])\r\n fx2_fifo_di_vld = Signal(False)\r\n fx2_fifo_do = Signal(intbv(0)[C_WB_DAT_SZ:])\r\n fx2_fifo_full = Signal(False)\r\n fx2_fifo_empty = Signal(True)\r\n fx2_fifo_wr = Signal(False)\r\n fx2_fifo_rd = Signal(False)\r\n \r\n # External and looback async FIFO signals\r\n lp_fifo_di = Signal(intbv(0)[C_WB_DAT_SZ:])\r\n lp_fifo_do = Signal(intbv(0)[C_WB_DAT_SZ:])\r\n lp_fifo_do_vld = Signal(False)\r\n lp_fifo_full = Signal(False)\r\n lp_fifo_empty = Signal(True)\r\n lp_fifo_wr = Signal(False)\r\n lp_fifo_rd = Signal(False)\r\n _lp_fifo_wr = Signal(False)\r\n _lp_fifo_rd = Signal(False)\r\n \r\n i_fifo_di = Signal(intbv(0)[C_WB_DAT_SZ:])\r\n i_fifo_do = Signal(intbv(0)[C_WB_DAT_SZ:])\r\n i_fifo_do_vld = Signal(False)\r\n i_fifo_full = Signal(False)\r\n i_fifo_empty = Signal(True)\r\n i_fifo_wr = Signal(False)\r\n i_fifo_rd = Signal(False)\r\n\r\n \r\n fifo_rst = Signal(False)\r\n fifo_hwm = Signal(False)\r\n hwma = Signal(False)\r\n\r\n\r\n # Some debug registers counters\r\n err_wr_fxfifo = Signal(intbv(0)[32:])\r\n err_rd_fxfifo = Signal(intbv(0)[32:])\r\n err_wr_ififo = Signal(intbv(0)[32:])\r\n err_rd_ififo = Signal(intbv(0)[32:])\r\n d_in_cnt = Signal(intbv(0)[64:])\r\n d_out_cnt = 
Signal(intbv(0)[64:])\r\n wb_in_cnt = Signal(intbv(0)[64:])\r\n wb_out_cnt = Signal(intbv(0)[64:])\r\n \r\n # Fake out the conversion so that the collection of registers\r\n # looks like a memory. Thought the ShadowSignals would make this OK??\r\n # status_regs = [err_wr_fxfifo[32:24], err_wr_fxfifo[24:16], err_wr_fxfifo[16:8], err_wr_fxfifo[8:0],\r\n # err_rd_fxfifo[32:24], err_rd_fxfifo[24:16], err_rd_fxfifo[16:8], err_rd_fxfifo[8:0],\r\n # err_wr_ififo[32:24], err_wr_ififo[24:16], err_wr_ififo[16:8], err_wr_ififo[8:0],\r\n # err_rd_ififo[32:24], err_rd_ififo[24:16], err_rd_ififo[16:8], err_rd_ififo[8:0]]\r\n fake_mem = [Signal(intbv(0)[8:]) for ii in range(0x20)]\r\n status_regs = fake_mem\r\n \r\n wb_cmd_in_prog = Signal(False)\r\n fx2_dbg = Signal(intbv(0)[8:])\r\n ireset = Signal(False)\r\n\r\n if dbg is not None:\r\n @always_comb\r\n def debug_sigs():\r\n dbg.next[0] = ireset\r\n dbg.next[1] = FLAGC\r\n dbg.next[2] = FLAGD\r\n dbg.next[3] = wb_cmd_in_prog\r\n dbg.next[4] = fx2_wb_empty\r\n dbg.next[5] = fx2_fifo_empty\r\n dbg.next[6] = fx2_wb_wr\r\n dbg.next[7] = 0\r\n \r\n\r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Keep this module in reset until the FX2 is ready, the software\r\n # will have to make sure the IN endpoints are empty.\r\n @always(ifclk.posedge)\r\n def rtl1():\r\n if reset:\r\n ireset.next = True\r\n else:\r\n # FLAGD is not always cleared, not 100% reliable\r\n if not FLAGC: #and not FLAGD:\r\n ireset.next = False\r\n\r\n \r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Slave FIFO Interface\r\n fx2 = fx2_sfifo(reset, ifclk,\r\n # FX2 slave fifo control and data signals\r\n FLAGA, FLAGB, FLAGC, FLAGD, SLOE,\r\n SLRD, SLWR, FIFOADR, PKTEND, FDI, FDO,\r\n # Wishbone data fifo signals\r\n fx2_wb_di, fx2_wb_di_vld, fx2_wb_do, fx2_wb_full,\r\n fx2_wb_empty, fx2_wb_wr, fx2_wb_rd,\r\n # Stream data fifo signals\r\n fx2_fifo_di, fx2_fifo_di_vld, fx2_fifo_do, fx2_fifo_full,\r\n fx2_fifo_empty, fx2_fifo_wr, fx2_fifo_rd, fifo_hold, fifo_hwm,\r\n # misc\r\n wb_cmd_in_prog, fx2_dbg)\r\n \r\n \r\n wb_controller = wb_master(sys_clk, ireset, wb_clk_o, wb_rst_o, wb_dat_o, wb_dat_i,\r\n wb_adr_o, wb_cyc_o, wb_ack_i, wb_err_i, wb_lock_o, wb_rty_i,\r\n wb_sel_o, wb_stb_o, wb_we_o,\r\n # From/To FX2\r\n ifclk,\r\n fx2_wb_wr, fx2_wb_full, fx2_wb_do,\r\n fx2_wb_rd, fx2_wb_empty, fx2_wb_di, fx2_wb_di_vld,\r\n FLAGA, wb_cmd_in_prog, loopback, status_regs,\r\n C_REVISION = C_REVISION,\r\n C_WB_DAT_SZ = C_WB_DAT_SZ,\r\n C_WB_ADR_SZ = C_WB_ADR_SZ,\r\n C_BASE_ADDR = C_BASE_ADDR)\r\n\r\n \r\n # Nk external FIFO, streaming data FIFO\r\n #ex_fifo = fifo_two_port_sync(fifo_rst, ifclk,\r\n # # A channel FX2 write -- internal read\r\n # fx2_fifo_wr, i_fifo_rd,\r\n # fx2_fifo_full, i_fifo_empty,\r\n # fx2_fifo_do, i_fifo_do, i_fifo_do_vld,\r\n # # B channel internal write -- FX2 read\r\n # i_fifo_wr, fx2_fifo_rd,\r\n # i_fifo_full, fx2_fifo_empty, \r\n # i_fifo_di, fx2_fifo_di, fx2_fifo_di_vld,\r\n # hwma=hwma, hwmb=fifo_hwm,\r\n # # @todo flush?\r\n # DSZ=8, ASZ=10, C_HWMA=0, C_HWMB=512)\r\n\r\n\r\n @always_comb\r\n def rtl_fifo_rst():\r\n fifo_rst.next = ireset or fifo_clr\r\n \r\n\r\n # Data path error counters, because external modules interface with the\r\n # FIFO and many more corner cases error counters included for debug\r\n # and run-time info. 
The wishbone interface is much more controlled\r\n # and consistent with the FIFO interface.\r\n @always(ifclk.posedge)\r\n def rtl2():\r\n if reset:\r\n err_wr_fxfifo.next = 0\r\n err_rd_fxfifo.next = 0\r\n err_wr_ififo.next = 0\r\n err_rd_ififo.next = 0\r\n else:\r\n if fx2_fifo_wr and fx2_fifo_full:\r\n err_wr_fxfifo.next = err_wr_fxfifo +1\r\n\r\n if fx2_fifo_rd and fx2_fifo_empty:\r\n err_rd_fxfifo.next = err_rd_fxfifo + 1\r\n\r\n if i_fifo_wr and i_fifo_full:\r\n err_wr_ififo.next = err_wr_ififo + 1\r\n\r\n if i_fifo_rd and i_fifo_empty:\r\n err_rd_ififo.next = err_rd_ififo + 1\r\n\r\n # @todo add FX2 external FIFO counters, FLAGB and FLAGD\r\n\r\n @always(ifclk.posedge)\r\n def rtl_data_counters():\r\n if reset:\r\n wb_in_cnt.next = 0 # Data in to FPGA\r\n wb_out_cnt.next = 0 # Data out of FPGA\r\n d_in_cnt.next = 0 # Data in to FPGA (opposite of USB perspective)\r\n d_out_cnt.next = 0 # Data out of FPGA\r\n else:\r\n if fx2_fifo_rd:\r\n d_out_cnt.next = d_out_cnt + 1\r\n\r\n if fx2_fifo_wr:\r\n d_in_cnt.next = d_in_cnt + 1\r\n\r\n \r\n if fx2_wb_rd:\r\n wb_out_cnt.next = wb_out_cnt + 1\r\n\r\n if fx2_wb_wr:\r\n wb_in_cnt.next = wb_in_cnt + 1\r\n\r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Error and Status Counters\r\n # Offset (C_BASE_ADDR + 0x10)\r\n # 0-3 : FX2 -> Internal FIFO error\r\n # 4-7 : Internal FIFO -> FX2 error\r\n # 8-11 : External Logic -> Internal FIFO error\r\n # 12-15 : Internal FIFO -> External Logic error\r\n # 16-23 : Wishbone bytes in\r\n # 24-31 : Wishbone bytes out\r\n # 32-39 : Data stream bytes in\r\n # 40:47 : Data stream bytes out\r\n @always_comb\r\n def rtl_fake_ram():\r\n fake_mem[0].next = err_wr_fxfifo[32:24]; fake_mem[1].next = err_wr_fxfifo[24:16];\r\n fake_mem[2].next = err_wr_fxfifo[16:8]; fake_mem[3].next = err_wr_fxfifo[8:0];\r\n\r\n fake_mem[4].next = err_rd_fxfifo[32:24]; fake_mem[5].next = err_rd_fxfifo[24:16];\r\n fake_mem[6].next = err_rd_fxfifo[16:8]; fake_mem[7].next = err_rd_fxfifo[8:0];\r\n\r\n fake_mem[8].next = err_wr_ififo[32:24]; fake_mem[9].next = err_wr_ififo[24:16];\r\n fake_mem[10].next = err_wr_ififo[16:8]; fake_mem[11].next = err_wr_ififo[8:0];\r\n\r\n fake_mem[12].next = err_rd_ififo[32:24]; fake_mem[13].next = err_rd_ififo[24:16];\r\n fake_mem[14].next = err_rd_ififo[16:8]; fake_mem[15].next = err_rd_ififo[8:0];\r\n\r\n # @todo byte counters\r\n \r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Built in loop back\r\n @always_comb\r\n def rtl3():\r\n if loopback:\r\n i_fifo_di.next = lp_fifo_di\r\n i_fifo_wr.next = lp_fifo_wr\r\n i_fifo_rd.next = lp_fifo_rd\r\n lp_fifo_do.next = i_fifo_do\r\n lp_fifo_do_vld.next = i_fifo_do_vld\r\n lp_fifo_full.next = i_fifo_full\r\n lp_fifo_empty.next = i_fifo_empty\r\n \r\n fifo_do.next = 0\r\n fifo_do_vld.next = False\r\n fifo_full.next = False\r\n fifo_empty.next = False\r\n \r\n else:\r\n i_fifo_di.next = fifo_di\r\n i_fifo_wr.next = fifo_wr\r\n i_fifo_rd.next = fifo_rd\r\n fifo_do.next = i_fifo_do\r\n fifo_do_vld.next = i_fifo_do_vld\r\n fifo_full.next = i_fifo_full\r\n fifo_empty.next = i_fifo_empty\r\n\r\n lp_fifo_do.next = 0\r\n lp_fifo_do_vld.next = False\r\n lp_fifo_full.next = False\r\n lp_fifo_empty.next = True\r\n\r\n\r\n @always_comb\r\n def rtl4():\r\n lp_fifo_di.next = lp_fifo_do\r\n lp_fifo_rd.next = _lp_fifo_rd and not lp_fifo_full and not lp_fifo_empty \r\n lp_fifo_wr.next = _lp_fifo_wr and not lp_fifo_full and not lp_fifo_empty and lp_fifo_do_vld\r\n\r\n\r\n @always(sys_clk.posedge)\r\n def rtl5():\r\n if 
ireset:\r\n _lp_fifo_wr.next = False\r\n _lp_fifo_rd.next = False\r\n else:\r\n if not lp_fifo_empty and not lp_fifo_full:\r\n _lp_fifo_rd.next = True\r\n _lp_fifo_wr.next = True\r\n else:\r\n _lp_fifo_rd.next = False\r\n _lp_fifo_wr.next = False\r\n\r\n \r\n return instances()",
"def get_readings(self):\n buf = self._read(0x020001)\n data = decode(buf[1:])\n return data",
"def read_data(self):\n temperature_data = RS485.read_temperature(self.data_path)\n humidity_data = RS485.read_humidity(self.data_path)\n moisture_data = RH_010_GN.read_moisture(self.data_path)\n o2_data = LB_856.read_o2(self.data_path)\n co2_data = LB_856.read_co2(self.data_path)\n\n self.data = [temperature_data, humidity_data, moisture_data, o2_data, co2_data]",
"def read_loop(self):\n log.info(\"Reading IR inputs\")\n\n pulses: List[Pulse] = []\n\n def track_pulse(pulse):\n nonlocal pulses\n if pulse and \\\n self.MIN_PULSE_READ < pulse.length < self.MAX_PULSE_READ:\n log.debug(pulse)\n pulses.append(pulse)\n elif len(pulses) > 0:\n message = None\n try:\n message = self._pulses_to_binary_message(pulses)\n except ValueError as e:\n log.error(e, exc_info=True)\n finally:\n if message:\n log.info(hex(message))\n pulses = []\n\n while 1:\n # empty space (registers as 1 on IR Receiver)\n space_pulse = self._sense_pulse(True)\n track_pulse(space_pulse)\n # burst pulse (registers as 0 on IR Receiver)\n burst_pulse = self._sense_pulse(False)\n track_pulse(burst_pulse)",
"def _read_cardiochip(self):\n cur_leadstatus = 0\n sample_count =0\n while self.connected:\n sample_count+=1\n #check for sync bytes\n readbyte = ord(self.ser.read(1))\n #print readbyte, SYNC_BYTE\n if readbyte != SYNC_BYTE:\n continue\n readbyte = ord(self.ser.read(1))\n if readbyte != SYNC_BYTE:\n continue\n\n #parse length byte\n while True:\n pLength = ord(self.ser.read(1))\n if pLength != SYNC_BYTE:\n break\n if pLength > 169:\n continue\n #print \"L: %i\" % pLength\n\n # collect payload bytes\n payload = self.ser.read(pLength)\n payload = [ord(x) for x in payload] #convert to int from string\n #print \"payload: \" + str(payload).strip('[]')\n # ones complement inverse of 8-bit payload sum\n checksum = sum(payload) & 0xFF\n checksum = ~checksum & 0xFF\n\n # catch and verify checksum byte\n chk = ord(self.ser.read(1))\n #print \"chk: \" + str(checksum)\n if chk != checksum:\n print \"checksum error, %i != %i\" % (chk, checksum)\n continue\n\n output = self._parseData(payload)\n\n lead_status = next(( d for d in output if 'leadoff' in d), None)\n if lead_status is not None:\n if cur_leadstatus != lead_status['leadoff']:\n #we have a change\n if lead_status['leadoff']==200:\n print \"LEAD ON\"\n elif lead_status['leadoff']==0:\n print \"LEAD OFF\"\n cur_leadstatus = lead_status['leadoff']\n\n # store the output data in a queue\n # first, create a tuple with the sample index and dict with the timestamp and ecg\n ecgdict = next(((i,d) for i,d in enumerate(output) if 'ecg_raw' in d), None)\n if ecgdict is not None and sample_count>self.Fs*2:\n #let's just ignore the first 2 seconds of crappy data\n ecgdict[1]['leadoff'] = cur_leadstatus\n #print ecgdict[1]\n self.ecg_buffer.put(ecgdict[1]) # this should save the ecg and timestamp keys\n\n return",
"def read_live_data(wearable_port):\r\n IMU1_num = []\r\n IMU2_num = []\r\n IMU3_num = []\r\n\r\n try:\r\n wearable = serial.Serial(wearable_port, baudrate=115200, timeout=5)\r\n #arduino = serial.Serial(arduino_port, timeout=1)\r\n # Delay for 2 seconds to wait for serial port to be ready.\r\n print(\"Waiting 2 seconds for serial to be ready.\")\r\n time.sleep(2)\r\n except Exception as e:\r\n print(e)\r\n print('Please check the port')\r\n return\r\n\r\n input(\"Press Enter to continue...\")\r\n str(wearable.write(bytes(33)))\r\n # Open file to store the data; filename includes date and time; format: data-YYYYMMDDHHmmss.csv\r\n filename = \"data-\" + str(dt.datetime.now().strftime(\"%Y%m%d%H%M%S\")) + \".csv\"\r\n filenamplot = \"plot-\" + str(dt.datetime.now().strftime(\"%Y%m%d%H%M%S\")) + \".png\"\r\n print(\"Opening %s\" % filename)\r\n f = open(filename, \"a+\")\r\n # f.write(\"power,rpm\\n\")\r\n count = 1000\r\n # Get data and continuously yield Power and RPM as integers\r\n\r\n while (count >0):\r\n count = count -1\r\n #if arduino.in_waiting > 0:\r\n wearable.flushInput()\r\n\r\n '''\r\n arduino_output = arduino.readline().decode(\"utf_8\", \"strict\")\r\n print(\"Distance: %s\" % arduino_output)\r\n f.writelines(\"%s\" % arduino_output)\r\n if arduino_output == \"Hard Stop\\r\\n\":\r\n break\r\n arduino_output = arduino_output.replace(\"\\r\\n\", \"\")\r\n Distance.append(int(float(arduino_output)))\r\n '''\r\n\r\n try:\r\n data = wearable.readline().decode(\"utf_8\", \"strict\")\r\n data = data.replace(\"\\r\\n\", \"\\n\").split()\r\n IMU1= data[2].replace(\"\\n\", \"\")\r\n IMU1_num.append(int(IMU1))\r\n IMU2 = data[3].replace(\"\\n\", \"\")\r\n IMU2_num.append(int(IMU2))\r\n IMU3 = data[4].replace(\"\\n\", \"\")\r\n IMU3_num.append(int(IMU3))\r\n print(\"IMU1: %s\\t IMU2: %s\\t IMU3: %s\\t\" % (IMU1, IMU2, IMU3))\r\n f.writelines(\"%s,%s,%s,%s\\n\" % (IMU1, IMU2, IMU3))\r\n yield int(IMU1), int(IMU2), int(IMU3)\r\n except Exception as e:\r\n print('error')\r\n f.writelines(\"Error\\n\")\r\n\r\n print('Program ended.')\r\n t = numpy.linspace(1, len(IMU1_num), len(IMU1_num))\r\n fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(16.0, 9.0)) # create figure & 1 axis\r\n ax1.plot(t, IMU1_num, t, IMU2_num,t, IMU3_num)\r\n ax1.set_title('IMU')\r\n ax1.legend(('IMU1', 'IMU2', 'IMU3'))\r\n # manager = plt.get_current_fig_manager()\r\n # manager.resize(*manager.window.maxsize())\r\n fig.savefig(filenamplot)\r\n plt.show()\r\n\r\n f.close()\r\n #arduino.close()\r\n wearable.close()",
"def _read_until(self, expected_cmds, adb_info):\n cmd, _, _, data = self._io_manager.read(expected_cmds, adb_info, allow_zeros=True)\n\n # Acknowledge write packets\n if cmd == constants.WRTE:\n self._okay(adb_info)\n\n return cmd, data",
"def _read_v2(self):\n return self.usb_dev.read(self.ep_in, self.rdbuf_chunksize, self.usb_rd_timeout)",
"def testAllRead(self):\n import time,copy\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n for i in xrange(50):\n ptnames = [ pt['name'] for pt in pts ]\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n value = filter(lambda x: x['name']==pt, pts)[0]['value']\n #assert value == reply[ptnames.index(pt)]\n received = reply[ptnames.index(pt)]\n if not value == received: \n print pt, ' was %s but should be %s'%(str(received),str(value))",
"def Drive(PL):\n #RETURNS TUPLE (RIGHTWHEELSPEED,LEFTWHEELSPEED)\n SteerPoints = Steering(PL[0])\n SpeedPoints = Speed(PL[1])\n FinalControl = Combine(SpeedPoints[0], SpeedPoints[1], SteerPoints[0], SteerPoints[1])\n return FinalControl",
"def fetch_holding_registers(self):\n log.debug(\"Starting the next cycle\")\n result = self.read_holding_registers(*STATUS_REGS, unit=UNIT)\n result.addCallbacks(self.send_holding_registers, self.error_handler)",
"def achieve_data(self, selection='all'):\r\n Infra_Omi, Infra_L, Infra_R = None, None,None\r\n bump, L_cnt, R_cnt, DLightBump, AnalogBump = None,None,None,None,None\r\n if selection == 'b':\r\n # read bumper data only\r\n Infra_Omi,Infra_L, Infra_R, bump,DLightBump, L, FL, CL, CR, FR, R = self.Roomba.ReadQueryStream(17,52,53, 7,45, 46, 47,48, 49, 50, 51)\r\n AnalogBump = (L, FL, CL, CR, FR, R)\r\n elif selection == 'e':\r\n # read encoder data only\r\n L_cnt, R_cnt= self.Roomba.ReadQueryStream( 43, 44)\r\n else:\r\n # read all data\r\n Infra_Omi, Infra_L, Infra_R,bump, L_cnt,R_cnt, DLightBump, L,FL, CL,CR,FR,R =self.Roomba.ReadQueryStream(17,52,53,7, 43, 44, 45, 46,47,48,49,50,51 )\r\n AnalogBump = (L,FL, CL,CR,FR,R)\r\n\r\n return L_cnt,R_cnt, bump, DLightBump,AnalogBump,Infra_Omi, Infra_L, Infra_R",
"def read_raw(self):\n beats = self.microblaze.read_mailbox(0x4)\n interval_ms = self.microblaze.read_mailbox(0x8 + (beats % 4)*4)\n return beats, interval_ms",
"def wheel_attributes(self):\n wheel1 = self.wheel\n wheel2 = TranslatedShape(shape_in=wheel1,\n displacement=Vector(0.,\n 0.,\n self.positions[1][0]\n - self.positions[1][1]))\n wheel3 = MirroredShape(shape_in=wheel1,\n reference_point=translate(self.position,\n \"y\",\n self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n wheel4 = MirroredShape(shape_in=wheel2,\n reference_point=translate(self.position,\n \"y\",\n self.width_car/2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n return [wheel1, wheel2, wheel3, wheel4]"
] | [
"0.6611901",
"0.5658661",
"0.5465292",
"0.5347873",
"0.53358924",
"0.52502316",
"0.5249884",
"0.52008724",
"0.5177317",
"0.51620775",
"0.51261",
"0.5113082",
"0.5106844",
"0.50573146",
"0.5048263",
"0.50303495",
"0.502867",
"0.5025428",
"0.50118834",
"0.49771735",
"0.4921565",
"0.4919707",
"0.4916556",
"0.4909684",
"0.48955518",
"0.48589978",
"0.4854583",
"0.4847986",
"0.48399398",
"0.4805747"
] | 0.65595144 | 1 |
Reads the specified encoder's count. | def read_encoder(self, encoder):
counts = self._read_encoder_raw(encoder)
return counts*math.pi*_WHEEL_DIAMETER / _COUNTS_PER_REV | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_delta_encoders_count_state(self):\n pass",
"def encoders_count(self):\r\n return self._get('encoders_count', {})",
"def read_count(buffer, offset, count):\n\n return buffer[offset:offset + count]",
"def get_count(self):\n return unpack(os.read(self.fd, 8))",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt64()",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarSize()",
"def read_count(self):\n return self._read_count",
"def read(self) -> int:\n ...",
"def Read_Encoders(self):\n max_diff = 100\n while max_diff > 1:\n max_diff = 0\n time.sleep(0.2)\n for i in range(3):\n last_read_pos = self.read_pos[i]\n value = self.fd_channel[i].read(3)+b'\\x00' \n # read the 24 bit register (3 bytes) and add a fourth byte \n # to make it an integer.\n signed_value = struct.unpack(\"=I\", value)[0] \n # Convert byte string to int\n if signed_value > 2**23:\n signed_value = signed_value - 2**24\n self.read_pos[i] = signed_value\n max_diff = max(max_diff, abs(last_read_pos - self.read_pos[i]))\n return",
"def read(self) -> int:",
"def read(self, reader: BitStreamReader, _index: int) -> int:\n\n return reader.readBits(self._numBits)",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt()",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt32()",
"def read(self, n=1):\n return 0",
"def read_count(self, read_count):\n\n self._read_count = read_count",
"def read_data(fobj, coding, count, bit_width):\n out = np.empty(count, dtype=np.int32)\n o = encoding.Numpy32(out)\n if coding == parquet_thrift.Encoding.RLE:\n while o.loc < count:\n encoding.read_rle_bit_packed_hybrid(fobj, bit_width, o=o)\n else:\n raise NotImplementedError('Encoding %s' % coding)\n return out",
"def read_count(f, n):\n buf = ''\n while len(buf) < n:\n nextchunk = f.read(n - len(buf))\n if not nextchunk:\n return ''\n buf += nextchunk\n return buf",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt16()",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarUInt64()",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarUInt32()",
"def bedcount_reader(bedcount, compression=None, chunksize=10000):\n reader = pd.read_table(bedcount, compression=compression,\n chunksize=chunksize, header=0,\n dtype={'#chrom': str, 'start': np.int})\n return reader",
"def reads(self, n):\n val = self.f.read(n)\n self.cs and self.cs.add(val)\n try:\n val = val.decode('utf-8')\n except:\n if self.debug_level > 5:\n print(\"ERROR DECODING: {}\".format(val))\n pass\n return val",
"def bits(self, count):\n\n if count < 0:\n raise ValueError\n\n if count > self._bits:\n n_bytes = (count - self._bits + 7) // 8\n data = self._fileobj.read(n_bytes)\n if len(data) != n_bytes:\n raise BitReaderError(\"not enough data\")\n for b in bytearray(data):\n self._buffer = (self._buffer << 8) | b\n self._bits += n_bytes * 8\n\n self._bits -= count\n value = self._buffer >> self._bits\n self._buffer &= (1 << self._bits) - 1\n assert self._bits < 8\n return value",
"def read(self, num=1):\n contents = self.stream.read(num)\n self.bitsRead += len(contents)\n self.observerRead(contents)\n return contents",
"def read(self, reader: BitStreamReader, _index: int) -> int:\n\n return reader.readSignedBits(self._numBits)",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarUInt()",
"def read(self, count):\n d = self.sock.recv(count, socket.MSG_WAITALL)\n assert len(d) == count\n return d",
"def read(self, count):\n # TIMEOUT: Since read is called in a loop, wait for self.timeout period before\n # calling serial.read(). See comment on serial.Serial() call above about\n # timeout.\n time.sleep(self.read_timeout)\n c = str()\n try:\n if self.serial:\n c = self.serial.read(count)\n except SerialException as e:\n self.serial = None\n self.LAST_ERROR = \"connection lost, serial.read(%d): %s\" % (count, str(e))\n self.logger.prn_err(str(e))\n return c",
"def count(self):\n return len(self.read_ints())",
"def read(self, num_bytes_to_read):\n pass"
] | [
"0.682963",
"0.64673257",
"0.62826335",
"0.6097306",
"0.60596293",
"0.60385656",
"0.60026616",
"0.5991026",
"0.5969984",
"0.5962667",
"0.59440327",
"0.593011",
"0.59193146",
"0.58999217",
"0.5863255",
"0.5799434",
"0.5738165",
"0.5690645",
"0.5682714",
"0.56652296",
"0.55958104",
"0.5593851",
"0.55736506",
"0.55618167",
"0.55528104",
"0.55082923",
"0.549711",
"0.5471628",
"0.5404413",
"0.5375908"
] | 0.6531314 | 1 |
Read the specified IR character packet. | def read_ir_char(self, dir):
data = self._read_packet(dir, Dock.DATA_BYTES)
if len(data) == Dock.DATA_BYTES:
byte = struct.unpack("B", data)[0]
return byte
else:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_char(self):\n return self._packers[\"b\"].unpack(self.read(1))[0]",
"def ReadChar(self):\n return self.unpack('c')",
"def _read_char(self):\n if self.read_pos >= len(self.data):\n self.char = \"\"\n else:\n self.char = self.data[self.read_pos]\n\n self.pos = self.read_pos\n self.read_pos += 1",
"def read(self, nChar=None):\n raise NotImplementedError()",
"def read_char(data):\n s_type = \"=%s\" % get_type(\"char\")\n return struct.unpack(s_type, data.read(1))[0]",
"async def char_read(\n client: AIOHomeKitBleakClient,\n encryption_key: EncryptionKey | None,\n decryption_key: DecryptionKey | None,\n handle: BleakGATTCharacteristic,\n iid: int,\n) -> bytes:\n pdu_status, data = await ble_request(\n client, encryption_key, decryption_key, OpCode.CHAR_READ, handle, iid\n )\n return _decode_pdu_tlv_value(client, pdu_status, data)",
"def readchar(self) -> int:",
"def _read(self):\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None",
"def read_rawchar(self):\n ch = self._get_next_character()\n return ch",
"def rcvChar(self):\r\n\t\treturn self.rcvByte()",
"def read(self, index):\n # Read Opcode\n isNotFirstCmd = False\n self.__ser_wr_trans(RG_RD, isNotFirstCmd)\n # Read Address\n self.__ser_wr_trans(index)\n rdata = self.__port.read()\n if rdata == '':\n raise ValueError('Serial i/O error - ord() expects a character, but string of length 0 is present.')\n result = ord(rdata)\n return result",
"def _get_data(self, read_size):\n return self._character_device.read(read_size)",
"def read_unsigned_char(data):\n s_type = \"=%s\" % get_type(\"unsigned_char\")\n return struct.unpack(s_type, data.read(1))[0]",
"def read(self) -> bytes:\n line = self.device.readline()\n if len(line) > 0 and line[-1] == 10:\n line += self.device.readline()\n return line",
"def read_char(self):\n self._skip_white_space()\n ch = self._get_next_character()\n return ch",
"def _read(self, valid):\n start = self.pos\n while valid(self.char) and self.pos < self.length:\n self._read_char()\n\n return self.data[start : self.pos]",
"def _read_packet(self, data, jarm_details):\n try:\n if not data:\n raise Exception(\"No data\")\n\n jarm = \"\"\n # Server hello error.\n if data[0] == 21:\n raise Exception(\"Server hello error\")\n # Check for server hello.\n elif (data[0] == 22) and (data[5] == 2):\n counter = data[43]\n # Find server's selected cipher.\n selected_cipher = data[counter+44:counter+46]\n # Find server's selected version.\n version = data[9:11]\n jarm += str(selected_cipher.hex())\n jarm += \"|\"\n jarm += str(version.hex())\n jarm += \"|\"\n extensions = (self._extract_extension_info(data, counter))\n jarm += extensions\n return jarm\n else:\n raise Exception(\"Unexpected result\")\n except Exception:\n return \"|||\"",
"def readpacket(self, n):\n try:\n msg = self.sock.recv(n)\n except BaseException:\n msg = ''\n return msg",
"def read_byte():\n try:\n result = ord(self._buffer[read_cursor[0]])\n read_cursor[0] += 1\n return result\n except IndexError:\n raise ASN1WantMore('Premature end of input.')",
"def readByte(self) -> int:\n return ord(self._unpack('c', 1))",
"def read_string(self):\n return self.bits.read('bytes:{0}'.format(self.read_int())).decode(\"utf-8\", 'replace')",
"def _readchar(self):\n if len(self.user) == 0:\n return sys.stdin.read(1)\n\n else:\n iRet = self.user[0]\n self.user = self.user[1:]\n return iRet",
"def _i2c_read(self, register, bank=None):\n if bank is not None:\n self.set_bank(bank)\n return self.i2c.read_byte_data(self.address, register)",
"def _read(self):\n # because protocol has no termination chars the read reads the number\n # of bytes in the buffer\n bytes_in_buffer = self.visa_handle.bytes_in_buffer\n # a workaround for a timeout error in the pyvsia read_raw() function\n with(self.visa_handle.ignore_warning(visa.constants.VI_SUCCESS_MAX_CNT)):\n mes = self.visa_handle.visalib.read(\n self.visa_handle.session, bytes_in_buffer)\n mes = str(mes[0].decode()) # cannot be done on same line for some reason\n # if mes[1] != 0:\n # # see protocol descriptor for error codes\n # raise Exception('IVVI rack exception \"%s\"' % mes[1])\n return mes",
"def readline(self):\n returnIndex = self._RX_buf.index(\"\\n\") # \\r\\n technically\n if returnIndex != -1:\n s = self._RX_buf[0:returnIndex + 1]\n self._RX_buf = self._RX_buf[returnIndex + 1:]\n return s # bytes(s, encoding='ascii') # s\n else:\n return 0x04 # ''",
"def read_ascii_response(self):\n str = ''\n empties = 0\n while(empties < 5 and str[-3:] != '}\\r\\n'):\n time.sleep(.1)\n newdata = self.read()\n str += newdata\n if newdata:\n empties = 0\n else:\n empties += 1\n if empties: # last result must have gotten data, so empties should be zero\n raise LabProTimeout(\n 'timeout getting ascii data, current result: ' + repr(str))\n goodstart = str.find('{')\n if goodstart < 0:\n raise LabProDataError('bad ascii data: ' + repr(str))\n return map(eval, str[goodstart + 1:-3].split(','))",
"def _read(self, timeout=None):\n\n # Developer notes:\n #\n # Packet data read from Serial is in this format:\n # [HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]\n #\n # [Escaped data] is encoded so that [HDLC_FLAG_BYTE] byte\n # values cannot occur within it. When [Escaped data] has been\n # unescaped, the last 2 bytes are a 16-bit CRC of the earlier\n # part of the packet (excluding the initial HDLC_FLAG_BYTE\n # byte)\n #\n # It's also possible that the serial device was half-way\n # through transmitting a packet when this function was called\n # (app was just started). So we also neeed to handle this case:\n #\n # [Incomplete escaped data][HDLC_FLAG_BYTE][HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]\n #\n # In this case we skip over the first (incomplete) packet.\n #\n\n if self._s.timeout != timeout and timeout != None:\n if self._debug:\n print \"Set the timeout to %s, previous one was %s\" % (timeout, self._s.timeout)\n self._s.timeout = timeout\n\n try:\n # Read bytes until we get to a HDLC_FLAG_BYTE value\n # (either the end of a packet, or the start of a new one)\n d = self._get_byte(timeout)\n ts = time.time()\n if self._debug and d != self.HDLC_FLAG_BYTE:\n print \"Skipping incomplete packet\"\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte(timeout)\n ts = time.time()\n\n # Store HDLC_FLAG_BYTE at the start of the retrieved packet\n # data:\n packet = [d]\n\n # Is the next byte also HDLC_FLAG_BYTE?\n d = self._get_byte(timeout)\n if d == self.HDLC_FLAG_BYTE:\n # Yes. This means that the previous byte was for\n # the end of the previous packet, and this byte is for\n # the start of the next packet.\n\n # Get the 2nd byte of the new packet:\n d = self._get_byte(timeout)\n ts = time.time()\n\n # We are now on the 2nd byte of the packet. Add it to\n # our retrieved packet data:\n packet.append(d)\n\n # Read bytes from serial until we read another\n # HDLC_FLAG_BYTE value (end of the current packet):\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte(timeout)\n packet.append(d)\n\n # Done reading a whole packet from serial\n if self._debug:\n print \"SimpleSerial:_read: unescaped\", packet\n\n # Decode the packet, and check CRC:\n packet = self._unescape(packet)\n\n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n\n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n raise ReadCRCError\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n\n # Packet was successfully retrieved, so return it in a\n # RawPacket wrapper object (but leave out the\n # HDLC_FLAG_BYTE and CRC bytes)\n return RawPacket(ts, packet[1:-3])\n except socket.timeout:\n raise ReadTimeoutError",
"def _read_byte(self, register):\r\n return self._read_register(register, 1)[0]",
"def read_char(stream, indent=INDENT):\n value = streambyte_to_int(stream, 1)\n formats = list(convert(value[0]))\n\n formats[0] = 'HEX: ' + formats[0]\n formats[1] = 'BIN: ' + formats[1]\n formats[2] = 'ASCII: ' + formats[2]\n formats[3] = 'INT: ' + str(formats[3])\n\n return ('\\n' + (' ' * indent)).join(formats)",
"def read(self, n=1):\n s = self._RX_buf[0:n]\n self._RX_buf = self._RX_buf[n:]\n # print(\"read op occurred: RX_buf = {}\".format(self._RX_buf), end='\\n\\n')\n return s # bytes(s, encoding='ascii')"
] | [
"0.6626781",
"0.6615035",
"0.6597647",
"0.65835327",
"0.6410242",
"0.63561577",
"0.6355298",
"0.6334242",
"0.6323016",
"0.61830074",
"0.6181135",
"0.61219525",
"0.5915346",
"0.5860435",
"0.58445275",
"0.5834801",
"0.5817188",
"0.5785153",
"0.57540584",
"0.57190937",
"0.56792307",
"0.56511134",
"0.56473553",
"0.5600084",
"0.55937",
"0.5580096",
"0.55586386",
"0.5549904",
"0.5536286",
"0.55312514"
] | 0.6784839 | 0 |
Retrieves the charging state of the robot via the charging state packet. | def read_charging_state(self):
data = self._read_packet(Charging.PACKET_ID, Charging.DATA_BYTES)
if len(data) == Charging.DATA_BYTES:
return struct.unpack("B", data)[0]
else:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_device_state(self, charger):\n data = {\n \"device_id\": self.uuid,\n \"cmd\": \"get_state\",\n \"token\": charger.token(),\n \"account_token\": self.api_token\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n response = requests.post(\"{}/box_api_secure\".format(self.BASE_URL),\n data=json.dumps(data),\n headers=headers)\n response_json = response.json()\n return response_json",
"def get_state(self):\n ret = self.send(\"?S\", recv=True)\n assert ret in \"WDR\"\n return ret",
"def state(self):\n return self._battery",
"def get_battery_state(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... (.). .*? .*? .*? .*? .*? . .*? .*? . . . .*?'\n state = re.findall(pattern,summary).pop()\n if state == 'f':\n msg = 'OFF'\n elif state == 'd':\n msg = 'DISCHARGING.'\n elif state == 'c':\n msg = 'CHARGING.'\n elif state == 'b':\n msg = 'BALANCING.'\n return state,msg",
"def _get_state(self):\n print(\"GET STATE\")\n res = self._send_command(\n \"RS;\",\n fb_required=True,\n res_pattern=\"STATE:\")\n # The received answer is supposed to be something like\n # STATE:0|1|-1\n state = int(res.split(':')[1])\n if state == PVDriver.IDLE:\n return \"IDLE\"\n elif state == PVDriver.MOVING:\n return \"MOVING\"\n else:\n return \"ERROR\"",
"def state(self):\n return self.device.status(station=self.station_number)",
"def get_device_state(self) -> DeviceState:\n hex_device_state = hexlify(self.message)[266:268].decode()\n return (\n DeviceState.ON\n if hex_device_state == DeviceState.ON.value\n else DeviceState.OFF\n )",
"def get_device_state(self):\n\t\treturn call_sdk_function('PrlSrvCfgDev_GetDeviceState', self.handle)",
"def state(self) -> 'outputs.DeviceStateResponse':\n return pulumi.get(self, \"state\")",
"def state(self):\n return self.roller.battery",
"def state(self):\n return self.device.value()",
"def get_state(self, state):\n status = [u'noState', u'poweredOn', u'blocked', u'suspended', \n u'poweredOff', u'poweredOff', u'crashed']\n return status[int(state)]",
"def state(self):\n return self._device.value",
"def status(self):\n ret = self.dev.ctrl_transfer(0xc0, 0x01, 0x0081, 0x0000, 0x0001)\n if ret[0] == 0xa0:\n return self.POWER_ON\n return self.POWER_OFF",
"def getState(self):\n return self.motorState",
"def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)",
"def get_charger_state(self, charger_id: str, raw=False) -> ChargerState:\n for circuit in self[\"circuitStates\"]:\n for charger_data in circuit[\"chargerStates\"]:\n if charger_data[\"chargerID\"] == charger_id:\n return ChargerState(charger_data[\"chargerState\"], raw)\n\n return None",
"def state(self):\n return self.coordinator.data[METER_DEVICE_TYPE][self.base_unique_id][METER_STATE]",
"def status(self):\n\n # --- get 0 padded string representation of status register\n response = self.send_lens_cmd(['90', 'B9', '00'], fast_mode=True)\n state_str = bin(int('0x' + response['MISO'][2], 16))\n state_str = state_str[2:]\n for p in range(8 - len(state_str)):\n state_str = '0' + state_str\n\n self._status = dict(AF_switch=bool(int(state_str[0])),\n F_move=bool(int(state_str[5])),\n F_acc=bool(int(state_str[2])),\n FD_endStop=bool(int(state_str[3])),\n status_byte=state_str)\n\n return self._status",
"def _get_state(self):\n # gst's get_state function returns a 3-tuple; we just want the\n # status flag in position 1.\n return self.pipeline.get_state(Gst.CLOCK_TIME_NONE)[1]",
"def get_state(self):\n return self.controller.get_state()",
"def state(self):\n return self.device.device_data[self.device_id][self._sensor_type]",
"def get_status(self, state):\n raise NotImplementedError",
"def is_on(self):\n return self.car.data[DATA_CHARGING]",
"def status(self):\n return self._bp.get_motor_status(self._port)",
"def _read_device_state():\n \n try:\n _debug_print(\"Connecting to bus...\")\n i2c_bus = smbus.SMBus(_bus_id)\n\n current_state = i2c_bus.read_byte(_device_addr) & 0x0F\n\n return int(current_state)\n\n except:\n print(\"Error: There was a problem reading from the device\")\n # Best to re-raise as we can't recover from this\n raise",
"def read_status(ctl):\n\tr = ctl.bus_read_struct_coherent(tm.status_addr, 'BBBBI')\n\treturn r",
"def status(self, action=None):\n if action:\n function = _status_functions[action]\n done, data = self._request(function)\n if done:\n if data:\n return states[int(data[0], 16)]\n else:\n raise EvseError\n done, data = self._request('GS')\n if done:\n return states[int(data[0])]\n\n raise EvseError",
"def state(self):\n return self.coordinator.data[PVS_DEVICE_TYPE][self.base_unique_id][PVS_STATE]",
"def GetState(self):\r\n \r\n return self.state"
] | [
"0.7075033",
"0.6326543",
"0.6314042",
"0.631315",
"0.6306474",
"0.6249806",
"0.62402195",
"0.6222469",
"0.61727875",
"0.6145988",
"0.6135337",
"0.6102278",
"0.60604155",
"0.6010413",
"0.5940022",
"0.59325576",
"0.5901887",
"0.5881399",
"0.584315",
"0.583885",
"0.58024025",
"0.5769137",
"0.57637817",
"0.57469577",
"0.5721135",
"0.57012624",
"0.5696783",
"0.569015",
"0.5659514",
"0.5658674"
] | 0.75002456 | 0 |
Calculates the change in angle between two encoder values. Both angles should be in the same unit (degrees or radians). This method only supports a differential drive system. | def angle(self, ref_angle, new_angle=None, radians=False, cw=True):
if new_angle is None:
new_angle = self.read_encoders()
diff = {}
# Only add the difference of the keys that match the value of encoder
# left or encoder right.
for dist in ref_angle:
if dist in new_angle:
forward = None
# Note for Differential Drive:
# CW = enc_L -> Forward & enc_R -> Backward
# CCW = enc_L -> Backward & enc_R -> Forward
if dist == Drive.ENCODER_L:
forward = not cw
elif dist == Drive.ENCODER_R:
forward = cw
if forward is not None:
diff[dist] = Robot._encoder_diff(ref_angle[dist],
new_angle[dist],
forward)
if len(diff) != 2:
print "Not enough encoder values provided to calculate angle for" \
" a differential drive system."
return 0
angle = (diff[Drive.ENCODER_L] - diff[Drive.ENCODER_R]) / WHEEL_BASE
if radians:
return angle
else:
# Formula Source:
# x(in degrees)/y(in radians) = 360/2pi -> x = y*180*pi
return angle*180/math.pi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def angle_difference(ang1,ang2,units):\n ang1r = angle_to_radians(ang1,units)\n ang2r = angle_to_radians(ang2,units)\n y = np.sin(ang2r-ang1r)\n x = np.cos(ang2r-ang1r)\n angdiffr = np.arctan2(y,x)\n return radians_to_angle(angdiffr,units)",
"def calculate_angle(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.atan2(dy, dx) * 180.0 / math.pi",
"def angle(self, other):\n return acosd(np.clip(self.uv().dot(other.uv()), -1, 1))",
"def angle_difference(θ1, θ2):\n ordinary_diff = (θ2 - θ1) % np.pi\n return (np.pi / 2) - np.abs(ordinary_diff - (np.pi / 2))",
"def angle_diff(a1, a2):\n a = a1 - a2\n if abs(a) > 180:\n return np.sign(a)*360 - a\n else:\n return a",
"def angle_diff(self,a,b):\n self.a = self.angle_normalize(a)\n self.b = self.angle_normalize(b)\n self.d1 = a-b\n self.d2 = 2*math.pi - math.fabs(self.d1)\n if self.d1 > 0:\n self.d2 *= -1.0\n if math.fabs(self.d1) < math.fabs(self.d2):\n return self.d1\n else:\n return self.d2",
"def angle_diff(self,a,b):\n self.a = self.angle_normalize(a)\n self.b = self.angle_normalize(b)\n self.d1 = a-b\n self.d2 = 2*math.pi - math.fabs(self.d1)\n if self.d1 > 0:\n self.d2 *= -1.0\n if math.fabs(self.d1) < math.fabs(self.d2):\n return self.d1\n else:\n return self.d2",
"def angle_diff(self, a, b):\n a = self.angle_normalize(a)\n b = self.angle_normalize(b)\n d1 = a-b\n d2 = 2*math.pi - math.fabs(d1)\n if d1 > 0:\n d2 *= -1.0\n if math.fabs(d1) < math.fabs(d2):\n return d1\n else:\n return d2",
"def angle(self, other):\n return acosd(self.normalized().dot(other.normalized()))",
"def angle(p1, p2):\n dx = p2[0] - p1[0]\n dy = p2[1] - p1[1]\n if dx == 0:\n if dy == 0:\n return 0\n return 90\n alpha = math.atan(dy / dx) * 180 / math.pi\n if alpha < 0:\n alpha = 180 - alpha\n return alpha",
"def CalculateCompassDifference(a, b):\n delta = NormalizeAngle(a - b)\n return delta",
"def ang_diff(self, theta1, theta2):\n\n return (theta1 - theta2 + np.pi) % (2 * np.pi) - np.pi",
"def getAngle(self):\n return self.articulateEncoder.getDistance()+self.angleOffset",
"def angle_diff(self, a, b):\n\n\t\td1 = a-b\n\t\td2 = 2*math.pi - math.fabs(d1)\n\t\tif d1 > 0:\n\t\t\td2 *= -1.0\n\t\tif math.fabs(d1) < math.fabs(d2):\n\t\t\treturn d1\n\t\telse:\n\t\t\treturn d2",
"def angle_diff(self, a, b):\n\n\t\td1 = a-b\n\t\td2 = 2*math.pi - math.fabs(d1)\n\t\tif d1 > 0:\n\t\t\td2 *= -1.0\n\t\tif math.fabs(d1) < math.fabs(d2):\n\t\t\treturn d1\n\t\telse:\n\t\t\treturn d2",
"def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))",
"def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)",
"def angle(p0, p1, prv_ang=0):\r\n ang = math.atan2(p0[1] - p1[1], p0[0] - p1[0])\r\n a0 = (ang - prv_ang)\r\n a0 = a0 % (PI * 2) - PI\r\n return a0",
"def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])",
"def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle",
"def get_angle_degrees_between(self, other):\n return math.degrees(self.get_angle_between(other))",
"def getAngle(p1, p2, unit=\"rad\"):\n\n t = math.atan((p2.x - p1.x)/(p2.y - p1.y))\n \n if unit == 'rad':\n return t\n elif unit=='deg':\n return t * 180/math.pi",
"def angle_difference(self, x, y):\n return 180 - abs(abs(x - y) - 180)",
"def angular_separation(r1: np.ndarray, r2: np.ndarray) -> float:\n # First compute the rotation that maps r1 to r2.\n dr = r2 @ r1.transpose()\n # Then extract the angle.\n _, angle = transforms3d.axangles.mat2axangle(dr)\n # Normalise the angle.\n if angle > np.pi:\n angle = 2 * np.pi - angle\n\n # Return the angle in degrees.\n return angle * 180 / np.pi",
"def angle_to(self, other):\n return other.angle - self.angle",
"def get_exact_angle(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n return math.atan2(dy,dx)",
"def angle(p1, p2):\n x_dist = p2[0] - p1[0]\n y_dist = p2[1] - p1[1]\n return math.atan2(-y_dist, x_dist) % (2 * math.pi)",
"def angle(v1, v2, acute=True):\n angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n if acute == True:\n return angle\n else:\n return 2 * np.pi - angle",
"def angle_difference(x, y):\n return 180 - abs(abs(x - y) - 180)",
"def angle(self) -> float:\n ..."
] | [
"0.72310394",
"0.7070686",
"0.6966585",
"0.6913901",
"0.6875004",
"0.67967904",
"0.67967904",
"0.67649186",
"0.6754971",
"0.6737686",
"0.6717077",
"0.6657913",
"0.6624008",
"0.66195256",
"0.66195256",
"0.6610916",
"0.6595404",
"0.65914005",
"0.659087",
"0.6585312",
"0.6569988",
"0.6549654",
"0.6512102",
"0.649915",
"0.6453865",
"0.64006644",
"0.6379211",
"0.6372998",
"0.63686305",
"0.6364897"
] | 0.7255634 | 0 |
This begins by converting the provided value into a 16-bit two's complement integer. Next, it bounds the converted integer between the provided upper and lower bounds. | def _convert_bound(value, lower_bound, upper_bound):
# Converts value to 16 bit two's complement integer via bitwise.
most_sig_bit = 0x8000
# Gets the two least significant bits
convert_val = value & _BYTE << _BYTE_SIZE | value & _BYTE
# Extends the most significant bit if it is a 1. This is done by
# carrying out the most significant bit.
if bool(convert_val & most_sig_bit):
convert_val |= ~(_BYTE << _BYTE_SIZE | _BYTE)
# Bounds the converted value
if convert_val > upper_bound:
return upper_bound
elif convert_val < lower_bound:
return lower_bound
return convert_val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map_01_to_16bit(value):\n # result = None\n # result = int(map_bound(value, 0.0, 1.0, 0, 65535))\n # return result\n # return int(map_bound(value, 0.0, 1.0, 0, 65535))\n # result = None\n # if value <= 0:\n # # result = 0\n # return 0\n # else:\n # if value >= 1:\n # # result = 65535\n # return 65535\n # else:\n # # simplified\n # # result = 65535 * value / 1\n # return int(65535 * value)\n # return result\n return int(65535 * value)",
"def convertSigned16(self, val):\r\n if val > self.TC16_MIDPOINT:\r\n val = val - self.TC16_MIDPOINT + self.TC16_OFFSET\r\n return val",
"def calculate_16bit_values(value, mode_16bit=False):\n high_byte = 0\n low_byte = 0\n # if mode_16bit:\n if value > 65535:\n value = 65535\n low_byte, high_byte = struct.unpack(\n \"<BB\",\n struct.pack(\"<H\", value)\n )\n # else:\n # if value > 255:\n # # convert 16bit range to 8bit range\n # value = value / 256\n # # check for bounds\n # if value > 255:\n # value = 255\n # if value < 0:\n # value = 0\n # high_byte = value\n return high_byte, low_byte",
"def map_16bit_to_8bit(value):\n if not (0 <= value < 65535):\n value = min(max(value, 0), 65535)\n return value >> 8",
"def map_16bit_to_01(value):\n result = None\n result = map_bound(value, 0, 65535, 0.0, 1.0)\n return result",
"def calculate_16bit_parts(value):\n if not (0 <= value < 65535):\n value = min(max(value, 0), 65535)\n # high_byte = value // 256\n # low_byte = value % 256\n # return high_byte, low_byte\n # faster:\n # return value // 256, value % 256\n # faster again:\n return value >> 8, value & 255",
"def twos_complement(input_value, num_bits=16):\n mask = 2 ** (num_bits - 1)\n return -(input_value & mask) + (input_value & ~mask)",
"def sign_val(self,value):\n if value >= 0x8000:\n value -= 0x10000\n return value",
"def _fromTwosComplement(x, bits=16):\n _checkInt(bits, minvalue=0, description='number of bits')\n\n _checkInt(x, description='input')\n upperlimit = 2 ** (bits) - 1\n lowerlimit = 0\n if x > upperlimit or x < lowerlimit:\n raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \\\n .format(x, lowerlimit, upperlimit, bits))\n\n # Calculate inverse(?) of two'2 complement\n limit = 2 ** (bits - 1) - 1\n if x <= limit:\n return x\n return x - 2 ** bits",
"def twos_complement_to_unsigned(val, bits):\n if val >= 0:\n return val\n all_one = (1 << bits)-1\n val = ((-val)^all_one)+1\n\n return val",
"def test_ushort_int_out_of_upper_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_short, 65536)",
"def swap16(value: int) -> int:\n return swap(value, 16)",
"def _twosComplement(x, bits=16):\n _checkInt(bits, minvalue=0, description='number of bits')\n _checkInt(x, description='input')\n upperlimit = 2 ** (bits - 1) - 1\n lowerlimit = -2 ** (bits - 1)\n if x > upperlimit or x < lowerlimit:\n raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \\\n .format(x, lowerlimit, upperlimit, bits))\n\n # Calculate two'2 complement\n if x >= 0:\n return x\n return x + 2 ** bits",
"def twos_complement(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is",
"def test_ushort_int_out_of_lower_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_short, -1)",
"def _calculate_16bit_values(self, value):\n high_byte = 0\n low_byte = 0\n high_byte, low_byte = calculate_16bit_values(value, self.mode_16bit)\n return high_byte, low_byte",
"def twos_complement(value: int, width: int) -> int:\n signmask = 1 << (width - 1)\n if (value & signmask) == 0:\n # Mask off sign bit.\n return value & (signmask - 1)\n else:\n # Two's complement.\n return -bit_invert(value, width - 1) - 1",
"def _vint_signedto2sc(self, number):\n return number & self._vint_2sc_mask",
"def u16(value: bytes, endian: str = \"little\", sign: bool = False) -> int:\n return unpack(value, 16, endian, sign)",
"def _16bit_unsigned(LSB, MSB):\n\treturn (MSB << 8) | LSB",
"def ones_complement(val):\n #mask = (1 << val.bit_length()) - 1\n #return int(hex(val ^ mask), 16)\n b = bin(val)\n b = b.replace('0', 'x')\n b = b.replace('1', '0')\n b = b.replace('x', '1')\n b = b.replace('1b', '0b')\n return int(b, 2)",
"def _float_to_16_bit_sample(value):\n sample = int(32767.0 * value)\n byte0 = sample & 255\n byte1 = (sample >> 8) & 255\n return byte0, byte1",
"def _vint_2sctosigned(self, number):\n assert number >= 0, 'number is less than 0'\n if (number >> (self._vint_2sc_max_bits - 1)) & 1:\n number = ~(~number & self._vint_2sc_mask)\n return number",
"def add_int16(self, value):\n self._check_int_type(value, _INT_2BYTE_UPPERLIMIT)\n self._data += value.to_bytes(2, byteorder=\"little\")",
"def twos_complement(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set\n val = val - (2 ** bits) # compute negative value\n return val",
"def from_uint16(traces, scalers):\n mask = traces < 1\n min_val, max_val = scalers\n traces -= 1\n traces = traces.astype(float) * max_val / (65535 - 1)\n traces += min_val\n return traces, mask",
"def _FixInt(value: int) -> int:\n if value < 0:\n value &= 0xFFFFFFFF\n return value",
"def simplebounds(cls, val, lower, upper):\n if val < lower:\n val = lower\n if val > upper:\n val = upper\n return val",
"def GetUInt16(start, numBytes, ens):\n try:\n return struct.unpack(\"b\", ens[start:start + numBytes])[0]\n except Exception as e:\n logging.error(\"Error creating a UInt16 from bytes. \" + str(e))\n return 0",
"def clamp(lower, value, upper):\n if lower > value:\n return lower\n if upper < value:\n return upper\n return value"
] | [
"0.6709142",
"0.6585885",
"0.62387407",
"0.6119893",
"0.6076029",
"0.6074267",
"0.603281",
"0.6030608",
"0.6009207",
"0.5907811",
"0.59070516",
"0.5887754",
"0.584717",
"0.5768137",
"0.5753733",
"0.57271814",
"0.5713832",
"0.5703631",
"0.5701616",
"0.5684892",
"0.56532973",
"0.56084615",
"0.5595952",
"0.558478",
"0.553842",
"0.5501801",
"0.5484338",
"0.5465536",
"0.5421422",
"0.5412173"
] | 0.7287549 | 0 |
Sends the sensor command with the provided packet ID to the robot and reads the robot's response. | def _read_packet(self, packet_id, data_bytes):
self._serial_conn.send_command(_SENSORS_OPCODE+" "+str(packet_id))
return self._serial_conn.read_data(data_bytes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _command(self, servo_id, instruction, *params):\n length = 3 + len(params)\n #print('length', length)\n \"\"\"\n checksum calculation:\n checksum = ~(ID + length+instruction+parms) if the numbers in the brackets\n are calculated and exceeded 255, then it takes the lowest one byte, \"~\"\n means Negation\n \"\"\"\n checksum = 255 - ((servo_id + length + instruction + sum(params))% 256)\n #print('checksum', checksum)\n packet = [0x55, 0x55, servo_id, length, instruction, *params, checksum]\n #print('packet', packet)\n self._serial.write(bytearray(packet))\n #print('Sending packet', packet)",
"def _execute(self, message):\n logging.info(__name__ + ' : Send the following command to the device: %s' % message)\n self.visa_handle.write('@%s%s' % (self._number, message))\n sleep(70e-3) # wait for the device to be able to respond\n result = self._read()\n if result.find('?') >= 0:\n print(\"Error: Command %s not recognized\" % message)\n else:\n return result",
"def _send_command(self, command):\n command = \"%s\\n\" % (command.strip())\n self.server.write(command)\n self.server.flush()\n\n #read the length of the result\n length = int(self.server.readline())\n output = self.server.read(length)\n\n result = pickle.loads(output)\n if result[0] == 'ok':\n return result[1]\n else:\n raise RobotCommandError(str(result))",
"def send(self, serial_cmnd):\n self.sio.write(serial_cmnd+\"\\n\") # TextIOWrapper object converts the newline character to \"\\r\\n\", this is required by the device \n self.sio.flush() # it is buffering. required to get the data out *now*\n response = self.sio.readline()\n response = response.rstrip() # Trim the newline character\n if (response == \"ok\"):\n return True\n else:\n logging.debug(\"Board response:\" + response) \n return response",
"def __send(self, cmd_val, data):\n # Proof the input\n if cmd_val not in command.values():\n raise ValueError(\"{}: the provided command value {} is not valid.\".format(self.sensor_name, cmd_val))\n if not isinstance(data, bytearray):\n raise TypeError(\"{}: command data must be of type byte array.\".format(self.sensor_name))\n\n # Initialise the command bytes array\n bytes_to_send = bytearray()\n bytes_to_send.append(self.__SerialStart)\n bytes_to_send.append(self.__SendByte)\n bytes_to_send.append(cmd_val)\n\n # Add data and set zero to the remainder\n for i in range(0, 12):\n if i < len(data):\n bytes_to_send.append(data[i])\n else:\n bytes_to_send.append(0)\n\n # Last two bytes before the checksum is the CommandTerminator\n # TODO : rename command terminator to sensor ID\n bytes_to_send.append(self.__CommandTerminator)\n bytes_to_send.append(self.__CommandTerminator)\n\n # Calculate and append the checksum\n checksum = self.__checksum_make(bytes_to_send)\n bytes_to_send.append(checksum % 256)\n\n # Append the terminator for serial message\n bytes_to_send.append(self.__SerialEnd)\n\n self.logger.info(\"{}: sending {} {} command with {} message.\".format(self.sensor_name, command_mode.keys()[command_mode.values().index(bytes_to_send[3])], command.keys()[command.values().index(cmd_val)], \":\".join(\"%02x\" % b for b in bytes_to_send)))\n\n if len(bytes_to_send) != self.__CommandLength:\n raise IOError(\"{}: sent {} bytes, expected {}.\".format(self.sensor_name, len(bytes_to_send), self.__CommandLength))\n\n # Send the command\n written_bytes = self.device.write(bytes_to_send)\n self.device.flush()\n\n if written_bytes != len(bytes_to_send):\n raise IOError(\"{}: not all bytes written.\".format(self.sensor_name))\n\n # Check the received values\n received = self.__response(cmd_val)\n\n if len(received) != self.__ResponseLength:\n raise IOError(\"{}: received {} bytes, expected {}.\".format(self.sensor_name, len(received), self.__ResponseLength))\n\n if len(received) == 0:\n raise IOError(\"{}: sensor is not responding.\".format(self.sensor_name))\n\n # When no command or command is request command,\n # second byte has to be ReceiveByte\n if (cmd_val is None or cmd_val == command[\"Request\"]) and received[1] != self.__ReceiveByte:\n raise ValueError(\"{}: expected to receive value {:#X} on a value request. Received: \\\"{}\\\".\".format(self.sensor_name, self.__ReceiveByte, received[1]))\n\n # Check, if the response is response of the command, except request command\n if cmd_val != command[\"Request\"]:\n if received[2] != cmd_val:\n raise ValueError(\"{}: sensor response does not belong to the command sent before.\".format(self.sensor_name))\n else:\n return received[3: -2]\n else:\n return received",
"def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response",
"def send_command(self, command: str) -> str:\r\n if self.serial_ok():\r\n print(f\"Sending command '{ command }' (ascii-code: { int.from_bytes(command.encode(), 'little') })\")\r\n self.ser.write(command.encode())\r\n return self.recv_response()\r\n else:\r\n print(\"Error: Can't send Arduino command. Serial not initialized.\")",
"def send_command(command):\n\n try:\n dict_command = json.loads(command)\n except Exception as e:\n print(e)\n return\n default = \"\"\n scommand = dict_command.get('command', default)\n\n print(\"Processing: \"+scommand)\n if scommand == 'ON' or scommand == 'OFF':\n id = str(dict_command.get('id', default))\n\n if not id in dict_ac:\n print(\"MQTT Unknown Actuator: \" + id)\n return\n else:\n print(\"Actuator id: \" + id)\n structure = dict_ac[id]\n structure['status']=scommand",
"async def send_sensor_command(\n driver: AbstractCanDriver, command: SensorRun, csv: bool\n) -> None:\n if command.mount == \"left\":\n node = NodeId.pipette_left\n elif command.mount == \"right\":\n node = NodeId.pipette_right\n elif command.mount == \"gripper\":\n node = NodeId.gripper\n else:\n node = NodeId.broadcast\n if command.sensor_type == SensorType.pressure:\n await handle_pressure_sensor(command, driver, node, csv, log)\n elif command.sensor_type == SensorType.capacitive:\n await handle_capacitive_sensor(command, driver, node, csv, log)\n else:\n await handle_environment_sensor(command, driver, node, csv, log)",
"def _read_packet_from_device(self, adb_info):\n msg = self._read_bytes_from_device(constants.MESSAGE_SIZE, adb_info)\n cmd, arg0, arg1, data_length, data_checksum = unpack(msg)\n command = constants.WIRE_TO_ID.get(cmd)\n\n if not command:\n raise exceptions.InvalidCommandError(\"Unknown command: %d = '%s' (arg0 = %d, arg1 = %d, msg = '%s')\" % (cmd, int_to_cmd(cmd), arg0, arg1, msg))\n\n if data_length == 0:\n return command, arg0, arg1, b\"\"\n\n data = self._read_bytes_from_device(data_length, adb_info)\n actual_checksum = checksum(data)\n if actual_checksum != data_checksum:\n raise exceptions.InvalidChecksumError(\"Received checksum {} != {}\".format(actual_checksum, data_checksum))\n\n return command, arg0, arg1, data",
"def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response",
"def servo_read(self, servo_id):\n # Adding 1 to the servo_id because the robot starts counting at 1\n servo_id = bytes([servo_id+1])\n msg = b'\\x24' + servo_id\n parameter_len = 2\n ans = self.__bt.read(msg, parameter_len)\n if ans is not None:\n # Check that the received value corresponds to the specified servo\n if ans[:1] == servo_id:\n return int.from_bytes(ans[1:], \"big\")\n return None",
"def send_to_port(self):\r\n time.sleep(2)\r\n # ser.write(\"R\".encode())\r\n ser.flush()\r\n ser.write(\"{},{},{},{},{}\".format(self.x_Pos, self.y_Pos, self.t_Tap, self.U_on, self.u_off).encode())\r\n # ser.flush()\r\n # while (1 == 1):\r\n # mydata = ser.readline().lstrip()\r\n # print(mydata.decode('utf-8'))\r\n # value = str(mydata)\r",
"def __send_and_receive(self, cmnd, timeout=None):\n\n if not self.is_connected():\n printf(\"Communication| Tried to send a command while robot was not connected!\")\n return \"\"\n\n # Prepare and send the command to the robot\n self.__gen_serial_id()\n cmnd = \"#{} {}\".format(self.serial_id,cmnd)\n printf(\"Coummunication | Send Message: {}, total length: {}\".format(cmnd,len(cmnd)), type=DEBUG)\n if PY3:\n cmndString = bytes(cmnd + \"\\n\", encoding='ascii')\n else:\n cmndString = bytes(cmnd + \"\\n\")\n\n try:\n self.__serial.write(cmndString)\n\n except serial.serialutil.SerialException as e:\n # printf(\"while sending command {}. Disconnecting Serial! \\nError: {}\".format(cmndString, str(e)),type=ERROR)\n self.__isConnected = False\n return \"\"\n\n try:\n if PY3:\n response = str(self.__serial.readline(),encoding='ascii')\n else:\n response = self.__serial.readline()\n if response.startswith(\"${}\".format(self.serial_id)):\n if \"E20\" in response or \"E21\" in response:\n printf(\"Communication| ERROR: send {}, received error from robot: {}\".format(cmndString, response), type=ERROR)\n return \"\"\n response = response.replace('\\n', '')\n response = response.replace('${} '.format(self.serial_id),'')\n printf(\"Communication| [{}] {}{}\".format(cmnd, \" \" * (30 - len(cmnd)), response), type=DEBUG)\n else:\n printf(\"Communication| ERROR: send {}, received error from robot: {}\".format(cmndString, response), type=ERROR)\n # printf(\"Communication| ERROR: received error from robot: {}\".format(response),type=ERROR)\n return \"\"\n return response.lower()\n except serial.serialutil.SerialException as e:\n printf(\"while sending command {}. Disconnecting Serial! \\nError: {}\".format(cmnd,str(e)), type=ERROR)\n self.__isConnected = False\n return \"\"",
"def send_cmd_rd_response ( self,\r\r\n cmd_str=r'AT',\r\r\n rsp_str ='ok'):\r\r\n loggerModem = logging.getLogger(__name__ + 'send_cmd_rd_response')\r\r\n text_str = \"AT command\"\r\r\n loggerModem.debug(\"%-15s:\\t%s\" %(text_str, cmd_str))\r\r\n cmd_str = cmd_str + '\\r\\n'\r\r\n\r\r\n self.serObj.write(cmd_str) # write a string\r\r\n\r\r\n timeout_sec = 30\r\r\n remaining_time = timeout_sec\r\r\n poll_time_sec=2\r\r\n response = \"\"\r\r\n\r\r\n while remaining_time > 0:\r\r\n response = self.serObj.read(2048)\r\r\n time.sleep(poll_time_sec)\r\r\n remaining_time -= poll_time_sec\r\r\n loggerModem.debug(\"remaining time %s\" %remaining_time)\r\r\n reg_expr = r'\\b' + re.escape(rsp_str) + r'\\b'\r\r\n matchObj = re.search (reg_expr, response, re.M|re.I)\r\r\n if matchObj:\r\r\n break\r\r\n\r\r\n if matchObj:\r\r\n text_str = \"Response\"\r\r\n loggerModem.debug (\"%-15s:\\t%s\" %(text_str, matchObj.group()))\r\r\n return (0, response)\r\r\n else:\r\r\n loggerModem.debug(\"Ok, string not found in the response message\")\r\r\n return (1, response)",
"def transmit_command(command, socket, guit):\n if command == \"get_map_update\":\n send_data(socket, \"SEND_MAP\")\n ack = receive_data(socket)\n robot_map_data = json.loads(receive_data(socket)) # [[robot_x, robot_y], [map..]\n guit.receive_command([\"update_map\", robot_map_data[0], robot_map_data[1]])\n elif command == \"sync_mode\":\n send_data(socket, \"SYNC_MODE\") #0 is autonomous, 1 is manual\n ack = receive_data(socket)\n current_mode_integer = receive_data(socket)\n guit.receive_command([\"update_mode\", current_mode_integer]) \n elif len(command) > 3 and command[:4] == \"key_\": #Fulhack that will save us many rows.\n send_data(socket, \"KEY_EVENT\")\n ack = receive_data(socket)\n send_data(socket, command[4:])\n elif len(command) > 4 and command [:5] == \"mode_\":\n send_data(socket, \"TOGGLE_MODE\")\n ack = receive_data(socket)\n send_data(socket, command[5:])\n elif command == \"get_motor_data\":\n send_data(socket, \"FORWARD_MOTOR_INFO\")\n ack = receive_data(socket)\n motor_data = json.loads(receive_data(socket))\n dir_mod_left = 1 if motor_data[\"LEFT_SIDE_DIRECTION\"] else -1\n dir_mod_right = 1 if motor_data[\"RIGHT_SIDE_DIRECTION\"] else -1\n speed_left = motor_data[\"LEFT_SIDE_SPEED\"]*dir_mod_left\n speed_right = motor_data[\"RIGHT_SIDE_SPEED\"]*dir_mod_right\n guit.receive_command([\"set_motors\", speed_left, speed_right])\n guit.receive_command([\"set_servo\", motor_data[\"SERVO_ANGLE\"]])\n elif command == \"get_sensor_data\":\n send_data(socket, \"FORWARD_SENSOR_INFO\")\n ack = receive_data(socket)\n sensor_data = json.loads(receive_data(socket))\n guit.receive_command([\"set_sensors\", sensor_data])",
"def directive_send(arb_id, payload, response_handler):\n arb_id = \"0x\" + arb_id\n send_msg = payload_to_str_base(payload)\n with CanActions(int_from_str_base(arb_id)) as can_wrap:\n # Send the message on the CAN bus and register a callback\n # handler for incoming messages\n can_wrap.send_single_message_with_callback(list_int_from_str_base(send_msg), response_handler)\n # Letting callback handler be active for CALLBACK_HANDLER_DURATION seconds\n sleep(CALLBACK_HANDLER_DURATION)\n # can_wrap.clear_listeners()",
"def send_command(self):\n button = self.sender()\n answer: str = self.UsbHost.send_command(self.state.ser, self.command_dict[button], str(self.state.device_id))\n if answer == 'Ok':\n self.statusbar.showMessage(self.result_dict[button])\n else:\n error_message(self.error_dict[button])\n self.statusbar.showMessage(answer_translate[answer])\n self.create_log_message(self.command_dict[button], answer, \"\")",
"def __response(self, cmd_val=None):\n # Receive the response while listening serial input\n bytes_received = bytearray(1)\n while True:\n one_byte = self.device.read(1)\n '''If no bytes are read the sensor might be in sleep mode.\n It makes no sense to raise an exception here. The raise condition\n should be checked in a context outside of this function.'''\n if len(one_byte) > 0:\n bytes_received[0] = ord(one_byte)\n # if this is true, serial data is coming in\n if bytes_received[0] == self.__SerialStart:\n single_byte = self.device.read(1)\n if ((cmd_val is not None and cmd_val != command[\"Request\"]) and ord(single_byte) == self.__ResponseByte) or ((cmd_val is None or cmd_val is command[\"Request\"]) and ord(single_byte) == self.__ReceiveByte):\n bytes_received.append(ord(single_byte))\n break\n else:\n if self.__duty_cycle == 0:\n self.logger.error(\"{}: a sensor response has not arrived within timeout limit. If the sensor is in sleeping mode wake it up first! Returning an empty byte array as response!\".format(self.sensor_name))\n else:\n self.logger.info(\"{}: no response. Expected while in duty cycle.\".format(self.sensor_name))\n return bytearray()\n\n response_bytes = struct.unpack('BBBBBBBB', self.device.read(8))\n bytes_received.extend(response_bytes)\n\n if cmd_val is not None and cmd_val != command[\"Request\"]:\n\n if bytes_received[1] is not self.__ResponseByte:\n raise IOError(\"{}: no ResponseByte found in the response.\".format(self.sensor_name))\n\n if bytes_received[2] != cmd_val:\n raise IOError(\"{}: third byte of serial data \\\"{}\\\" received is not the expected response to the previous command: \\\"{}\\\"\".format(self.sensor_name, bytes_received[2], cmd_val.name))\n\n if cmd_val is None or cmd_val == command[\"Request\"]:\n if bytes_received[1] is not self.__ReceiveByte:\n raise IOError(\"{}: received byte not found in the response.\".format(self.sensor_name))\n\n # Evaluate checksum\n if self.__checksum_make(bytes_received[0:-2]) != bytes_received[-2]:\n raise IOError(\"{}: checksum of received data is invalid.\".format(self.sensor_name))\n\n # Set device_id if device id is not initialized or proof it, if it's not None\n if self.__device_id is None:\n self.__device_id = bytes_received[-4:-2]\n elif self.__device_id is not None and not self.__device_id.__eq__(bytes_received[-4:-2]):\n raise ValueError(\"{}: data received ({}) does not belong to this device with id {}.\".format(self.sensor_name, bytes_received, self.__device_id))\n\n self.logger.info(\"{}: the response was successful with message {}.\".format(self.sensor_name, \"\".join(\"%02x:\" % b for b in bytes_received)))\n return bytes_received",
"def process_command(self, command):\n robot = self.server.robot\n\n parts = command.split()\n\n if parts[0] not in (\n 'status', 'stop', 'brake', 'reset', 'go', 'speed', 'left', 'right'):\n raise CommandError(\"invalid command '%s'\" % command)\n\n\n if parts[0] == 'status':\n status = robot.status\n status['monitor'] = self.server.monitor.status\n\n output = status\n\n else:\n acquired = self.server.control_lock.acquire(blocking = 0)\n if not acquired:\n raise Exception(\"another connection is controlling the robot\")\n\n try:\n if parts[0] == 'stop':\n robot.stop()\n output = 'robot stopped'\n\n elif parts[0] == 'brake':\n try:\n new_speed = self.parse_speed(parts)\n if new_speed < 1 or new_speed > 100:\n raise ValueError(\"out of range\")\n except:\n raise CommandError(\"brake must be a number from 1 to 100\")\n\n robot.brake(new_speed)\n output = 'braking initiated'\n\n elif parts[0] == 'reset':\n robot.reset()\n output = \"robot reset successful\"\n\n elif parts[0] == 'go':\n robot.go()\n output = \"robot ready to run\"\n\n elif parts[0] in ('speed', 'left', 'right'):\n #try to get a number out of parts[1]\n try:\n new_speed = self.parse_speed(parts)\n if new_speed is None or new_speed < -100 or new_speed > 100:\n raise ValueError(\"out of range\")\n except Exception, e:\n raise CommandError(\"speed must be a number from -100 to 100, %s\" % e)\n\n #figure out which motor(s) we want to deal with\n motor = parts[0] if parts[0] in ('left', 'right') else 'both'\n robot.set_speed(new_speed, motor)\n\n printable_motor = \"%s motor\" if motor in ('left', 'right') else \"both motors\"\n output = \"speed on %s set to %s\" % (printable_motor, new_speed)\n finally:\n self.server.control_lock.release()\n\n return output",
"def command_arduino(self, message):\n dispatcher.send(message=\"CI\" + str(message) + \";\", signal=ts.RPI_ARDUINO_SIGNAL, sender=ts.RPI_SENDER)\n logging.info(\"rpi received message from algorithm and write message to arduino: \" + str(message))",
"def run(self):\n \n # read sensor value (actual value)\n x = self.ORION_CB.get_entity_attribute_value(entity_name=self.params['sensor_entity_name'],\n attribute_name=self.params['sensor_attrs'])\n # set 0 if empty\n if x == '\" \"':\n x = '0'\n # convert to float\n self.x_act = float(x) \n # calculate PID output\n self.y = self.PID.run(x_act = self.x_act, x_set = self.params['setpoint'])\n # send post command\n self.ORION_CB.post_cmd_v1(self.params['actuator_entity_name'], \n self.params['actuator_type'],\n self.params['actuator_command'], round(self.y,3))",
"async def _async_send_command(self, data_cmd):\n device_id = self._device_id\n if not device_id:\n return\n if not data_cmd:\n return\n\n api_device = f\"{API_DEVICES}/{device_id}\"\n api_command = f\"{api_device}/commands\"\n\n async with self._session.post(\n api_command,\n headers=_headers(self._api_key),\n data=data_cmd,\n raise_for_status=True,\n ) as resp:\n await resp.json()\n\n await self._device_refresh()",
"def device_patch_sensor(deviceid, sensorid):\n sensor_response = requests.patch(\"http://sensor-access:5600/v1/sensors/{}\".format(sensorid), json=request.json)\n return make_response(sensor_response.content, sensor_response.status_code)",
"def run_command(self, server_id, cmd):\n status, data, errors, messages = self._make_post_request(MCAPIRoutes.SEND_CMD, extra_params={'id': server_id}, body={'command':cmd})\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)",
"async def send_command(\n self, device_id: str, command: str, arg: Optional[Union[str, int]]\n ) -> Dict[str, Any]:\n path = f\"devices/{device_id}/{command}\"\n if arg:\n path += f\"/{arg}\"\n _LOGGER.debug(\"Sending command %s(%s) to %s\", command, arg, device_id)\n return await self._api_request(path)",
"def runner(socket,id):\n socket.send('proceed')\n \n while True:\n data = socket.recv()\n print(\"id(\",id,\")=\",data)\n if not data: break\n \n elif data == \"info\":\n run_time = time.ctime(start_time)\n statusMessage = \"SERVER STATUS: Running...\\nInterface id:\"+str(id)+\"\\nBeen running since: \"+str(run_time)+\"\\n\"\n socket.send(statusMessage)\n \n elif data == \"plug\": #talk to plugin? aka. other commands \n pass\n \n else: #not valid command.\n socket.send(\"invalid command\")\n \n socket.close() \n print(\"closed connection\") #means the thread is also quitting",
"def send_command(self, command):\r\n print (\">> send cmd: {}\".format(command))\r\n self.abort_flag = False\r\n timer = threading.Timer(self.command_timeout, self.set_abort_flag)\r\n\r\n self.socket.sendto(command.encode('utf-8'), self.tello_address)\r\n\r\n timer.start()\r\n while self.response is None:\r\n if self.abort_flag is True:\r\n break\r\n timer.cancel()\r\n \r\n if self.response is None:\r\n response = 'none_response'\r\n else:\r\n response = self.response.decode('utf-8')\r\n\r\n self.response = None\r\n\r\n return response",
"def servo_write(self, servo_id, angle, travelling=20):\n # Adding 1 to the servo_id because the robot starts counting at 1\n servo_id = bytes([servo_id+1])\n angle = bytes([angle])\n run_time = bytes([travelling])\n time_frames = b'\\x00\\x10'\n msg = b'\\x22' + servo_id + angle + run_time + time_frames\n parameter_len = 2\n ans = self.__bt.read(msg, parameter_len)\n if ans is not None:\n # Check that the received value corresponds to the specified servo\n if ans[:1] == servo_id:\n return int.from_bytes(ans[1:], \"big\")\n return None",
"def handle(self):\n global latest_status\n data = self.request[0]\n socket = self.request[1]\n logging.info(\"Received {} bytes from {}\".format(len(data), self.client_address[0]))\n jss = interface.joystick_status_pb2.JoystickStatus()\n jss.ParseFromString(data)\n sent = jss.sent.ToDatetime()\n if not latest_status:\n latest_status = jss\n else:\n if latest_status.sent.ToDatetime() < sent:\n latest_status = jss\n else:\n logging.warning(\"Discarded stray package.\")\n ack = interface.joystick_status_pb2.JoystickAck()\n ack.sent.CopyFrom(jss.sent)\n ack.received.GetCurrentTime()\n response = ack.SerializeToString()\n socket.sendto(response, self.client_address)"
] | [
"0.58767503",
"0.58414966",
"0.5820011",
"0.58169806",
"0.58055025",
"0.5673132",
"0.5644523",
"0.5616639",
"0.55147266",
"0.54833007",
"0.547546",
"0.54654247",
"0.5423958",
"0.5413912",
"0.54121715",
"0.5402151",
"0.54013747",
"0.53871274",
"0.5371295",
"0.5369249",
"0.5366505",
"0.53533393",
"0.5347814",
"0.5346129",
"0.5342461",
"0.5284341",
"0.52798206",
"0.52735823",
"0.52595",
"0.5257702"
] | 0.69131565 | 0 |
Calculate the difference between two encoder distances. This takes the counter turnover (wrap-around) into account. | def _encoder_diff(ref_count, new_count, forward=True):
if forward and ref_count > new_count:
return (Drive.MAX_DIST - ref_count) - (Drive.MIN_DIST - new_count)
elif not forward and new_count > ref_count:
return (Drive.MIN_DIST - new_count) - (Drive.MAX_DIST - ref_count)
else:
return new_count - ref_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance(self, ref_dist, new_dist=None, forward=True):\n if new_dist is None:\n new_dist = self.read_encoders()\n\n enc_count = 0\n enc_sum = 0\n # For each encoder in both dictionaries increment encoder count and\n # add the difference between the encoder's value to a running summation.\n for dist in ref_dist:\n if dist in new_dist:\n enc_count += 1\n enc_sum += Robot._encoder_diff(ref_dist[dist],\n new_dist[dist],\n forward)\n\n # Average the difference between encoders values\n return enc_sum / enc_count",
"def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))",
"def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance",
"def calcDelta(self, energy1, energy2):\n \n return math.fabs(energy2-energy1)",
"def calcDistance(self, left, right):\n\n return math.fabs(right-left)",
"def get_distance(self, resp1, resp2):\n feed_dict = {self.anchor: resp1}\n embed1 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n feed_dict = {self.anchor: resp2}\n embed2 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n return np.sqrt(np.sum((embed1-embed2)**2, 1))",
"def calculate_delta(\n self,\n genome_one: StorageGenome.StorageGenome,\n genome_two: StorageGenome.StorageGenome\n ) -> float:\n excess_coefficient = float(\n self.clustering_parameters[\"excess_coefficient\"]\n )\n disjoint_coefficient = float(\n self.clustering_parameters[\"disjoint_coefficient\"]\n )\n weight_delta_coefficient = float(\n self.clustering_parameters[\"weight_difference_coefficient\"]\n )\n\n bigger_genome, smaller_genome = (genome_one, genome_two) \\\n if len(genome_one.genes) > len(genome_two.genes) \\\n else (genome_two, genome_one)\n\n bigger_genome_gene_ids = [gene for gene in bigger_genome.genes]\n smaller_genome_gene_ids = [gene for gene in smaller_genome.genes]\n\n all_gene_ids = smaller_genome_gene_ids + bigger_genome_gene_ids\n\n matching_genes = [gene_id for gene_id in bigger_genome_gene_ids\n if gene_id in smaller_genome_gene_ids]\n\n differing_genes = [gene_id for gene_id in all_gene_ids\n if gene_id not in matching_genes]\n\n n = len(bigger_genome.genes)\n if n == 0:\n return 0\n\n disjoint_count, excess_count = self.calculate_disjoint_excess_count(smaller_genome_gene_ids, differing_genes)\n\n w_bar = self.calculate_average_weight_difference(bigger_genome, smaller_genome, matching_genes)\n\n return ((excess_coefficient * excess_count) / n) + \\\n ((disjoint_coefficient * disjoint_count) / n) + \\\n (weight_delta_coefficient * w_bar)",
"def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))",
"def CalculateCompassDifference(a, b):\n delta = NormalizeAngle(a - b)\n return delta",
"def algdelta(alg1, alg2, *args):\n file_delta = ord(alg2[0]) - ord(alg1[0])\n rank_delta = ord(alg2[1]) - ord(alg1[1])\n return file_delta, rank_delta",
"def distance(self, wn1, wn2):\n return abs(self.chunk_map[wn1] - self.chunk_map[wn2])",
"def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))",
"def calc_difference(ndvi_tile1, ndvi_tile2, output):\n \n #open dataset and get Affine transformation and bounding properties \n with rio.open(ndvi1) as src1:\n meta = src1.meta.copy()\n transform = src1.meta[\"transform\"]\n x = meta['width']\n y = meta['height']\n band1 = src1.read()\n \n #open dataset \n with rio.open(ndvi2) as src2:\n #read the band as ndarray with the same dimension of src1\n band2 = src2.read(out_shape=(src1.height, src1.width), \n resampling=rio.enums.Resampling.bilinear)\n #create destination for reprojection of src2\n dst_crs = {'init': 'EPSG:32632'}\n proj_band2 = np.empty(src1.shape, dtype=np.float32)\n #reproject the src2 to match src1\n warp.reproject(band2, destination=proj_band2, src_transform=src2.transform, src_crs=src2.crs, \n dst_transform=transform, dst_crs=dst_crs) \n \n #calculate difference between reprojected band2 and band1\n difference = np.subtract(proj_band2, band1)\n #create outfile\n outfile = output\n #write outfile with the properties and resolution of src1\n with rio.open(outfile, 'w', **meta) as dst:\n dst.write(difference, window=rio.windows.Window(col_off=0, row_off=0, width=x, height=y))\n\n return outfile",
"def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)",
"def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)",
"def test_Euclidian_distances_w_one_layer(self):\n\t\tm1 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2, method=EUCLIDEAN, layers=[self.fc2_layer])\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 0.0\n\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\t\t# TODO: test length of distances also",
"def edit_distance_between_seqs(seq1, seq2):\n aln1, aln2 = needleman_wunsch(seq1, seq2)\n return edit_distance_from_aln_strings(aln1, aln2)",
"def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)",
"def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre",
"def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)",
"def distance(self, keyOne, keyTwo):",
"def test_distances(self):\n\t\tm1 = models.vgg11()\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2)\n\t\n\t\tprint(avg_dW,avg_db)\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 46.485\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.67622\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\t\t\n\t\tprint(distances)",
"def distance(A, B):\n return abs(A - B)",
"def distance(A, B):\n return abs(A - B)",
"def _pairwise_dist(self,s1,s2):\n\n return 0.0",
"def test_same_distances(self):\n \n\t\tm1 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tprint(distances)",
"def edit_distance(self, other):\r\n union = len(self) + len(other)\r\n return 1.0 - 2.0*(self.intersection(other)/union)",
"def delta_e_76(lab1, lab2):\n\n l1, a1, b1 = lab1\n l2, a2, b2 = lab2\n return (l1 - l2) ** 2 + (a1 - a2) ** 2 + (b1 - b2) ** 2",
"def distance(self, other):\n ...",
"def test_Euclidian_distances(self):\n \n\t\tm1 = models.vgg11()\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2, method=EUCLIDEAN)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 30.2\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\t\t\n\t\t# biased not implemented yet in layers\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.00\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)"
] | [
"0.68873227",
"0.6565167",
"0.64041114",
"0.633405",
"0.6329175",
"0.6290871",
"0.625009",
"0.62264484",
"0.62226146",
"0.62065905",
"0.62048024",
"0.61978215",
"0.618405",
"0.61829215",
"0.6159514",
"0.6158887",
"0.6154213",
"0.6135386",
"0.6125355",
"0.6118691",
"0.61173",
"0.60952747",
"0.6092969",
"0.6092969",
"0.6057973",
"0.60526717",
"0.6048851",
"0.604613",
"0.6041143",
"0.6031584"
] | 0.67269135 | 1 |
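The `_encoder_diff` record above wraps the distance calculation around the encoder's counter range. Below is a minimal sketch of the forward-turnover arithmetic, with hypothetical counter bounds standing in for `Drive.MIN_DIST`/`Drive.MAX_DIST` (the record's backward branch is the same expression negated):

MIN_DIST = 0      # hypothetical lower counter bound (stands in for Drive.MIN_DIST)
MAX_DIST = 1000   # hypothetical upper counter bound (stands in for Drive.MAX_DIST)

def forward_encoder_diff(ref_count, new_count):
    # When driving forward the counter only grows, so a smaller new reading
    # means the counter rolled over past MAX_DIST and restarted at MIN_DIST.
    if ref_count > new_count:
        return (MAX_DIST - ref_count) - (MIN_DIST - new_count)
    return new_count - ref_count

assert forward_encoder_diff(100, 250) == 150   # no turnover: plain difference
assert forward_encoder_diff(990, 15) == 25     # turnover: 10 counts up to MAX, 15 past MIN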
Return the time series spec or create one if it doesn't exist. The new time series is linked to Instrument instr by FieldName. | def create_time_series(fieldName, instr):
spec = acm.FTimeSeriesSpec[fieldName]
if not spec:
spec = acm.FTimeSeriesSpec()
spec.Description('%s PnL History' % instr)
spec.FieldName(fieldName)
spec.RecType(acm.EnumFromString('B92RecordType', 'Instrument'))
spec.Commit()
return spec | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_time_series(for_date, instr, spec):\n return acm.FTimeSeries.Select01(\"day = '%s' and recaddr = %i \"\n \"and timeSeriesSpec = %i and runNo = 1\"\n % (for_date, instr.Oid(), spec.Oid()), '')",
"def create(self, name: str, meta: Dict = {}, type: str = \"timeseries\", **kwargs):\n return super()._create(\n {\"name\": name, \"type\": type, \"meta\": meta, **kwargs},\n f=lambda x: registry.getObject(x, self.session),\n )",
"def to_timeseries(self, dataset_name, light=False):\n timeseries = tokio.timeseries.TimeSeries()\n timeseries.dataset_name = dataset_name\n\n try:\n dataset = self[dataset_name]\n except KeyError:\n # can't attach because dataset doesn't exist; pass this back to caller so it can init\n return None\n\n timeseries.dataset = dataset if light else dataset[:, :]\n\n # load and decode version of dataset and file schema\n timeseries.global_version = self['/'].attrs.get('version')\n timeseries.version = self.get_version(dataset_name)\n if isinstance(timeseries.version, bytes):\n timeseries.version = timeseries.version.decode()\n\n # copy columns into memory\n columns = self.get_columns(dataset_name)\n timeseries.set_columns(columns)\n\n # copy metadata into memory\n for key, value in dataset.attrs.items():\n if isinstance(value, bytes):\n timeseries.dataset_metadata[key] = value.decode()\n else:\n timeseries.dataset_metadata[key] = value\n for key, value in dataset.parent.attrs.items():\n if isinstance(value, bytes):\n timeseries.group_metadata[key] = value.decode()\n else:\n timeseries.group_metadata[key] = value\n\n timeseries.timestamp_key = get_timestamps_key(self, dataset_name)\n timeseries.timestamps = self[timeseries.timestamp_key]\n timeseries.timestamps = timeseries.timestamps if light else timeseries.timestamps[:]\n\n timeseries.timestep = timeseries.timestamps[1] - timeseries.timestamps[0]\n return timeseries",
"def _create_time_series_data(\n self, run_name: str, tag_name: str, metadata: tf.compat.v1.SummaryMetadata\n ) -> tensorboard_data.TimeSeriesData:\n time_series_resource_name = (\n self._one_platform_resource_manager.get_time_series_resource_name(\n run_name,\n tag_name,\n lambda: tensorboard_time_series.TensorboardTimeSeries(\n display_name=tag_name,\n value_type=self._value_type,\n plugin_name=metadata.plugin_data.plugin_name,\n plugin_data=metadata.plugin_data.content,\n ),\n )\n )\n\n time_series_data_proto = tensorboard_data.TimeSeriesData(\n tensorboard_time_series_id=time_series_resource_name.split(\"/\")[-1],\n value_type=self._value_type,\n )\n\n self._byte_budget_manager.add_time_series(time_series_data_proto)\n self._run_to_tag_to_time_series_data[run_name][\n tag_name\n ] = time_series_data_proto\n return time_series_data_proto",
"def read_field(self, fieldname):\n # special implementation case for time field which is not\n # available as a variable in Soprano files\n if fieldname != 'time':\n return NCFile.read_field(self, fieldname)\n else:\n # create a field for time\n variable = Variable(\n shortname=fieldname,\n description='acquisition time of image',\n authority=self.get_naming_authority(),\n standardname='time'\n )\n field = Field(\n variable,\n collections.OrderedDict([('time', 1)]),\n datatype=numpy.dtype(numpy.int64),\n units='seconds since 1981-01-01 00:00:00'\n )\n field.attach_storage(self.get_field_handler(fieldname))\n return field",
"def model():\n return TimeSeriesMultiReg()",
"def index_tseries_single(fname,reanalyze=False,genIndex=True):\n if not os.path.exists(fname+\"/RoiSet.zip\"):\n print(fname,\"<-- NEEDS ROI!!!!!\")\n if not os.path.exists(fname+\"/experiment.txt\"):\n print(fname,\"<-- NEEDS EXPERIMENT TEXT FILE!!!!!\")\n if os.path.exists(fname+\"/SWH2P/index.html\"):\n print(fname,\"<-- already indexed\")\n indexNeeded=False\n else:\n print(fname,\"<-- needs indexing\")\n indexNeeded=True\n if indexNeeded or reanalyze:\n print(fname,\"<-- analyzing\")\n TS=TSeries(fname)\n TS.autoAnalyze()\n if genIndex:\n index_indexes(os.path.dirname(fname))",
"def retrieve_timeseries(start_time, end_time, channel_name, IFO, frame_type):\n\td = pylal.frutils.AutoqueryingFrameCache(frametype=frame_type, scratchdir=None)\n\tdata = d.fetch(channel_name, start_time, end_time)\n\t\n\ttime_series = {\n\t\t'waveform': data.A,\n\t\t'dt' : data.metadata.dt,\n\t\t'fs' : 1.0/data.metadata.dt,\n\t}\n\treturn time_series",
"def entries_from_goes_ts_file2(file, default_waveunit=None):\n\n headers = fits.get_header(file)\n if isinstance(file, (str, six.text_type)):\n filename = file\n else:\n filename = getattr(file, 'name', None)\n\n statinfo = os.stat(file)\n #print('a header')\n entry = DatabaseEntry(path=filename)\n size = statinfo.st_size\n\n # Add/tweak start/end entries for GOES\n if headers[0].get('TELESCOP','') != '':\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'\n entry.instrument = headers[0]['TELESCOP']\n if (headers[0].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[0]['DATE-OBS'])\n elif (headers[1].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[1]['DATE-OBS'])\n\n if (headers[0].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[0]['DATE-END'])\n elif (headers[1].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[1]['DATE-END'])\n else:\n end_time = start_time + timedelta(days=1,seconds=-1)\n\n # Add these to the entry\n observation_time_start = start_time\n observation_time_end = end_time\n\n wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n metadata = MetaDict(headers[1])\n #entry.tags = sunpy.database.attrs.Tag('raw')\n\n entry = DatabaseEntry(observation_time_start=start_time,\n observation_time_end = end_time,\n instrument='EIT',\n wavemin=wavemin,\n wavemax=wavemax,\n metadata=metadata,\n size=size)\n\n return entry",
"def getTs(self):\n\n if not self.ts:\n raise ValueError('No time series file given.')\n\n data = self.ts.getData()\n # remove \"Diff\" from column headers\n columnHeader = [title.split('Diff')[0] for title in data.columns]\n data.columns = columnHeader\n return data",
"def get_spectrum_data():\n from resistics.spectra.data import SpectrumData\n import numpy as np\n\n # add some data\n startTime = \"2020-01-01 00:00:00.000000\"\n stopTime = \"2020-01-01 00:00:00.062500\"\n data = {}\n data[\"Ex\"] = np.array([1 + 3j, -2 + 5j, 7 - 6j, 3 + 2j, 4 + 8j])\n data[\"Ey\"] = np.array([12 - 4j, -6 + 2j, 2 + 6j, -4 - 2j, -6 - 6j])\n data[\"Hx\"] = np.array([-3 + 3j, -11 + 7j, 4 - 1j, 1 + 9j, 2 + 2j])\n data[\"Hy\"] = np.array([2 + 9j, 9 + 1j, 8 + 8j, 6 + 2j, 5 + 2j])\n specData = SpectrumData(8, 5, 128, startTime, stopTime, data)\n evalfreq = np.array([24, 40])\n return specData, evalfreq",
"def createTDIPSurvey(self, fname_obs, fname_sim, **kwargs):\n return ValueError('Not yet implemented')",
"def _inmate_record_get_or_create(self):\n raise NotImplementedError('_inmate_record_get_or_create needs to be implemented with the new format')",
"def _get_timeseries_class():\n global _timeseries_class\n if not _timeseries_class:\n from energyquantified.data import Timeseries\n _timeseries_class = Timeseries\n return _timeseries_class",
"def SwigTimeSeries_from_DetData(DetData):\n\n TimeSeries = lal.CreateREAL8TimeSeries('timeseries', DetData.epoch, 0,\n DetData.td_signal.delta_t, lal.StrainUnit, len(DetData.td_signal))\n\n TimeSeries.data.data = np.copy(DetData.td_signal.data)\n \n return TimeSeries",
"def load_timeseries(timeseries_file, ts=\"roi\"):\n if (ts == \"roi\") or (ts == \"voxel\"):\n timeseries = np.load(timeseries_file)[\"roi\"]\n return timeseries\n else:\n print(\n \"You have not selected a valid timeseries type.\"\n + \"options are ts='roi' or ts='voxel'.\"\n )\n pass",
"def create(records):\n version = '1.0.0'\n\n iversion = [int(x) for x in version.split('.')]\n if iversion[1] > 0 or iversion[2] > 0:\n raise IOError(\"SEF versions > 0.0 are not supported\")\n\n latitude = 42.331\n longitude = -83.046\n altitude = 'NA'\n\n header = {\n 'SEF': version, 'ID': 'Detroit_Anthon', 'Name': 'Detroit, MI',\n 'Lat': latitude, 'Lon': longitude, 'Alt': altitude, 'Source': 'C3S-DRS',\n 'Link': '', 'Vbl': 'ta', 'Stat': 'point',\n 'Units': 'C', 'Meta': 'Observer=George Christian Anthon',\n }\n\n index_temperatures = 0\n index_times = 0\n\n time_offset = longitude * 12 / 180\n\n temp_dict = defaultdict(list)\n\n temperatures = []\n\n times = [datetime.time(7, 0), datetime.time(12, 0), datetime.time(20, 0)]\n original_time = [\"7:00AM\", \"12:00PM\", \"20:00PM\"]\n\n for index in range(len(records)):\n temperatures.append(records[index][datetime.time(7, 0)])\n temperatures.append(records[index][datetime.time(12, 0)])\n temperatures.append(records[index][datetime.time(20, 0)])\n for time in original_time:\n if isinstance(temperatures[index_temperatures], str):\n value = 'NA'\n else:\n value = round(((float(temperatures[index_temperatures]) - 32) * 5 / 9), 1)\n\n date = str(records[index]['Year']) \\\n + \"-\" \\\n + str(records[index]['Month']) \\\n + \"-\" + str(records[index]['Day']) \\\n + \" \" + str(times[index_times])\n\n date_time = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\n\n utc = date_time - datetime.timedelta(hours=time_offset)\n\n year = str(utc)[:4]\n month = str(utc)[5:7]\n day = str(utc)[8:10]\n hour = str(utc)[11:13]\n minutes = str(utc)[14:16]\n\n data_dict = {\n 'Data': pd.DataFrame({\n 'Year': year,\n 'Month': month,\n 'Day': day,\n 'Hour': hour,\n 'Minute': minutes,\n 'Period': 0,\n 'Value': value,\n 'Meta': \"orig=\" + str(temperatures[index_temperatures])\n + 'F' + \"|orig.time=\" + str(time)\n + \"|orig.date=\" + str(records[index]['Year']) + '-' + str(records[index]['Month'])\n + '-' + str(records[index]['Day'])\n\n }, index=[0])\n }\n temp_dict['Data'].append(data_dict['Data'])\n\n index_times += 1\n if index_times > 2:\n index_times = 0\n\n index_temperatures += 1\n\n header.update(temp_dict)\n\n return header",
"def get_simplespecphot(self, verbose = False):\n\n if hasattr(self, 'lcfit') and hasattr(self, 'spec'):\n # if verbose: print(\"Foo\")\n\n # try:\n # # self.simplespecphot = LCfitClass()\n # self.simplespecphot = PhotometryClass()\n #\n # lenstring = np.nanmax([len(i) for i in self.lcfit.data_filters.keys()]) ## object dtype is slow\n # self.simplespecphot.phot = Table(names = ('MJD', 'flux', 'flux_err', 'filter'),\n # dtype = [float, float, float, '|S'+str(lenstring)])\n #\n # for i, spectrum in enumerate(self.spec):\n #\n # for filter_name in self.spec[spectrum]._overlapping_filter_list:\n # if verbose: print(i, spectrum, filter_name)\n #\n # mjd = self.spec[spectrum].mjd_obs\n # flux = self.lcfit.spline[filter_name](mjd)\n # flux_err = self.lcfit.spline[filter_name + \"_err\"](mjd)\n # newrow = {'MJD': mjd, 'flux': flux, 'flux_err': flux_err, 'filter':filter_name}\n # self.simplespecphot.phot.add_row([mjd, flux, flux_err, filter_name])\n #\n # self.simplespecphot.unpack()\n # except:\n # warnings.warn(\"simplespecphot failed\")\n\n\n # self.simplespecphot = LCfitClass()\n self.simplespecphot = PhotometryClass()\n\n lenstring = np.nanmax([len(i) for i in self.lcfit.data_filters.keys()]) ## object dtype is slow\n # self.simplespecphot.phot = Table(names=('MJD', 'flux', 'flux_err', 'filter'),\n # dtype=[float, float, float, '|S' + str(lenstring)])\n\n mjd_list = []\n flux_list = []\n flux_err_list = []\n filter_list = []\n\n for i, spectrum in enumerate(self.spec):\n\n for filter_name in self.spec[spectrum]._overlapping_filter_list:\n if verbose: print(i, spectrum, filter_name, type(filter_name))\n\n mjd = self.spec[spectrum].mjd_obs\n flux = self.lcfit.spline[filter_name](mjd)\n flux_err = self.lcfit.spline[filter_name + \"_err\"](mjd)\n # newrow = {'MJD': mjd, 'flux': flux, 'flux_err': flux_err, 'filter': filter_name}\n # if i == 0:\n # self.simplespecphot.phot = Table(newrow)\n # else:\n # self.simplespecphot.phot.add_row([mjd, flux, flux_err, filter_name])\n\n mjd_list.append(mjd)\n flux_list.append(flux)\n flux_err_list.append(flux_err)\n filter_list.append(filter_name)\n\n self.simplespecphot.phot = Table((mjd_list, flux_list, flux_err_list, filter_list), names=('MJD', 'flux', 'flux_err', 'filter'))\n\n self.simplespecphot.unpack(verbose=verbose)\n\n pass",
"def spec(self):\n if self._spec is not None:\n return(self._spec)\n\n if self._fid is not None:\n self._spec = np.fft.fftshift(np.fft.fft(self._fid))\n return(self._spec)\n\n return(None)",
"def dataseries(self, city, field):\r\n df = self.dframe(city)\r\n indices = self.time_indices(df)\r\n data = self.field_numpy(city, field)\r\n return DataSeries(city, data, indices)",
"def TimeSeries(*args, **kwargs):\n if args or kwargs:\n underride(kwargs, dtype=float)\n series = pd.Series(*args, **kwargs)\n else:\n series = pd.Series([], dtype=float)\n\n series.index.name = 'Time'\n if 'name' not in kwargs:\n series.name = 'Quantity'\n return series",
"def createTimeLapseSurvey(self, fnames_obs, fnames_sim):\n return ValueError('Not yet implemented')",
"def fake_data(sample_rate=512,psd_segment_length=60,nsegs=16):\n epoch = 1153742417.0\n ts_data = numpy.random.normal(0,1,sample_rate*psd_segment_length*nsegs)\n ts_data = types.TimeSeries(ts_data,delta_t=1.0/sample_rate,epoch=epoch)\n return ts_data",
"def retrieve_data_timeseries(hfile, setname):\n dset = hfile[setname]\n sample_rate = dset.attrs[\"SamplingRate(Hz)\"]\n gps_epoch = construct_utc_from_metadata(dset.attrs[\"Date\"], dset.attrs[\"t0\"])\n data = retrieve_channel_data(hfile, setname)\n ts_data = TimeSeries(data, sample_rate=sample_rate, epoch=gps_epoch)\n return ts_data",
"def read_smet(path, var):\n\n # Load .smet file as a Pandas data frame\n df = pd.read_csv(path)\n\n # Determine indices for data retrieval\n bump = 2\n\n fields_row = np.where(df[df.columns[0]].str.startswith(\"fields\"))[0][0] + bump\n\n data_row = np.where(df[df.columns[0]] == '[DATA]')[0][0] + bump\n\n fields = np.loadtxt(path, skiprows=fields_row - 1, max_rows=1, dtype='str')\n\n data_col = np.where(fields == var)[0][0] - bump\n\n # Creates pandas data frame\n time = np.loadtxt(path, skiprows=data_row, usecols=0, dtype = 'str')\n\n time = pd.to_datetime(time, format='%Y-%m-%dT%H:%M:%S')\n\n data = np.loadtxt(path, skiprows=data_row, usecols=data_col)\n\n ts = pd.DataFrame(data, index=time)\n\n # Set no data values to nan\n ts[ts == -999] = np.nan\n\n # Return time series as Pandas data frame\n return ts",
"def init_from_data(calc_id,\n i_ts,\n field_name,\n xx=None,\n fitting_type='pl',\n nk_plot=20,\n fig_param=None):\n manip=tdc_FFT_Manip(fig_param)\n manip.read_from_data(calc_id,\n i_ts,\n field_name,\n xx=xx,\n fitting_type=fitting_type,\n nk_plot=nk_plot)\n return manip",
"def _get_model_df(model_path_or_tfs):\n if isinstance(model_path_or_tfs, basestring):\n LOG.debug(\"Creating TwissOptics from '{:s}'\".format(model_path_or_tfs))\n df = tfs.read_tfs(model_path_or_tfs, index=\"NAME\")\n else:\n LOG.debug(\"Creating TwissOptics from input DataFrame\")\n df = model_path_or_tfs\n if (len(df.index.values) == 0) or not isinstance(df.index.values[0], basestring):\n raise IndexError(\"Index of DataFrame needs to be the element names.\"\n \"This does not seem to be the case.\")\n return df",
"def read_timeseries(filename, names=None, only_wells=None):\n if only_wells is not None:\n assert isinstance(only_wells, list), 'only_wells must be a list, or None'\n assert all(isinstance(well, str) for well in only_wells), \\\n 'only_wells must be a list of strings'\n with pd.HDFStore(filename, 'r') as f:\n if only_wells is None:\n series = f['timeseries_data']\n else:\n query_str = 'well_name in {}'.format(only_wells)\n series = f['timeseries_data'].query(query_str)\n if names is None:\n return series\n else:\n return series[names]",
"def get_ts_series(self):\n ts = pd.Series(dict(self.ts_list))\n ts.rename(self.col_names[1], inplace=True)\n ts.sort_index()\n\n return ts",
"def getSpecie(name):\n for spec in Species:\n if spec.name == name:\n return spec\n return None"
] | [
"0.5437413",
"0.53278315",
"0.5132824",
"0.4910915",
"0.48935473",
"0.48539826",
"0.48432764",
"0.48351282",
"0.4805524",
"0.47721493",
"0.47332403",
"0.4718104",
"0.47017023",
"0.4692835",
"0.4662472",
"0.46499807",
"0.4611805",
"0.46080402",
"0.4596876",
"0.4580391",
"0.4576826",
"0.4572463",
"0.4562627",
"0.4544188",
"0.4501777",
"0.44797572",
"0.44758382",
"0.44600248",
"0.4449315",
"0.44462395"
] | 0.7088663 | 0 |
Return an FTimeSeries object (i.e. an object containing a value from a time series) for the specified time series spec, date and instrument. | def get_time_series(for_date, instr, spec):
return acm.FTimeSeries.Select01("day = '%s' and recaddr = %i "
"and timeSeriesSpec = %i and runNo = 1"
% (for_date, instr.Oid(), spec.Oid()), '') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_time_series(fieldName, instr):\n spec = acm.FTimeSeriesSpec[fieldName]\n if not spec:\n spec = acm.FTimeSeriesSpec()\n spec.Description('%s PnL History' % instr)\n spec.FieldName(fieldName)\n spec.RecType(acm.EnumFromString('B92RecordType', 'Instrument'))\n spec.Commit()\n return spec",
"def get_spectrum_data():\n from resistics.spectra.data import SpectrumData\n import numpy as np\n\n # add some data\n startTime = \"2020-01-01 00:00:00.000000\"\n stopTime = \"2020-01-01 00:00:00.062500\"\n data = {}\n data[\"Ex\"] = np.array([1 + 3j, -2 + 5j, 7 - 6j, 3 + 2j, 4 + 8j])\n data[\"Ey\"] = np.array([12 - 4j, -6 + 2j, 2 + 6j, -4 - 2j, -6 - 6j])\n data[\"Hx\"] = np.array([-3 + 3j, -11 + 7j, 4 - 1j, 1 + 9j, 2 + 2j])\n data[\"Hy\"] = np.array([2 + 9j, 9 + 1j, 8 + 8j, 6 + 2j, 5 + 2j])\n specData = SpectrumData(8, 5, 128, startTime, stopTime, data)\n evalfreq = np.array([24, 40])\n return specData, evalfreq",
"def SwigTimeSeries_from_DetData(DetData):\n\n TimeSeries = lal.CreateREAL8TimeSeries('timeseries', DetData.epoch, 0,\n DetData.td_signal.delta_t, lal.StrainUnit, len(DetData.td_signal))\n\n TimeSeries.data.data = np.copy(DetData.td_signal.data)\n \n return TimeSeries",
"def retrieve_data_timeseries(hfile, setname):\n dset = hfile[setname]\n sample_rate = dset.attrs[\"SamplingRate(Hz)\"]\n gps_epoch = construct_utc_from_metadata(dset.attrs[\"Date\"], dset.attrs[\"t0\"])\n data = retrieve_channel_data(hfile, setname)\n ts_data = TimeSeries(data, sample_rate=sample_rate, epoch=gps_epoch)\n return ts_data",
"def model():\n return TimeSeriesMultiReg()",
"def get_time_series(rics: list, fields: list, start_date: str, end_date: str) -> pd.DataFrame:\n # Check date formats are YYYY-MM-DD.\n try:\n dt.datetime.strptime(start_date, '%Y-%m-%d')\n except ValueError:\n raise ValueError('\"start_date\" has incorrect format.')\n try:\n dt.datetime.strptime(end_date, '%Y-%m-%d')\n except ValueError:\n raise ValueError('\"end_date\" has incorrect format.')\n\n data = ek.get_timeseries(rics, fields, start_date=start_date, end_date=end_date)\n return data",
"def from_serial_instrument(self, instrument):\n return FunctionGenerator(ser=instrument._ser)",
"def retrieve_timeseries(start_time, end_time, channel_name, IFO, frame_type):\n\td = pylal.frutils.AutoqueryingFrameCache(frametype=frame_type, scratchdir=None)\n\tdata = d.fetch(channel_name, start_time, end_time)\n\t\n\ttime_series = {\n\t\t'waveform': data.A,\n\t\t'dt' : data.metadata.dt,\n\t\t'fs' : 1.0/data.metadata.dt,\n\t}\n\treturn time_series",
"def get_spectra(time_series, method=None):\r\n if method is None:\r\n method = {'this_method': 'welch'} # The default\r\n # If no choice of method was explicitly set, but other parameters were\r\n # passed, assume that the method is mlab:\r\n this_method = method.get('this_method', 'welch')\r\n\r\n if this_method == 'welch':\r\n NFFT = method.get('NFFT', default_nfft)\r\n Fs = method.get('Fs', 2 * np.pi)\r\n detrend = method.get('detrend', mlab.detrend_none)\r\n window = method.get('window', mlab.window_hanning)\r\n n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))\r\n\r\n # The length of the spectrum depends on how many sides are taken, which\r\n # depends on whether or not this is a complex object:\r\n if np.iscomplexobj(time_series):\r\n fxy_len = NFFT\r\n else:\r\n fxy_len = NFFT / 2.0 + 1\r\n\r\n # If there is only 1 channel in the time-series:\r\n if len(time_series.shape) == 1 or time_series.shape[0] == 1:\r\n temp, f = mlab.csd(time_series, time_series,\r\n NFFT, Fs, detrend, window, n_overlap,\r\n scale_by_freq=True)\r\n\r\n fxy = temp.squeeze() # the output of mlab.csd has a weird\r\n # shape\r\n else:\r\n fxy = np.zeros((time_series.shape[0],\r\n time_series.shape[0],\r\n fxy_len), dtype=complex) # Make sure it's complex\r\n\r\n for i in range(time_series.shape[0]):\r\n for j in range(i, time_series.shape[0]):\r\n #Notice funny indexing, in order to conform to the\r\n #conventions of the other methods:\r\n temp, f = mlab.csd(time_series[j], time_series[i],\r\n NFFT, Fs, detrend, window, n_overlap,\r\n scale_by_freq=True)\r\n\r\n fxy[i][j] = temp.squeeze() # the output of mlab.csd has a\r\n # weird shape\r\n elif this_method in ('multi_taper_csd', 'periodogram_csd'):\r\n # these methods should work with similar signatures\r\n mdict = method.copy()\r\n func = eval(mdict.pop('this_method'))\r\n freqs, fxy = func(time_series, **mdict)\r\n f = utils.circle_to_hz(freqs, mdict.get('Fs', 2 * np.pi))\r\n\r\n else:\r\n raise ValueError(\"Unknown method provided\")\r\n\r\n return f, fxy.squeeze()",
"def get_series(self, series_code: str, date: datetime):\n\n raise NotImplementedError",
"def _tseries_from_nifti_helper(coords, data, TR, filter, normalize, average):\r\n if coords is not None:\r\n out_data = np.asarray(data[coords[0], coords[1], coords[2]])\r\n else:\r\n out_data = data\r\n\r\n tseries = ts.TimeSeries(out_data, sampling_interval=TR)\r\n\r\n if filter is not None:\r\n if filter['method'] not in ('boxcar', 'fourier', 'fir', 'iir'):\r\n e_s = \"Filter method %s is not recognized\" % filter['method']\r\n raise ValueError(e_s)\r\n else:\r\n #Construct the key-word arguments to FilterAnalyzer:\r\n kwargs = dict(lb=filter.get('lb', 0),\r\n ub=filter.get('ub', None),\r\n boxcar_iterations=filter.get('boxcar_iterations', 2),\r\n filt_order=filter.get('filt_order', 64),\r\n gpass=filter.get('gpass', 1),\r\n gstop=filter.get('gstop', 60),\r\n iir_ftype=filter.get('iir_ftype', 'ellip'),\r\n fir_win=filter.get('fir_win', 'hamming'))\r\n\r\n F = tsa.FilterAnalyzer(tseries, **kwargs)\r\n\r\n if filter['method'] == 'boxcar':\r\n tseries = F.filtered_boxcar\r\n elif filter['method'] == 'fourier':\r\n tseries = F.filtered_fourier\r\n elif filter['method'] == 'fir':\r\n tseries = F.fir\r\n elif filter['method'] == 'iir':\r\n tseries = F.iir\r\n\r\n if normalize == 'percent':\r\n tseries = tsa.NormalizationAnalyzer(tseries).percent_change\r\n elif normalize == 'zscore':\r\n tseries = tsa.NormalizationAnalyzer(tseries).z_score\r\n\r\n if average:\r\n if coords is None:\r\n tseries.data = np.mean(np.reshape(tseries.data,\r\n (np.array(tseries.shape[:-1]).prod(),\r\n tseries.shape[-1])),0)\r\n else:\r\n tseries.data = np.mean(tseries.data, 0)\r\n\r\n return tseries",
"def time_series_daily(symbol: str, outputsize: str = 'compact') -> Tuple[pd.DataFrame, dict]:\n response = _fetch(symbol=symbol, function='TIME_SERIES_DAILY', outputsize=outputsize)\n\n response_dict = json.loads(response.content)\n\n df = pd.DataFrame.from_dict(response_dict[f'Time Series (Daily)'], orient='index', dtype=np.float64)\n df.index = pd.to_datetime(df.index)\n df = df.rename(columns=_string_map(df.columns))\n\n metadata = response_dict['Meta Data']\n _rename_dict_keys(metadata)\n\n metadata['begin_datetime'] = df.index.min()\n metadata['end_datetime'] = df.index.max()\n\n return df, metadata",
"def get_spectrum(self) -> pd.Series:\n short_time_spectrum = self.get_short_time_spectrum()\n raw_spectrum = short_time_spectrum.sum(axis=1)\n frequencies = librosa.fft_frequencies(\n sr=self.sample_rate,\n n_fft=self.n_fft\n )\n spectrum = pd.Series(raw_spectrum, frequencies)\n return spectrum",
"def __getitem__(self, index):\n return self._timeseriesData[index]",
"def retrieve_time_series(api, series_ID):\r\n #Retrieve Data By Series ID \r\n series_search = api.data_by_series(series=series_ID)\r\n ##Create a pandas dataframe from the retrieved time series\r\n df = pd.DataFrame(series_search)\r\n return df",
"def get_daily_data():\n class C:\n pass\n\n def get_ticker(ticker):\n vals = []\n\n datafile = cbook.get_sample_data('%s.csv' % ticker, asfileobj=False)\n\n lines = open(datafile).readlines()\n for line in lines[1:]:\n vals.append([float(val) for val in line.split(',')[1:]])\n\n M = array(vals)\n c = C()\n c.open = M[:, 0]\n c.high = M[:, 1]\n c.low = M[:, 2]\n c.close = M[:, 3]\n c.volume = M[:, 4]\n return c\n c1 = get_ticker('intc')\n c2 = get_ticker('msft')\n return c1, c2",
"def get_data_set(symbol, config):\n try:\n df = get_stock_historical_data(symbol, config)\n except:\n df = None\n else:\n # Makes it easier to use Volume as an indicator, like price\n df['Volume'] = df['Volume'].astype(np.float64)\n\n if config.get('split_date'):\n return {\n 'train': df[config['start_date']:config['split_date']],\n 'test': df[config['split_date']:config['end_date']]\n }\n else:\n return df",
"def test_Series():\n # create from hdr image files\n Series(hdf5, image_dir=data_dir,\n time_interval=time_interval, cachedir=cache)\n # loading from hdf5 file\n Series(hdf5, cachedir=cache)",
"def test_technical_indicator_sma_pandas(self, mock_request):\n ti = TechIndicators(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = \"http://www.alphavantage.co/query?function=SMA&symbol=MSFT&interval=15min&time_period=10&series_type=close&apikey=test\"\n path_file = self.get_file_from_url(\"mock_technical_indicator\")\n with open(path_file) as f:\n mock_request.get(url, text=f.read())\n data, _ = ti.get_sma(\"MSFT\", interval='15min',\n time_period=10, series_type='close')\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')",
"def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):\n jvm = df._sc._jvm\n jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD.timeSeriesRDDFromObservations( \\\n dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)\n return TimeSeriesRDD(None, None, jtsrdd, df._sc)",
"def __pos__(self):\n ts = self._fsm.get(self._id)\n return SMTimeSeries(ts._time, ts._value, self._fsm)",
"def values_in_time(obj, t, tau=None):\n\n if hasattr(obj, '__call__'):\n return obj(t, tau)\n\n if isinstance(obj, pd.Series) or isinstance(obj, pd.DataFrame):\n try:\n if isinstance(obj.index, pd.MultiIndex):\n return obj.loc[(t, tau)]\n else:\n return obj.loc[t]\n except KeyError:\n return obj\n\n return obj",
"def get_series(model, primary_field=None, extra_fields=[], **filters):\n # Put a slice on it so we get more than 10 (the default), but limit to 365.\n qs = (model.search().order_by('-date').filter(**filters)\n .values_dict('date', 'count', primary_field, *extra_fields))[:365]\n for val in qs:\n # Convert the datetimes to a date.\n date_ = date(*val['date'].timetuple()[:3])\n\n if primary_field:\n rv = dict(count=val[primary_field], date=date_, end=date_)\n else:\n rv = dict(count=val['count'], date=date_, end=date_)\n\n for extra_field in extra_fields:\n rv[extra_field] = val[extra_field]\n yield rv",
"def technicalIndicatorsDf(daily_data):\n o = daily_data['Open'].values\n c = daily_data['Close'].values\n h = daily_data['High'].values\n l = daily_data['Low'].values\n v = daily_data['Volume'].astype(float).values\n # define the technical analysis matrix\n\n # Most data series are normalized by their series' mean\n ta = {} #pd.DataFrame()\n ta['MA5'] = talib.MA(c, timeperiod=5)\n ta['MA10'] = talib.MA(c, timeperiod=10)\n ta['MA20'] = talib.MA(c, timeperiod=20)\n ta['MA60'] = talib.MA(c, timeperiod=60)\n ta['MA120'] = talib.MA(c, timeperiod=120)\n ta['MA5'] = talib.MA(v, timeperiod=5)\n ta['MA10'] = talib.MA(v, timeperiod=10)\n ta['MA20'] = talib.MA(v, timeperiod=20)\n ta['ADX'] = talib.ADX(h, l, c, timeperiod=14)\n ta['ADXR'] = talib.ADXR(h, l, c, timeperiod=14)\n ta['MACD'] = talib.MACD(c, fastperiod=12, slowperiod=26, signalperiod=9)[0]\n ta['RSI'] = talib.RSI(c, timeperiod=14)\n ta['BBANDS_U'] = talib.BBANDS(c, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)[0]\n ta['BBANDS_M'] = talib.BBANDS(c, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)[1]\n ta['BBANDS_L'] = talib.BBANDS(c, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)[2]\n ta['AD'] = talib.AD(h, l, c, v) \n ta['ATR'] = talib.ATR(h, l, c, timeperiod=14) \n ta['HT_DC'] = talib.HT_DCPERIOD(c) \n ta[\"High/Open\"] = h / o\n ta[\"Low/Open\"] = l / o\n ta[\"Close/Open\"] = c / o\n ta['Open'] = o\n ta['Close'] = c\n ta['High'] = h\n ta['Low'] = l\n ta['Volume'] = v\n\n # Normalized values\n ta['MA5-normalized'] = talib.MA(c, timeperiod=5) / np.nanmean(talib.MA(c, timeperiod=5))\n ta['MA10-normalized'] = talib.MA(c, timeperiod=10) / np.nanmean(talib.MA(c, timeperiod=10))\n ta['MA20-normalized'] = talib.MA(c, timeperiod=20) / np.nanmean(talib.MA(c, timeperiod=20))\n ta['MA60-normalized'] = talib.MA(c, timeperiod=60) / np.nanmean(talib.MA(c, timeperiod=60))\n ta['MA120-normalized'] = talib.MA(c, timeperiod=120) / np.nanmean(talib.MA(c, timeperiod=120))\n ta['MA5-normalized'] = talib.MA(v, timeperiod=5) / np.nanmean(talib.MA(v, timeperiod=5))\n ta['MA10-normalized'] = talib.MA(v, timeperiod=10) / np.nanmean(talib.MA(v, timeperiod=10))\n ta['MA20-normalized'] = talib.MA(v, timeperiod=20) / np.nanmean(talib.MA(v, timeperiod=20))\n ta['ADX-normalized'] = talib.ADX(h, l, c, timeperiod=14) / np.nanmean(talib.ADX(h, l, c, timeperiod=14))\n ta['ADXR-normalized'] = talib.ADXR(h, l, c, timeperiod=14) / np.nanmean(talib.ADXR(h, l, c, timeperiod=14))\n ta['MACD-normalized'] = talib.MACD(c, fastperiod=12, slowperiod=26, signalperiod=9)[0] / \\\n np.nanmean(talib.MACD(c, fastperiod=12, slowperiod=26, signalperiod=9)[0])\n ta['RSI-normalized'] = talib.RSI(c, timeperiod=14) / np.nanmean(talib.RSI(c, timeperiod=14))\n ta['BBANDS_U-normalized'] = talib.BBANDS(c, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)[0] / \\\n np.nanmean(talib.BBANDS(c, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)[0])\n ta['BBANDS_M-normalized'] = talib.BBANDS(c, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)[1] / \\\n np.nanmean(talib.BBANDS(c, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)[1])\n ta['BBANDS_L-normalized'] = talib.BBANDS(c, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)[2] / \\\n np.nanmean(talib.BBANDS(c, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)[2])\n ta['AD-normalized'] = talib.AD(h, l, c, v) / np.nanmean(talib.AD(h, l, c, v))\n ta['ATR-normalized'] = talib.ATR(h, l, c, timeperiod=14) / np.nanmean(talib.ATR(h, l, c, timeperiod=14))\n ta['HT_DC-normalized'] = talib.HT_DCPERIOD(c) / np.nanmean(talib.HT_DCPERIOD(c))\n return ta",
"def dataseries(self, city, field):\r\n df = self.dframe(city)\r\n indices = self.time_indices(df)\r\n data = self.field_numpy(city, field)\r\n return DataSeries(city, data, indices)",
"def mock_query_object(suvi_client):\n # Creating a Query Response Object\n start = '2019/05/25 00:50'\n end = '2019/05/25 00:52'\n wave = 94 * u.Angstrom\n obj = {\n 'Start Time': parse_time(start),\n 'End Time': parse_time(end),\n 'Instrument': 'SUVI',\n 'Physobs': 'flux',\n 'Source': 'GOES',\n 'Provider': 'NOAA',\n 'Level': '2',\n 'Wavelength': wave,\n 'url': ('https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites'\n '/goes/goes16/l2/data/suvi-l2-ci094/2019/05/25/'\n 'dr_suvi-l2-ci094_g16_s20190525T005200Z_e20190525T005600Z_v1-0-0.fits')\n }\n results = QueryResponse([obj], client=suvi_client)\n return results",
"def __init__(self, ts_df, time_format=\"%Y-%m-%d %H:%M:%S\", freq='D',\n fill_method='ffill',\n n_test=0, n_val=0,\n hyper_params=None,\n test='adf',\n trend=None,\n seasonal=False,\n seasonal_periods=1,\n **kwds):\n self._ts_df_cols = ['ds', 'y']\n\n self.ts_df = ts_df\n self.time_format = time_format\n self.freq = freq\n self.fill_method = fill_method.lower()\n self.n_test = int(n_test)\n self.n_val = int(n_val)\n self.transform = None\n self._boxcox_lmbda = None\n\n self._mode = ''\n\n self._train_dt = None\n self._test_dt = None\n self._val_dt = None\n\n self.model_fit = None\n self.fittedvalues = None\n self.residuals = None\n self.rmse = 0\n self._gs = tsa.GridSearchClass()\n self.hyper_params = hyper_params\n self.best_model = dict()\n\n \"\"\"\n self.rmse_test = 0\n self.rmse_val = 0\n \"\"\"\n\n self.upper_whisker_res = None\n self.lower_conf_int = None\n self.upper_conf_int = None\n\n self.forecast = None\n self.residuals_forecast = None\n\n self._res_decomp = None\n self._arr_seasonal = None\n self._arr_trend = None\n self._arr_baseline = None\n\n self._test = test\n self._trend = trend\n if self._trend is not None:\n self._trend = self._trend.lower()\n self._seasonal = seasonal\n if isinstance(self._seasonal, str):\n self._seasonal = self._seasonal.lower()\n self._seasonal_periods = seasonal_periods\n\n self._uvts_cls_logger = Logger('uvts_cls')\n\n UVariateTimeSeriesClass.assertions(self)\n # work with ts_df\n self.ts_df = self.ts_df.reset_index()\n self.ts_df.columns = self._ts_df_cols\n self.ts_df['y'] = self.ts_df['y'].apply(np.float64, errors='coerce')\n self.ts_df.set_index('ds', inplace=True)\n self._uvts_cls_logger.info(\n \"Received time series data of range: \" + str(min(self.ts_df.index)) + ' - ' + str(\n max(self.ts_df.index)) + \" and shape: \" + str(self.ts_df.shape))\n\n if not isinstance(self.ts_df.index, pd.DatetimeIndex):\n self._uvts_cls_logger.warning(\"Time conversion required...\")\n self.ts_df = self.ts_df.reset_index()\n try:\n self.ts_df['ds'] = self.ts_df['ds'].apply(\n lambda x: datetime.datetime.strptime(\n str(x).translate({ord('T'): ' ', ord('Z'): None})[:-1],\n self.time_format))\n except ValueError as e:\n self._uvts_cls_logger.warning(\"Zulu time conversion not successful: {}\".format(e))\n self._uvts_cls_logger.warning(\"Will try without assuming zulu time...\")\n try:\n self.ts_df['ds'] = self.ts_df['ds'].apply(\n lambda x: datetime.datetime.strptime(str(x), self.time_format))\n except ValueError as e:\n self._uvts_cls_logger.info(\"Time conversion not successful. Check your time_format: {}\".format(e))\n sys.exit(\"STOP\")\n else:\n self._uvts_cls_logger.info(\"Time conversion successful!\")\n else:\n self._uvts_cls_logger.info(\"Time conversion successful!\")\n # set index\n self.ts_df.set_index('ds', inplace=True)\n #\n self.ts_df.index = pd.to_datetime(self.ts_df.index)\n self.ts_df.sort_index(inplace=True)\n # resample\n self.ts_resample()\n UVariateTimeSeriesClass.assertions(self, post=True)\n #\n if self.n_val > len(self.ts_df) - self.n_test:\n self.n_val = len(self.ts_df) - self.n_test\n\n if self.n_test == 0 and self.n_val == 0:\n self._mode = 'forecast'\n elif self.n_test > 0:\n self._mode = 'test'\n elif self.n_test == 0 and self.n_val > 0:\n self._mode = 'validate'\n \n # delegate just for good programming style here\n super(UVariateTimeSeriesClass, self).__init__(**kwds)",
"def time_series_intraday(symbol: str, interval: str = '60min',\n outputsize: str = 'compact') -> Tuple[pd.DataFrame, dict]:\n response = _fetch(symbol=symbol, function='TIME_SERIES_INTRADAY', interval=interval,\n outputsize=outputsize)\n\n response_dict = json.loads(response.content)\n\n df = pd.DataFrame.from_dict(response_dict[f'Time Series ({interval})'], orient='index', dtype=np.float64)\n df.index = pd.to_datetime(df.index)\n df = df.rename(columns=_string_map(df.columns))\n\n metadata = response_dict['Meta Data']\n _rename_dict_keys(metadata)\n\n metadata['begin_datetime'] = df.index.min()\n metadata['end_datetime'] = df.index.max()\n\n return df, metadata",
"def get_series(self, series_id, observation_start=None, observation_end=None, **kwargs):\n url = \"https://api.stlouisfed.org/fred/series/observations?series_id=%s&api_key=%s\" % (series_id, self.api_key)\n from pandas import to_datetime, Series\n\n if observation_start is not None:\n observation_start = to_datetime(observation_start, errors='raise')\n url += '&observation_start=' + observation_start.strftime('%Y-%m-%d')\n if observation_end is not None:\n observation_end = to_datetime(observation_end, errors='raise')\n url += '&observation_end=' + observation_end.strftime('%Y-%m-%d')\n\n if kwargs is not None:\n url += '&' + urlencode(kwargs)\n\n root = self.__fetch_data(url)\n if root is None:\n raise ValueError('No data exists for series id: ' + series_id)\n data = {}\n for child in root.getchildren():\n val = child.get('value')\n if val == self.nan_char:\n val = float('NaN')\n else:\n val = float(val)\n data[self._parse(child.get('date'))] = val\n return Series(data)",
"def test_time_series_intraday_pandas(self, mock_request):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas')\n url = \"http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json\"\n path_file = self.get_file_from_url(\"mock_time_series\")\n with open(path_file) as f:\n mock_request.get(url, text=f.read())\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')"
] | [
"0.61378187",
"0.5532435",
"0.53222436",
"0.53155756",
"0.5268903",
"0.5263808",
"0.52208465",
"0.51617223",
"0.51468915",
"0.51186866",
"0.50985974",
"0.505968",
"0.50076693",
"0.5005106",
"0.49813068",
"0.49161428",
"0.49107486",
"0.48923215",
"0.4852867",
"0.48494852",
"0.48436815",
"0.48276287",
"0.48273808",
"0.48270717",
"0.48202094",
"0.48180073",
"0.4813775",
"0.47989485",
"0.4784878",
"0.4772578"
] | 0.72298265 | 0 |
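Taken together, the two records above provide a find-or-create helper for the time series spec and a point lookup for a given day. A hedged usage sketch follows; it only calls the two functions defined in those records, while `instr` (an acm instrument object), `run_date` (a date in the session's format), the field name 'PS_PnLHistory' and the wrapper name `store_lookup_example` are all placeholders introduced here for illustration:

def store_lookup_example(instr, run_date):
    # Find or create the spec linked to this instrument's PnL history field.
    spec = create_time_series('PS_PnLHistory', instr)
    # Return the stored FTimeSeries point for that day, if any.
    return get_time_series(run_date, instr, spec)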
Return the loan cash flow for a specific date from the specified leg. The loan cash flow is identified by its PS_DepositType add info being equal to the specified type. | def GetCallAccCashFlow(leg, run_date, cf_type):
query = acm.CreateFASQLQuery('FCashFlow', 'AND')
query.AddAttrNode('Leg.Oid', 'EQUAL', leg.Oid())
query.AddAttrNode('PayDate', 'EQUAL', run_date)
query.AddAttrNode('CashFlowType', 'EQUAL', "Fixed Amount")
query.AddAttrNode('AdditionalInfo.PS_DepositType', 'EQUAL', cf_type)
cashFlows = query.Select()
if cashFlows:
return cashFlows[0]
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cashflows_less_than(leg, run_date, cf_type):\n query = acm.CreateFASQLQuery('FCashFlow', 'AND')\n query.AddAttrNode('Leg.Oid', 'EQUAL', leg.Oid())\n query.AddAttrNode('PayDate', 'LESS', run_date)\n query.AddAttrNode('CashFlowType', 'EQUAL', \"Fixed Amount\")\n query.AddAttrNode('AdditionalInfo.PS_DepositType', 'EQUAL', cf_type)\n cashFlows = query.Select()\n return cashFlows",
"def _get_pay_calendars(self, leg):\n pay_calendars = [\n leg.PayCalendar(),\n leg.Pay2Calendar(),\n leg.Pay3Calendar(),\n leg.Pay4Calendar(),\n leg.Pay5Calendar()\n ]\n return [cal for cal in pay_calendars if cal is not None]",
"def generate_cashflows(path_account, isin_cash):\n # Read and parse\n df_account = pd.read_excel(path_account)\n df_account = df_account.rename(\n columns={\n \"Variación\": \"ccyDelta\",\n \"Unnamed: 8\": \"delta\",\n \"Saldo\": \"ccyAmount\",\n \"Unnamed: 10\": \"amount\",\n \"Fecha valor\": \"date\",\n }\n )\n\n df_account[\"date\"] = pd.to_datetime(df_account[\"date\"], dayfirst=True)\n df_account = df_account.drop(\n columns=[\"Fecha\", \"Hora\", \"Producto\", \"Tipo\", \"ID Orden\"]\n )\n\n # Generate changes in position\n deltas_df = df_account.groupby([\"date\", \"ISIN\", \"amount\"])[\"delta\"].sum()\n deltas_df = pd.DataFrame(deltas_df).reset_index()\n\n # Compute cashflows\n cashflows_df = deltas_df.pivot_table(\n index=\"date\", columns=\"ISIN\", values=\"delta\", aggfunc=\"sum\"\n )\n\n # Compute external cashflows\n cashflows_external_df = df_account.loc[\n df_account[\"Descripción\"].isin([\"Ingreso\", \"Retirada\"])\n ]\n\n # For some reason DEGIRO has the cashflows mark to market shifted by one.\n # and my guess is that unless there is a position transaction, they dont\n # write cash mark to markets on Fridays ...\n cashflows_df = cashflows_df.asfreq(\"D\")\n cashflows_df[isin_cash] = cashflows_df[isin_cash].shift()\n cashflows_df = cashflows_df.asfreq(\"B\")\n\n return cashflows_df, cashflows_external_df",
"def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh",
"def cash_flow_response_to_df(\n portfolio_cash_flows_response: lusid.ResourceListOfPortfolioCashFlow,\n sum_by_date: bool = True\n) -> pd.DataFrame:\n\n def select_cols(\n df: pd.DataFrame,\n filter_col: str,\n filter_value: str,\n cols_to_keep: list,\n ) -> pd.DataFrame:\n return df[df[filter_col] == filter_value][cols_to_keep]\n\n # Extract cash payment data from cash flow response\n cash_flows_dict = portfolio_cash_flows_response.to_dict()\n cash_flow_data = pd.json_normalize(cash_flows_dict[\"values\"])\n\n # Split pays and receives and handle -ve signage for pay outflows\n pay_data = select_cols(\n cash_flow_data,\n \"diagnostics.PayReceive\",\n \"Pay\",[\"payment_date\", \"amount\", \"source_transaction_id\"]\n )\n pay_data[\"amount\"] = pay_data[\"amount\"].apply(lambda x: -1 * x)\n pay_data.rename(columns={\"amount\": \"payAmount\"}, inplace=True)\n rec_data = select_cols(\n cash_flow_data,\n \"diagnostics.PayReceive\",\n \"Receive\",\n [\"payment_date\", \"amount\", \"source_transaction_id\"]\n )\n rec_data.rename(columns={\"amount\": \"receiveAmount\"}, inplace=True)\n\n # Merge on payment date and ignore join dupes\n merged_df = pay_data.merge(rec_data, on=[\"payment_date\", \"source_transaction_id\"])\n merged_df.drop_duplicates(subset=[\"payment_date\", \"source_transaction_id\"], keep=\"first\", inplace=True,\n ignore_index=True)\n\n # Add net flows and reduce index to dates\n merged_df['netAmount'] = merged_df['payAmount'] + merged_df['receiveAmount']\n merged_df[\"payment_date\"] = merged_df[\"payment_date\"].apply(lambda x: x.date())\n merged_df.set_index(keys=\"payment_date\", inplace=True)\n\n # Aggregate sub-holdings\n if sum_by_date:\n merged_df = merged_df.groupby(merged_df.index).sum()\n\n return merged_df",
"def payments(self, loan):\n self.currency_interest = \"XBT\"\n \n \"\"\"The lender agrees to provide the borrower half of the loan amount\n on the initial loan on the initial date\"\"\"\n loan.fund(on=self.initial_loan_date,\n amount=self.total_loan_amount * \\\n Decimal(0.5))\n \"\"\"The lender agrees to pledge the remaining loan amount toward\n the kickstarter campaign of the borrower.\"\"\"\n loan.fund(on=self.kickstarter_payment_date,\n amount=self.total_loan_amount * \\\n Decimal(0.5))\n \"\"\" Standard payment schedule - The borrower intends to\n payback period will be separated into 8 installments and\n completed in 8 months. The payback will begin in the 5th\n month. However, unless the special conditions are triggered,\n the borrower is required to only pay the interest on the loan\n until the final payment date.\"\"\"\n\n \"\"\" Special payment schedule - If First campaign funded over\n USD 65,000, the borrower must pay back entire loan including\n one year interest within the two months after Crowd Funding\n Platform pay the fund.\"\"\"\n\n \"\"\" If First campaign funded over USD 58,000, will pay back 4\n Installment in advance, after Crowd Funding Platform pay the\n fund. The rest of the loan will keep paying followed the\n standard schedule until all loan including interest is paid\n back.\"\"\"\n\n if (self.kickstarter_revenue > Money(65000, \"USD\")):\n payment_date = self.kickstarter_payment_date + \\\n relativedelta(months=2)\n loan.add_to_balance(on=payment_date,\n amount = loan.interest(payment_date,\n self.final_payment_date,\n loan.remaining_balance()))\n loan.payment(on=payment_date,\n amount = loan.remaining_balance())\n else:\n if (self.kickstarter_revenue > Money(58000, \"USD\")):\n payment_date = self.kickstarter_payment_date + \\\n relativedelta(months=2)\n loan.payment(on=payment_date,\n amount = lambda : loan.remaining_principal()() * Decimal(0.5))\n start_payment_date = self.initial_loan_date + \\\n relativedelta(months=4)\n loan.amortize(on=start_payment_date,\n amount = loan.remaining_balance(),\n payments=8,\n interval=relativedelta(months=1))\n \"\"\"The borrower agrees to pay back the any remaining principal\n and accrued interest one year after the loan is issued.\"\"\"\n loan.payment(on=self.final_payment_date,\n amount= loan.remaining_balance())",
"def pnl(qbo_session, period = \"YEARLY\", start_date=\"first\", end_date=\"last\",\n **kwargs):\n\n pnl_account_types = [\n \n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n \n ]\n\n \n\n # go through the accounts, collecting a list of those that are \n # pnl accounts\n\n relevant_accounts = []\n\n coa = qbo_session.chart_of_accounts()\n\n AccountType_i = coa[0].index(\"AccountType\")\n fqa_i = coa[0].index(\"FullyQualifiedName\")\n\n for a in coa:\n\n AccountType = a[AccountType_i]\n\n if AccountType in pnl_account_types:\n\n relevant_accounts.append(a[fqa_i])\n \n # now collect the ledger_lines that are even relevant to the time\n # period and pnl accounts (and we'll handle presentation last)\n\n relevant_activity = {} #{account:[relevant lines]}\n\n all_ledger_lines = qbo_session.ledger_lines(None, None, None, True,\n **kwargs)\n\n headers = all_ledger_lines[0]\n\n account_i = headers.index(\"account\") \n amount_i = headers.index(\"amount\")\n date_i = headers.index(\"TxnDate\")\n \n earliest_date = datetime(2100,1,1)\n latest_date = datetime(1900,1,1)\n\n for line in all_ledger_lines[1:]:\n\n account = line[account_i]\n line_date = line[date_i]\n\n #first apply the date filter!\n if not start_date == \"first\" and line_date < start_date:\n continue\n \n if not end_date == \"last\" and line_date > end_date:\n continue\n \n #if it's made the cut, we can update the report date bounds\n earliest_date = min(line_date,earliest_date)\n latest_date = max(line_date,latest_date)\n\n #then apply the account filter!\n\n if not account in relevant_activity:\n #then let's confirm that its account type is a pnl one\n \n if not account in relevant_accounts:\n \n continue\n\n else:\n relevant_activity[account] = []\n\n relevant_activity[account].append(line)\n\n #now let's do presentation\n #TODO -- incorporate pandas tables...do only minimal work on it until then\n\n pnl_lines = []\n\n if period == \"YEARLY\":\n\n report_start_date = datetime(earliest_date.year,1,1)\n report_end_date = datetime(latest_date.year,12,31)\n\n period_start_dates = list(rrule(YEARLY, bymonth=1, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonth=12, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date))\n\n elif period == \"MONTHLY\":\n\n report_start_date = datetime(earliest_date.year,\n earliest_date.month,\n 1)\n report_end_date = datetime(latest_date.year,\n latest_date.month,\n calendar.monthrange(latest_date.year,\n latest_date.month)[1])\n\n period_start_dates = list(rrule(MONTHLY, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date)) \n\n header_1 = [\"\", \"Period Start -->\"] + period_start_dates\n header_2 = [\"Account\", \"Period End -->\"] + period_end_dates\n\n pnl_lines.append(header_1)\n pnl_lines.append(header_2)\n\n \"\"\"Clearly, there's a way to do this with only one pass of the data...\n let's get that right in the first re-write...probably with pandas\"\"\"\n\n #now let's fill up the pnl_lines with what we know to be the relevant data\n #for now, we'll rely on the knowledge that the data is coming to us in\n #date order, but that should be fixed too...\n\n for account in relevant_activity:\n\n account_row = [account, \"\"] #one value per period \n\n current_period_index = 0 #primitive counter, yes!\n this_period_total = 0 #this will be this period's 
total\n\n for line in relevant_activity[account]:\n \n line_amount = line[amount_i]\n line_date = line[date_i] \n\n if line_date > period_end_dates[current_period_index]:\n\n account_row.append(this_period_total)\n this_period_total = line_amount\n current_period_index +=1\n\n else:\n \n this_period_total = round(this_period_total +\n line_amount, 2)\n\n \"\"\"super sloppy...\"\"\"\n account_row.append(this_period_total) #for the last period\n current_period_index +=1\n\n while current_period_index < len(period_end_dates):\n account_row.append(0)\n current_period_index +=1\n\n pnl_lines.append(account_row)\n\n return pnl_lines",
"def calcDecision(self):\n final_decision = []\n #print(self.security.columns.values)\n #print(self.indicators_names)\n if not self.indicators_names: #Checks if empty\n print(\"Debes cargar los indicadores primero\".encode('utf-8'))\n for day in self.security.index.values:\n decision = pd.Series([])\n for indicator in self.indicators_names:\n decision = decision.append(pd.Series(self.security[indicator+'_decision'].loc[day]), ignore_index=True)\n decision.dropna()\n if decision.empty:\n final_decision.append(None)\n else:\n sell_count = len(decision[decision == TransactionType.SELL])\n buy_count = len(decision[decision == TransactionType.BUY])\n if sell_count > buy_count:\n final_decision.append(TransactionType.SELL)\n elif sell_count < buy_count:\n final_decision.append(TransactionType.BUY)\n elif sell_count == buy_count:\n final_decision.append(final_decision[-1])\n self.security['FinalDecision'] = final_decision\n self.last_decision = self.security['FinalDecision'].iloc[-1]",
"def _get_loan_payment(self, payment_type='2'):\n loan_payments = {\n \"2\":\n {},\n \"3\":\n loan\n }\n return loan_payments[payment_type]",
"def request_loan(self, loan_type):\n payload = {'type': loan_type}\n r = requests.post(self.base_url + f'/users/{self.username}/loans', headers=self.auth_header, params=payload)\n return r.text",
"def loan_data():\n return pd.read_csv(data_path / \"credit_data.csv\")",
"def get_flows(self, num_flows_per_entry):\n flows = []\n for tenant in self._tenants:\n for contract in tenant.get_children(only_class=Contract):\n providing_epgs = contract.get_all_providing_epgs()\n consuming_epgs = contract.get_all_consuming_epgs()\n for providing_epg in providing_epgs:\n vlan_ifs = providing_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n providing_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n providing_phys_if = phys_ifs[0].name\n for consuming_epg in consuming_epgs:\n vlan_ifs = consuming_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n consuming_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n consuming_phys_if = phys_ifs[0].name\n if providing_vlan == consuming_vlan and providing_phys_if == consuming_phys_if:\n # Skip this case since traffic would be switched outside fabric\n continue\n for filter_entry in contract.get_all_filter_entries():\n for i in range(0, num_flows_per_entry):\n flow = Flow()\n flow.ethertype = filter_entry.etherT\n if flow.ethertype == 'arp':\n flow.arp_opcode = filter_entry.arpOpc\n flow.populate_random_ip_addresses()\n elif flow.ethertype == 'ip':\n flow.populate_random_ip_addresses()\n flow.proto = filter_entry.prot\n if flow.proto == '6' or flow.proto == '17':\n dFromPort = int(filter_entry.dFromPort)\n dToPort = int(filter_entry.dToPort)\n sFromPort = int(filter_entry.sFromPort)\n sToPort = int(filter_entry.sToPort)\n if dFromPort == 0:\n dFromPort = 1\n dToPort += 1\n if sFromPort == 0:\n sFromPort = 1\n sToPort += 1\n if dToPort > 65534:\n dToPort = 65534\n if sToPort > 65534:\n sToPort = 65534\n flow.dport = str(random_number(dFromPort,\n dToPort))\n flow.sport = str(random_number(sFromPort,\n sToPort))\n if flow.proto == '6':\n flow.tcp_rules = filter_entry.tcpRules\n flow.svlan = providing_vlan\n flow.dvlan = consuming_vlan\n flow.src_intf = providing_phys_if\n flow.dst_intf = consuming_phys_if\n\n # Is the flow expected to succeed ?\n flow.expected_action = 'drop'\n providing_bd = providing_epg.get_bd()\n consuming_bd = consuming_epg.get_bd()\n if providing_bd and consuming_bd:\n if providing_bd == consuming_bd:\n if providing_bd.get_context() == consuming_bd.get_context():\n flow.expected_action = 'permit'\n flow.populate_random_mac_addresses()\n flows.append(flow)\n return flows",
"def fetchFinancialInfo(self, company_symbol, company_name, type):\n result = []\n utility = RevUtility()\n\n url = self.__REVENUE_BASE_URL + \"/\" + company_symbol + \"/\" + company_name + \"/\" + type\n pageContent = requests.get(url = url)\n tree = html.fromstring(pageContent.content)\n table = tree.xpath('//*[@id=\"style-1\"]/div[2]/table/tbody/tr')\n\n for item in table:\n data = item.xpath('td/text()')\n\n date = utility.convertToDate(data[0])\n try:\n revenue = float(data[1].replace(\",\", \"\").replace(\"$\",\"\"))\n except:\n ## handle revenue or income is negative\n revenue = 0\n result.append((date,revenue))\n\n result.reverse()\n return result",
"def _calculate_fees(corp_type, filing_info):\n fees = []\n service_fee_applied: bool = False\n for filing_type_info in filing_info.get('filingTypes'):\n current_app.logger.debug(f\"Getting fees for {filing_type_info.get('filingTypeCode')} \")\n fee: FeeSchedule = FeeSchedule.find_by_corp_type_and_filing_type(\n corp_type=corp_type,\n filing_type_code=filing_type_info.get('filingTypeCode', None),\n valid_date=filing_info.get('date', None),\n jurisdiction=None,\n is_priority=filing_type_info.get('priority'),\n is_future_effective=filing_type_info.get('futureEffective'),\n waive_fees=filing_type_info.get('waiveFees'),\n quantity=filing_type_info.get('quantity')\n )\n # If service fee is already applied, do not charge again.\n if service_fee_applied:\n fee.service_fees = 0\n elif fee.service_fees > 0:\n service_fee_applied = True\n\n if fee.variable:\n fee.fee_amount = Decimal(str(filing_type_info.get('fee', 0)))\n\n if filing_type_info.get('filingDescription'):\n fee.description = filing_type_info.get('filingDescription')\n\n fees.append(fee)\n return fees",
"def cash_flow(self, request, pk=None, **kwargs):\n # Get the goal even though we don't need it (we could ust use the pk)\n # so we can ensure we have permission to do so.\n goal = self.get_object()\n txs = Transaction.objects.filter(Q(to_goal=goal) | Q(from_goal=goal),\n status=Transaction.STATUS_EXECUTED,\n reason__in=Transaction.CASH_FLOW_REASONS)\n txs = txs.order_by('executed').values_list('to_goal', 'executed', 'amount')\n return Response([(dt2ed(tx[1]), tx[2] if tx[0] else -tx[2]) for tx in txs])",
"def get_exchange_trading_fee(self, exchange, pair, type):\n return self.ccxt.get_exchange_trading_fee(exchange, pair, type)",
"def get_pl_balances(self):\n\n\t\tdimension_fields = ['t1.cost_center']\n\n\t\tself.accounting_dimensions = get_accounting_dimensions()\n\t\tfor dimension in self.accounting_dimensions:\n\t\t\tdimension_fields.append('t1.{0}'.format(dimension))\n\n\t\treturn frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.account, t2.account_currency, {dimension_fields},\n\t\t\t\tsum(t1.debit_in_account_currency) - sum(t1.credit_in_account_currency) as bal_in_account_currency,\n\t\t\t\tsum(t1.debit) - sum(t1.credit) as bal_in_company_currency\n\t\t\tfrom `tabGL Entry` t1, `tabAccount` t2\n\t\t\twhere t1.is_cancelled = 0 and t1.account = t2.name and t2.report_type = 'Profit and Loss'\n\t\t\tand t2.docstatus < 2 and t2.company = %s\n\t\t\tand t1.posting_date between %s and %s\n\t\t\tgroup by t1.account, {dimension_fields}\n\t\t\"\"\".format(dimension_fields = ', '.join(dimension_fields)), (self.company, self.get(\"year_start_date\"), self.posting_date), as_dict=1)",
"async def fetch_deposits_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n currency = None\n request = {}\n method = 'privatePostAuthRMovementsHist'\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['uppercaseId']\n method = 'privatePostAuthRMovementsCurrencyHist'\n if since is not None:\n request['start'] = since\n if limit is not None:\n request['limit'] = limit # max 1000\n response = await getattr(self, method)(self.extend(request, params))\n #\n # [\n # [\n # 13293039, # ID\n # 'ETH', # CURRENCY\n # 'ETHEREUM', # CURRENCY_NAME\n # null,\n # null,\n # 1574175052000, # MTS_STARTED\n # 1574181326000, # MTS_UPDATED\n # null,\n # null,\n # 'CANCELED', # STATUS\n # null,\n # null,\n # -0.24, # AMOUNT, negative for withdrawals\n # -0.00135, # FEES\n # null,\n # null,\n # '0x38110e0Fc932CB2BE...........', # DESTINATION_ADDRESS\n # null,\n # null,\n # null,\n # '0x523ec8945500.....................................', # TRANSACTION_ID\n # \"Purchase of 100 pizzas\", # WITHDRAW_TRANSACTION_NOTE, might also be: null\n # ]\n # ]\n #\n return self.parse_transactions(response, currency, since, limit)",
"def refresh(self, country, ntype=\"Total\"):\n self.selected_country = country\n result = self.data_reader.cumulative_filter(country)\n confirmed = panel.format_number(result.Confirmed)\n recovered = panel.format_number(result.Recovered)\n deaths = panel.format_number(result.Deaths)\n\n c_chart = self.__create_timeserie_chart(country, case_type=1, ntype=ntype)\n d_chart = self.__create_timeserie_chart(country, case_type=2, ntype=ntype)\n return confirmed, recovered, deaths, c_chart, d_chart",
"def read_general_ledger(self, path):\n return pd.read_excel(io=f\"{path}/input/general_ledger.xlsx\")",
"async def fetch_deposits_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n request = {\n 'currency': 'all',\n # 'start': 123,\n }\n #\n # if since is not None:\n # # date-based pagination not supported\n # }\n #\n currency = None\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['id']\n if limit is not None:\n request['count'] = limit\n response = await self.privateGetUserWalletHistory(self.extend(request, params))\n transactions = self.filter_by_array(response, 'transactType', ['Withdrawal', 'Deposit'], False)\n return self.parse_transactions(transactions, currency, since, limit)",
"def get_deposit_withdraw(self, op_type: 'str', currency: 'str' = None, from_id: 'int' = None, size: 'int' = None,\n direct: 'str' = None) -> list:\n check_should_not_none(op_type, \"operate type\")\n\n params = {\n \"currency\": currency,\n \"type\": op_type,\n \"from\": from_id,\n \"direct\": direct,\n \"size\": size\n }\n\n from huobi.service.wallet.get_deposit_withdraw import GetDepositWithdrawService\n return GetDepositWithdrawService(params).request(**self.__kwargs)",
"def calcAdl(self):\n adl = []\n for i,j in enumerate(self.stock_data['Close']):\n #Calculating money flow measure\n mfm_nom = ((j-self.stock_data['Low'][i])-(self.stock_data['High'][i]))\n mfm_deno = self.stock_data['High'][i]-self.stock_data['Low'][i]\n mfm = mfm_nom/mfm_deno\n #Calculating money flow volume\n mfv = mfm*self.stock_data['Volume'][i]\n #Calculating accumulated distributin line\n if not adl:\n print(mfm)\n adl.append(mfv)\n else:\n print(adl)\n adl.append(mfv+adl)\n def_dates, def_points, k = self.angle(self.dates, adl, self.type) \n return k",
"def dprime_fnc_from_type(dprime_type):\n\n dprime_fncs = [no_mask(),\n scotoma_hard(),\n scotoma_soft(),\n glaucoma_hard(),\n glaucoma_soft(),\n hemianopsia_hard('left'),\n hemianopsia_soft('left'),\n hemianopsia_hard('right'),\n hemianopsia_soft('right')\n ]\n if not dprime_type in Dprime_types:\n raise ValueError(\"Unknown dprime type {}.\".format(dprime_type))\n return dprime_fncs[Dprime_types.index(dprime_type)]",
"def covid_data_get(type_to_get): # type in ['confirmed', 'death', 'recovered'], case insensitive\n type_to_get = str(type_to_get).lower()\n path = \"https://raw.githubusercontent.com/\" \\\n \"CSSEGISandData/COVID-19/master/\" \\\n \"csse_covid_19_data/\" \\\n \"csse_covid_19_time_series/\"\n file1 = \"time_series_covid19_confirmed_global.csv\"\n file2 = \"time_series_covid19_deaths_global.csv\"\n file3 = \"time_series_covid19_recovered_global.csv\"\n file_to_get = \"\"\n if type_to_get == 'confirmed':\n file_to_get = file1\n elif type_to_get == 'death':\n file_to_get = file2\n elif type_to_get == 'recovered':\n file_to_get = file3\n else:\n print('data type not exist')\n\n df = pd.read_csv(path + file_to_get)\n df = remodeling(df, type_to_get)\n df = df.reset_index()\n df['date'] = df['date'].astype(str)\n return df",
"def get_done_cards_obj(self, days, filter):\n done_sources = []\n print('Searching Trello cards..\\n')\n for list in self.my_lists:\n for card in list.list_cards(card_filter=filter):\n if ((\"Done\" in card.name) or (\"Moved\" in card.name)): \n name = card.name.split()[0]\n date = re.compile('[0-9]{2}/[0-9]{2}/[0-9]{2,4}')\n date = date.findall(card.name)\n if len(date) > 0:\n date = date[0]\n else:\n continue\n # print(name, date)\n if date in days:\n # done_sources.append(name + ' ' + date)\n done_sources.append(card)\n # exit(0)\n # print(card.name)\n return done_sources",
"def get_calls_list(self, session, date=None) -> List:\n\n if date == None:\n calls = session.query(\n Calls.id,\n Calls.planned_at,\n Calls.linkedin,\n Calls.leadgen_id\n ).all()\n else:\n calls = session.query(\n Calls.id,\n Calls.planned_at,\n Calls.linkedin,\n Calls.leadgen_id\n ).filter(Calls.planned_at.date()==date).all()\n return calls",
"def model_lemmon_graph(self, l_types=False): # ['LEV_LTB', 'LEV_LTM']\n\n if type(l_types) == str:\n l_types = [l_types]\n\n for l in l_types:\n # portfolios' names\n index = ['vhigh', 'high', 'med', 'low']\n # numbers 0-x where x is number of years in research-1 (-6 because of lack of obs)\n columns = list(range(8))\n print(columns)\n # create output DF\n lemmon_graph = pd.DataFrame(index=index, columns=columns)\n lemmon_graph_limit_up = pd.DataFrame(index=index, columns=columns)\n lemmon_graph_limit_down = pd.DataFrame(index=index, columns=columns)\n lemmon_graph_n = pd.DataFrame(index=index, columns=columns)\n\n # list of calendar years in research period, sorted\n years_range = sorted(list(range(\n self.uber_data.loc[:, 'D_BZ_date'].max().year, self.uber_data.loc[:, 'D_BZ_date'].min().year - 1, -1)\n ))\n for calendar_year in years_range:\n for relative_year in lemmon_graph.columns:\n for portfolio in ['vhigh', 'high', 'med', 'low']:\n self.logger.debug('lemon prep for {} {} {}'.format(calendar_year, relative_year, portfolio))\n if portfolio == 'vhigh':\n threshold_up = 2\n threshold_dn = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.75)\n elif portfolio == 'high':\n threshold_up = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.75)\n threshold_dn = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.5)\n elif portfolio == 'med':\n threshold_up = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.5)\n threshold_dn = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.25)\n elif portfolio == 'low':\n threshold_up = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.25)\n threshold_dn = -1\n else:\n raise Exception\n if relative_year == 0:\n self.uber_data.loc[\n (self.uber_data.loc[:, l] < threshold_up) &\n (self.uber_data.loc[:, l] > threshold_dn) &\n (self.uber_data.loc[:, 'year'] == calendar_year),\n f'{calendar_year}_{relative_year}'] = portfolio\n else:\n self.uber_data.loc[:, f'{calendar_year}_{relative_year}'] = np.nan\n for i, x in self.uber_data.loc[\n self.uber_data.loc[:, f'{calendar_year}_{relative_year-1}'].notnull(), :\n ].iterrows():\n self.uber_data.loc[\n (self.uber_data.loc[:, 'isin'] == x['isin']) &\n (self.uber_data.loc[:, 'year'] == x['year'] + 1),\n f'{calendar_year}_{relative_year}'] = \\\n x[f'{calendar_year}_{relative_year-1}']\n\n for relative in lemmon_graph.columns:\n for portfolio, r in lemmon_graph.iterrows():\n for_mean = []\n for i in years_range:\n try:\n for_mean += self.uber_data.loc[\n self.uber_data.loc[:, f'{i}_{relative}'] == portfolio, l].tolist()\n except TypeError:\n self.logger.info(f'type error in lemmon: i: {i} p: {portfolio} r: {relative}.')\n pass\n pd_for_mean = pd.Series(for_mean)\n lemmon_graph.at[portfolio, relative] = pd_for_mean.mean()\n lemmon_graph_n.at[portfolio, relative] = pd_for_mean.count()\n lemmon_graph_limit_up.at[portfolio, relative] = sms.DescrStatsW(pd_for_mean).tconfint_mean()[1]\n lemmon_graph_limit_down.at[portfolio, relative] = sms.DescrStatsW(pd_for_mean).tconfint_mean()[0]\n\n results = {'main_data': lemmon_graph,\n 'amount': lemmon_graph_n,\n 'limit_up': lemmon_graph_limit_up,\n 'limit_down': lemmon_graph_limit_down\n }\n return results",
"def dishlist_cal(n: list) -> list:\r\n return [dish.calories for dish in n]",
"def getCshCal(self, usrCals):\n assert self.checkCashCal(self.usrCals) is True\n for calen in usrCals['items']:\n if 'Cash Flow' in calen['summary']:\n return calen"
] | [
"0.58007056",
"0.51954895",
"0.5128794",
"0.5107095",
"0.5090592",
"0.476301",
"0.47382727",
"0.47313467",
"0.4641812",
"0.46323827",
"0.46226966",
"0.46067303",
"0.45926726",
"0.45881712",
"0.45019516",
"0.44865096",
"0.44601813",
"0.4456895",
"0.4439898",
"0.44268337",
"0.44115",
"0.44042867",
"0.43922716",
"0.43858787",
"0.4357002",
"0.43373382",
"0.43236035",
"0.4313461",
"0.42985803",
"0.4295662"
] | 0.6541988 | 0 |
Return all loan cash flows paid before the specified date from the specified leg. A loan cash flow is identified by its PS_DepositType additional info being equal to the specified type. | def get_cashflows_less_than(leg, run_date, cf_type):
    # Build an FASQL query for fixed-amount cash flows on this leg that are
    # paid strictly before run_date and tagged with the requested deposit type.
    query = acm.CreateFASQLQuery('FCashFlow', 'AND')
    query.AddAttrNode('Leg.Oid', 'EQUAL', leg.Oid())
    query.AddAttrNode('PayDate', 'LESS', run_date)
    query.AddAttrNode('CashFlowType', 'EQUAL', "Fixed Amount")
    query.AddAttrNode('AdditionalInfo.PS_DepositType', 'EQUAL', cf_type)
    cashFlows = query.Select()
return cashFlows | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetCallAccCashFlow(leg, run_date, cf_type):\n query = acm.CreateFASQLQuery('FCashFlow', 'AND')\n query.AddAttrNode('Leg.Oid', 'EQUAL', leg.Oid())\n query.AddAttrNode('PayDate', 'EQUAL', run_date)\n query.AddAttrNode('CashFlowType', 'EQUAL', \"Fixed Amount\")\n query.AddAttrNode('AdditionalInfo.PS_DepositType', 'EQUAL', cf_type)\n cashFlows = query.Select()\n if cashFlows:\n return cashFlows[0]\n return None",
"def get_loans(self, margin_type: str, asset: Optional[str] = None, isolated_symbol: Optional[str] = None,\n start_time: Optional[int] = None, end_time: Optional[int] = None):\n conditions_list = []\n if margin_type == 'cross':\n table = tables.CROSS_MARGIN_LOAN_TABLE\n elif margin_type == 'isolated':\n table = tables.ISOLATED_MARGIN_LOAN_TABLE\n if isolated_symbol is not None:\n conditions_list.append((table.isolated_symbol,\n SQLConditionEnum.equal,\n isolated_symbol))\n else:\n raise ValueError(f\"margin type should be 'cross' or 'isolated' but {margin_type} was received\")\n\n if asset is not None:\n conditions_list.append((table.asset,\n SQLConditionEnum.equal,\n asset))\n if start_time is not None:\n conditions_list.append((table.loanTime,\n SQLConditionEnum.greater_equal,\n start_time))\n if end_time is not None:\n conditions_list.append((table.loanTime,\n SQLConditionEnum.lower,\n end_time))\n return self.get_conditions_rows(table, conditions_list=conditions_list)",
"def pnl(qbo_session, period = \"YEARLY\", start_date=\"first\", end_date=\"last\",\n **kwargs):\n\n pnl_account_types = [\n \n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n \n ]\n\n \n\n # go through the accounts, collecting a list of those that are \n # pnl accounts\n\n relevant_accounts = []\n\n coa = qbo_session.chart_of_accounts()\n\n AccountType_i = coa[0].index(\"AccountType\")\n fqa_i = coa[0].index(\"FullyQualifiedName\")\n\n for a in coa:\n\n AccountType = a[AccountType_i]\n\n if AccountType in pnl_account_types:\n\n relevant_accounts.append(a[fqa_i])\n \n # now collect the ledger_lines that are even relevant to the time\n # period and pnl accounts (and we'll handle presentation last)\n\n relevant_activity = {} #{account:[relevant lines]}\n\n all_ledger_lines = qbo_session.ledger_lines(None, None, None, True,\n **kwargs)\n\n headers = all_ledger_lines[0]\n\n account_i = headers.index(\"account\") \n amount_i = headers.index(\"amount\")\n date_i = headers.index(\"TxnDate\")\n \n earliest_date = datetime(2100,1,1)\n latest_date = datetime(1900,1,1)\n\n for line in all_ledger_lines[1:]:\n\n account = line[account_i]\n line_date = line[date_i]\n\n #first apply the date filter!\n if not start_date == \"first\" and line_date < start_date:\n continue\n \n if not end_date == \"last\" and line_date > end_date:\n continue\n \n #if it's made the cut, we can update the report date bounds\n earliest_date = min(line_date,earliest_date)\n latest_date = max(line_date,latest_date)\n\n #then apply the account filter!\n\n if not account in relevant_activity:\n #then let's confirm that its account type is a pnl one\n \n if not account in relevant_accounts:\n \n continue\n\n else:\n relevant_activity[account] = []\n\n relevant_activity[account].append(line)\n\n #now let's do presentation\n #TODO -- incorporate pandas tables...do only minimal work on it until then\n\n pnl_lines = []\n\n if period == \"YEARLY\":\n\n report_start_date = datetime(earliest_date.year,1,1)\n report_end_date = datetime(latest_date.year,12,31)\n\n period_start_dates = list(rrule(YEARLY, bymonth=1, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonth=12, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date))\n\n elif period == \"MONTHLY\":\n\n report_start_date = datetime(earliest_date.year,\n earliest_date.month,\n 1)\n report_end_date = datetime(latest_date.year,\n latest_date.month,\n calendar.monthrange(latest_date.year,\n latest_date.month)[1])\n\n period_start_dates = list(rrule(MONTHLY, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date)) \n\n header_1 = [\"\", \"Period Start -->\"] + period_start_dates\n header_2 = [\"Account\", \"Period End -->\"] + period_end_dates\n\n pnl_lines.append(header_1)\n pnl_lines.append(header_2)\n\n \"\"\"Clearly, there's a way to do this with only one pass of the data...\n let's get that right in the first re-write...probably with pandas\"\"\"\n\n #now let's fill up the pnl_lines with what we know to be the relevant data\n #for now, we'll rely on the knowledge that the data is coming to us in\n #date order, but that should be fixed too...\n\n for account in relevant_activity:\n\n account_row = [account, \"\"] #one value per period \n\n current_period_index = 0 #primitive counter, yes!\n this_period_total = 0 #this will be this period's 
total\n\n for line in relevant_activity[account]:\n \n line_amount = line[amount_i]\n line_date = line[date_i] \n\n if line_date > period_end_dates[current_period_index]:\n\n account_row.append(this_period_total)\n this_period_total = line_amount\n current_period_index +=1\n\n else:\n \n this_period_total = round(this_period_total +\n line_amount, 2)\n\n \"\"\"super sloppy...\"\"\"\n account_row.append(this_period_total) #for the last period\n current_period_index +=1\n\n while current_period_index < len(period_end_dates):\n account_row.append(0)\n current_period_index +=1\n\n pnl_lines.append(account_row)\n\n return pnl_lines",
"def _get_pay_calendars(self, leg):\n pay_calendars = [\n leg.PayCalendar(),\n leg.Pay2Calendar(),\n leg.Pay3Calendar(),\n leg.Pay4Calendar(),\n leg.Pay5Calendar()\n ]\n return [cal for cal in pay_calendars if cal is not None]",
"async def fetch_deposits_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n request = {\n 'currency': 'all',\n # 'start': 123,\n }\n #\n # if since is not None:\n # # date-based pagination not supported\n # }\n #\n currency = None\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['id']\n if limit is not None:\n request['count'] = limit\n response = await self.privateGetUserWalletHistory(self.extend(request, params))\n transactions = self.filter_by_array(response, 'transactType', ['Withdrawal', 'Deposit'], False)\n return self.parse_transactions(transactions, currency, since, limit)",
"def get_inputs(self, contracts, date_from, date_to):\n res = super(PayslipLateCheckIn, self).get_inputs(contracts, date_to, date_from)\n late_check_in_type = self.env.ref('employee_late_check_in.late_check_in')\n contract = self.contract_id\n late_check_in_id = self.env['late.check_in'].search([('employee_id', '=', self.employee_id.id),\n ('date', '<=', self.date_to),\n ('date', '>=', self.date_from),\n ('state', '=', 'approved'),\n ])\n amount = late_check_in_id.mapped('amount')\n cash_amount = sum(amount)\n if late_check_in_id:\n self.late_check_in_ids = late_check_in_id\n input_data = {\n 'name': late_check_in_type.name,\n 'code': late_check_in_type.code,\n 'amount': cash_amount,\n 'contract_id': contract.id,\n }\n res.append(input_data)\n return res",
"def BacktestStrategy1(start_cond_dict, df, stock_exchange, invt_dict):\n total_days=df.shape[0]\n today_invt_dict=invt_dict\n invt_daily_list=[] # invt after today's transaction\n net_wealth_list=[]\n\n for i in range(total_days):\n if i==0:\n today_invt_dict=stock_exchange.FullBuyStocks(today_invt_dict, i)\n elif i==total_days-1: # last day\n today_invt_dict=stock_exchange.FullSellStocks(today_invt_dict, i)\n invt_daily_list.append(today_invt_dict)\n net_wealth_list.append(stock_exchange.EstimateNetWealth(today_invt_dict, i))\n \n PrintResult(\"Baseline Strategy\", net_wealth_list)\n plt.plot(net_wealth_list)\n plt.title(\"Baseline (1st day buy->hold->last day sell) Strategy\")\n plt.ylabel('Net Worth in USD') # Cash + Stock worth\n plt.show()\n return",
"def get_flows(self, num_flows_per_entry):\n flows = []\n for tenant in self._tenants:\n for contract in tenant.get_children(only_class=Contract):\n providing_epgs = contract.get_all_providing_epgs()\n consuming_epgs = contract.get_all_consuming_epgs()\n for providing_epg in providing_epgs:\n vlan_ifs = providing_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n providing_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n providing_phys_if = phys_ifs[0].name\n for consuming_epg in consuming_epgs:\n vlan_ifs = consuming_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n consuming_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n consuming_phys_if = phys_ifs[0].name\n if providing_vlan == consuming_vlan and providing_phys_if == consuming_phys_if:\n # Skip this case since traffic would be switched outside fabric\n continue\n for filter_entry in contract.get_all_filter_entries():\n for i in range(0, num_flows_per_entry):\n flow = Flow()\n flow.ethertype = filter_entry.etherT\n if flow.ethertype == 'arp':\n flow.arp_opcode = filter_entry.arpOpc\n flow.populate_random_ip_addresses()\n elif flow.ethertype == 'ip':\n flow.populate_random_ip_addresses()\n flow.proto = filter_entry.prot\n if flow.proto == '6' or flow.proto == '17':\n dFromPort = int(filter_entry.dFromPort)\n dToPort = int(filter_entry.dToPort)\n sFromPort = int(filter_entry.sFromPort)\n sToPort = int(filter_entry.sToPort)\n if dFromPort == 0:\n dFromPort = 1\n dToPort += 1\n if sFromPort == 0:\n sFromPort = 1\n sToPort += 1\n if dToPort > 65534:\n dToPort = 65534\n if sToPort > 65534:\n sToPort = 65534\n flow.dport = str(random_number(dFromPort,\n dToPort))\n flow.sport = str(random_number(sFromPort,\n sToPort))\n if flow.proto == '6':\n flow.tcp_rules = filter_entry.tcpRules\n flow.svlan = providing_vlan\n flow.dvlan = consuming_vlan\n flow.src_intf = providing_phys_if\n flow.dst_intf = consuming_phys_if\n\n # Is the flow expected to succeed ?\n flow.expected_action = 'drop'\n providing_bd = providing_epg.get_bd()\n consuming_bd = consuming_epg.get_bd()\n if providing_bd and consuming_bd:\n if providing_bd == consuming_bd:\n if providing_bd.get_context() == consuming_bd.get_context():\n flow.expected_action = 'permit'\n flow.populate_random_mac_addresses()\n flows.append(flow)\n return flows",
"def generate_cashflows(path_account, isin_cash):\n # Read and parse\n df_account = pd.read_excel(path_account)\n df_account = df_account.rename(\n columns={\n \"Variación\": \"ccyDelta\",\n \"Unnamed: 8\": \"delta\",\n \"Saldo\": \"ccyAmount\",\n \"Unnamed: 10\": \"amount\",\n \"Fecha valor\": \"date\",\n }\n )\n\n df_account[\"date\"] = pd.to_datetime(df_account[\"date\"], dayfirst=True)\n df_account = df_account.drop(\n columns=[\"Fecha\", \"Hora\", \"Producto\", \"Tipo\", \"ID Orden\"]\n )\n\n # Generate changes in position\n deltas_df = df_account.groupby([\"date\", \"ISIN\", \"amount\"])[\"delta\"].sum()\n deltas_df = pd.DataFrame(deltas_df).reset_index()\n\n # Compute cashflows\n cashflows_df = deltas_df.pivot_table(\n index=\"date\", columns=\"ISIN\", values=\"delta\", aggfunc=\"sum\"\n )\n\n # Compute external cashflows\n cashflows_external_df = df_account.loc[\n df_account[\"Descripción\"].isin([\"Ingreso\", \"Retirada\"])\n ]\n\n # For some reason DEGIRO has the cashflows mark to market shifted by one.\n # and my guess is that unless there is a position transaction, they dont\n # write cash mark to markets on Fridays ...\n cashflows_df = cashflows_df.asfreq(\"D\")\n cashflows_df[isin_cash] = cashflows_df[isin_cash].shift()\n cashflows_df = cashflows_df.asfreq(\"B\")\n\n return cashflows_df, cashflows_external_df",
"def get_trades(exer_date):\n exer_date = ael.date(str(exer_date))\n exer_trades = []\n all_trades = ael.Trade.select()\n for t in all_trades:\n if t.time > 0 and ael.date_from_time(t.time) == exer_date:\n if (t.type in ('Exercise', 'Assign') and\n t.insaddr.instype in ('Option', 'Warrant')):\n pass\n elif (t.type == 'Closing' and\n t.insaddr.instype == 'Future/Forward' and\n t.insaddr.settlement == 'Cash'):\n pass\n else:\n continue\n exer_trades.append(t)\n msg = ('Will update trade {0} in {1}.'.format(t.trdnbr,\n t.insaddr.insid))\n Logme()(msg)\n return exer_trades",
"def get_state_before(course_key, date):\n previous_stat = (\n EnrollmentTabCache.objects\n .filter(course_id=course_key, created__lt=date)\n .values('unenroll', 'enroll', 'total')\n .order_by('-created')\n )\n return previous_stat.first() if previous_stat.exists() else {'unenroll': 0, 'enroll': 0, 'total': 0}",
"def cash_flow_response_to_df(\n portfolio_cash_flows_response: lusid.ResourceListOfPortfolioCashFlow,\n sum_by_date: bool = True\n) -> pd.DataFrame:\n\n def select_cols(\n df: pd.DataFrame,\n filter_col: str,\n filter_value: str,\n cols_to_keep: list,\n ) -> pd.DataFrame:\n return df[df[filter_col] == filter_value][cols_to_keep]\n\n # Extract cash payment data from cash flow response\n cash_flows_dict = portfolio_cash_flows_response.to_dict()\n cash_flow_data = pd.json_normalize(cash_flows_dict[\"values\"])\n\n # Split pays and receives and handle -ve signage for pay outflows\n pay_data = select_cols(\n cash_flow_data,\n \"diagnostics.PayReceive\",\n \"Pay\",[\"payment_date\", \"amount\", \"source_transaction_id\"]\n )\n pay_data[\"amount\"] = pay_data[\"amount\"].apply(lambda x: -1 * x)\n pay_data.rename(columns={\"amount\": \"payAmount\"}, inplace=True)\n rec_data = select_cols(\n cash_flow_data,\n \"diagnostics.PayReceive\",\n \"Receive\",\n [\"payment_date\", \"amount\", \"source_transaction_id\"]\n )\n rec_data.rename(columns={\"amount\": \"receiveAmount\"}, inplace=True)\n\n # Merge on payment date and ignore join dupes\n merged_df = pay_data.merge(rec_data, on=[\"payment_date\", \"source_transaction_id\"])\n merged_df.drop_duplicates(subset=[\"payment_date\", \"source_transaction_id\"], keep=\"first\", inplace=True,\n ignore_index=True)\n\n # Add net flows and reduce index to dates\n merged_df['netAmount'] = merged_df['payAmount'] + merged_df['receiveAmount']\n merged_df[\"payment_date\"] = merged_df[\"payment_date\"].apply(lambda x: x.date())\n merged_df.set_index(keys=\"payment_date\", inplace=True)\n\n # Aggregate sub-holdings\n if sum_by_date:\n merged_df = merged_df.groupby(merged_df.index).sum()\n\n return merged_df",
"def model_lemmon_graph(self, l_types=False): # ['LEV_LTB', 'LEV_LTM']\n\n if type(l_types) == str:\n l_types = [l_types]\n\n for l in l_types:\n # portfolios' names\n index = ['vhigh', 'high', 'med', 'low']\n # numbers 0-x where x is number of years in research-1 (-6 because of lack of obs)\n columns = list(range(8))\n print(columns)\n # create output DF\n lemmon_graph = pd.DataFrame(index=index, columns=columns)\n lemmon_graph_limit_up = pd.DataFrame(index=index, columns=columns)\n lemmon_graph_limit_down = pd.DataFrame(index=index, columns=columns)\n lemmon_graph_n = pd.DataFrame(index=index, columns=columns)\n\n # list of calendar years in research period, sorted\n years_range = sorted(list(range(\n self.uber_data.loc[:, 'D_BZ_date'].max().year, self.uber_data.loc[:, 'D_BZ_date'].min().year - 1, -1)\n ))\n for calendar_year in years_range:\n for relative_year in lemmon_graph.columns:\n for portfolio in ['vhigh', 'high', 'med', 'low']:\n self.logger.debug('lemon prep for {} {} {}'.format(calendar_year, relative_year, portfolio))\n if portfolio == 'vhigh':\n threshold_up = 2\n threshold_dn = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.75)\n elif portfolio == 'high':\n threshold_up = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.75)\n threshold_dn = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.5)\n elif portfolio == 'med':\n threshold_up = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.5)\n threshold_dn = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.25)\n elif portfolio == 'low':\n threshold_up = self.uber_data.loc[\n self.uber_data.loc[:, 'year'] == calendar_year, l].quantile(0.25)\n threshold_dn = -1\n else:\n raise Exception\n if relative_year == 0:\n self.uber_data.loc[\n (self.uber_data.loc[:, l] < threshold_up) &\n (self.uber_data.loc[:, l] > threshold_dn) &\n (self.uber_data.loc[:, 'year'] == calendar_year),\n f'{calendar_year}_{relative_year}'] = portfolio\n else:\n self.uber_data.loc[:, f'{calendar_year}_{relative_year}'] = np.nan\n for i, x in self.uber_data.loc[\n self.uber_data.loc[:, f'{calendar_year}_{relative_year-1}'].notnull(), :\n ].iterrows():\n self.uber_data.loc[\n (self.uber_data.loc[:, 'isin'] == x['isin']) &\n (self.uber_data.loc[:, 'year'] == x['year'] + 1),\n f'{calendar_year}_{relative_year}'] = \\\n x[f'{calendar_year}_{relative_year-1}']\n\n for relative in lemmon_graph.columns:\n for portfolio, r in lemmon_graph.iterrows():\n for_mean = []\n for i in years_range:\n try:\n for_mean += self.uber_data.loc[\n self.uber_data.loc[:, f'{i}_{relative}'] == portfolio, l].tolist()\n except TypeError:\n self.logger.info(f'type error in lemmon: i: {i} p: {portfolio} r: {relative}.')\n pass\n pd_for_mean = pd.Series(for_mean)\n lemmon_graph.at[portfolio, relative] = pd_for_mean.mean()\n lemmon_graph_n.at[portfolio, relative] = pd_for_mean.count()\n lemmon_graph_limit_up.at[portfolio, relative] = sms.DescrStatsW(pd_for_mean).tconfint_mean()[1]\n lemmon_graph_limit_down.at[portfolio, relative] = sms.DescrStatsW(pd_for_mean).tconfint_mean()[0]\n\n results = {'main_data': lemmon_graph,\n 'amount': lemmon_graph_n,\n 'limit_up': lemmon_graph_limit_up,\n 'limit_down': lemmon_graph_limit_down\n }\n return results",
"def _search_account_history(cyclos, account, direction, begin_date, end_date, payment_types=[]):\n current_page = 0\n account_history = []\n while True:\n search_history_data = {\n 'account': account,\n 'direction': direction,\n 'period':\n {\n 'begin': begin_date,\n 'end': end_date,\n },\n 'orderBy': 'DATE_ASC',\n 'pageSize': 1000, # maximum pageSize: 1000\n 'currentPage': current_page,\n }\n search_history_res = cyclos.post(method='account/searchAccountHistory', data=search_history_data)\n account_history.extend(search_history_res['result']['pageItems'])\n page_count = search_history_res['result']['pageCount']\n if page_count == 0 or current_page + 1 == page_count:\n break\n else:\n current_page += 1\n filtered_history = []\n for entry in account_history:\n # On filtre d'abord par type de paiement et ensuite on regarde\n # si le paiement a fait l'objet d'une opposition de paiement\n # (dans cet ordre car pour voir s'il y a une oppostion de\n # paiement, il faut faire une requête au serveur).\n # On récupère les données de la transaction et on vérifie si la\n # donnée 'chargedBackBy' est présente dans le transfert associé.\n #\n # Note : Les transactions importées lors de la migration de\n # Cyclos 3 à Cyclos 4 sont de type ImportedTransactionData et\n # n'ont pas de transfert associé. Elles ne peuvent pas être\n # annulées. Les transactions enregistrées depuis (les\n # transactions \"normales\" en quelque sorte), sont de type\n # PaymentData.\n if entry['type']['id'] in payment_types:\n get_data_res = cyclos.get(method='transaction/getData/{}'.format(entry['transactionId']))\n transaction_data = get_data_res['result']\n if (transaction_data['class'] ==\n 'org.cyclos.model.banking.transactions.ImportedTransactionData'\n or (transaction_data['class'] ==\n 'org.cyclos.model.banking.transactions.PaymentData'\n and'chargedBackBy' not in transaction_data['transfer'].keys())):\n filtered_history.append(entry)\n return filtered_history",
"def BacktestStrategy2(start_cond_dict, df, stock_exchange, invt_dict):\n total_days=df.shape[0]\n today_invt_dict=invt_dict\n invt_daily_list=[] # invt after today's transaction\n net_wealth_list=[]\n recent_max=0 # recent max = 전고점 가격\n for i in range(total_days):\n if i==0: # 첫날은 일단 풀매수\n recent_max=stock_exchange.GetDayHighestPrice(i)\n today_invt_dict=stock_exchange.FullBuyStocks(today_invt_dict, i)\n else: # 다른날은 전부 전략대로 수행\n recent_max=max(recent_max, stock_exchange.GetDayHighestPrice(i-1)) # 전고점 갱신 확인\n # 만약 어제 종가가 전고점*threshold 미만이라면: 풀매도 \n if (stock_exchange.GetDayClosePrice(i-1) < \n (start_cond_dict['sell_threshold_percent']/100)*recent_max):\n today_invt_dict=stock_exchange.FullSellStocks(today_invt_dict, i)\n # 매도조건을 만족 안 시킨 상황에서 n개월 모멘텀이 (+)면: 풀매수 -- n개월이 안지났으면 스킵\n elif (i > start_cond_dict['buy_momentum_days'] and \n stock_exchange.GetDayHighestPrice(i-start_cond_dict['buy_momentum_days']) <\n stock_exchange.GetDayOpenPrice(i)):\n today_invt_dict=stock_exchange.FullBuyStocks(today_invt_dict, i)\n # 나머지 상황에선 포지션 홀드\n else:\n pass\n invt_daily_list.append(today_invt_dict)\n #print(today_invt_dict) # for debug :)\n net_wealth_list.append(stock_exchange.EstimateNetWealth(today_invt_dict, i))\n \n PrintResult(\"Experimental Strategy\", net_wealth_list)\n plt.plot(net_wealth_list)\n plt.title(\"Experimental Strategy\")\n plt.ylabel('Net Worth in USD') # Cash + Stock worth\n plt.show()",
"def visitBefore(self, date):\n raise NotImplementedError()",
"def before_trading_start(context, data):\r\n context.output = pipeline_output('pipeline')\r\n\r\n # sort by earning yield\r\n context.output = context.output.sort(\r\n columns='Free Cash Flow', ascending=False)\r\n\r\n # get top 20 stocks as security list\r\n context.eligible_assets = context.output.iloc[:19]",
"def getOldData(self, data_type):\n df = pd.read_csv(self.sources[str(data_type)]['local_path'])\n return df",
"def bollinger(ldt_timestamps,symbols_gen,lookback,thresh):\n \n ls_keys = ['close','actual_close']\n ls_symbols = dataobj.get_symbols_from_list(symbols_gen) \n ls_symbols.append('SPY')\n ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)\n d_data = dict(zip(ls_keys, ldf_data))\n for s_key in ls_keys:\n d_data[s_key] = d_data[s_key].fillna(method = 'ffill')\n d_data[s_key] = d_data[s_key].fillna(method = 'bfill')\n d_data[s_key] = d_data[s_key].fillna(1.0)\n price = copy.deepcopy(d_data['close'])\n price_Rolling_mean = pd.rolling_mean(price,lookback)\n price_Rolling_std = pd.rolling_std(price,lookback)\n bollinger_val = (price - price_Rolling_mean) / (price_Rolling_std)\n evt_data = bollinger_val \n print \"Finding Events\"\n \n df_events = copy.deepcopy(evt_data)\n df_events = df_events * np.NAN\n\n\n for i in range(1, len(ldt_timestamps)):\n SPY_BV = evt_data['SPY'].ix[ldt_timestamps[i]]\n if SPY_BV>= 1.1:\n for s_sym in ls_symbols:\n f_sym_indicator_today = evt_data[s_sym].ix[ldt_timestamps[i]]\n f_sym_indicator_yest = evt_data[s_sym].ix[ldt_timestamps[i - 1]]\n \n if f_sym_indicator_yest >= thresh and f_sym_indicator_today < thresh:\n df_events[s_sym].ix[ldt_timestamps[i]] = 1\n \n ep.eventprofiler(b_evt, d_data, i_lookback=20, i_lookforward=20,\n s_filename='MyEventStudy'+ symbols_gen+'.pdf', b_market_neutral=True, b_errorbars=True,\n s_market_sym='SPY')\n\n \n return df_events,evt_data",
"def get_calls_list(self, session, date=None) -> List:\n\n if date == None:\n calls = session.query(\n Calls.id,\n Calls.planned_at,\n Calls.linkedin,\n Calls.leadgen_id\n ).all()\n else:\n calls = session.query(\n Calls.id,\n Calls.planned_at,\n Calls.linkedin,\n Calls.leadgen_id\n ).filter(Calls.planned_at.date()==date).all()\n return calls",
"async def fetch_deposits_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n currency = None\n request = {}\n method = 'privatePostAuthRMovementsHist'\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['uppercaseId']\n method = 'privatePostAuthRMovementsCurrencyHist'\n if since is not None:\n request['start'] = since\n if limit is not None:\n request['limit'] = limit # max 1000\n response = await getattr(self, method)(self.extend(request, params))\n #\n # [\n # [\n # 13293039, # ID\n # 'ETH', # CURRENCY\n # 'ETHEREUM', # CURRENCY_NAME\n # null,\n # null,\n # 1574175052000, # MTS_STARTED\n # 1574181326000, # MTS_UPDATED\n # null,\n # null,\n # 'CANCELED', # STATUS\n # null,\n # null,\n # -0.24, # AMOUNT, negative for withdrawals\n # -0.00135, # FEES\n # null,\n # null,\n # '0x38110e0Fc932CB2BE...........', # DESTINATION_ADDRESS\n # null,\n # null,\n # null,\n # '0x523ec8945500.....................................', # TRANSACTION_ID\n # \"Purchase of 100 pizzas\", # WITHDRAW_TRANSACTION_NOTE, might also be: null\n # ]\n # ]\n #\n return self.parse_transactions(response, currency, since, limit)",
"def is_before(self,other_date):",
"def check_absent_pre_date(self, cr, uid, att, context=None):\n if att:\n # check employee absent pre date\n pre_att_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), \n ('name', '<', att.name), \n ('action', 'in', ('sign_in', 'sign_out'))], \n limit=1)\n param_obj = self.pool.get('ir.config_parameter')\n working_hour_obj = self.pool.get('hr.payroll.working.hour')\n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n trobz_base_obj = self.pool.get('trobz.base')\n att_name = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_late = att_name - timedelta(minutes = max_late)\n \n working_hour_ids=[] #Payroll Working Hours (Only read working PWH, Not Leave or Overtime PWH) \n if not pre_att_ids:\n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_end', '<', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('plan_line_id', '!=', False)\n ], \n context=context)\n else:\n pre_time_early = self.read(cr, uid, pre_att_ids[0], ['name_tz'], context=context)['name_tz']\n time_start_early = datetime.strptime(pre_time_early, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(minutes = max_early)\n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_start', '>', time_start_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('expected_end', '<', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('plan_line_id', '!=', False)\n ], context=context, order='date DESC')\n if not working_hour_ids:\n return False\n else:\n for working in working_hour_obj.browse(cr, uid, working_hour_ids, context=context):\n # check public holiday\n holiday_ids = self.pool.get('trobz.hr.public.holidays').search(cr, uid, [('date','=', working.date)], context=context) \n if holiday_ids:\n return False\n # full\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND line.first_date < '%s' AND line.last_date > '%s'\n AND h.state = 'validate'\n '''% (working.employee_id.id, working.date, working.date)\n cr.execute(sql)\n if cr.fetchall():\n continue\n else:\n sql = False\n expected_start = trobz_base_obj.convert_from_utc_to_current_timezone(cr, uid, working.expected_start, False, DEFAULT_SERVER_DATETIME_FORMAT, False, context=context)\n time_start = expected_start.hour\n expected_end = trobz_base_obj.convert_from_utc_to_current_timezone(cr, uid, working.expected_end, False, DEFAULT_SERVER_DATETIME_FORMAT, False, context=context)\n time_end = expected_end.hour\n # wh afternoon\n if time_start >= 12 and time_end >=12:\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND (line.first_date = '%s' OR line.last_date = '%s')\n AND h.state = 'validate'\n AND (line.last_date_type = 'afternoon' OR line.first_date_type = 'afternoon')\n '''% (working.employee_id.id, working.date, working.date)\n # wh morning\n elif time_start < 12 and time_end <= 12:\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND (line.first_date = '%s' OR line.last_date = '%s')\n 
AND h.state = 'validate'\n AND (line.last_date_type = 'morning' OR line.first_date_type = 'morning')\n '''% (working.employee_id.id, working.date, working.date)\n \n if sql:\n cr.execute(sql)\n if cr.fetchall():\n continue\n # wh full\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND (line.first_date = '%s' OR line.last_date = '%s')\n AND h.state = 'validate'\n AND (line.last_date_type = 'full' OR line.first_date_type = 'full')\n '''% (working.employee_id.id, working.date, working.date)\n cr.execute(sql)\n res = cr.fetchall()\n if res or (time_late >= expected_start and time_late <= expected_end):\n continue\n return True\n return False",
"def historical(config, stid, start_date):\n\n # Get dates\n start_date = start_date.replace(hour=6)\n end_date = datetime.utcnow()\n start, end = meso_api_dates(start_date, end_date)\n\n # Download CF6 files\n get_cf6_files(config, stid, 12)\n\n # Get the daily verification\n dailys = get_verification(config, stid, start, end, use_climo=True)\n\n return dailys",
"def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh",
"def payments(self, loan):\n self.currency_interest = \"XBT\"\n \n \"\"\"The lender agrees to provide the borrower half of the loan amount\n on the initial loan on the initial date\"\"\"\n loan.fund(on=self.initial_loan_date,\n amount=self.total_loan_amount * \\\n Decimal(0.5))\n \"\"\"The lender agrees to pledge the remaining loan amount toward\n the kickstarter campaign of the borrower.\"\"\"\n loan.fund(on=self.kickstarter_payment_date,\n amount=self.total_loan_amount * \\\n Decimal(0.5))\n \"\"\" Standard payment schedule - The borrower intends to\n payback period will be separated into 8 installments and\n completed in 8 months. The payback will begin in the 5th\n month. However, unless the special conditions are triggered,\n the borrower is required to only pay the interest on the loan\n until the final payment date.\"\"\"\n\n \"\"\" Special payment schedule - If First campaign funded over\n USD 65,000, the borrower must pay back entire loan including\n one year interest within the two months after Crowd Funding\n Platform pay the fund.\"\"\"\n\n \"\"\" If First campaign funded over USD 58,000, will pay back 4\n Installment in advance, after Crowd Funding Platform pay the\n fund. The rest of the loan will keep paying followed the\n standard schedule until all loan including interest is paid\n back.\"\"\"\n\n if (self.kickstarter_revenue > Money(65000, \"USD\")):\n payment_date = self.kickstarter_payment_date + \\\n relativedelta(months=2)\n loan.add_to_balance(on=payment_date,\n amount = loan.interest(payment_date,\n self.final_payment_date,\n loan.remaining_balance()))\n loan.payment(on=payment_date,\n amount = loan.remaining_balance())\n else:\n if (self.kickstarter_revenue > Money(58000, \"USD\")):\n payment_date = self.kickstarter_payment_date + \\\n relativedelta(months=2)\n loan.payment(on=payment_date,\n amount = lambda : loan.remaining_principal()() * Decimal(0.5))\n start_payment_date = self.initial_loan_date + \\\n relativedelta(months=4)\n loan.amortize(on=start_payment_date,\n amount = loan.remaining_balance(),\n payments=8,\n interval=relativedelta(months=1))\n \"\"\"The borrower agrees to pay back the any remaining principal\n and accrued interest one year after the loan is issued.\"\"\"\n loan.payment(on=self.final_payment_date,\n amount= loan.remaining_balance())",
"def get_pre_df(temp_pre_df):\n \n event_time_max = temp_pre_df['event_time'].max()\n cat_dfs = []\n for num in np.arange(0,(1080/2)+1,30)[1:]:\n # making <= null i.e keeping >\n temp_pre_df.loc[temp_pre_df['event_time'] <= int(num), 'event_time'] = np.nan\n for col in ['event_name', 'specialty', 'plan_type']:\n cat_df = temp_pre_df.groupby([\"id\", col]).agg({\"event_time\": 'count'}).unstack(level=col)\n cat_df = cat_df/(event_time_max-num)\n cat_df.columns = ['__'.join(['normChange', col, name, str(int(num))]) for name in cat_df.columns.droplevel()]\n cat_dfs.append(cat_df)\n pre_df = pd.concat(cat_dfs, axis = 1) \n return pre_df.fillna(0)",
"def points(recruit, type, date):\n if type == \"Join\":\n return 1\n else:\n x = models.Recruits.query.filter_by(recruit=recruit).order_by(desc(models.Recruits.id)).first()\n # todo change date and x.recruit_date to dates instead of strings\n days_in_clan = date - x.recruit_date\n if days_in_clan.days >= 7:\n return 0\n return -1",
"def isolate_positive_lateral_flow_tests(self, time: int, positive_nodes: List[Node]):\n\n for node in positive_nodes:\n node.received_positive_test_result = True\n\n if node.will_uptake_isolation:\n node.isolated = True\n\n node.avenue_of_testing = TestType.lfa\n node.positive_test_time = time\n node.being_lateral_flow_tested = False\n\n if not node.household.applied_household_positive_policy and \\\n not self.LFA_testing_requires_confirmatory_PCR:\n node.household.apply_positive_policy(time, self.household_positive_policy)",
"def tmp_LA_pre(self, step):\n self.proof[step.seq_num] = self.proof[step.assms[0]].on_prop(\n conv.top_conv(conv.rewr_conv(\"tmp_LA_pre_int\")),\n conv.top_conv(conv.rewr_conv(\"tmp_LA_pre_real\")),\n )"
] | [
"0.5900546",
"0.4622312",
"0.4520557",
"0.44921994",
"0.44547892",
"0.44215465",
"0.44210744",
"0.44176406",
"0.44091272",
"0.43490028",
"0.43442965",
"0.43382567",
"0.4313873",
"0.43065736",
"0.43013513",
"0.4298931",
"0.42945644",
"0.42921874",
"0.42917156",
"0.42787942",
"0.42731026",
"0.42628422",
"0.4207615",
"0.4202213",
"0.42002517",
"0.41992524",
"0.4191416",
"0.41864255",
"0.4185963",
"0.4183225"
] | 0.706523 | 0 |
Returns True if session creation is allowed (as determined by the super class's is_session_creation_enabled value and no request-specific override has disabled sessions for this subject), False otherwise. This means session creation is disabled if the super is_session_creation_enabled property is False or if a request attribute is discovered that turns off sessions for the current request. | def session_creation_enabled(self):
return (self._session_creation_enabled and
self.web_registry.session_creation_enabled) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def allow_new_session(self):\n return self._currSessionDiscardable",
"def private_session(self):\n return bool(\n lib.sp_session_is_private_session(self._session._sp_session))",
"def sessionValid(self):\n return self.request.getSession().isValid()",
"def sessionNew(self):\n return self.request.getSession().isNew();",
"def is_impersonated_session(request):\n return (\n hasattr(request, \"session\") and la_settings.USER_SESSION_FLAG in request.session\n )",
"def use_mandatory_session_management(self):\n # Session state will be saved and can not be closed by consumers\n self._session_management = MANDATORY",
"def use_mandatory_session_management(self):\n self._session_management = MANDATORY",
"def org_apache_felix_https_jetty_session_cookie_http_only(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_https_jetty_session_cookie_http_only",
"def requires_session(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"requires_session\")",
"def try_sessions(self, request, **kwargs):\n csrf_token = _sanitize_token(request.COOKIES.get(settings.CSRF_COOKIE_NAME, ''))\n\n if request.is_secure():\n referer = request.META.get('HTTP_REFERER')\n\n if referer is None:\n return False\n\n good_referer = 'https://%s/' % request.get_host()\n\n if not same_origin(referer, good_referer):\n return False\n\n # Tastypie docstring says accessing POST here isn't safe, but so far it's not causing any problems...\n # This is necessary for downloads that post the csrf token from an iframe\n request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '') or request.POST.get('csrfmiddlewaretoken', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n return False\n\n return request.user.is_authenticated()",
"def is_session_applicable(self, request, response) -> bool:\n return response.content_type == \"text/html\"",
"def isActive(self):\n return self.sess is not None and self.sess.isValid()",
"def is_valid(self):\n if \"lti_validsession\" not in self.__httprequest.session:\n return False\n return True",
"def can_reuse_session() -> bool:\n if AgentClient.__agent_version is None:\n return False\n\n return version.parse(AgentClient.__agent_version) >= version.parse(\n AgentClient.MIN_SESSION_REUSE_CAPABLE_VERSION\n )",
"def disable_session_management(self):\n self._session_management = DISABLED\n self.close_sessions()",
"def disable_session_management(self):\n self._session_management = DISABLED\n self.close_sessions()",
"def has_object_create_permission(self, request):\n user = request.user\n if user.is_superuser:\n return user.is_superuser\n\n return self.user == user",
"def create_enabled(self):\n return self._create_enabled",
"def can_create(self):\n return True",
"def session(self, request):\n if request.method != 'GET':\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n data = {'valid': request.user.is_authenticated()}\n return Response(data, status=status.HTTP_200_OK)",
"def allowForSessionCookies(self):\n if not self.__loaded:\n self.__load()\n \n return self.__exceptionsAllowForSession",
"def new_session(self):\n body = yield from self._fetch_json(URL_LOGIN, self._new_session_data)\n self.sma_sid = jmespath.search('result.sid', body)\n if self.sma_sid:\n return True\n\n msg = 'Could not start session, %s, got {}'.format(body)\n\n if body.get('err'):\n if body.get('err') == 503:\n _LOGGER.error(\"Max amount of sesions reached\")\n else:\n _LOGGER.error(msg, body.get('err'))\n else:\n _LOGGER.error(msg, \"Session ID expected [result.sid]\")\n return False",
"def is_configured(self):\n return self._session is not None",
"def org_apache_felix_http_session_invalidate(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_http_session_invalidate",
"def isExpired(self):\n return self.sess is not None and not self.sess.isValid()",
"def enable_authentication(self) -> bool:\n return pulumi.get(self, \"enable_authentication\")",
"def isInactive(self):\n return self.sess is None or not self.sess.isValid()",
"def check_session_can_start(\n self, session_request_to_use: typing.Optional[SessionRequest]\n ):\n self.check_total_sessions_exceeded()\n self.check_active_sessions_exceeded()\n self.check_session_requests_exist(session_request_to_use)",
"def can_be_disabled(self) -> bool:\n return True",
"def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session"
] | [
"0.6674522",
"0.6288012",
"0.611987",
"0.58222526",
"0.5800123",
"0.57902366",
"0.5765897",
"0.56896955",
"0.561705",
"0.56134635",
"0.55813694",
"0.5521292",
"0.5455969",
"0.54093575",
"0.53659534",
"0.53659534",
"0.53562754",
"0.5306992",
"0.52831423",
"0.52757114",
"0.52687365",
"0.5267443",
"0.52643186",
"0.5253676",
"0.5178081",
"0.51707673",
"0.5170033",
"0.51645803",
"0.5094277",
"0.507768"
] | 0.8258925 | 0 |
Open the first file in the list and create the header from the first line of the first file. | def create_header_from_file(file_list):
with open(file_list[0], 'r') as csvfile:
contents = csv.reader(csvfile)
row_number = 0
for row in contents:
if row_number ==0:
header = row
print("File header: ", header)
row_number += 1
return header | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_header(filepath):\n header = None\n for i, x in enumerate(open(filepath)):\n if i == 0:\n header = x\n return(header)",
"def pp_file_header(self):\n self.separator()\n for item in self.file_header:\n print(item.ljust(27, ' ') + \": {}\".format(self.file_header[item]))\n \n self.separator()",
"def check_file_header(fnames, nlines=5):\n from itertools import islice\n for fname in fnames:\n print(f\"\\nPrinting header from {fname} \\n#########################################\")\n with open(fname) as f:\n head = list(islice(f, nlines))\n for line in head:\n print(line)",
"def _parseFileHeader(self):\n self.fileheader = FileHeader()\n self.fileheader.parse(self.f)\n #print('Parsed fileheader')",
"def read_scamp_head(fname, header=None):\n\n with open(fname) as fobj:\n lines = fobj.readlines()\n\n lines = [l.strip() for l in lines if l[0:3] != 'END']\n\n # if header is None an empty FITSHDR is created\n hdr = FITSHDR(header)\n\n for l in lines:\n hdr.add_record(l)\n\n return hdr",
"def _extract_headers(self):\n\n with open(self.file_path, \"rt\", encoding=self._encoding) as csv_file:\n for row in csv.reader(csv_file):\n if self._file_headings:\n return [header if header != \"\" else f\"Untitled_{index + 1}\" for index, header in enumerate(row)]\n\n else:\n return [f\"Untitled_{i + 1}\" for i in range(len(row[0]))]",
"def write_header(self, fd):\n fd.write(f\"BEGIN {self.name}\")\n if len(self.data_items) > 0:\n if isinstance(self.data_items[0], mfdatascalar.MFScalar):\n one_based = (\n self.data_items[0].structure.type == DatumType.integer\n )\n entry = self.data_items[0].get_file_entry(\n values_only=True, one_based=one_based\n )\n else:\n entry = self.data_items[0].get_file_entry()\n fd.write(str(entry.rstrip()))\n if len(self.data_items) > 1:\n for data_item in self.data_items[1:]:\n entry = data_item.get_file_entry(values_only=True)\n fd.write(\"%s\" % (entry.rstrip()))\n if self.get_comment().text:\n fd.write(\" \")\n self.get_comment().write(fd)\n fd.write(\"\\n\")",
"def pareHeader(headerFile,Ldontcares=['GData','BiasCoeff','headerFile','y_m_d','TimeZero']):\n reload(chd) # KEN SCOPE ISSUE?\n dHeader = chd.main(['headerFile=' + headerFile])\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d','TimeZero']\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d']\n for k in Ldontcares:\n del dHeader[k]\n dataFile = split(headerFile,'.header')[0] # toss extension\n return dHeader,dataFile",
"def readHeader(self) -> None:\n # read header files\n self.headersList = []\n self.chanHeadersList = []\n for headerFile in self.headerF:\n if \"xtrx\" in headerFile.lower():\n headers, chanHeaders = self.readHeaderXTRX(headerFile)\n else:\n headers, chanHeaders = self.readHeaderXTR(headerFile)\n self.headersList.append(headers)\n self.chanHeadersList.append(chanHeaders)\n\n # check to make sure no gaps, calculate out the sample ranges and list the data files for each sample\n self.mergeHeaders(self.headersList, self.chanHeadersList)",
"def get_primary_header(input_lst):\n lst = [\n # 12345678 12345678901234567890123456789012345678901234567\n ('SIMPLE' , 'file does conform to FITS standard' ),\n ('BITPIX' , 'number of bits per data pixel' ),\n ('NAXIS' , 'number of data axes' ),\n ('NAXIS1' , 'length of data axis 1' ),\n ('NAXIS2' , 'length of data axis 2' ),\n ('BSCALE' , 'factor to linearly scale the data pixel values' ),\n ('BZERO' , 'offset to linearly scale the data pixel values' ),\n ('BUNIT' , 'physical unit of the data pixel values' ),\n ('BLANK' , 'value representing undefined physical values' ),\n ('DISPAXIS', 'main dispersion axis of the spectral data' ),\n ('DATATYPE', 'type of data (calibration/science)' ),\n ('OBJECT' , 'object observed' ),\n ('DATE-OBS', 'start date of observation run' ),\n ('MJD-OBS' , 'Modified Julian Date of observation run' ),\n ('TIMESYS' , 'time system' ),\n ('FRAMEID' , 'frame ID in observation run' ),\n ('RA' , 'right ascension of object' ),\n ('DEC' , 'declination of object' ),\n ('RADESYS' , 'name of reference frame' ),\n ('EQUINOX' , 'epoch of the mean equator and equinox in years' ),\n ('EXPTIME' , 'exposure time in seconds' ),\n ('PHO-OFF' , 'offset of photon middle time' ),\n ('UTC-STA' , 'UTC at start of exposure' ),\n ('UTC-MID' , 'UTC at middle of exposure' ),\n ('UTC-PHO' , 'UTC at photon middle of exposure' ),\n ('UTC-END' , 'UTC at end of exposure' ),\n ('LT-STA' , 'local time at start of exposure' ),\n ('LT-MID' , 'local time at middle of exposure' ),\n ('LT-PHO' , 'local time at photon middle of exposure' ),\n ('LT-END' , 'local time at end of exposure' ),\n ('LST-STA' , 'local sidereal time at start' ),\n ('LST-MID' , 'local sidereal time at middle' ),\n ('LST-PHO' , 'local sidereal time at photon middle' ),\n ('LST-END' , 'local sidereal time at end' ),\n ('MJD-STA' , 'Modified Julian Date of UTC-STA' ),\n ('MJD-MID' , 'Modified Julian Date of UTC-MID' ),\n ('MJD-PHO' , 'Modified Julian Date of UTC-PHO' ),\n ('MJD-END' , 'Modified Julian Date of UTC-END' ),\n ('AIRM-STA', 'airmass at start of exposure' ),\n ('AIRM-MID', 'airmass at middle of exposure' ),\n ('AIRM-PHO', 'airmass at photon middle of exposure' ),\n ('AIRM-END', 'airmass at end of exposure' ),\n ('AIRMASS' , 'effective airmass during exposure' ),\n ('ALT-STA' , 'telescope altitude at start' ),\n ('ALT-MID' , 'telescope altitude at middle' ),\n ('ALT-PHO' , 'telescope altitude at photon middle' ),\n ('ALT-END' , 'telescope altitude at end' ),\n ('AZ-STA' , 'telescope azimuth at start' ),\n ('AZ-MID' , 'telescope azimuth at middle' ),\n ('AZ-PHO' , 'telescope azimuth at photon middle' ),\n ('AZ-END' , 'telescope azimuth at end' ),\n ('MOON-AGE', 'days past new moon at middle of exposure' ),\n ('MOON-ALT', 'moon altitude at middle of exposure' ),\n ('MOON-AZ' , 'moon azimuth at middle of exposure' ),\n ('MOON-DIS', 'angular distance to moon (in degree)' ),\n ('TWI-END' , 'end time of astronomical twilight in UTC' ),\n ('TWI-STA' , 'start time of astronomical twilight in UTC' ),\n ('PROP-ID' , 'proposal ID' ),\n ('PROP-TIT', 'title of proposal' ),\n ('PROP-PI' , 'principal investigator of proposal' ),\n ('OBSERVER', 'people who acquire the data' ),\n ('OBSERVAT', 'observatory where the data is acquired' ),\n ('TELESCOP', 'telescope used to acquire the data' ),\n ('OBS-LONG', 'longitude of the telescope' ), \n ('OBS-LAT' , 'latitude of the telescope' ),\n ('OBS-ALT' , 'altitude of the telescope in meter' ),\n ('INSTRUME', 'instrument used to acquire the data' ),\n ('SETUP-ID', 'ID of the instrument setup' 
),\n ('SLT-WID' , 'slit width (in mm)' ),\n ('SLT-LEN' , 'slit length (in mm)' ),\n ('NCHANNEL', 'number of simultaneous channels' ),\n ('CHANNEL1', 'object of channel 1' ),\n ('CHANNEL2', 'object of channel 2' ),\n ('FILTER1' , 'filter in channel 1' ),\n ('FILTER2' , 'filter in channel 2' ),\n ('EXPMETER', 'usage of exposure meter' ),\n ('SHAK_STA', 'status of fiber shaker (on/off)' ),\n ('SHAK_FRE', 'frequency of fiber shaker (in Hz)' ),\n ('SHAK_AMP', 'amplitude of fiber shaker' ),\n ('DETECTOR', 'detector used to acquire the data' ),\n ('GAIN' , 'readout gain of detector (in electron/ADU)' ),\n ('RO-SPEED', 'read out speed of detector' ),\n ('RO-NOISE', 'read out noise of detector' ),\n ('BINAXIS1', 'binning factor of data axis 1' ),\n ('BINAXIS2', 'binning factor of data axis 2' ),\n ('TEMP-DET', 'temperature of detector (in degree)' ),\n ('TEMP-BOX', 'temperature inside instrument box (in degree)' ),\n ('TEMP-ROO', 'temperature inside instrument room (in degree)' ),\n ('PRES-BOX', 'pressure inside instrument box (in hPa)' ),\n ('DATE' , 'file creation date' ),\n ('ORI-NAME', 'original filename' ),\n ('ORIGIN' , 'organization responsible for the FITS file' ),\n ('HEADVER' , 'version of header' ),\n ]\n now = datetime.datetime.now()\n header_lst = []\n for key, comment in lst:\n if key in input_lst.keys():\n value = input_lst[key]\n else:\n value = None\n if type(value) == type('a'):\n value = \"'%-8s'\"%value\n value = value.ljust(20)\n elif type(value) == type(u'a'):\n value = value.encode('ascii','replace')\n value = \"'%-8s'\"%value\n value = value.ljust(20)\n elif type(value) == type(1):\n value = '%20d'%value\n elif type(value) == type(1.0):\n if key[0:4]=='MJD-':\n # for any keywords related to MJD, keep 6 decimal places.\n # for reference, 1 sec = 1.16e-5 days\n value = '%20.6f'%value\n else:\n value = str(value).rjust(20)\n value = value.replace('e','E')\n elif type(value) == type(now):\n # if value is a python datetime object\n value = \"'%04d-%02d-%02dT%02d:%02d:%02d.%03d'\"%(\n value.year, value.month, value.day,\n value.hour, value.minute, value.second,\n int(round(value.microsecond*1e-3))\n )\n elif value == True:\n value = 'T'.rjust(20)\n elif value == False:\n value = 'F'.rjust(20)\n elif value == None:\n value = \"''\".ljust(20)\n else:\n print('Unknown value: {}'.format(value))\n string = '%-8s= %s / %s'%(key,value,comment)\n if len(string)>=80:\n string = string[0:80]\n else:\n string = string.ljust(80)\n\n header_lst.append(string)\n\n return header_lst",
"def get_header(fname, Nrows_header_total=None):\n\n if Nrows_header_total==None:\n Nrows_header_total = header_len(fname)\n\n output = []\n with open(fname) as f:\n for i in range(Nrows_header_total):\n line = f.readline().strip()\n output.append(line)\n\n return output",
"def head(file_name):\n #from itertools import islice\n with open('../test_files/' + file_name, 'r') as infile:\n list = infile.readlines()\n #printing the 1st 10 lines\n print('list of first 10 lines',list[:10])",
"def first(self):\n return self._make_position(self._header._next)",
"def BuildHeadList(all_file_contents):\n head_list = []\n list_all_file_contents = (all_file_contents)\n for line in list_all_file_contents: \n if line[0:4] != 'ATOM':\n head_list.append(line)\n else:\n break\n\n return head_list",
"def _ReadFileHeader(self, file_object):\n data_type_map = self._GetDataTypeMap('recycle_bin_metadata_file_header')\n\n file_header, _ = self._ReadStructureFromFileObject(\n file_object, 0, data_type_map, 'file header')\n\n if self._debug:\n debug_info = self._DEBUG_INFORMATION.get(\n 'recycle_bin_metadata_file_header', None)\n self._DebugPrintStructureObject(file_header, debug_info)\n\n if file_header.format_version not in self._SUPPORTED_FORMAT_VERSION:\n raise errors.ParseError(\n f'Unsupported format version: {file_header.format_version:d}')\n\n return file_header",
"def get_header(self, root):\n header = etree.SubElement(root, \"FileHeader\")\n header.set(\"revMajor\", \"1\")\n header.set(\"revMinor\", \"0\")\n header.set(\"date\", datetime.today().strftime(\"%Y-%m-%dT%H:%M:%S\"))\n header.set(\"description\", \"Generated OpenSCENARIO File\")\n header.set(\"author\", \"QGIS OSCGenerator Plugin\")",
"def read_header(self):\n # Read entire header into memory in one read to minimize Disk I/O.\n self.fh.seek(0)\n hdr = self.fh.read(self.header['header size'])\n\n # Find several markers in the byte-string\n # Each of these may occur more than once, find last.\n polylist_pos = hdr.rfind(b'Poly_list\\x00')\n champslist_pos = hdr.rfind(b'Champs_list\\x00')\n offsetlist_pos = hdr.rfind(b'Offset_list\\x00')\n\n # Find first occurance for these.\n # analparam_pos = hdr.find(b'Anal_param\\x00')\n analparamnano_pos = hdr.find(b'Anal_param_nano\\x00')\n analparamnanobis_pos = hdr.find(b'Anal_param_nano_bis\\x00')\n\n # Turn byte-string into BytesIO file-like object; reading and\n # keeping track of where we are is easier that way than trying to\n # slice byte-string as an array and keeping track of indices.\n hdr = io.BytesIO(hdr)\n\n # Main header\n hdr.seek(12)\n self.header.update(self._main_header(hdr))\n\n # NanoSIMS header, starts with PolyList/ChampsList/OffsetList\n # The following configurations have been found in the wild, so far:\n # 1. NS header\n # 2. PL, NS header\n # 3. PL, CL, OL, NS header\n # 4. PL, CL, OL, partial NS header, PL, NS header, PL, CL, OL,\n # partial NS header, PL, NS header\n # Note: I have not seen any *lists with contents (only length 0).\n # From OpenMIMS documentation I know that PolyList is as list of\n # Species dicts, but don't know how to read ChampsList or OffsetList.\n if polylist_pos < 0:\n # Case 1: No PL marker, so far only found for Real Time Images,\n # beam stability, or secondary ion beam centering files.\n if (self.header['analysis type'].endswith('rti') or\n self.header['file type'] == 35):\n hdr.seek(216, 1)\n elif self.header['file type'] == 31:\n if (self.header['analysis type'].endswith('hmr') or\n self.header['analysis type'].endswith('trolley step scan')):\n hdr.seek(120, 1)\n else:\n # secondary ion beam\n hdr.seek(600, 1)\n else:\n raise NotImplementedError('No PolyList marker found in header '\n 'and not and RTI image. Don\\'t know '\n 'how to continue.')\n elif (champslist_pos < 0 and offsetlist_pos < 0):\n # Case 2: PL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n elif (polylist_pos < champslist_pos < offsetlist_pos):\n # Case 3: PL, CL, OL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n elif (champslist_pos < offsetlist_pos < polylist_pos):\n # Case 4: PL, CL, OL, partial NS header, PL, NS header\n # with possible repeat\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n else:\n raise NotImplementedError(\n 'An unknown order of the Poly/Champs/Offset Lists occured.\\n'\n 'Positions: PL = {}, CL = {}, OL = {}'\n ''.format(polylist_pos, champslist_pos, offsetlist_pos))\n\n self.header['NanoSIMSHeader'] = self._nanosims_header(hdr)\n\n # How much to skip? 
Chomping does not work; what if first value is 0?\n # This is correct so far, for nsheader v8 and 9\n hdr.seek(948, 1)\n self.header['BFields'] = []\n for b in range(self.header['NanoSIMSHeader']['b fields']):\n bf = self._bfield(hdr)\n bf['counting frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['counting frame height'] * \\\n self.header['NanoSIMSHeader']['counting frame width']\n bf['scanning frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['scanning frame height'] * \\\n self.header['NanoSIMSHeader']['scanning frame width']\n bf['working frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['working frame height'] * \\\n self.header['NanoSIMSHeader']['working frame width']\n self.header['BFields'].append(bf)\n # End nanosims_header/bfield based on Poly_list position\n\n # Analytical parameters\n\n # anal_param is not in OpenMIMS at all, represents file\n # Cameca NanoSIMS Data/raw_spec/cur_anal_par\n # However, only few useful things in this section, all of\n # which are also in other sections. Skip.\n # if analparam_pos < 0:\n # msg = 'Anal_param not found in header, skipping.'\n # warnings.warn(msg)\n # else:\n # hdr.seek(analparam_pos + 24)\n # print(analparam_pos)\n # d = {}\n # d['primary ion'], d['primary current begin'], \\\n # d['primary current end'], d['raster'], \\\n # d['X 00 always 1.0'], \\\n # d['X 01 always 1'], d['X 02 always 0'], \\\n # d['X 03 always 1'], d['X 04 always 0'], \\\n # d['X 05 always 0'], d['X 06 (not0 always 0'], \\\n # d['X 07 (not) always 0'], d['X 08 always 0'], \\\n # d['pressure 1'], d['e0w'], d['X 09 always 35 or #'], \\\n # d['X 10 junk'], \\\n # d['X 11 always 1'], d['X 12 always 0'], \\\n # d['X 13 always 1'], d['X 14 always 0'], \\\n # d['X 15 always 0'], d['X 16 always 0'], \\\n # d['X 17 always 0'], d['X 18 always 0'], \\\n # d['X 19 always 0'], d['X 20 always 300'], \\\n # d['X 21'], d['X 22'], d['X 23'], d['X 24'], \\\n # d['pressure 2'], d['X 25 junk'] = \\\n # unpack(self._bo + '24s 4d 8i 48s d i 28s 14i 8s 176s', hdr.read(416))\n #\n # d['pressure 1'] = self._cleanup_string(d['pressure 1'])\n # d['pressure 2'] = self._cleanup_string(d['pressure 2'])\n # d['primary ion'] = self._cleanup_string(d['primary ion'])\n #\n # self.header['AnalParam'] = d\n\n # Called AnalyticalParamNano AND AnalysisParamNano in OpenMIMS.\n # Here, split out Primary and Secondary beam.\n # Represents the file Cameca NanoSIMS Data/raw_spec/cur_anal_par_nano\n if analparamnano_pos < 0:\n msg = 'Anal_param_nano not found in header, '\n msg += 'don\\'t know where PrimaryBeam section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnano_pos + 16)\n self.header['analysis version'], self.header['n50large'], \\\n self.header['comment'] = \\\n unpack(self._bo + '2i 8x 256s', hdr.read(272))\n\n self.header['n50large'] = bool(self.header['n50large'])\n self.header['comment'] = self._cleanup_string(self.header['comment'])\n\n self.header['PrimaryBeam'] = self._primary_beam(hdr)\n self.header['SecondaryBeam'] = self._secondary_beam(hdr)\n self.header['Detectors'] = self._detectors1(hdr)\n\n self.header['SecondaryBeam']['E0S'] = self.header['Detectors'].pop('E0S')\n self.header['SecondaryBeam']['pressure multicollection chamber'] = \\\n self.header['Detectors'].pop('pressure multicollection chamber')\n\n # Add overall mode of machine, based on E0W\n if self.header['SecondaryBeam']['E0W'] < 0:\n self.header['polarity'] = '+'\n else:\n self.header['polarity'] = '-'\n\n # Combine pixel size from 
NanoSIMSHeader and raster from PrimaryBeam\n # Prevent ZeroDivisionError if undefined\n wfw = self.header['NanoSIMSHeader']['working frame width']\n if not wfw:\n wfw = 1\n self.header['NanoSIMSHeader']['working frame raster'] = \\\n self.header['PrimaryBeam']['raster']\n self.header['NanoSIMSHeader']['scanning frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['scanning frame width'] / wfw\n self.header['NanoSIMSHeader']['counting frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['counting frame width'] / wfw\n\n # Header for non-nano SIMS\n magic = unpack(self._bo + 'i', hdr.read(4))[0]\n if magic != 2306:\n msg = 'SIMSHeader magic number not found here at byte {}.'\n msg = msg.format(hdr.tell()-4)\n raise ValueError(msg)\n self.header['SIMSHeader'] = self._sims_header(hdr)\n\n if self.header['analysis version'] >= 5:\n if analparamnanobis_pos < 0:\n msg = 'Anal_param_nano_bis not found in header, '\n msg += 'don\\'t know where second Detectors section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnanobis_pos + 24)\n self.header['Detectors'].update(self._detectors2(hdr))\n xl = self.header['Detectors'].pop('exit slit xl')\n for n in range(7):\n det = self.header['Detectors']['Detector {}'.format(n+1)]\n w = list(det['exit slit widths'])\n w[2] = xl[5*n:5*(n+1)]\n det['exit slit widths'] = tuple(w)\n h = list(det['exit slit heights'])\n h[2] = xl[5*(n+1):5*(n+2)]\n det['exit slit heights'] = tuple(h)\n\n # Presets\n self.header['Presets'] = self._presets(hdr)\n\n # End Detectors pt 2 based on anal_param_nano_bis position\n\n # Last part of detectors\n if self.header['analysis version'] >= 6:\n d3 = self._detectors3(hdr)\n self.header['Detectors']['TIC'] = d3.pop('TIC')\n for k, v in d3.items():\n self.header['Detectors'][k].update(v)\n # End PrimaryBeam/SecondaryBeam/Presets/Detectors based on anal_param_nano position\n\n # Image header, at end of overall header\n if self.header['file type'] == 26:\n hdr.seek(-176, 2)\n self.header['Isotopes'] = self._isotopes_hdr(hdr)\n elif self.header['file type'] in (21, 22, 31, 35):\n # no image header for line scan or beam stability\n pass\n else:\n hdr.seek(-84, 2)\n self.header['Image'] = self._image_hdr(hdr)\n\n # Done reading header. Check for and read external files for extra info.\n if os.path.exists(os.path.splitext(self.filename)[0] + '.chk_is'):\n self._read_chk_is()",
"def add_header(header, filename, i):\n with open(filename, 'r+') as f:\n content = f.readlines()\n content[0] = header\n f.seek(0,0)\n f.write(f'<!-- Generated with XMLGenerator.py {__ver__} | {get_app_name(i)} -->\\n')\n f.writelines(content)",
"def print_header(filename):\n\n date_list = filename[0:10].split('_')\n # Hint: CWB Metadata cannot contain dashes -\n name = 'id=\"{}\"'.format(filename[0:-4].replace('-', '_'))\n date = 'date=\"{}\"'.format('_'.join(date_list))\n year = 'year=\"{}\"'.format(date_list[0])\n month = 'month=\"{}\"'.format(date_list[1])\n day = 'day=\"{}\"'.format(date_list[2])\n\n header = '<text {} {} {} {} {}>'.format(name, date, year, month, day)\n\n print(header)",
"def _read_header(self, line):\n try:\n creation_date = datetime.strptime(line[23:33], '%y%m%d%H%M')\n except ValueError as err:\n print('Error parsing file creation date -> ' + str(err))\n creation_date = '000000'\n\n self.file_header = {'Priority Code': line[1:3],\n 'Immediate Destination': line[3:13].strip(),\n 'Immediate Origin': line[13:23].strip(),\n 'Creation Date': creation_date,\n 'File ID Modifier': line[33],\n 'Record Size': int(line[34:37].strip()),\n 'Blocking Factor': int(line[37:39]),\n 'Format Code': line[39],\n 'Immediate Destination Name': line[40:63].strip(),\n 'Immediate Origin Name': line[63:86].strip(),\n 'Reference Code': line[86:93]}",
"def write_opening_header(final_file, **header_params):\n\n final_file.seek(0) # Reset file pointer.\n file_contents = final_file.read() # Save content.\n\n final_file.seek(0) # Write at the top.\n\n if header_params[\"extensions\"]:\n if len(header_params[\"extensions\"]) > 1:\n write_data(\n final_file,\n \"# Title: StevenBlack/hosts with the {0} and {1} extensions\\n#\\n\".format(\n \", \".join(header_params[\"extensions\"][:-1]),\n header_params[\"extensions\"][-1],\n ),\n )\n else:\n write_data(\n final_file,\n \"# Title: StevenBlack/hosts with the {0} extension\\n#\\n\".format(\n \", \".join(header_params[\"extensions\"])\n ),\n )\n else:\n write_data(final_file, \"# Title: StevenBlack/hosts\\n#\\n\")\n\n write_data(\n final_file,\n \"# This hosts file is a merged collection \"\n \"of hosts from reputable sources,\\n\",\n )\n write_data(final_file, \"# with a dash of crowd sourcing via GitHub\\n#\\n\")\n write_data(\n final_file,\n \"# Date: \" + time.strftime(\"%d %B %Y %H:%M:%S (%Z)\", time.gmtime()) + \"\\n\",\n )\n\n if header_params[\"extensions\"]:\n write_data(\n final_file,\n \"# Extensions added to this file: \"\n + \", \".join(header_params[\"extensions\"])\n + \"\\n\",\n )\n\n write_data(\n final_file,\n (\n \"# Number of unique domains: {:,}\\n#\\n\".format(\n header_params[\"numberofrules\"]\n )\n ),\n )\n write_data(\n final_file,\n \"# Fetch the latest version of this file: \"\n \"https://raw.githubusercontent.com/StevenBlack/hosts/master/\"\n + path_join_robust(header_params[\"outputsubfolder\"], \"\").replace(\"\\\\\", \"/\")\n + \"hosts\\n\",\n )\n write_data(\n final_file, \"# Project home page: https://github.com/StevenBlack/hosts\\n\"\n )\n write_data(\n final_file,\n \"# Project releases: https://github.com/StevenBlack/hosts/releases\\n#\\n\",\n )\n write_data(\n final_file,\n \"# ===============================================================\\n\",\n )\n write_data(final_file, \"\\n\")\n\n if not header_params[\"skipstatichosts\"]:\n write_data(final_file, \"127.0.0.1 localhost\\n\")\n write_data(final_file, \"127.0.0.1 localhost.localdomain\\n\")\n write_data(final_file, \"127.0.0.1 local\\n\")\n write_data(final_file, \"255.255.255.255 broadcasthost\\n\")\n write_data(final_file, \"::1 localhost\\n\")\n write_data(final_file, \"::1 ip6-localhost\\n\")\n write_data(final_file, \"::1 ip6-loopback\\n\")\n write_data(final_file, \"fe80::1%lo0 localhost\\n\")\n write_data(final_file, \"ff00::0 ip6-localnet\\n\")\n write_data(final_file, \"ff00::0 ip6-mcastprefix\\n\")\n write_data(final_file, \"ff02::1 ip6-allnodes\\n\")\n write_data(final_file, \"ff02::2 ip6-allrouters\\n\")\n write_data(final_file, \"ff02::3 ip6-allhosts\\n\")\n write_data(final_file, \"0.0.0.0 0.0.0.0\\n\")\n\n if platform.system() == \"Linux\":\n write_data(final_file, \"127.0.1.1 \" + socket.gethostname() + \"\\n\")\n write_data(final_file, \"127.0.0.53 \" + socket.gethostname() + \"\\n\")\n\n write_data(final_file, \"\\n\")\n\n preamble = path_join_robust(BASEDIR_PATH, \"myhosts\")\n maybe_copy_example_file(preamble)\n\n if os.path.isfile(preamble):\n with open(preamble, \"r\") as f:\n write_data(final_file, f.read())\n\n final_file.write(file_contents)",
"def prepend_header(rendered_header):\n debug(\"adding header\")\n _range = CURRENT_BUFFER.range(0, 0)\n _range.append(rendered_header.split(\"\\n\"))",
"def read_header(datafile):\n\thead = []\n\tf = open(datafile,'r')\n\tfor i,line in enumerate(f):\n\t\tif i is 10: break\n\t\thead += [line]\n\tf.close()\n\treturn head",
"def prepend_header(filename, header=None, drop=0):\n for no, line in enumerate(fileinput.input(filename, inplace=True)):\n # it's meaningless to set drop to -1, -2, ...\n if no == 0 and drop == 0:\n if header:\n print(header)\n print(line, end='')\n # replace\n elif no + 1 == drop:\n if header:\n print(header)\n elif no >= drop:\n print(line, end='')\n else:\n # no + 1 < drop\n continue",
"def setFirstFileLabel(self):\n pp = self.rendererWindow.getCurrentPipelinePage()\n\n if pp is None:\n ext = \"\"\n\n else:\n ext = pp.extension\n\n text = \"%s%s%s\" % (self.fileprefix.text(), self.numberFormat, ext)\n\n foundFormat = False\n testfn = text % self.minIndex\n if not (os.path.isfile(testfn) or os.path.isfile(testfn+'.gz') or os.path.isfile(testfn+'.bz2')):\n self.logger.debug(\"First file does not exist; checking other number formats\")\n for i, nfmt in enumerate(self.numberFormats):\n if nfmt == self.numberFormat:\n continue\n\n testText = \"%s%s%s\" % (self.fileprefix.text(), nfmt, ext)\n testfn = testText % self.minIndex\n if os.path.isfile(testfn) or os.path.isfile(testfn+'.gz') or os.path.isfile(testfn+'.bz2'):\n foundFormat = True\n break\n\n if foundFormat:\n self.logger.debug(\"Found suitable number format: '%s'\", nfmt)\n self.numberFormatCombo.setCurrentIndex(i)\n\n if not foundFormat:\n self.firstFileLabel.setText(text % (self.minIndex,))",
"def _readCommonHeader(self):\n for i in range(self.ignore_header_lines):\n self.ignored_header_lines.append(nappy.utils.text_parser.readItemFromLine(self.file.readline()))\n \n self._readTopLine()\n self.ONAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.ORG = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.SNAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.MNAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n (self.IVOL, self.NVOL) = nappy.utils.text_parser.readItemsFromLine(self.file.readline(), 2, int)\n dates = nappy.utils.text_parser.readItemsFromLine(self.file.readline(), 6, int)\n (self.DATE, self.RDATE) = (dates[:3], dates[3:])\n self.NLHEAD += self.ignore_header_lines",
"def seqIo_newHeader(fName, info):\n d, n = os.path.split(fName)\n if d==[]:d='./'\n tName=fName[:-4] + '_new' + time.strftime(\"%d_%m_%Y\") + fName[-4:]\n sr = seqIo_reader(fName)\n sw = seqIo_writer(tName,info)\n n=sr.header['numFrames']\n for f in range(n):\n I,ts=sr.getFrame(f)\n sw.addFrame(I,ts)\n sr.close()\n sw.close()",
"def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h",
"def getHeaders(self):\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tif len(self.line) == 7:\n\t\t\tself.header.kod = self.line[0]\n\t\t\tself.header.ver = self.line[1]\n\t\t\tpID_date = self.line[2]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_date)\n\t\t\tpID_time = self.line[3]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_time)\n\t\t\tself.header.knod = int(self.line[4])\n\t\t\tself.header.nps = int(self.line[5])\n\t\t\tself.header.rnr = int(self.line[6])\n\t\telif len(self.line) == 3:\n\t\t\tself.header.knod = int(self.line[0])\n\t\t\tself.header.nps = int(self.line[1])\n\t\t\tself.header.rnr = int(self.line[2])\n\t\t\t\n\n\t\tself.header.title = self.mctalFile.readline().strip()\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tself.header.ntal = int(self.line[1])\n\n\t\tif self.header.ntal == 0:\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mNo tallies in this MCTAL file. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tif len(self.line) == 4:\n\t\t\tself.header.npert = int(self.line[3])\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mMCTAL file with perturbation card. Not supported. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\twhile self.line[0].lower() != \"tally\":\n\t\t\tfor l in self.line: self.header.ntals = np.append(self.header.ntals,int(l))\n\t\t\tself.line = self.mctalFile.readline().split()",
"def first(self):\n if self.is_empty():\n raise Empty(\"List is empty!\")\n return self._header._next._element"
] | [
"0.6566252",
"0.63757396",
"0.6336485",
"0.620483",
"0.61852175",
"0.6170607",
"0.6093349",
"0.6035563",
"0.60349864",
"0.6021",
"0.60165304",
"0.5994707",
"0.5994172",
"0.59916407",
"0.5964539",
"0.59252673",
"0.591808",
"0.59101254",
"0.5888349",
"0.5882781",
"0.58517176",
"0.5833451",
"0.5830455",
"0.57906806",
"0.5748265",
"0.5713931",
"0.5701462",
"0.56992054",
"0.5693686",
"0.5691205"
] | 0.7658299 | 0 |
Get data from a csv file. In the processing this is used for getting data from the concatenated csv file but can be used for any. Write it into a pandas dataframe. date_column_list should be [1, 19] or similar, with the index of the columns that should be converted to dates | def get_data_from_csv(filepath, filename, datatypes, date_column_list):
concatenated_file = os.path.join(filepath, filename)
dataframe = get_data_from_csv_full_path(concatenated_file, datatypes, date_column_list)
return dataframe | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_data_from_csv_full_path(filepath, datatypes, date_column_list):\n\n dataframe = pandas.read_csv(filepath, dtype=datatypes, date_parser=pandas.to_datetime, parse_dates=date_column_list)\n\n return dataframe",
"def read_csv(config, input_file_path):\n header = read_csv_header(input_file_path)\n\n general = config['general']\n date_cols_types = ['date_cols',\n 'first_exp_date_cols',\n 'last_exp_date_cols',\n 'index_date_col',\n 'lookback_date_col']\n date_cols = utils.generate_list_columns(header, config, date_cols_types)\n # it turns out we should read the dates first in as strings\n date_cols_types = {date_col: str for date_col in date_cols}\n df = pd.read_csv(input_file_path, dtype=date_cols_types)\n # convert string dates to dates using the date format\n # Large dataset, conversion done in parallel\n if len(date_cols) > 50 or (df.shape[0] > 20000 and len(date_cols) > 1):\n print('parallel!')\n # we have to do this in parallel otherwise it takes forever\n df[date_cols] = parse_utils.apply_parallel(df[date_cols],\n parse_utils.parse_dates,\n format=general['date_format'])\n # Small dataset, faster to convert in non-parallel fashion\n elif len(date_cols) > 0:\n df[date_cols] = df[date_cols].apply(pd.to_datetime,\n format=general['date_format'])\n return df",
"def read_csv():\n csv_file = \"dow.csv\"\n\n # read the data from the csv file, parsing the Dates to make the x-axis, setting index_col to zero to remove it\n data_frame = pd.read_csv(csv_file, parse_dates=True, index_col=0)\n return data_frame",
"def _get_df_from_csv(self, filename):\n df = pd.read_csv(filename)\n df.set_index('Date', drop=True, inplace=True)\n df.index = pd.to_datetime(df.index)\n return df",
"def read_load_data_from_csv(csv_path):\n # Load the original DataFrame, use easier-to-read column names, and drop unnecessary column\n original_df = pd.read_csv(csv_path).rename(columns={\"OperDay\" : \"Date\"}).drop([\"TOTAL\", \"DSTFlag\"],axis=1)\n\n original_df.name = csv_path.split(\"_\")[1]\n\n # Combine the originally separate date and hour columns into a single DateTime column\n return combine_date_and_hour_columns(original_df)",
"def load_csv(path):\n df = pd.read_csv(path)\n df['Report Date'] = df['Report Date'].apply(lambda x: x[2:7])\n return df",
"def read_csv(filename, cols=None, nrows=None):\n\n datecols = ['date_time', 'srch_ci', 'srch_co']\n dateparser = lambda x: pd.to_datetime(x, format='%Y-%m-%d %H:%M:%S',\n errors='coerce')\n dtypes = {\n 'id': np.uint32,\n 'site_name': np.uint8,\n 'posa_continent': np.uint8,\n 'user_location_country': np.uint16,\n 'user_location_region': np.uint16,\n 'user_location_city': np.uint16,\n 'orig_destination_distance': np.float32,\n 'user_id': np.uint32,\n 'is_mobile': bool,\n 'is_package': bool,\n 'channel': np.uint8,\n 'srch_adults_cnt': np.uint8,\n 'srch_children_cnt': np.uint8,\n 'srch_rm_cnt': np.uint8,\n 'srch_destination_id': np.uint32,\n 'srch_destination_type_id': np.uint8,\n 'is_booking': bool,\n 'cnt': np.uint64,\n 'hotel_continent': np.uint8,\n 'hotel_country': np.uint16,\n 'hotel_market': np.uint16,\n 'hotel_cluster': np.uint8,\n }\n\n df = pd.read_csv(\n filename,\n nrows=nrows,\n usecols=cols,\n dtype=dtypes,\n parse_dates=[col for col in datecols if col in cols],\n date_parser=dateparser,\n )\n\n if 'date_time' in df.columns:\n df['month'] = df['date_time'].dt.month.astype(np.uint8)\n df['year'] = df['date_time'].dt.year.astype(np.uint16)\n\n return df",
"def read_csv(self, csv_file):\n mylog.debug('Reading csv file %s for data' % csv_file)\n csv_data = pandas.read_csv(csv_file)\n mylog.debug('Read of csv file complete.')\n #mylog.debug('%s' % csv_data)\n #sometimes the csv has an empty dataframe #\n if csv_data.empty:\n mylog.debug('Data frame is empty; repopuating data')\n csv_info = []\n for item in csv_data:\n #add the data one cell at a time to the list #\n #for some reason, some csvs have the data #\n #with random decimal points #\n csv_info.append(item.split(\".\")[0])\n df = pandas.DataFrame(columns=csv_info)\n df.loc[0]=csv_info\n #write the data from the list back into the cells#\n #one at a time #\n for column in range(0, len(csv_info)): \n df.iloc[0,column] = csv_info[column]\n csv_data = df \n return csv_data",
"def get_date_df():\n dt_df = pd.read_csv('data/date.csv')\n dt_df['from'] = pd.to_datetime(dt_df['from'])\n dt_df['from_year'] = dt_df['from'].apply(lambda x: x.year)\n dt_df['from_month'] = dt_df['from'].apply(lambda x: x.month)\n dt_df['to'] = pd.to_datetime(dt_df['to'])\n dt_df['to_year'] = dt_df['from'].apply(lambda x: x.year)\n dt_df['to_month'] = dt_df['from'].apply(lambda x: x.month)\n return dt_df",
"def load_data_csv(inputfile, list_names, parse_dates_list):\n\n logger.info(\"loading data...\")\n try:\n os.path.exists(inputfile)\n except:\n raise IOError(\"Input file is missing\")\n\n dataframe = pd.read_csv(inputfile,\n header=None,\n names=list_names,\n parse_dates=parse_dates_list,\n date_parser=pd.to_datetime,\n delimiter='\\t'\n )\n logger.info(\"data loaded\")\n\n return dataframe",
"def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file",
"def data_input(path, complete=False, nrows=10000):\n\n if complete:\n df = pd.read_csv(path)\n\n else:\n df = pd.read_csv(path, nrows=nrows)\n df[\"date_time\"] = pd.to_datetime(\n df[\"date_time\"], format=\"%Y-%m-%d %H:%M:%S\")\n\n #Maybe we could get rid of the exact timestamp if not useful\n #-> .apply(lambda x: x.date())\n return df",
"def merge_data(csv_files, delimiter = ',', parse_dates = ['Date']):\n \n for csv in csv_files:\n \n # date formats in source data is slightly different (/2019 vs. /19), \n # TODO: check for better method to catch this error\n \n \n try:\n df_new = pd.read_csv(csv, parse_dates=parse_dates,\n date_parser=lambda x: pd.datetime.strptime(str(x), '%d/%m/%Y'), delimiter=delimiter)\n \n except:\n df_new = pd.read_csv(csv, parse_dates=parse_dates,\n date_parser=lambda x: pd.datetime.strptime(str(x), '%d/%m/%y'), delimiter=delimiter)\n \n \n\n \n df_new['season'] = df_new.Date.max().year # add season column, defined as the year of the last matchday\n df_new['first_match_day'] = False \n df_new.loc[0:9, 'first_match_day'] = True # declare first 10 games as first match day\n df_new['matchDay'] = 0\n \n try:\n df = df.append(df_new,sort=False)\n except:\n df = df_new\n \n return df",
"def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()",
"def load_columns(self, csv_data):\n column_date = []\n column_time = []\n column_hold = []\n column_outcome = []\n for row in dataframe_to_rows(csv_data, index=False):\n cell_date = row[18]\n cell_date = cell_date.split(': ')[1]\n cell_time = row[23]\n cell_hold = row[24]\n cell_outcome = row[25]\n column_date.append(cell_date)\n column_time.append(cell_time)\n column_hold.append(cell_hold)\n column_outcome.append(cell_outcome)\n return column_date, column_time, column_hold, column_outcome",
"def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)",
"def extractserie(filename, colname):\n values = []\n indexes =[]\n chunksize = 1000\n for i, chunk in enumerate(pd.read_csv(filename, chunksize=chunksize, sep=';', encoding='latin1')):\n print(i)\n values.append(chunk.loc[:, colname])\n # indexes.append(chunk.index)\n print('check1')\n df_sel = pd.concat(values, axis=0)\n # df_sel.index = pd.DatetimeIndex(indexes)\n return df_sel",
"def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')",
"def _load_stored_csv(path: Union[Path, str]) -> Union[pd.DataFrame, pd.Series]:\n data = pd.read_csv(path, index_col=0, parse_dates=[0]).round(12)\n data.index = data.index.tz_convert(REFERENCE_TZ)\n return data",
"def import_data(file):\n df = pd.read_csv(file, parse_dates=True, keep_date_col=True, sep=';')\n df = reduce_mem_usage(df)\n return df",
"def _csv_to_df(csv_path, headers):\n\n # Assume all columns are strings\n columns_types = {i: str for i, header in enumerate(headers)}\n\n temp_df = pd.read_csv(csv_path, converters=columns_types, skip_blank_lines=False)\n # TODO: check that there are only two columns of type string, then convert to our format\n temp_df.columns = headers\n # Add the column split, this is all training data\n temp_df['annotation_unit_id'] = None\n return temp_df",
"def csv_to_dataframe(file_name: str) -> pd.DataFrame:\n\n # check if csv contains timestamps,\n # find where the data starts if so, raise error otherwise\n has_header, header_lines, columns_list = csv_has_timestamps(file_name, 50)\n\n if not has_header:\n sys.stdout.write(f\"{file_name} does not appear to have timestamps\\n\")\n raise TypeError\n\n file = open(file_name, \"r\")\n\n # based on return from __csv_process_header(), move the\n # file pointer forward until we find the start of the data\n for i in range(header_lines):\n file.readline()\n\n # finally, call the pandas library function\n data_frame = pd.read_csv(file,\n header=None,\n names=columns_list,\n index_col=None,\n parse_dates=True)\n\n # convert the relevant rows to timestamps and floats\n data_frame = __convert_timestamps(data_frame)\n\n file.close()\n return data_frame",
"def load_data(filepath):\n\tlogging.info(f\"Load data from {filepath}\")\n\tdf = pd.read_csv(filepath)\n\tdf = set_dtypes(df)\n\tdf = df.sort_values(by='query_date')\n\n\treturn df",
"def load_symbol_universe_data_from_csv(self, csv_fullpath):\n\n print(\"[{}] [INFO] Loading symbol universe data from csv...\".format(datetime.now().isoformat()))\n\n df = pd.read_csv(csv_fullpath)\n\n #--------------------------------------------------------------------------\n # Convert date column to type numpy datetime64.\n #--------------------------------------------------------------------------\n df.date = pd.to_datetime(df.date)\n\n return df",
"def read_weather_data_from_csv(csv_path):\n\n # Read the original DataFrame and select the relevant columns\n original_df = pd.read_csv(csv_path)[[\"DateUTC\",\"TemperatureF\"]]\n\n # Round up the hour of each Date to the nearest whole hour\n original_df[\"Date\"] = original_df[\"DateUTC\"].apply(round_utc_hour_up)\n\n # Rename Temperature field to include city name\n city = csv_path.split(\"_\")[1].split(\"/\")[1]\n original_df[city + \"_TemperatureF\"] = original_df[\"TemperatureF\"]\n original_df = original_df.drop([\"TemperatureF\", \"DateUTC\"], axis=1)\n\n return original_df",
"def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)",
"def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')",
"def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values",
"def read_and_prepare_dataframe(start_date='1980-01-01'):\n \n # Read the dataset and rename 'dt' to 'Date'\n df = pd.read_csv('Data/GlobalLandTemperaturesByCountry.csv', parse_dates=['dt'])\n df.rename(columns={'dt':'Date'}, inplace=True)\n \n # Filter for Canada\n df = df[df['Country']=='Canada']\n \n # Filter out data prior to start date\n df = df[df['Date'] >= start_date]\n \n # To ensure data is sorted\n df = df.sort_values('Date')\n \n # Set index to Date and return the final dataframe\n return df.set_index('Date')",
"def read_csv():"
] | [
"0.75140345",
"0.72193193",
"0.68739146",
"0.6817987",
"0.67866164",
"0.6698974",
"0.6677149",
"0.6672171",
"0.66711706",
"0.6670936",
"0.64637154",
"0.6446584",
"0.64413923",
"0.64293706",
"0.6426104",
"0.64083576",
"0.6387908",
"0.6373854",
"0.63398945",
"0.6335341",
"0.6331928",
"0.63123107",
"0.6309677",
"0.6298261",
"0.6279373",
"0.62759984",
"0.6268494",
"0.6261267",
"0.6251595",
"0.62441707"
] | 0.7665351 | 0 |
Get data from a csv file. In the processing this is used for getting data from the concatenated csv file but can be used for any. Write it into a pandas dataframe. date_column_list should be [1, 19] or similar, with the index of the columns that should be converted to dates | def get_data_from_csv_full_path(filepath, datatypes, date_column_list):
dataframe = pandas.read_csv(filepath, dtype=datatypes, date_parser=pandas.to_datetime, parse_dates=date_column_list)
return dataframe | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_data_from_csv(filepath, filename, datatypes, date_column_list):\n\n concatenated_file = os.path.join(filepath, filename)\n\n dataframe = get_data_from_csv_full_path(concatenated_file, datatypes, date_column_list)\n\n return dataframe",
"def read_csv(config, input_file_path):\n header = read_csv_header(input_file_path)\n\n general = config['general']\n date_cols_types = ['date_cols',\n 'first_exp_date_cols',\n 'last_exp_date_cols',\n 'index_date_col',\n 'lookback_date_col']\n date_cols = utils.generate_list_columns(header, config, date_cols_types)\n # it turns out we should read the dates first in as strings\n date_cols_types = {date_col: str for date_col in date_cols}\n df = pd.read_csv(input_file_path, dtype=date_cols_types)\n # convert string dates to dates using the date format\n # Large dataset, conversion done in parallel\n if len(date_cols) > 50 or (df.shape[0] > 20000 and len(date_cols) > 1):\n print('parallel!')\n # we have to do this in parallel otherwise it takes forever\n df[date_cols] = parse_utils.apply_parallel(df[date_cols],\n parse_utils.parse_dates,\n format=general['date_format'])\n # Small dataset, faster to convert in non-parallel fashion\n elif len(date_cols) > 0:\n df[date_cols] = df[date_cols].apply(pd.to_datetime,\n format=general['date_format'])\n return df",
"def read_csv():\n csv_file = \"dow.csv\"\n\n # read the data from the csv file, parsing the Dates to make the x-axis, setting index_col to zero to remove it\n data_frame = pd.read_csv(csv_file, parse_dates=True, index_col=0)\n return data_frame",
"def _get_df_from_csv(self, filename):\n df = pd.read_csv(filename)\n df.set_index('Date', drop=True, inplace=True)\n df.index = pd.to_datetime(df.index)\n return df",
"def read_load_data_from_csv(csv_path):\n # Load the original DataFrame, use easier-to-read column names, and drop unnecessary column\n original_df = pd.read_csv(csv_path).rename(columns={\"OperDay\" : \"Date\"}).drop([\"TOTAL\", \"DSTFlag\"],axis=1)\n\n original_df.name = csv_path.split(\"_\")[1]\n\n # Combine the originally separate date and hour columns into a single DateTime column\n return combine_date_and_hour_columns(original_df)",
"def load_csv(path):\n df = pd.read_csv(path)\n df['Report Date'] = df['Report Date'].apply(lambda x: x[2:7])\n return df",
"def read_csv(filename, cols=None, nrows=None):\n\n datecols = ['date_time', 'srch_ci', 'srch_co']\n dateparser = lambda x: pd.to_datetime(x, format='%Y-%m-%d %H:%M:%S',\n errors='coerce')\n dtypes = {\n 'id': np.uint32,\n 'site_name': np.uint8,\n 'posa_continent': np.uint8,\n 'user_location_country': np.uint16,\n 'user_location_region': np.uint16,\n 'user_location_city': np.uint16,\n 'orig_destination_distance': np.float32,\n 'user_id': np.uint32,\n 'is_mobile': bool,\n 'is_package': bool,\n 'channel': np.uint8,\n 'srch_adults_cnt': np.uint8,\n 'srch_children_cnt': np.uint8,\n 'srch_rm_cnt': np.uint8,\n 'srch_destination_id': np.uint32,\n 'srch_destination_type_id': np.uint8,\n 'is_booking': bool,\n 'cnt': np.uint64,\n 'hotel_continent': np.uint8,\n 'hotel_country': np.uint16,\n 'hotel_market': np.uint16,\n 'hotel_cluster': np.uint8,\n }\n\n df = pd.read_csv(\n filename,\n nrows=nrows,\n usecols=cols,\n dtype=dtypes,\n parse_dates=[col for col in datecols if col in cols],\n date_parser=dateparser,\n )\n\n if 'date_time' in df.columns:\n df['month'] = df['date_time'].dt.month.astype(np.uint8)\n df['year'] = df['date_time'].dt.year.astype(np.uint16)\n\n return df",
"def read_csv(self, csv_file):\n mylog.debug('Reading csv file %s for data' % csv_file)\n csv_data = pandas.read_csv(csv_file)\n mylog.debug('Read of csv file complete.')\n #mylog.debug('%s' % csv_data)\n #sometimes the csv has an empty dataframe #\n if csv_data.empty:\n mylog.debug('Data frame is empty; repopuating data')\n csv_info = []\n for item in csv_data:\n #add the data one cell at a time to the list #\n #for some reason, some csvs have the data #\n #with random decimal points #\n csv_info.append(item.split(\".\")[0])\n df = pandas.DataFrame(columns=csv_info)\n df.loc[0]=csv_info\n #write the data from the list back into the cells#\n #one at a time #\n for column in range(0, len(csv_info)): \n df.iloc[0,column] = csv_info[column]\n csv_data = df \n return csv_data",
"def get_date_df():\n dt_df = pd.read_csv('data/date.csv')\n dt_df['from'] = pd.to_datetime(dt_df['from'])\n dt_df['from_year'] = dt_df['from'].apply(lambda x: x.year)\n dt_df['from_month'] = dt_df['from'].apply(lambda x: x.month)\n dt_df['to'] = pd.to_datetime(dt_df['to'])\n dt_df['to_year'] = dt_df['from'].apply(lambda x: x.year)\n dt_df['to_month'] = dt_df['from'].apply(lambda x: x.month)\n return dt_df",
"def load_data_csv(inputfile, list_names, parse_dates_list):\n\n logger.info(\"loading data...\")\n try:\n os.path.exists(inputfile)\n except:\n raise IOError(\"Input file is missing\")\n\n dataframe = pd.read_csv(inputfile,\n header=None,\n names=list_names,\n parse_dates=parse_dates_list,\n date_parser=pd.to_datetime,\n delimiter='\\t'\n )\n logger.info(\"data loaded\")\n\n return dataframe",
"def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file",
"def data_input(path, complete=False, nrows=10000):\n\n if complete:\n df = pd.read_csv(path)\n\n else:\n df = pd.read_csv(path, nrows=nrows)\n df[\"date_time\"] = pd.to_datetime(\n df[\"date_time\"], format=\"%Y-%m-%d %H:%M:%S\")\n\n #Maybe we could get rid of the exact timestamp if not useful\n #-> .apply(lambda x: x.date())\n return df",
"def merge_data(csv_files, delimiter = ',', parse_dates = ['Date']):\n \n for csv in csv_files:\n \n # date formats in source data is slightly different (/2019 vs. /19), \n # TODO: check for better method to catch this error\n \n \n try:\n df_new = pd.read_csv(csv, parse_dates=parse_dates,\n date_parser=lambda x: pd.datetime.strptime(str(x), '%d/%m/%Y'), delimiter=delimiter)\n \n except:\n df_new = pd.read_csv(csv, parse_dates=parse_dates,\n date_parser=lambda x: pd.datetime.strptime(str(x), '%d/%m/%y'), delimiter=delimiter)\n \n \n\n \n df_new['season'] = df_new.Date.max().year # add season column, defined as the year of the last matchday\n df_new['first_match_day'] = False \n df_new.loc[0:9, 'first_match_day'] = True # declare first 10 games as first match day\n df_new['matchDay'] = 0\n \n try:\n df = df.append(df_new,sort=False)\n except:\n df = df_new\n \n return df",
"def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()",
"def load_columns(self, csv_data):\n column_date = []\n column_time = []\n column_hold = []\n column_outcome = []\n for row in dataframe_to_rows(csv_data, index=False):\n cell_date = row[18]\n cell_date = cell_date.split(': ')[1]\n cell_time = row[23]\n cell_hold = row[24]\n cell_outcome = row[25]\n column_date.append(cell_date)\n column_time.append(cell_time)\n column_hold.append(cell_hold)\n column_outcome.append(cell_outcome)\n return column_date, column_time, column_hold, column_outcome",
"def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)",
"def extractserie(filename, colname):\n values = []\n indexes =[]\n chunksize = 1000\n for i, chunk in enumerate(pd.read_csv(filename, chunksize=chunksize, sep=';', encoding='latin1')):\n print(i)\n values.append(chunk.loc[:, colname])\n # indexes.append(chunk.index)\n print('check1')\n df_sel = pd.concat(values, axis=0)\n # df_sel.index = pd.DatetimeIndex(indexes)\n return df_sel",
"def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')",
"def _load_stored_csv(path: Union[Path, str]) -> Union[pd.DataFrame, pd.Series]:\n data = pd.read_csv(path, index_col=0, parse_dates=[0]).round(12)\n data.index = data.index.tz_convert(REFERENCE_TZ)\n return data",
"def import_data(file):\n df = pd.read_csv(file, parse_dates=True, keep_date_col=True, sep=';')\n df = reduce_mem_usage(df)\n return df",
"def _csv_to_df(csv_path, headers):\n\n # Assume all columns are strings\n columns_types = {i: str for i, header in enumerate(headers)}\n\n temp_df = pd.read_csv(csv_path, converters=columns_types, skip_blank_lines=False)\n # TODO: check that there are only two columns of type string, then convert to our format\n temp_df.columns = headers\n # Add the column split, this is all training data\n temp_df['annotation_unit_id'] = None\n return temp_df",
"def csv_to_dataframe(file_name: str) -> pd.DataFrame:\n\n # check if csv contains timestamps,\n # find where the data starts if so, raise error otherwise\n has_header, header_lines, columns_list = csv_has_timestamps(file_name, 50)\n\n if not has_header:\n sys.stdout.write(f\"{file_name} does not appear to have timestamps\\n\")\n raise TypeError\n\n file = open(file_name, \"r\")\n\n # based on return from __csv_process_header(), move the\n # file pointer forward until we find the start of the data\n for i in range(header_lines):\n file.readline()\n\n # finally, call the pandas library function\n data_frame = pd.read_csv(file,\n header=None,\n names=columns_list,\n index_col=None,\n parse_dates=True)\n\n # convert the relevant rows to timestamps and floats\n data_frame = __convert_timestamps(data_frame)\n\n file.close()\n return data_frame",
"def load_data(filepath):\n\tlogging.info(f\"Load data from {filepath}\")\n\tdf = pd.read_csv(filepath)\n\tdf = set_dtypes(df)\n\tdf = df.sort_values(by='query_date')\n\n\treturn df",
"def load_symbol_universe_data_from_csv(self, csv_fullpath):\n\n print(\"[{}] [INFO] Loading symbol universe data from csv...\".format(datetime.now().isoformat()))\n\n df = pd.read_csv(csv_fullpath)\n\n #--------------------------------------------------------------------------\n # Convert date column to type numpy datetime64.\n #--------------------------------------------------------------------------\n df.date = pd.to_datetime(df.date)\n\n return df",
"def read_weather_data_from_csv(csv_path):\n\n # Read the original DataFrame and select the relevant columns\n original_df = pd.read_csv(csv_path)[[\"DateUTC\",\"TemperatureF\"]]\n\n # Round up the hour of each Date to the nearest whole hour\n original_df[\"Date\"] = original_df[\"DateUTC\"].apply(round_utc_hour_up)\n\n # Rename Temperature field to include city name\n city = csv_path.split(\"_\")[1].split(\"/\")[1]\n original_df[city + \"_TemperatureF\"] = original_df[\"TemperatureF\"]\n original_df = original_df.drop([\"TemperatureF\", \"DateUTC\"], axis=1)\n\n return original_df",
"def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)",
"def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')",
"def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values",
"def read_and_prepare_dataframe(start_date='1980-01-01'):\n \n # Read the dataset and rename 'dt' to 'Date'\n df = pd.read_csv('Data/GlobalLandTemperaturesByCountry.csv', parse_dates=['dt'])\n df.rename(columns={'dt':'Date'}, inplace=True)\n \n # Filter for Canada\n df = df[df['Country']=='Canada']\n \n # Filter out data prior to start date\n df = df[df['Date'] >= start_date]\n \n # To ensure data is sorted\n df = df.sort_values('Date')\n \n # Set index to Date and return the final dataframe\n return df.set_index('Date')",
"def read_csv():"
] | [
"0.76642233",
"0.72183436",
"0.6874251",
"0.68164563",
"0.678676",
"0.66988724",
"0.6675445",
"0.66734153",
"0.6670892",
"0.66697973",
"0.64643896",
"0.64462715",
"0.64411694",
"0.6430269",
"0.64287794",
"0.6408976",
"0.638692",
"0.6373324",
"0.6339154",
"0.6334611",
"0.63313967",
"0.63109165",
"0.63091934",
"0.6298552",
"0.6280165",
"0.62753135",
"0.62687266",
"0.62618476",
"0.6251099",
"0.62454164"
] | 0.75128126 | 1 |
Create csv files from the data as it is grouped by day. | def output_daily_files(dataframe, path, filename):
days = dataframe.groupby('date_time_day')
dataframe.groupby('date_time_day').size().reset_index(name='data points per day')
for day in days.groups:
print(day.date())
output_path = path + filename + "_" + str(day.date()) + '.csv'
print("Creating intermediate flagged data file: ", output_path)
days.get_group(day).to_csv(output_path, index=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_csv(idir, dates):\n for path, dirs, files in os.walk(idir):\n for date in dates:\n # first loop over output dir\n if not path.endswith(str(date)):\n continue\n arr = path.split('/')\n oname = '%s-%s.csv' % (arr[-2], arr[-1])\n print(\"write %s\" % oname)\n with open(oname, 'w') as ostream:\n headers = None\n for ifile in files:\n if 'part-' not in ifile:\n continue\n iname = os.path.join(path, ifile)\n with open(iname) as istream:\n first_line = istream.readline()\n if not headers:\n headers = first_line\n ostream.write(headers)\n while True:\n line = istream.readline().replace('\"', '')\n if not line:\n break\n ostream.write(line)",
"def output_into_file(self, path: str):\n # Creating path if not exist\n Path(path).mkdir(parents=True, exist_ok=True)\n # Writing every day as a csv file\n for day in self:\n with open(f\"{path}/{day.name}.csv\", \"w\") as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # First line / Title\n writer.writerow([\" \", day.name])\n for shift in day:\n employees = \", \".join([e.name for e in shift.employees])\n writer.writerow([f\"{shift.start}-{shift.end}\", employees])",
"def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]",
"def store_grouped_data(data,path):\n i = 0\n for name, group in data:\n l = len(group)\n print name, \", \", l\n if l > 999:\n group.to_csv(path + \"//clean.events\"+ str(i), index=False)\n i += 1",
"def create_files_with_aggregates(df):\n # Save data grouped by title and channel\n df.groupby(['title', 'channel'])\\\n .size()\\\n .reset_index(name='counter')\\\n .sort_values(by=['counter'], ascending=False)\\\n .to_csv('views_by_title&channel.xlsx', index=False)\\\n\n\n # Views by channel\n df['channel'].value_counts().to_csv('views_by_channel.xlsx')\n\n # Views by day\n days = list()\n for t in df['time'].str.split('T'):\n # t[0] => day !!! t[1] => time\n days.append(t[0])\n df['day'] = days\n\n df.groupby(['day']).size().reset_index(name='counter').to_csv('views_by_day.xlsx', index=False)\n\n\n # Views by day of week\n df['day'] = pd.to_datetime(df['day'])\n df['day_of_week'] = df['day'].dt.day_name()\n df.groupby(['day_of_week']).size().reset_index(name='counter').to_csv('views_by_day_week.xlsx', index=False)\n\n create_plots(df)\n return df",
"def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise",
"def create_csv(self, type): \n if os.path.isfile(_path_finder('keydata','{0}_{1}.db'.format(\n self.keyword,type))):\n self.__db_init('{0}'.format(type))\n self.c.execute(\"SELECT MIN(date) FROM tweets\")\n mindate = self.c.fetchone()[0][0:10]\n self.c.execute(\"SELECT MAX(date) FROM tweets\")\n maxdate = self.c.fetchone()[0][0:10]\n start_date = datetime.datetime.strptime(mindate, '%Y-%m-%d')\n end_date = (datetime.datetime.strptime(maxdate, '%Y-%m-%d') + \n datetime.timedelta(days=1))\n \n def __date_range(start, end):\n for n in range((end - start).days):\n yield start + datetime.timedelta(days=n)\n \n def __db_to_list():\n for single_date in __date_range(start_date, end_date):\n d = \"\".join(['%',single_date.strftime(\"%Y-%m-%d\"),'%'])\n self.c.execute('''SELECT count(*) FROM tweets where \n date like('{0}')'''.format(d))\n yield [d[1:11], self.c.fetchone()[0]]\n \n path = _path_finder('keydata','{0}_{1}.csv'.format(\n self.keyword,type))\n if sys.version_info[0] < 3: #Python3 compatibility check\n infile = open(path, 'wb')\n else:\n infile = open(path, 'w', newline='', encoding='utf8')\n with infile as f:\n writer = csv.writer(f)\n writer.writerows(__db_to_list())\n self.conn.commit()\n self.conn.close()\n print('\\nReport has been created:')\n print(os.path.abspath(path))",
"def generate_datalogger_csv(logdir, datestring, keys, values, ts_keyname):\n if datestring == datetime.date.today().isoformat():\n logging.error(\"todays Logs are actually written and cannot used in datalogger\")\n return\n headers = [ts_keyname, ] + list(keys) + list(values)\n linebuffer = []\n linebuffer.append(\"\\t\".join(headers)) \n filename = os.path.join(logdir, \"haproxylog_%s.gz\" % datestring)\n logging.info(\"parsing file %s\", filename)\n try:\n parser = parser_generator(keys, values, gzip.open(filename, \"rb\"))\n for line in aggregator(keys, values, ts_keyname, parser):\n linebuffer.append(line)\n except IOError as exc:\n logging.exception(exc)\n return StringIO.StringIO(\"\\n\".join(linebuffer))",
"def split_data_into_exchanges(source_path, destination_path):\n for subdir, dirs, files in os.walk(source_path):\n for file in files:\n source_full_file = os.path.join(subdir, file)\n print(source_full_file)\n df = pd.read_csv(source_full_file)\n for group_name, df in df.groupby(['Ticker', 'Exchange']):\n file_name = destination_path / str(df['Date'].iloc[0]) / convertTuple(group_name)\n utils.make_dir(file_name)\n with open(file_name, \"w+\") as f:\n df.to_csv(f, index=False)",
"def to_csv(self, dir_path, **kwargs):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n for name, table in self.items():\n path = os.path.join(dir_path, '%s.csv' % name)\n\n table.to_csv(path, **kwargs)",
"def create_csv(self, data_frame):\n try:\n\n str_time = datetime.datetime.today().strftime('%Y%m%d')\n out_file = \"{}/{}.csv\".format(self.outpath, str_time)\n self.logger.logMsg(\"Creating CSV File {}\".format(out_file))\n data_frame.to_csv(out_file, index=False)\n\n if not os.path.exists(out_file):\n self.logger.logError('{} File is Not Created '.format(out_file))\n raise Exception('Error In Create CSV File not Found')\n self.logger.logMsg(\"Successfully Created {} File\".format(out_file))\n except Exception as e:\n self.logger.logError('Error {} Creating File'.format(str(e)))\n raise Exception('Error In Create CSV File {}'.format(str(e)))",
"def save_as_csv(time_series, data, path_and_file_name):\n\n parent_name = \"test\"\n parent_uqid = uuid.uuid4()\n\n file_obj = open(path_and_file_name, 'w')\n file_obj.write('version,'+str(2)+'\\n')\n file_obj.write('numOfCH,'+str(1)+'\\n')\n file_obj.write('type, scan\\n')\n file_obj.write('ch_type,'+str(0)+'\\n')\n\n file_obj.write('carpet pos,'+str(0)+'\\n')\n file_obj.write('parent_name,'+str(parent_name)+'\\n')\n file_obj.write('parent_uqid,'+str(parent_uqid)+'\\n')\n file_obj.write('parent_filename,'+str(path_and_file_name)+'\\n')\n\n file_obj.write('pc, 0\\n')\n file_obj.write('Time (ns), CH0 Auto-Correlation\\n')\n for time_step in range(0, time_series.shape[0]):\n file_obj.write(str(float(time_series[time_step]))+','+str(data[time_step])+ '\\n')\n file_obj.write('end\\n')\n\n file_obj.close()",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def convert_to_csv(device, signal):\n subject_counter = 0\n for subject_ID in range(1600, 1651):\n data = pd.read_csv(f'./raw/{device}/{signal}/data_{subject_ID}_{signal}_{device}.txt', sep=\",\", header=None)\n data.columns = [\"subject_ID\", \"activity_ID\", \"Timestamp\", f\"x_{device}_{signal}\", f\"y_{device}_{signal}\",\n f\"z_{device}_{signal}\"]\n saveing_directory = f'{device}_{signal}/S{subject_counter}_{device}_{signal}.csv'\n data.to_csv(saveing_directory)\n subject_counter += 1\n print(subject_counter)",
"def create_csv(json_file):\n with open('json_data.csv', 'w', newline='') as json_data:\n filewriter = csv.writer(json_data, delimiter=',')\n filewriter.writerow(['timestamp', 'open',\n 'high', 'low', 'close', 'volume'])\n for data in json_file:\n timestamp = data[\"date\"]\n open_price = data[\"opening\"]\n high_price = data[\"high\"]\n low_price = data[\"low\"]\n close_price = data[\"closing\"]\n volume = data[\"volume\"]\n filewriter.writerow([timestamp, open_price, high_price,\n low_price, close_price, volume])",
"def create_daily_file(self, output_dir: str,\n day: int, header='Bazin', get_cost=False):\n # Create the output directory if it doesn't exist\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n features_file = output_dir + 'day_' + str(day) + '.dat'\n\n if header == 'Bazin':\n # add headers to files\n with open(features_file, 'w') as param_file:\n if get_cost:\n param_file.write(self.bazin_header)\n else:\n self.bazin_header = 'id redshift type code ' + \\\n 'orig_sample queryable ' + \\\n 'last_rmag gA gB gt0 ' + \\\n 'gtfall gtrise rA rB rt0 rtfall rtrise iA ' + \\\n 'iB it0 itfall itrise zA zB zt0 ztfall ztrise\\n'\n param_file.write(self.bazin_header)\n\n else:\n with open(features_file, 'w') as param_file:\n param_file.write(self.header)",
"def load_daily_data():\n return pd.read_csv(os.path.join('data', 'raw', 'full_grouped.csv'))",
"def Export_in_files(COVID_data, COVID_data_reconstructed):\r\n F_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')\r\n FR_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')\r\n \r\n COVID_data_lists = [COVID_data, COVID_data_reconstructed]\r\n Data_file_list = [F_data_file, FR_data_file]\r\n Countries_list = list(COVID_data.keys())[1:]\r\n \r\n for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file\r\n COVID_data_temp = COVID_data_lists[Data_set_inc]\r\n Data_file_temp = Data_file_list[Data_set_inc]\r\n \r\n Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\\n')\r\n \r\n for Country in Countries_list:\r\n COVID_data_single_country = COVID_data_temp[Country]\r\n \r\n Date_list = list(COVID_data[Country].keys())\r\n for Date in Date_list:\r\n COVID_data_single_country_single_date = COVID_data_single_country[Date]\r\n Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None\r\n \r\n Data_file_temp.write('%s;%s;' % (Country, Date))\r\n Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))\r\n Data_file_temp.write('\\n')\r\n \r\n Data_file_temp.close()",
"def make_csv(file_of_data):\n with open(file_of_data, 'w') as f:\n writer = csv.writer(f)\n header = (\"Counter\", \"Date/time\", \"Latitude\", \"Longitude\", \"Temperature\", \"Humidity\")\n writer.writerow(header)",
"def to_csv(data_path):\n news_df, price_df = load_data(data_path)\n\n combined_df = combine_stock_news(news_df, price_df)\n\n combined_df.to_csv(data_path + \"news_price_df.csv\")",
"def extract_csv_for_date(config, data_date): \n \n ### TODO: test config separately \n \n # print(config.DATA_ROOT)\n # print(data_date)\n \n # Raise an exception if attribute DATA_ROOT does not exist\n if not 'DATA_ROOT' in vars(config):\n raise AttributeError(\"Attribute DATA_ROOT does not exist\")\n \n # Raise an exception if DATA_ROOT does not exist\n if not os.path.exists(config.DATA_ROOT):\n raise NotADirectoryError(\"The path \" + config.DATA_ROOT + \" not found\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'METER_CHANNEL_DICT' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'SAMPLE_TIME' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n data_date_dt = parse(data_date)\n \n if data_date_dt > config.DATA_END_DATE:\n raise ValueError(\"data_date entered is greater than the DATA_END_DATE: \" + \n str(config.DATA_END_DATE))\n \n if data_date_dt < config.DATA_START_DATE:\n raise ValueError(\"data_date entered is less than the DATA_START_DATE: \" + \n str(config.DATA_START_DATE))\n \n # Get the year, month and and day from date entered\n data_year = data_date_dt.year\n data_month = data_date_dt.month\n data_day = data_date_dt.day\n \n # Get the corresponding path in the directory to look for the data for the day\n data_path = os.path.join(config.DATA_ROOT, str(data_year), \"{:02}\".format(data_month), \"{:02}\".format(data_day))\n # print(data_path)\n # Find the count of meters\n meter_count = len(config.METER_CHANNEL_DICT)\n\n # Dictionary to store the names of the resulting csv files\n meter_csv_names = {}\n \n # Get the down-sampling time\n sample_time = config.SAMPLE_TIME\n \n # Create a dictionary with keys are meter names and values as dataframes \n # containing the data for the day\n meter_collection = {}\n \n # for meter_name in config.METER_CHANNEL_DICT:\n # # Create an empty dataframe, the columns will be created later\n # meter_collection[meter_name] = pd.DataFrame()\n\n #print(meter_collection)\n if os.path.exists(data_path):\n # Walk through all the files in the directory for the day's data\n for dirpath, dirnames, files in os.walk(data_path, topdown=True):\n # `files` contains the names of all the files at the location\n if len(files) == 0:\n print(\"No files found for day: \" + data_path)\n continue\n for filename in files:\n # Get the netcdf files, these are files with `.nc` extension\n if filename.lower().endswith('.nc'):\n # For the particular file, find out the corresponding meter and channel \n [meter, channel] = extract_ppty(filename, config.METER_CHANNEL_DICT.keys())\n # Create an entry in the `meter_collection` dict if it does not exist yet\n if meter not in meter_collection:\n meter_collection[meter] = pd.DataFrame()\n # Form the resulting csv name from the meter name if it doesnt exist yet\n # They are of the type - meter_name@Timestamp@Duration@Frequency\n # For e.g.: PQube3@2017-11-01T080002Z@[email protected]\n #print(meter, channel)\n if meter not in meter_csv_names:\n meter_csv_names[meter] = '@'.join([meter, '@'.join(filename.split('@')[1:4])])[:-3] + '.csv'\n #print(meter_csv_names)\n # Get the full path of the csv\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # Only extract if not already extracted to csv\n if (not os.path.isfile(csv_name)):\n # Get the dataframe containing time and channel values\n channel_df = extract_data(dirpath, 
filename)\n # Give the dataframe column a name\n channel_df.columns = [channel]\n # Down-sample the data to the sampling time intended\n channel_resampled = data_resample(channel_df, sample_time)\n # If our meter dataframe is empty so far, i.e. if this is the \n # first channel being entered, then create a copy of the \n # resampled dataframe\n if meter_collection[meter].empty:\n meter_collection[meter] = channel_resampled.copy()\n ####################### \n # This `else` clause handles two cases:\n # 1. If the dataframe is not empty, then add other columns to\n # the dataframe. (the else case)\n # 2. Some days have data downloaded more than once, this means \n # that channels can occur more than once. (like 05/21/2018)\n #######################\n else:\n # If the channel already exists in the dataframe\n # then either the other file has updated data or \n # subsequent data. \n if channel in meter_collection[meter].columns:\n # Get index from total dataframe \n idx_1 = meter_collection[meter].index\n # Get index from file dataframe\n idx_2 = channel_resampled.index\n # Compare the two, if the index is contained within,\n # then **update** the channel's value for file's indices. \n if np.all(np.isin(idx_2, idx_1)):\n meter_collection[meter][channel].loc[idx_2] = channel_resampled.values.tolist()\n # If the index is not contained, append the file df to\n # the total dataframe\n else:\n meter_collection[meter] = meter_collection[meter].append(channel_resampled, sort=False)\n meter_collection[meter].sort_index(inplace=True)\n #######################\n # This data is resampled a second time to handle two cases:\n # 1. When appending a resampled dataframe to an already resampled dataframe, the last\n # index of the original dataframe and the first index of the new dataframe can have\n # the same time. Resampling the appended dataframe will eliminate the repetitions.\n # 2. If the new dataframe to be appended starts at a much later time, resampling the\n # appended dataframe will create rows of missing data (NaN) at the times with no\n # measurement values. This makes it easier to detect missing measurement values and\n # perform data imputation at a later phase.\n #######################\n meter_collection[meter] = data_resample(meter_collection[meter], sample_time)\n # If the channel does not already exist, then add the\n # file dataframe to the total df. \n else:\n meter_collection[meter] = meter_collection[meter].join(channel_resampled, how='outer')\n else:\n print(\"Path not found: \" + data_path)\n \n # Perform data imputation wherrever needed\n # print(meter_collection)\n meter_collection = data_impute(meter_collection)\n \n # Write the total dataframes to csv file\n for meter in meter_collection:\n # Reorganize the order of columns to match the database tables \n meter_channels = config.METER_CHANNEL_DICT[meter]\n # meter_collection[meter].reset_index(inplace=True)\n meter_collection[meter] = meter_collection[meter].reindex(columns=meter_channels[1:])\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # print(csv_name)\n # Only write csv if it does not exist yet\n if(not os.path.isfile(csv_name)):\n meter_collection[meter].to_csv(csv_name, header=False)\n\n return meter_csv_names",
"def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return",
"def create_file_output(self, results):\n for key, value in results.table_output.items():\n name_timestamp = key.split('&')\n _name = name_timestamp[0]\n timestamp = name_timestamp[1]\n file_name = output_file_prefix + \"-\" + _name + \".csv\"\n if file_name not in self.file_creation_set:\n self._header_written = False\n self.file_creation_set.update([file_name])\n for row in value:\n with open(file_name, 'a+') as file_to_write:\n row.update({'Timestamp': timestamp})\n _keys = row.keys()\n file_output = csv.DictWriter(file_to_write, _keys)\n if not self._header_written:\n file_output.writeheader()\n self._header_written = True\n file_output.writerow(row)\n file_to_write.close()\n return results",
"def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")",
"def package_dataset(ds, dirname=''):\n assert dirname != '', \"dirname required\"\n import pyrfc3339 as rfc3339\n\n os.makedirs(dirname)\n ds.edges[EDGE_COLS].to_csv(join(dirname, 'edges.csv'), index=False)\n# unused\n# ds.beacons.to_csv(join(dirname, 'beacons.csv'), **shared_cfg)\n\n for c in ds.chunks:\n fr_data = c.data.iloc[0]\n date = fr_data['Datetime']\n bid = fr_data['beaconid']\n fname = '{date}_{bid}'.format(\n date=date.strftime('%Y-%m-%dT%H-%M-%S-%f'), bid=bid)\n\n def _to_csv(data, suffix):\n data = data.copy()\n data['Datetime'] = data['Datetime'].apply(lambda dt: rfc3339.generate(dt, microseconds=True))\n data.to_csv(join(dirname, fname + suffix), index=False)\n\n _to_csv(c.data[DATA_COLS], \"_data.csv\")\n _to_csv(c.pos[POS_COLS], \"_pos.csv\")\n c.com = c.com.drop('magneticField', axis=1)\n _to_csv(c.com, \"_com.csv\")\n c.acc = c.acc.rename({'realx': 'accx', 'realy': 'accy', 'realz': 'accz'}, axis=1)\n _to_csv(c.acc, \"_acc.csv\")\n \n # Wide format\n d = c.data[DATA_COLS]\n d.pivot('Datetime', 'edgenodeid', 'rssi')\n dt = d.set_index('Datetime')\n dt = dt[WIDE_COLS]\n p = d.pivot('Datetime', 'edgenodeid', 'rssi')\n dt[['edge_' + str(n) for n in p.columns]] = p\n wide = dt.groupby(level=0).first().reset_index()\n _to_csv(wide, \"_data_wide.csv\")\n\n with open(join(dirname, fname + \".cfg\"), 'w') as f:\n cfg = {k: v for k, v in c.cfg.items() if k not in ['SubmissionDatetime', 'ReceiveDatetime', \n 'Experiment Name', 'Configuration File']}\n json.dump(cfg, f, indent=4, sort_keys=True)",
"def writeToMonthCsv(news_dict):\n\n for k in news_dict:\n output_f = open(k + \".csv\", \"wb\")\n writer = csv.writer(output_f)\n writer.writerow([news_dict[k].replace(\",\", \"\").encode(\"utf-8\")])\n output_f.close()",
"def create_csv(d, f):\n csv_data = list()\n csv_head = [unicode(x) for x in f]\n\n for row in d:\n row_data = list()\n for field in f:\n fields = field.split('.')\n row_data.append(extract_dict(row, fields))\n csv_data.append(row_data)\n\n csv = (csv_head, csv_data)\n return csv",
"def create_csv_file(df, folder, name):\n if folder != \"\" and not df.empty:\n path = os.path.join(folder, f\"{name}.csv\")\n df.to_csv(path, index=True)",
"def test_divide_csv_daily(self):\n\n with tempfile.TemporaryDirectory() as td:\n filename = \"storage_data.csv\"\n file_path = f\"{td}/{filename}\"\n with patch(\"masu.external.downloader.ocp.ocp_report_downloader.pd\") as mock_pd:\n with patch(\n \"masu.external.downloader.ocp.ocp_report_downloader.utils.detect_type\",\n return_value=(\"storage_usage\", None),\n ):\n dates = [\"2020-01-01 00:00:00 +UTC\", \"2020-01-02 00:00:00 +UTC\"]\n mock_report = {\n \"interval_start\": dates,\n \"persistentvolumeclaim_labels\": [\"label1\", \"label2\"],\n }\n df = pd.DataFrame(data=mock_report)\n mock_pd.read_csv.return_value = df\n daily_files = divide_csv_daily(file_path, self.ocp_manifest_id)\n self.assertNotEqual([], daily_files)\n self.assertEqual(len(daily_files), 2)\n gen_files = [\n f\"storage_usage.2020-01-01.{self.ocp_manifest_id}.0.csv\",\n f\"storage_usage.2020-01-02.{self.ocp_manifest_id}.0.csv\",\n ]\n expected_dates = [datetime.strptime(date[:10], \"%Y-%m-%d\") for date in dates]\n expected = [\n {\"filename\": gen_file, \"filepath\": f\"{td}/{gen_file}\", \"date\": expected_dates[i]}\n for i, gen_file in enumerate(gen_files)\n ]\n for expected_item in expected:\n self.assertIn(expected_item, daily_files)",
"def csvOutput(cycle, fctimes, beachdata, offshoredata, surfdata, fname='isurf_output.csv', outdir='.'):\n\n datestr = cycle.strftime('%Y%m%d00')\n\n with open(outdir+'/%s' %fname,'w') as outp:\n outp.write(datestr+'\\r\\n')\n for isite in range(len(beachdata['name'])):\n outp.write('\\r\\n')\n outp.write('%s' %beachdata['name'][isite] + '\\r\\n')\n outp.write('%d' %beachdata['type'][isite] + '\\r\\n')\n #outp.write('TI Hsmo Tpmo Dmo Hseq Tpeq DmEq Hsbr Dpbr\\r\\n')\n #outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Tide,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n\n\t # write out to file\n for itime in range(len(fctimes)):\n\n # write out the data values to file\n\t #outp.write ('%02d' %fctimes[lp] + ' %4.2f %4.1f %3d' %tuple([hm0[lp,isite], tp[lp,isite], dirn[lp,isite]]) + \\\n # ' %4.2f %4.1f %3d' %tuple([hsshwd[lp,isite], tpshwd[lp,isite], reldir[lp,isite]]) + ' %4.2f %4.2f' %tuple([hsbkinit[lp,isite], dpsat[lp,isite]]) + '\\r\\n')\n\t outp.write('%02d' %fctimes[itime] + \\\n ',%4.1f' %offshoredata['wspd'][itime,isite] + \\\n #',%3d' %offshoredata['wdir'][itime,isite] + \\\n ',%4.2f' %offshoredata['hm0'][itime,isite] + \\\n ',%4.1f' %offshoredata['tp'][itime,isite] + \\\n ',%3d' %offshoredata['dirn'][itime,isite] + \\\n ',%4.2f' %surfdata['shorewardHs'][itime,isite] + \\\n ',%4.1f' %surfdata['shorewardT'][itime,isite] + \\\n ',%3d' %surfdata['relativeDirn'][itime,isite] + \\\n ',%4.2f' %surfdata['breakerHs'][itime,isite] + \\\n ',%4.2f' %surfdata['saturatedDepth'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in3'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in10'][itime,isite] + \\\n ',%1d' %surfdata['breakerType'][itime,isite] + '\\r\\n')\n outp.close()"
] | [
"0.694198",
"0.6872562",
"0.6830753",
"0.6723689",
"0.6640168",
"0.6443004",
"0.6358902",
"0.63040715",
"0.62817705",
"0.62778574",
"0.6272296",
"0.6240539",
"0.62286836",
"0.6219305",
"0.6161711",
"0.61344004",
"0.6130334",
"0.61212337",
"0.61146325",
"0.61035657",
"0.6088116",
"0.6055521",
"0.6039119",
"0.6034222",
"0.60287315",
"0.6013903",
"0.6007753",
"0.59912384",
"0.59775925",
"0.59764725"
] | 0.7845971 | 0 |
Create a tuple of the date_time, latitude and longitude of a location in a dataframe from a given date_time. | def get_location(datetime, position_df):
latitude = position_df[position_df.date_time == datetime].latitude.item()
longitude = position_df[position_df.date_time == datetime].longitude.item()
location = (datetime, latitude, longitude)
return location | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_time_series_at_location(data, lat, lon, feature):\n\n ts = data.sel(lat=lat, lon=lon, method='nearest', drop=True).to_series()\n index = ts.index.get_level_values('time')\n values = ts.values\n\n return pd.DataFrame({'Date': index.values, feature: values})",
"def create_demo_location_history() -> geopandas.GeoDataFrame:\n np.random.seed(123)\n\n time = pd.date_range(start=datetime.fromtimestamp(1624241116), end=datetime.now(), freq=\"1min\").values\n\n center_point = (-36.875990410695394, 174.76398830024274)\n lat = np.random.normal(loc=center_point[0], scale=0.01, size=len(time))\n lon = np.random.normal(loc=center_point[1], scale=0.01, size=len(time))\n\n geometry = [Point(lon, lat) for lon, lat in zip(lon, lat)]\n return geopandas.GeoDataFrame(pd.DataFrame(dict(time=time, lat=lat, lon=lon)), geometry=geometry)",
"def get_lat_lon():\r\n\r\n # Columns: dt,AverageTemperature,AverageTemperatureUncertainty,City,Country,Latitude,Longitude\r\n temperatures = pd.read_csv(\"GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv\")\r\n\r\n Latitude = temperatures['Latitude']\r\n Longitude = temperatures['Longitude']\r\n City = temperatures['City']\r\n Country = temperatures['Country']\r\n\r\n lat_array = []\r\n long_array = []\r\n cities_array = []\r\n countries_array = []\r\n tuples = []\r\n for i, j, city, country in zip(Latitude, Longitude, City, Country):\r\n if (i, j) not in tuples:\r\n tuples.append((i, j))\r\n lat_array.append(float(i[:-1]))\r\n long_array.append(float(j[:-1]))\r\n cities_array.append(city)\r\n countries_array.append(country)\r\n\r\n return lat_array, long_array, cities_array, countries_array",
"def get_lat_and_long(row):\r\n\tlatitude = row['latitude']\r\n\tlongitude = row['longitude']\r\n\treturn latitude, longitude",
"def local_datetime(rdd_tuple):\n timezone = rdd_tuple[0].split(\" ; \")[2]\n utc_time = rdd_tuple[2]\n city_key = rdd_tuple[0][:-(len(timezone) + 3)]\n temperature = rdd_tuple[1]\n local_dt = utils.locutils.convert_timezone(utc_time, timezone)\n return city_key, temperature, local_dt",
"def meta_properties(self, date_col=\"Date\", type_col=\"Primary Type\", lat_col=\"Latitude\",\\\n lon_col=\"Longitude\", loc_col=\"Location\", out_fname=\"data_formated.csv\"):\n # implement keywords\n # would we have to deal w/ file w/o headers?\n data = pd.read_csv(self._path, usecols=[date_col, type_col, lat_col, lon_col, loc_col],\\\n parse_dates=[date_col], infer_datetime_format=True)\n data.sort_values(date_col, inplace=True)\n min_date = data.iloc[0][date_col]\n max_date = data.iloc[(data.shape[0]-1)][date_col]\n\n lat = []\n lon = []\n\n nulls = []\n for row in data.itertuples(index=True, name='Pandas'):\n index = (row.Index)\n # if lat, lon = nan, drop the row\n # update: confirmed that issue is with code, not with data; for some reason\n # csv is actually correctly grabbing location, there just legitimately are\n # entries w/o location data\n if pd.isnull(getattr(row, loc_col)):\n # print(\"row: {} got a {} for the 'Location' column with date: {}\".format(index, \\\n # getattr(row, loc_col), getattr(row, date_col)))\n if not pd.isnull(getattr(row, lat_col)) and not pd.isnull(getattr(row, lon_col)):\n lat.append(str(getattr(row, lat_col)))\n lon.append(str(getattr(row, lon_col)))\n if \",\" in data.loc[index, type_col]:\n data.loc[index, type_col] = data.loc[index, type_col].replace(\",\", \" \")\n print(\\\n \"Successfully extracted lat, lon from lat_col, lon_col for row: {}\".format(index))\n else:\n nulls.append((index, getattr(row, date_col)))\n data.drop(index, inplace=True)\n # print(\"No location data available for row: {} with date: {}\".format(index,\\\n # getattr(row, date_col)))\n else:\n loc = literal_eval(getattr(row, loc_col))\n lat.append(loc[0])\n lon.append(loc[1])\n if \",\" in data.loc[index, type_col]:\n data.loc[index, type_col] = data.loc[index, type_col].replace(\",\", \" \")\n\n data[\"Latitude\"] = lat\n data[\"Longitude\"] = lon\n data.drop(loc_col, axis=1, inplace=True)\n\n data.sort_values(\"Latitude\", inplace=True)\n min_lat = float(data.iloc[0][\"Latitude\"])\n max_lat = float(data.iloc[(data.shape[0]-1)][\"Latitude\"])\n\n data.sort_values(\"Longitude\", inplace=True)\n min_lon = float(data.iloc[0][\"Longitude\"])\n max_lon = float(data.iloc[(data.shape[0]-1)][\"Longitude\"])\n\n data.to_csv(self.__file_dir+'/'+out_fname, na_rep=\"\", header=False, index=False)\n\n attrs = {'min_date': min_date, 'max_date': max_date, \"min_lat\":min_lat,\\\n \"max_lat\":max_lat, \"min_lon\":min_lon, \"max_lon\":max_lon, \\\n \"dates\":pd.date_range(min_date, max_date), \"num_attributes\": data.shape[1],\\\n \"num_entries\":data.shape[0]}\n self._meta_dict = attrs\n self._meta_dict['df'] = data\n pickle.dump(data, open(CWD + \"/meta_dict.p\", \"wb\"))\n print(\"Num entries w/o location data: {}\".format(len(nulls)))\n pickle.dump(nulls, open(CWD + \"/nulls.p\", \"wb\"))\n\n # not include the formatted dataset?\n return attrs",
"def get_weather(latitude: float, longitude: float, year: int) -> DataFrame:\n # Set time period\n start = datetime(year, 1, 1)\n end = datetime(year, 12, 31)\n\n # Create Point for NYC\n location = Point(latitude, longitude)\n\n # Get daily data for the year\n data = Daily(location, start, end)\n return data.fetch()",
"def read_long_lat_proxi():\n session = Session()\n # data est une liste de tuple\n long_lat_proxi_data = session.query(Prix_Median.longitude,\n Prix_Median.latitude,\n Prix_Median.ocean_proximity_str,\n Prix_Median.ocean_proximity).all()\n session.close()\n list_long_lat = DataFrame(long_lat_proxi_data)\n list_long_lat = list_long_lat.drop_duplicates()\n return list_long_lat",
"def toListOfTuple(self, df:pd.core.frame.DataFrame) -> List[Tuple]: \n df['TIME_STAMP'] = df['TIME_STAMP'].astype('str')\n records = df.to_records(index=False)\n listOfTuple = list(records)\n return listOfTuple",
"def build_turbine_loc(turbine_x, turbine_y):\n turbineLoc = pd.DataFrame({'x': turbine_x, 'y': turbine_y})\n return turbineLoc",
"def get_data(self, date_time):\n\n query = \"Select * from {table} where START_DATE <= '{datetime}' and END_DATE > '{datetime}'\"\n query = query.format(table=self.table_name, datetime=date_time)\n return pd.read_sql_query(query, con=self.con)",
"def getGPSrecords(t_start,t_end):\n client=connectionDB()\n query = '''\n select device_id as user,x as lon,y as lat,datetime\n from cadi360-sac.kovid_dev.records\n where datetime>=@t_start and datetime<=@t_end and x!=y and x is not null;\n '''\n job_config = bq.QueryJobConfig(\n query_parameters=[\n bq.ScalarQueryParameter('t_start', \"STRING\", t_start),\n bq.ScalarQueryParameter('t_end', \"STRING\", t_end),\n ]\n )\n query_job = client.query(query, job_config=job_config)\n results = query_job.result()\n GPSrecords = results.to_dataframe()\n GPSrecords = gpd.GeoDataFrame(GPSrecords, geometry=gpd.points_from_xy(GPSrecords.lon, GPSrecords.lat))\n return GPSrecords",
"def fetch_gfs_data(timeslot, lat, lon):\n pass",
"def map_coord_transformer(df, proj_string, lat_column_name, long_column_name):\n logging.info('Generating coordinate reference systems... ')\n #generate coordinate reference system objects for details of how this works \n from_crs = pyproj.CRS.from_string(proj_string)\n from_proj = pyproj.Proj(from_crs)\n gps_proj = pyproj.Proj('epsg:4326')\n original_coordinates_to_latlong_obj = pyproj.Transformer.from_proj(from_proj, gps_proj)\n logging.info('Defining transformation functions...')\n def original_coordinates_to_latlong(adf):\n (lat,long) = original_coordinates_to_latlong_obj.transform(adf[lat_column_name], adf[long_column_name])\n return lat, long\n \n #apply converter to generate series\n logging.info('Converting coordinates...')\n latlong_series = df.apply(original_coordinates_to_latlong, axis=1)\n \n #get calculated values and put back into df.\n logging.info('Splitting series...')\n lat_series = latlong_series.copy().apply(lambda x: x[0])\n long_series = latlong_series.copy().apply(lambda x: x[1])\n \n #return the values as \n logging.info('Preparing to return calc_lat and calc_long...')\n df.loc[:,'calc_lat'] = lat_series.copy()\n df.loc[:,'calc_long'] = long_series.copy()\n \n return df",
"def seperateCoord(dfcol):\n lon = []\n lat = []\n for i in range(len(dfcol)):\n dfcol[i] = dfcol[i].split()\n lon.append(dfcol[i][0])\n lat.append(dfcol[i][1])\n return lon, lat",
"def date_time_to_tuple(self, date_time):\n result = self.date_regex.findall(date_time)\n return result[0]",
"def location2station(location):\r\n # just forget it, use google\r\n location = quote(str(location))\r\n geo_url = 'http://maps.google.com/maps/geo?key=%s&q=%s&sensor=false&output=csv'%(API_KEY,location)\r\n point = map(float,urlopen(geo_url).readline().split(',')[-2:])\r\n best,result = 99999999,[]\r\n for row in rows():\r\n test_point = map(float, (row[2],row[3]))\r\n distance = ((test_point[0]-point[0])**2 + (test_point[1]-point[1])**2)**.5\r\n if distance < best:\r\n best,result = distance,row\r\n return tuple(result)",
"def get_location(self) -> tuple:\r\n if self.data is None:\r\n return (None, None)\r\n \r\n lat = self.data['GPSInfo']['GPSLatitude']\r\n lon = self.data['GPSInfo']['GPSLongitude']\r\n \r\n # Convert from Degrees, minutes, seconds to standard form\r\n latitude = (lat[0][0] / lat[0][1]) \\\r\n + (lat[1][0] / lat[1][1] / 60) \\\r\n + (lat[2][0] / lat[2][1] / 3600)\r\n \r\n longitude = (lon[0][0] / lon[0][1]) \\\r\n + (lon[1][0] / lon[1][1] / 60) \\\r\n + (lon[2][0] / lon[2][1] / 3600)\r\n\r\n # Adjust for direction references\r\n if self.data['GPSInfo']['GPSLatitudeRef'] == 'S':\r\n latitude *= -1\r\n\r\n if self.data['GPSInfo']['GPSLongitudeRef'] == 'W':\r\n longitude *= -1\r\n\r\n return (round(latitude, 6), round(longitude, 6))",
"async def location_data(location: LocationDataRequest):\n\n # Make sure location paramater is a string in the form of \"City, State\"\n\n location = str(location)\n location = location.replace('location=', \"\")\n location = location.replace(\"'\", \"\")\n\n\n # Queries for data response\n\n #pop_query = \"\"\"SELECT \"2019 Population\" FROM CitySpire WHERE \"Location\" = %s\"\"\", [location]\n #rent_query = \"\"\"SELECT \"2019 Rental Rates\" FROM CitySpire WHERE \"Location\" = %s\"\"\", [location]\n #walk_query = \"\"\"SELECT \"2019 Walk Score\" FROM CitySpire WHERE \"Location\" = %s\"\"\", [location]\n #live_query = \"\"\"SELECT \"2019 Livability Score\" FROM CitySpire WHERE \"Location\" = %s\"\"\", [location]\n\n cursor.execute(\"\"\"SELECT \"2019 Population\" FROM cityspire WHERE \"Location\" = %s;\"\"\", [location])\n pop = cursor.fetchone()\n #pop = pop[0][0] # This is slice slice the tuple value from the list of tuples\n\n cursor.execute(\"\"\"SELECT \"2019 Rental Rates\" FROM cityspire WHERE \"Location\" = %s;\"\"\", [location])\n rent = cursor.fetchone()\n #rent = rent[0][0] # This is slice slice the tuple value from the list of tuples\n\n cursor.execute(\"\"\"SELECT \"Walk Score\" FROM cityspire WHERE \"Location\" = %s;\"\"\", [location])\n walk = cursor.fetchone()\n #walk = walk[0][0] # This is slice slice the tuple value from the list of tuples\n\n cursor.execute(\"\"\"SELECT \"Livability Score\" FROM cityspire WHERE \"Location\" = %s;\"\"\", [location])\n live = cursor.fetchone()\n #live = live[0][0] # This is slice slice the tuple value from the list of tuples\n\n \n # Close the cursor and connection (this breaks the API)\n\n #cursor.close()\n #connection.close()\n\n\n # Return the data that was requested and queried\n\n return {\n \"city_name\": str(location),\n \"population\": int(pop[0]),\n \"rent_per_month\": int(rent[0]),\n \"walk_score\": int(walk[0]),\n \"livability_score\": int(live[0])\n }",
"def get_locations():\n\n dtype = {'id': str,\\\n 'lat': float,\n 'lon': float,\n 'address': str,\n }\n\n try: \n locations = pd.read_csv('Data/kulkijat-mittauspisteet.csv', sep=',', dtype=dtype)\n except FileNotFoundError:\n print('\\nMittauspisteet sisältävää tiedostoa kulkijat-mittauspisteet.csv ei löytynyt.\\n')\n locations = pd.DataFrame()\n\n return locations",
"def get_gridcell_history(\n lat,\n lon,\n dataset,\n also_return_snapped_coordinates=False,\n also_return_metadata=False,\n use_imperial_units=True,\n convert_to_local_time=True,\n as_of=None,\n ipfs_timeout=None):\n try:\n metadata = get_metadata(get_heads()[dataset])\n except KeyError:\n raise DatasetError(\"No such dataset in dClimate\")\n\n # set up units\n converter, dweather_unit = get_unit_converter(metadata[\"unit of measurement\"], use_imperial_units)\n\n # get dataset-specific \"no observation\" value\n missing_value = metadata[\"missing value\"]\n try:\n dataset_obj = GRIDDED_DATASETS[dataset](as_of=as_of, ipfs_timeout=ipfs_timeout)\n except KeyError:\n raise DatasetError(\"No such dataset in dClimate\")\n\n try:\n (lat, lon), resp_series = dataset_obj.get_data(lat, lon)\n\n except (ipfshttpclient.exceptions.ErrorResponse, ipfshttpclient.exceptions.TimeoutError, KeyError, FileNotFoundError) as e:\n raise CoordinateNotFoundError(\"Invalid coordinate for dataset\")\n\n # try a timezone-based transformation on the times in case we're using an hourly set.\n if convert_to_local_time:\n try:\n tf = TimezoneFinder()\n local_tz = pytz.timezone(tf.timezone_at(lng=lon, lat=lat))\n resp_series = resp_series.tz_localize(\"UTC\").tz_convert(local_tz)\n except (AttributeError, TypeError): # datetime.date (daily sets) doesn't work with this, only datetime.datetime (hourly sets)\n pass\n\n if type(missing_value) == str:\n resp_series = resp_series.replace(missing_value, np.NaN).astype(float)\n else:\n resp_series.loc[resp_series.astype(float) == missing_value] = np.NaN\n resp_series = resp_series.astype(float)\n \n resp_series = resp_series * dweather_unit\n if converter is not None:\n resp_series = pd.Series(converter(resp_series.values), resp_series.index)\n result = {k: convert_nans_to_none(v) for k, v in resp_series.to_dict().items()}\n \n if also_return_metadata:\n result = tupleify(result) + ({\"metadata\": metadata},)\n if also_return_snapped_coordinates:\n result = tupleify(result) + ({\"snapped to\": (lat, lon)},)\n return result",
"def lat_lng(row):\r\n lat = row[\"latitude\"]\r\n lng = row[\"longitude\"]\r\n n = int(lat/GRANULARITY)\r\n nlat_start = n * GRANULARITY\r\n nlat_end = nlat_start + GRANULARITY\r\n nlg=int(lng/GRANULARITY)\r\n nlng_start = nlg * GRANULARITY\r\n nlng_end = nlng_start + GRANULARITY\r\n latlng=[(nlat_start,nlng_start), (nlat_start,nlng_end), (nlat_end,nlng_end), (nlat_end,nlng_start)]\r\n return latlng",
"def load_data(city, month, day):\n \n# Using pandas accessor to find month, day, hour from the Start Time column in the source data\n print(\"A moment please while I find the data....\\n\")\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['month'] = df['Start Time'].dt.month\n df['hour'] = df['Start Time'].dt.hour\n\n return df",
"def build_polling_location_txt(self):\n self.base_df['address_line'] = self.base_df.apply(\n lambda row: self.get_address_line(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['directions'] = self.base_df.apply(\n lambda row: self.get_directions(), axis=1)\n #\n self.base_df['hours'] = self.base_df.apply(\n lambda row: self.get_hours(row['index'],row['start_time'], row['end_time']), axis=1)\n\n self.base_df['photo_uri'] = self.base_df.apply(\n lambda row: self.get_photo_uri(), axis=1)\n\n self.base_df['hours_open_id'] = self.base_df.apply(\n lambda row: self.create_hours_open_id(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['is_drop_box'] = self.base_df.apply(\n lambda row: self.is_drop_box(), axis=1)\n\n self.base_df['is_early_voting'] = self.base_df.apply(\n lambda row: self.is_early_voting(), axis=1)\n\n self.base_df['latitude'] = self.base_df.apply(\n lambda row: self.get_latitude(), axis=1)\n\n self.base_df['longitude'] = self.base_df.apply(\n lambda row: self.get_longitude(), axis=1)\n\n self.base_df['latlng_source'] = self.base_df.apply(\n lambda row: self.get_latlng_source(), axis=1)\n\n self.base_df['id'] = self.base_df.apply(\n lambda row: self.create_id(row['index'], row['ocd_division'],row['address1'], row['address2'],\n row['city'], row['state'], row['zip_code']), axis=1)\n\n return self.base_df",
"def add_latlon(df):\n LLs = [num2deg(x,y,z) for x,y,z in zip(df['x'],df['y'],df['z'])]\n LLdf = pd.DataFrame.from_records(LLs,columns = ['latitude','longitude'])\n return pd.concat([df.reset_index(drop=True),LLdf],axis = 1)",
"def _get_weather_data(self, lat, long):\n return {}\n try:\n # get the data\n forecast = self.ds.get_forecast(\n lat, long,\n exclude=[weather.HOURLY, weather.MINUTELY,\n weather.DAILY, weather.ALERTS, weather.FLAGS])\n\n # add lat & long to the hourly weather data for composite key in db\n data = forecast.currently\n data.latitude = lat\n data.longitude = long\n data = data.__dict__\n data.pop(\"time\")\n return data\n except Exception as e:\n print(e)\n return None",
"def read_gps_data(gps_file, harbor_data):\n header_names = [\"GPS HOURS\", \"MIN\", \"SEC\", \"MET (MIN)\", \"LONG (decimal deg)\", \"LAT (decimal deg)\", \"ALT (ft)\"] \n gps_data = pd.read_csv(gps_file, sep='\\t', skiprows=2, usecols=[0,1,2,6], names=header_names)\n times = {}\n times = gps_data[\"GPS HOURS\"].apply(str) + \":\" + gps_data[\"MIN\"].apply(str) + \":\" + gps_data[\"SEC\"].apply(str)\n \n temp = list(times) # a list of strings\n # Convert string time to float hours for easier plotting\n init_time = temp[0] # take first time which will be your time zero\n harbor_data[\"gps_times\"] = [] # list to hold the data\n for h_time in temp:\n delta_t = dt.strptime(h_time, '%H:%M:%S') - dt.strptime(init_time, '%H:%M:%S') # get delta time\n harbor_data[\"gps_times\"].append(float(delta_t.total_seconds()/3600)) # convert to hours\n\n harbor_data[\"gps_altitude\"] = gps_data[\"ALT (ft)\"] # Places altitudes in harbor_data",
"def _extract_timestamp(self, data, time_col='image_timestamp', drop=True):\n\n def parse_timestamp(x):\n \"\"\"Parses example fmt: 'Sat Dec 23 10:01:24 2017 PST' \"\"\"\n return ' '.join(x.split(' ')[1:-1])\n\n df = data.copy()\n pre = 'tmp-'\n df[pre + time_col] = df[time_col].apply(parse_timestamp)\n df[pre + time_col] = pd.to_datetime(df[pre + time_col], infer_datetime_format=True)\n df['image_date'] = df[pre + time_col].dt.date\n df['image_time'] = df[pre + time_col].dt.time\n if drop:\n df = df.drop(columns=[pre + time_col])\n\n return df",
"def get_exif_location(self, exif_data, lonlat):\n\n if lonlat=='lonlat':\n lat = ''\n lon = ''\n gps_latitude = self._get_if_exist(exif_data, 'GPS GPSLatitude')\n gps_latitude_ref = self._get_if_exist(exif_data, 'GPS GPSLatitudeRef')\n gps_longitude = self._get_if_exist(exif_data, 'GPS GPSLongitude')\n gps_longitude_ref = self._get_if_exist(exif_data, 'GPS GPSLongitudeRef')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n lat = self._convert_to_degress(gps_latitude)\n if gps_latitude_ref.values[0] != 'N':\n lat = 0 - lat\n\n lon = self._convert_to_degress(gps_longitude)\n if gps_longitude_ref.values[0] != 'E':\n lon = 0 - lon\n\n return lat, lon",
"def get_latitude_longtitude(location):\n\n tmp = geolocator.geocode(location)\n return tmp.latitude, tmp.longitude"
] | [
"0.667102",
"0.6042708",
"0.6014541",
"0.57075053",
"0.5705346",
"0.5480165",
"0.54587775",
"0.53738266",
"0.5367151",
"0.53136206",
"0.5286916",
"0.52677363",
"0.5256691",
"0.5254006",
"0.5233893",
"0.52287805",
"0.5220375",
"0.51836544",
"0.51567215",
"0.51262206",
"0.5107872",
"0.5105806",
"0.5102625",
"0.5100563",
"0.5092197",
"0.5040356",
"0.5015328",
"0.49947572",
"0.49805617",
"0.49762508"
] | 0.78045213 | 0 |
Convert a date stored as a string into a Python date, where the dataframe and the variable name are known. | def string_to_datetime(dataframe):
print("Which variable would you like to convert from a date string to a python date?")
existing_variable = input()
print("What would you like to call the new date variable?")
new_variable = input()
dataframe[new_variable] = dataframe[existing_variable].dt.strftime('%Y-%m-%d')
return dataframe | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def string_object_to_datetime(dataframe):\n\n print(\"Which variable would you like to convert from a date string to a python date?\")\n existing_variable = input()\n print(\"What would you like to call the new date variable?\")\n new_variable = input()\n\n dataframe[new_variable] = datetime.strptime(dataframe[existing_variable], '%Y-%m-%d')\n\n return dataframe",
"def string_to_date(date_string):\n\n return date(int(date_string[:4]),\n int(date_string[5:7]),\n int(date_string[8:10]))",
"def date_from_string(date):\n _type = type(date)\n try:\n if _type == datetime.date:\n return date\n elif _type == datetime.datetime:\n return datetime.datetime.date(date)\n else:\n return datetime.datetime.date(datetime.datetime.strptime(date, '%Y-%m-%d'))\n except ValueError:\n return date\n except TypeError:\n return date",
"def parse_date(date_str, date_format=DATE_FORMAT):\n if date_str is None:\n return None\n return pd.datetime.strptime(date_str, '%Y%m%d')",
"def _parse(self, date_str, format='%Y-%m-%d'):\n from pandas import to_datetime\n rv = to_datetime(date_str, format=format)\n if hasattr(rv, 'to_datetime'):\n rv = rv.to_datetime()\n return rv",
"def _str_to_date(self, date):\n return datetools.date_parser(date)",
"def string_to_date(string):\n params = string.strip().split('-')\n year = int(params[0])\n month = int(params[1])\n day = int(params[2])\n d = date(year, month, day)\n return d",
"def convert_date_string(df,col_name):\n df[col_name] = pd.to_datetime(df[col_name], infer_datetime_format=True)\n return df",
"def str_to_date(str_input):\n date = str_input.split('-')\n return dt.date(int(date[0]), int(date[1]), int(date[2]))",
"def convert_date_column(datestring):\n return datetime.datetime.strptime(datestring.strip(), \"%b-%Y\").date()",
"def parse_date(date) -> pd.Timestamp:\n if isinstance(date, datetime):\n date = pd.to_datetime(date)\n elif isinstance(date, str):\n s = date.strip()\n\n # Remove \" - REDO\" and \" Re run\", which appear once each\n if \" - REDO\" in date or \" Re run\" in date:\n print(f\"@TODO: Make sure we properly replace previous measures for assay dates with string \\\" - REDO\\\" or \\\" Re run\\\". Matching date is: {date}\")\n date = date.replace(\" - REDO\", \"\").replace(\" Re run\", \"\")\n \n # Remove \"th\" and \"nd\" suffix (eg. 25th -> 25)\n if date.endswith(\"th\") or date.endswith(\"nd\"):\n date = date[:-2]\n\n # Fix up \"Septembe\" typo\n date = date.replace(\"Septembe \", \"September \")\n \n # Add year if not present\n if np.sum([str(y) in date for y in range(2020, 2025)]) == 0:\n date = \"{}, 2020\".format(date)\n try:\n date = pd.to_datetime(date)\n except:\n date = None\n else:\n date = None\n\n return date",
"def convert_date_run(datestring):\n if isinstance(datestring, datetime.date):\n return datestring\n\n try:\n return datetime.datetime.strptime(datestring, \"%Y-%m-%d\").date()\n except ValueError:\n try:\n return datetime.datetime.strptime(datestring, \"%m/%d/%Y\").date()\n except ValueError:\n # ISO 8601 without timezone\n return datetime.datetime.strptime(datestring, \"%Y-%m-%dT%H:%M:%S\").date()",
"def str_2_date(str_date):\n str_format = \"%m/%d/%y\"\n return datetime.strptime(str_date, str_format)",
"def _convert_date(date_string, s_format='%Y-%m-%d'):\r\n if isinstance(date_string, str):\r\n return datetime.strptime(date_string, s_format)\r\n elif isinstance(date_string, datetime):\r\n return date_string\r\n else:\r\n raise TypeError(date_string, 'is not a string or datetime object')",
"def convert_str_to_date(date_str):\n if date_str.lower() == 'tomorrow':\n return datetime.date.today() + datetime.timedelta(days=1)\n elif date_str.lower() == 'today':\n return datetime.date.today()\n elif date_str.lower() == 'yesterday':\n return datetime.date.today() + datetime.timedelta(days=-1)\n elif date_str.lower() in day_values:\n return next_weekday(date_str)\n # Otherwise, process as a three-part date\n part_list = date_str.split()\n day = part_list[1].replace('th', '').replace('rd', '').replace('st', '')\n processed_date_str = ' '.join([part_list[0], day, part_list[2]])\n return datetime.datetime.strptime(processed_date_str, DATE_STR_FMT).date()",
"def string_to_date(given_date: str) -> date:\n return datetime.strptime(given_date, \"%m/%d/%Y\").date()",
"def convert_date(date_str):\n return datetime.strptime(date_str, \"%d/%m/%Y\")",
"def str_2_date( sdate ):\r\n if isinstance( sdate, str ):\r\n for fmt in ( \"%Y-%m-%d\", \"%m/%d/%Y\" ):\r\n try:\r\n return datetime.strptime( sdate, fmt ).date()\r\n except ValueError:\r\n pass\r\n else:\r\n return sdate",
"def datemake(datestring):\n return dtt.datetime.strptime(datestring,'%m/%d/%Y')",
"def str_to_date(date_str):\r\n\r\n if date_str == 'today':\r\n date = datetime.date.today()\r\n else:\r\n date = datetime.datetime.strptime(date_str, \"%Y-%m-%d\").date()\r\n\r\n return date",
"def _convert_column_to_date(dframe, column):\n try:\n return dframe[column].apply(parse_date)\n except AttributeError:\n # it is already a datetime\n pass\n except ValueError:\n # it is not a correctly formatted date\n pass\n except OverflowError:\n # it is a number that is too large to be a date\n pass",
"def str_to_date(date_string, fmt=\"%Y-%m-%d\"):\n return datetime.strptime(date_string, fmt).date()",
"def convert_str_to_date(date: str):\n try:\n return datetime.strptime(date, \"%Y/%m/%d\").date()\n except ValueError:\n try:\n return datetime.strptime(date, \"%Y-%m-%d\").date()\n except ValueError as error:\n raise error",
"def _get_date(string):\n try:\n return _date.fromordinal(_dateparse(string).toordinal())\n except ValueError:\n print(string)\n raise",
"def str2date(date):\n return datetime.datetime.strptime(date, \"%m/%d/%Y\").date()",
"def _get_date_from_str(date_input):\r\n return datetime.datetime.strptime(date_input.strip(), \"%Y-%m-%d\").replace(tzinfo=pytz.UTC)",
"def make_date(cls, df: pd.DataFrame, date_field: str) -> pd.DataFrame:\n field_dtype = df[date_field].dtype\n if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n field_dtype = np.datetime64\n if not np.issubdtype(field_dtype, np.datetime64):\n df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)\n return df",
"def convert_str2date(date):\n import datetime\n date = str(date)\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n return datetime.datetime(year,month,day)",
"def datestring_to_date(datestring):\n year, month, day = datestring.split(\"-\")\n date = datetime.date(year=int(year), month=int(month), day=int(day))\n return date",
"def _string_to_date(datestr,fmt):\n if not isinstance(datestr,str):\n raise InstrumentParameterException('Value %s is not a string.' % str(datestr))\n try:\n date_time = time.strptime(datestr,fmt)\n date = (date_time[2],date_time[1],date_time[0])\n\n except ValueError:\n raise InstrumentParameterException('Value %s could not be formatted to a date.' % str(datestr))\n \n return date"
] | [
"0.7501158",
"0.6790072",
"0.6781764",
"0.6770597",
"0.672437",
"0.6669885",
"0.66560954",
"0.6652141",
"0.66447526",
"0.6644526",
"0.66266483",
"0.6503308",
"0.64974356",
"0.6488791",
"0.6486182",
"0.6479067",
"0.64767814",
"0.64334184",
"0.6429908",
"0.63495094",
"0.63429475",
"0.63228893",
"0.63054705",
"0.63034916",
"0.6299224",
"0.6260589",
"0.62478095",
"0.6245991",
"0.6229734",
"0.6217933"
] | 0.74788135 | 1 |
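A minimal, hedged sketch of the pandas date round trip the preceding row deals with; the DataFrame and the raw_date column name are assumptions used only for illustration, not part of any record above.

import pandas as pd

# Hypothetical frame; "raw_date" is an assumed column name.
df = pd.DataFrame({"raw_date": ["2021-03-01", "2021-03-02"]})

# String column -> datetime64 column.
df["parsed"] = pd.to_datetime(df["raw_date"], format="%Y-%m-%d")

# datetime64 -> "YYYY-MM-DD" strings (the .dt.strftime pattern used in the row above).
df["as_string"] = df["parsed"].dt.strftime("%Y-%m-%d")

# datetime64 -> plain datetime.date objects.
df["as_date"] = df["parsed"].dt.date

print(df.dtypes)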
Calculate the haversine (great-circle) distance in metres between two points given by latitude and longitude, known as the origin and destination. | def calculate_distance(origin, destination):
datetime1, lat1, lon1 = origin
datetime2, lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c # Distance in km
d_m = d * 1000 # Distance in metres
return d_m | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance(origin, destination): ## https://stackoverflow.com/questions/44743075/calculate-the-distance-between-two-coordinates-with-python\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371*0.62 # miles\n\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = (math.sin(dlat / 2) * math.sin(dlat / 2) +\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\n math.sin(dlon / 2) * math.sin(dlon / 2))\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius * c\n\n return d",
"def distance_(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371 # km\n\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = (\n math.sin(dlat / 2) **2 +\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\n math.sin(dlon / 2) **2 \n )\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius * c\n\n return d",
"def haversine(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371000\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) * math.cos(\n math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius * c\n\n return d",
"def calc_distance_two_points(lat_from, long_from, lat_to, long_to):\n distance_in_km = haversine(\n (lat_from, long_from),\n (lat_to, long_to),\n unit='km')\n\n return distance_in_km",
"def distance(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371 # km\n\n dlat = math.radians(lat2-lat1)\n dlon = math.radians(lon2-lon1)\n a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\n * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n d = radius * c\n\n return d",
"def distance(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371 # km\n\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = (math.sin(dlat / 2) * math.sin(dlat / 2) +\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\n math.sin(dlon / 2) * math.sin(dlon / 2))\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius * c\n return d",
"def distance(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371 # km (radius of Earth)\n\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = (math.sin(dlat / 2) * math.sin(dlat / 2) +\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\n math.sin(dlon / 2) * math.sin(dlon / 2))\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius * c\n\n return d",
"def distance(lat1, lon1, lat2, lon2):\r\n earth_radius=3959.0 #miles\r\n if lat1==lat2 and lon1==lon2:\r\n dst=0\r\n else:\r\n dst = acos(\r\n (sin(radians(lat1)) * sin(radians(lat2))) +\r\n (cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(lon1) - radians(lon2)))\r\n ) * earth_radius\r\n return dst",
"def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']",
"def distance(gps1, gps2):\n return haversine(gps1.lng, gps1.lat, gps2.lng, gps2.lat)",
"def calculate_distance(x: float, y: float) -> float:\n # return geopy.distance.vincenty(x, y).km\n R = 6370\n lat1 = radians(x[0]) #insert value\n lon1 = radians(x[1])\n lat2 = radians(y[0])\n lon2 = radians(y[1])\n\n dlon = lon2 - lon1\n dlat = lat2- lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n distance = R * c\n return distance",
"def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles",
"def dist(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r",
"def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles",
"def calcDistance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km * 1000",
"def calculateDistance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])\n\n # haversine formula\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r",
"def calculate_distance(source,dest):\n\n ### Earth radius in miles\n R = 3960.0\n\n lat1, lon1 = source\n lat2, lon2 = dest\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n ### compute distance in spherical coordinates\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance",
"def getDist(lat1,long1,lat2,long2):\n\tlat1 = math.radians(lat1)\n\tlong1 = math.radians(long1)\n\tlat2 = math.radians(lat2)\n\tlong2 = math.radians(long2)\n\tR = 6371 # km\n\td = cmath.acos(cmath.sin(lat1) * cmath.sin(lat2) + \\\n\tcmath.cos(lat1) * cmath.cos(lat2) *\n\tcmath.cos(long2 - long1)) * R\n\treturn abs(d) # cast to float",
"def calc_dist(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n mtr = 6371000 * c\n return mtr",
"def distance(origin: Tuple[float, float], destination: Tuple[float, float]) -> float:\n radius = 6372797.560856 # Earth's quatratic mean radius for WGS-84\n\n lat1, lon1, lat2, lon2 = map(\n math.radians, [origin[0], origin[1], destination[0], destination[1]]\n )\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = (\n math.sin(dlat / 2) ** 2\n + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n )\n c = 2 * math.asin(math.sqrt(a))\n\n return c * radius",
"def dist_between(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(np.sqrt(a)) \n km = 6367 * c\n return km",
"def distance(lat1, lon1, lat2, lon2):\n coord = map(lambda x: float(x) * pi / 180.0, [lat1, lon1, lat2, lon2])\n inverse_arc = sin(coord[0]) * sin(coord[2]) + \\\n cos(coord[0]) * cos(coord[2]) * cos(coord[1] - (coord[3]))\n arc_dist = acos(min(1, max(inverse_arc, -1))) * 6371\n return arc_dist",
"def haversine(lon1, lat1, lon2, lat2): \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) \r\n #print 34\r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 \r\n c = 2 * atan(sqrt(a)/sqrt(1-a)) \r\n r = 6371 \r\n d=c * r\r\n #print type(d)\r\n return d",
"def get_geo_distance(origin, destination):\n lon1, lat1 = origin\n lon2, lat2 = destination\n radius = 6371 # km\n\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = (math.sin(dlat / 2) * math.sin(dlat / 2) +\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\n math.sin(dlon / 2) * math.sin(dlon / 2))\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius * c\n\n return d",
"def _getDistance(self, source, dest):\n\n lat1 = source[0]\n lat2 = dest[0]\n lon1 = source[1]\n lon2 = dest[1]\n\n # Formula from https://www.movable-type.co.uk/scripts/latlong.html\n R = 6370000\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n deltaPhi = math.radians(lat2-lat1)\n deltalmb = math.radians(lon2-lon1)\n a = math.sin(deltaPhi/2) * math.sin(deltaPhi/2) + \\\n math.cos(phi1) * math.cos(phi2) * \\\n math.sin(deltalmb/2) * math.sin(deltalmb/2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a));\n d = (R * c)/1000.\n\n return d",
"def calc_distance_haversine(coord1, coord2):\n lat1, lon1 = coord1\n lat2, lon2 = coord2\n # approximate radius of earth in km\n R = 6373.0\n\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance",
"def calculate_distance(point1, point2):\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles",
"def calcDistanceOptimized(lat1, lon1, lat2, lon2):\n rad = 0.017453292519943\n yDistance = (lat2 - lat1) * 60.00721\n xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * 30.053965\n distance = math.sqrt( yDistance**2 + xDistance**2 )\n return distance * 1852.00088832",
"def distance_between(lat_1, lon_1, lat_2, lon_2):\n lat_1, lon_1 = math.radians(lat_1), math.radians(lon_1)\n lat_2, lon_2 = math.radians(lat_2), math.radians(lon_2)\n theta = lon_1 - lon_2\n dist = math.sin(lat_1)*math.sin(lat_2) + math.cos(lat_1)*math.cos(lat_2)*math.cos(theta)\n dist = math.acos(dist)\n dist = math.degrees(dist)\n dist = dist * 69.06 # 69.09 = circumference of earth in miles / 360 degrees\n return dist",
"def get_distance(lat1, lon1, lat2, lon2):\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n d_phi = math.radians(lat2 - lat1)\n d_lam = math.radians(lon2 - lon1)\n a = math.sin(d_phi/2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lam/2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n return 6371000 * c"
] | [
"0.75903994",
"0.7581176",
"0.75455487",
"0.7531257",
"0.7419517",
"0.74132216",
"0.73949015",
"0.7383521",
"0.7311661",
"0.72562194",
"0.7238989",
"0.7202122",
"0.7194691",
"0.71847636",
"0.71827453",
"0.7173336",
"0.71522576",
"0.71509475",
"0.714448",
"0.71255165",
"0.712217",
"0.7121346",
"0.7111213",
"0.71072245",
"0.71045846",
"0.71029824",
"0.7096369",
"0.70842654",
"0.7078026",
"0.7057589"
] | 0.7787034 | 0 |
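For reference, a self-contained sketch of the haversine computation the preceding row implements; the coordinates below are invented, and the 6371 km Earth radius matches the value used in the row's code.

import math

def haversine_m(lat1, lon1, lat2, lon2):
    """Great-circle distance in metres on a sphere of radius 6371 km."""
    r = 6371000.0
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    return 2 * r * math.atan2(math.sqrt(a), math.sqrt(1 - a))

# One degree of latitude is roughly 111 km on this sphere.
print(round(haversine_m(0.0, 0.0, 1.0, 0.0)))  # ~111195 metres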
Calculate the speed in knots between two locations, each a (date_time, latitude, longitude) tuple. | def knots_two_points(origin, destination):
distance_m = calculate_distance(origin, destination)
datetime1_timestamp, lat1, lon1 = origin
datetime2_timestamp, lat2, lon2 = destination
datetime1 = datetime1_timestamp.timestamp()
datetime2 = datetime2_timestamp.timestamp()
seconds = abs((datetime1) - (datetime2))
conversion = 3600 / 1852 # convert 1 ms-1 to knots (nautical miles per hour; 1 nm = 1852 metres)
speed_knots = (distance_m / seconds) * conversion
if seconds > 0:
return (distance_m, speed_knots)
else:
return "N/A" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def car_speed(time_loc, prev_time_loc):\n timestamp, lat, longitude = time_loc\n prev_timestamp, prev_lat, prev_longitude = prev_time_loc\n\n dist = geopy.distance.distance((lat, longitude), (prev_lat, prev_longitude)).miles\n # Time difference in milliseconds\n time_diff = timestamp - prev_timestamp\n speed = (dist/time_diff) * 3600000\n\n return speed",
"def _calc_times():\n app.logger.debug(\"Got a JSON request\")\n km = request.args.get('km', 0, type=float)\n begin_date = request.args.get('begin_date')\n begin_time = request.args.get('begin_time')\n arrow_start = arrow.get(begin_date + \" \" + begin_time + \":00\")\n brevet_dist = request.args.get('brevet_dist', 999, type=int)\n app.logger.debug(\"km={}\".format(km))\n app.logger.debug(\"request.args: {}\".format(request.args))\n # FIXME: These probably aren't the right open and close times\n # and brevets may be longer than 200km\n percent120 = brevet_dist * 1.2\n possible_brev = [200, 300, 400, 600, 1000]\n if brevet_dist not in possible_brev:\n note = \"Current brevet distance is abnormal. Choose from 200, 300, 400, 600, or 1000\"\n elif km > percent120:\n note = \"Control location is more than 20% over the selected distance.\"\n else:\n note = \"\"\n open_time = acp_times.open_time(km, brevet_dist, arrow_start.isoformat())\n close_time = acp_times.close_time(km, brevet_dist, arrow_start.isoformat())\n result = {\"open\": open_time, \"close\": close_time, \"note\": note}\n return flask.jsonify(result=result)",
"def get_distance_metres(aLocation1, aLocation2):\n \n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n dlong *= math.cos( aLocation2.lat * math.pi / 180.0 )\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def _calc_times():\n app.logger.debug(\"Got a JSON request\")\n distance = request.args.get(\"distance\", type = int)\n begin_time = request.args.get(\"begin_time\", type = str)\n begin_date = request.args.get(\"begin_date\", type = str)\n brevet_start_time = begin_date + \" \" + begin_time\n km = request.args.get('km', 999, type=float)\n\n app.logger.debug(\"km={}\".format(km))\n app.logger.debug(\"request.args: {}\".format(request.args))\n # FIXME: These probably aren't the right open and close times\n # and brevets may be longer than 200km\n open_time = acp_times.open_time(km, distance, brevet_start_time)\n close_time = acp_times.close_time(km, distance, brevet_start_time)\n result = {\"open\": open_time, \"close\": close_time}\n return flask.jsonify(result=result)",
"def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat * dlat) + (dlong * dlong)) * 1.113195e5",
"def dist(a, b):\n base_url=\"https://route.api.here.com/routing/7.2/calculateroute.json?\"\n payload = {'app_id':HERE_ID, \n 'app_code':HERE_CODE,\n 'waypoint0':'geo!'+','.join([str(i) for i in a]),\n 'waypoint1':'geo!'+','.join([str(i) for i in b]),\n 'mode':'fastest;car;traffic:disabled',\n }\n resp = requests.get(base_url, params=payload)\n data = json.loads(resp.content)\n #import ipdb; ipdb.set_trace()\n summary = data['response']['route'][0]['summary']\n return {\"distance\" : summary['distance'], \n \"trafficTime\" : summary[\"trafficTime\"],\n \"baseTime\" : summary[\"baseTime\"]}",
"def calculate_travel_time_simple(distance_meters, accel_mps2):\n time = math.sqrt(4 * distance_meters / accel_mps2)\n speed = accel_mps2 * time * 0.5\n return [time, speed]",
"def get_nearest_station(latitude, longitude):\n url1 = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation\"\n params1 = {'api_key':'lm1M_mXgq0O6dsH9xduPAQ','lat':latitude,'lon':longitude,'format':'json'}\n req1 = requests.get(url1,params=params1)\n stat1 = req1.status_code\n stop_name = req1.json()['stop'][0]['stop_name']\n distance = req1.json()['stop'][0]['distance']\n return stop_name, distance",
"def calculate_vars(data, lat, lon):\n # Keep track of running distance and time calculations\n distance_to_dest = 0.0\n time_estimate = 0.0\n\n # Calculate from starting dest to first point in data\n user_coords = (lat, lon)\n first_path_coords = (data[0][\"lat\"], data[0][\"lon\"])\n first_distance = geopy.distance.distance(user_coords, first_path_coords).miles\n distance_to_dest += first_distance\n time_estimate += first_distance * 20 # 3mph walking speed\n\n # Calculate for all other points\n for i in range(1, len(data) - 1):\n this_coords = (data[i][\"lat\"], data[i][\"lon\"])\n next_coords = (data[i + 1][\"lat\"], data[i + 1][\"lon\"])\n\n distance = geopy.distance.distance(this_coords, next_coords).miles\n distance_to_dest += distance\n time_estimate += distance * 20 # 3mph walking speed\n\n # Round distance and time estimates\n distance_to_dest = round(distance_to_dest, 1)\n time_estimate = round(time_estimate)\n\n return distance_to_dest, time_estimate",
"def dist_between(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(np.sqrt(a)) \n km = 6367 * c\n return km",
"def calculateTrackBetweenFlights(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['azi1']",
"def get_travel_time(pc1, pc2, time, rand=False, mode=\"ggAPI\"):\n # TODO: implement the random part if needed\n # TODO: possible to replace this by API calls to Google Maps in real time\n if mode == \"simple\":\n # Retrieve distance between pc1 and pc2\n # #pc -> coord (read data)\n closest1 = get_coordinates(pc1)\n closest2 = get_coordinates(pc2)\n\n # print(\"closest to\", pc1, \" is \", closest1)\n # print(\"closest to \", pc2, \" is \", closest2)\n\n # #coords -> distance (haversine)\n dist = haversine(closest1[0], closest1[1], closest2[0], closest2[1])\n\n # Load the appropriate statistics\n times = [datetime.datetime.strptime(str(t), '%H%M').replace(year=1970,\n month=1,\n day=1,\n tzinfo=gmt8)\n for t in list(STATS[\"DAYTIME\"][1:])] # avoid the 0-line\n idx_nearest_time = times.index(nearest(times, time))\n # print(idx_nearest_time)\n poly_coeff = list(STATS[[\"C\",\"B\",\"A\"]].iloc[idx_nearest_time+1]) # correction of +1 to avoid the 0-line...\n # print(\"COEF\", poly_coeff)\n\n # apply the model -> retrieve mean and std (optional) ADDED 10 mn of service, this model is way too optimistic !\n return datetime.timedelta(seconds=poly.polyval(dist, poly_coeff)+10*60), dist\n# --------\n elif mode == \"ggAPI\":\n # coords\n # #pc -> coord (read data)\n closest1 = get_coordinates(pc1)\n closest2 = get_coordinates(pc2)\n\n # retrieve UTC time in seconds since 1970-1-1\n cur_time = datetime.datetime.now(tz=gmt8)\n time_traffic = time.replace(year=cur_time.year, month=cur_time.month, day=cur_time.day+1, tzinfo=gmt8)\n # +1 to avoid past call to Google API\n\n # api call\n url = \"https://maps.googleapis.com/maps/api/directions/json?\"\n params = {\"origin\": str(closest1[0])+','+str(closest1[1]),\n \"destination\":str(closest2[0])+','+str(closest2[1]),\n \"key\": \"AIzaSyApMtNEiMXlpRju4TR8my3lK_0tG-VafPU\",\n \"units\":\"metrics\",\n \"departure_time\":str(int(time_traffic.timestamp() // 1)),\n \"region\":\"sg\",\n \"traffic_model\":\"best_guess\"\n }\n api_call = requests.get(\n url,\n params=params\n ).json()\n\n if api_call[\"status\"] == 'INVALID_REQUEST':\n raise ValueError('Invalid Google Maps API request: '+api_call[\"error_message\"])\n\n # duration and distance retrieval (seconds and meters)\n duration = api_call[\"routes\"][0][\"legs\"][0][\"duration\"][\"value\"]\n dist = api_call[\"routes\"][0][\"legs\"][0][\"distance\"][\"value\"]\n\n return datetime.timedelta(seconds=duration + 3*60), dist\n\n\n\n else:\n raise ValueError('Incorrect mode :'+mode+'. Choose between \"simple\" and \"ggAPI\".')",
"def distance(gps1, gps2):\n return haversine(gps1.lng, gps1.lat, gps2.lng, gps2.lat)",
"def calcDistanceOptimized(lat1, lon1, lat2, lon2):\n rad = 0.017453292519943\n yDistance = (lat2 - lat1) * 60.00721\n xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * 30.053965\n distance = math.sqrt( yDistance**2 + xDistance**2 )\n return distance * 1852.00088832",
"def test_pointings():\n\n t0 = Time(\"J2000\").jd\n dt_min = 20.0\n dt_days = dt_min * 1 / 60.0 * 1 / 24.0 # 20 minutes in days\n\n time_arr = np.arange(20) * dt_days + t0\n obs = observatory.Observatory(latitude, longitude)\n\n obs.set_pointings(time_arr)\n\n ras = np.array([c[0] for c in obs.pointing_centers])\n decs = np.array([c[1] for c in obs.pointing_centers])\n if np.any(np.diff(ras) < 0):\n ind = np.where(np.diff(ras) < 0)[0][0]\n ras[ind + 1 :] += 360.0 # Deal with 360 degree wrap\n degperhour_sidereal = 360.0 / 23.9344\n dts = np.diff(ras) / degperhour_sidereal\n dts *= 60.0 # Minutes\n assert np.allclose(dts, dt_min, atol=1e-2) # Within half a second.\n assert np.allclose(decs, latitude, atol=1e-1) # Within 6 arcmin",
"def _calc_times():\n app.logger.debug(\"Got a JSON request\")\n km = request.args.get('km', 999, type=float)\n distance = request.args.get('distance', type = int)\n begin_date = request.args.get('begin_date', type = str)\n begin_time = request.args.get('begin_time', type = str)\n dateAndTime = begin_date + \" \" + begin_time\n time = arrow.get(dateAndTime, 'YYYY-MM-DD HH:mm') \n \n open_time = acp_times.open_time(km, distance, time.isoformat())\n close_time = acp_times.close_time(km, distance, time.isoformat())\n result = {\"open\": open_time, \"close\": close_time}\n return flask.jsonify(result=result)",
"def distance(a, b):\n return vincenty((float(a.longitude), float(a.latitude)),\n (float(b.longitude), float(b.latitude))).km",
"def get_distance_metres(aLocation1, aLocation2):\n [dNorth, dEast, dDown] = get_position_error(aLocation1, aLocation2)\n \n return math.sqrt((dNorth*dNorth) + (dEast*dEast))",
"def find_distance_to_store(latitude1, longitude1, latitude2, longitude2):\n lat1 = float(latitude1)\n lon1 = float(longitude1)\n lat2 = float(latitude2)\n lon2 = float(longitude2)\n\n dlat = radians(lat2-lat1)\n dlon = radians(lon2-lon1)\n a = sin(dlat/2) * sin(dlat/2) + cos(radians(lat1)) \\\n * cos(radians(lat2)) * sin(dlon/2) * sin(dlon/2)\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n distance_in_kilometers = RADIUS_OF_THE_EARTH_KILOMETERS * c\n\n return distance_in_kilometers",
"def _solar_time(date, lon, lat):\n # IDL is computing time_t as hours and fractional minutes\n time_t = ee.Date(date).get('hour').add(\n ee.Date(date).get('minute').divide(60))\n\n # This will return the hour floating point value\n # time_t = ee.Date(date).get('hour').add(ee.Date(date).getFraction('hour'))\n\n # CGM - DOY and hour could be images in order to make these expressions\n julian = _to_jd(date)\n\n # Sunrise time\n julian_ = time_t.divide(24.0).add(julian)\n j_cen = julian_.add(0.5 - 2451545.0).divide(36525.0)\n # CGM - Does the mod happen before or after the multiply\n lon_sun = j_cen.multiply(0.0003032).add(36000.76983) \\\n .multiply(j_cen).mod(360.0).add(280.46646).subtract(360)\n an_sun = j_cen.multiply(-0.0001537).add(35999.05029) \\\n .multiply(j_cen).add(357.52911)\n ecc = j_cen.multiply(0.0000001267).add(0.000042037) \\\n .multiply(j_cen).multiply(-1).add(0.016708634)\n ob_ecl = j_cen.multiply(-0.001813).add(0.00059) \\\n .multiply(j_cen).add(46.815) \\\n .multiply(j_cen).multiply(-1).add(21.448) \\\n .divide(60.0).add(26).divide(60).add(23)\n ob_corr = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).cos() \\\n .multiply(0.00256).add(ob_ecl)\n var_y = ob_corr.divide(2.0).multiply(deg2rad).tan().multiply(\n ob_corr.divide(2.0).multiply(deg2rad).tan())\n eq_t = (\n lon_sun.multiply(2.0).multiply(deg2rad).sin().multiply(var_y)\n .subtract(an_sun.multiply(deg2rad).sin().multiply(ecc).multiply(2.0))\n .add(an_sun.multiply(deg2rad).sin()\n .multiply(lon_sun.multiply(2.0).multiply(deg2rad).cos())\n .multiply(var_y).multiply(ecc).multiply(4.0))\n .subtract(lon_sun.multiply(4.0).multiply(deg2rad).sin()\n .multiply(var_y).multiply(var_y).multiply(0.5))\n .subtract(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(ecc).multiply(ecc).multiply(1.25))\n .multiply(4.0).multiply(rad2deg))\n sun_eq = (\n an_sun.multiply(deg2rad).sin().multiply(\n j_cen.multiply(0.000014).add(0.004817)\n .multiply(j_cen).multiply(-1).add(1.914602))\n .add(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(j_cen.multiply(-0.000101).add(0.019993)))\n .add(an_sun.multiply(3.0).multiply(deg2rad).sin().multiply(0.000289)))\n sun_true = sun_eq.add(lon_sun)\n sun_app = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).sin() \\\n .multiply(-0.00478).subtract(0.00569).add(sun_true)\n\n # CGM - Intentionally not converting back to degrees\n d = ob_corr.multiply(deg2rad).sin() \\\n .multiply(sun_app.multiply(deg2rad).sin()) \\\n .asin()\n\n # CGM - Functions below are lat/lon dependent and can be written as\n # ee.Image expressions\n # CGM - d is still in radians, not converting\n ha_t = lat.expression(\n 'acos((cos(90.833 * pi / 180) / (cos(lat) * cos(d))) - tan(lat) * tan(d))'\n ' * (180 / pi)',\n {'lat': lat, 'd': d, 'pi': math.pi})\n\n # print('\\n{:10s} {:.12f}'.format('julian_', julian_.getInfo()))\n # print('{:10s} {:.12f}'.format('time_t', time_t.getInfo()))\n # print('{:10s} {:.12f}'.format('j_cen', j_cen.getInfo()))\n # print('{:10s} {:.12f}'.format('lon_sun', lon_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('an_sun', an_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('ecc', ecc.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_ecl', ob_ecl.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_corr', ob_corr.getInfo()))\n # print('{:10s} {:.12f}'.format('var_y', var_y.getInfo()))\n # print('{:10s} {:.12f}'.format('eq_t', eq_t.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_eq', sun_eq.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_true', sun_true.getInfo()))\n # 
print('{:10s} {:.12f}'.format('sun_app', sun_app.getInfo()))\n # print('{:10s} {:.12f}'.format('d', d.getInfo()))\n\n return d, eq_t, ha_t",
"def _calc_times():\n\n app.logger.debug(\"Got a JSON request\")\n\n km = request.args.get('km', 999, type=float)\n distance = request.args.get('distance', 200, type=int)\n begin_time = request.args.get('begin_time', type=str)\n begin_date = request.args.get('begin_date', type=str)\n\n app.logger.debug(\"km={}\".format(km))\n app.logger.debug(\"request.args: {}\".format(request.args))\n\n print(begin_date + \" \" + begin_time)\n start_arrow = arrow.get(begin_date + \" \" + begin_time, \"YYYY-MM-DD HH:mm\")\n print('start', start_arrow.isoformat())\n\n open_time = acp_times.open_time(km, distance, start_arrow)\n close_time = acp_times.close_time(km, distance, start_arrow)\n result = {\"open\": open_time, \"close\": close_time}\n\n return flask.jsonify(result=result)",
"def get_nearest_station(latitude, longitude):\n urlbase = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation?api_key=wX9NwuHnZU2ToO7GmGR9uw&lat=\"\n urlbase += str(latitude)\n urlbase += '&lon='\n urlbase += str(longitude)\n urlbase += '&format=json'\n response_data = get_json(urlbase)\n station_name = response_data[\"stop\"][0][\"stop_name\"]\n distance = response_data[\"stop\"][0][\"distance\"]\n return station_name, distance",
"def neighbor_distance_statistics(self):\n # Compute\n # 1. Distance between different timings of the same day on the same route for each airline.\n # 2. Distance between airlines for close by timings.\n # 3. Distance between neighboring dates for same timing.\n price_column = self.train_df.columns.get_loc('Price')\n departure_datetime_column = self.train_df.columns.get_loc('Departure_datetime')\n dep_time_column = self.train_df.columns.get_loc('Dep_Time')\n date_of_journey_column = self.train_df.columns.get_loc('Date_of_Journey')\n\n def update_price_ratio_dict(dep_time_to_price_dict, same_day_price_ratio_dict):\n if len(dep_time_to_price_dict) > 1:\n dep_time_sorted = sorted(dep_time_to_price_dict.keys())\n for key_i in range(len(dep_time_sorted) - 1):\n for key_j in range(key_i + 1, len(dep_time_sorted)):\n dep_time_first = dep_time_sorted[key_i]\n dep_time_second = dep_time_sorted[key_j]\n\n if dep_time_first not in same_day_price_ratio_dict:\n same_day_price_ratio_dict[dep_time_first] = dict()\n if dep_time_second not in same_day_price_ratio_dict[dep_time_first]:\n same_day_price_ratio_dict[dep_time_first][dep_time_second] = []\n\n price_ratio = dep_time_to_price_dict[dep_time_second] * 1.0 / dep_time_to_price_dict[\n dep_time_first]\n same_day_price_ratio_dict[dep_time_first][dep_time_second].append(price_ratio)\n\n same_day_price_ratio_stats = dict()\n for airline in self.train_df['Airline'].unique():\n same_day_price_ratio_stats[airline] = dict()\n source_codes_airline = self.train_df[self.train_df['Airline'] == airline]['Source_code'].unique()\n\n for source_code in source_codes_airline:\n destination_codes_airline = self.train_df[(self.train_df['Airline'] == airline) & (self.train_df['Source_code'] == source_code)][\n 'Destination_code'].unique()\n same_day_price_ratio_stats[airline][source_code] = dict()\n for destination_code in destination_codes_airline:\n same_day_price_ratio_stats[airline][source_code][destination_code] = dict()\n n_stops_arr = sorted(self.train_df[(self.train_df['Airline'] == airline) &\n (self.train_df['Source_code'] == source_code) &\n (self.train_df['Destination_code'] == destination_code)]['n_stops'].unique())\n # TODO Convert n_stops column into int. 
If not able to do why is it so?\n n_stops_arr = [int(float(x)) for x in n_stops_arr]\n for n_stops in n_stops_arr:\n same_day_price_ratio_stats[airline][source_code][destination_code][n_stops] = dict()\n routes_arr = self.train_df[(self.train_df['Airline'] == airline) &\n (self.train_df['Source_code'] == source_code) &\n (self.train_df['Destination_code'] == destination_code) &\n (self.train_df['n_stops'] == n_stops)]['Route'].unique()\n for route in routes_arr:\n same_day_price_ratio_stats[airline][source_code][destination_code][n_stops][route] = dict()\n same_day_price_ratio_dict = dict()\n filter_dict = {'Airline': airline, 'Source_code': source_code, 'Destination_code': destination_code,\n 'n_stops': n_stops, 'Route': route}\n df = self.get_subset_data(df=self.train_df, filter_dict=filter_dict)\n\n i = 0\n prev_date_of_journey = df.iloc[i, date_of_journey_column]\n prev_dep_time = df.iloc[i, dep_time_column]\n dep_time_to_price_dict = dict()\n dep_time_to_price_dict[prev_dep_time] = df.iloc[i, price_column]\n i += 1\n while i < len(df):\n cur_date_of_journey = df.iloc[i, date_of_journey_column]\n cur_dep_time = df.iloc[i, dep_time_column]\n cur_price = df.iloc[i, price_column]\n\n if prev_date_of_journey != cur_date_of_journey:\n update_price_ratio_dict(dep_time_to_price_dict, same_day_price_ratio_dict)\n\n # reset dep time to price mapping\n dep_time_to_price_dict = dict()\n else:\n if cur_dep_time not in dep_time_to_price_dict:\n dep_time_to_price_dict[cur_dep_time] = cur_price\n else:\n # If multiple instances of price available, then consider the lowest one\n if cur_price < dep_time_to_price_dict[cur_dep_time]:\n dep_time_to_price_dict[cur_dep_time] = cur_price\n\n i += 1\n prev_dep_time = cur_dep_time\n prev_date_of_journey = cur_date_of_journey\n\n update_price_ratio_dict(dep_time_to_price_dict, same_day_price_ratio_dict)\n same_day_price_ratio_stats[airline][source_code][destination_code][n_stops][route] = same_day_price_ratio_dict\n\n output_folder = \"../statistics\"\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n with open(os.path.join(output_folder, \"same_day_price_ratio_stats.json\"), \"w\") as fd:\n json.dump(obj=same_day_price_ratio_stats, fp=fd)",
"def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']"
] | [
"0.58214194",
"0.579355",
"0.56663066",
"0.56069",
"0.5578686",
"0.55682963",
"0.55682963",
"0.55682963",
"0.55682963",
"0.55682963",
"0.55472785",
"0.5474715",
"0.5464001",
"0.54027903",
"0.5389351",
"0.5357196",
"0.53546923",
"0.5334414",
"0.5303193",
"0.5286532",
"0.52700824",
"0.5246165",
"0.5229179",
"0.5221609",
"0.52158326",
"0.5207235",
"0.51955634",
"0.51874685",
"0.5183026",
"0.51682967"
] | 0.72649795 | 0 |
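A short sketch of the knots conversion performed in the preceding row, under assumed timestamps and an assumed distance between the two fixes; the 3600 / 1852 factor converts metres per second to nautical miles per hour.

from datetime import datetime, timezone

# Two hypothetical timestamped fixes and an assumed distance between them.
t1 = datetime(2020, 1, 1, 12, 0, tzinfo=timezone.utc)
t2 = datetime(2020, 1, 1, 12, 10, tzinfo=timezone.utc)
distance_m = 3704.0  # metres travelled between the two fixes (assumed)

seconds = (t2 - t1).total_seconds()
# 1 knot = 1 nautical mile per hour = 1852 metres per hour.
speed_knots = (distance_m / seconds) * 3600 / 1852
print(round(speed_knots, 2))  # 12.0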
Set the timezone to be UTC. | def set_utc(date_time):
utc = datetime.timezone(datetime.timedelta(0))
date_time = date_time.replace(tzinfo=utc)
return date_time | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_timezone(self, to_tz):\n self.startdate = to_tz.localize(self.startdate.replace(tzinfo=None))\n self.enddate = to_tz.localize(self.enddate.replace(tzinfo=None))\n self.timezone = to_tz",
"def fromutc(self, dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=self)\n return super(UTC, self).fromutc(dt)",
"def utc(self, utc):\n\n self._utc = utc",
"def may_the_timezone_be_with_it(self):\n self.timestamp = pytz.utc.localize(self.timestamp)",
"def setUTC(self, flag):\n try:\n\n adjtimeFile = \"/etc/adjtime\"\n if self.__mountDir:\n adjtimeFile = self.__mountDir + adjtimeFile\n\n fd = open(adjtimeFile)\n content = fd.read()\n fd.close()\n\n newContent = content\n\n if flag and not \"UTC\" in content:\n if \"LOCAL\" in content:\n newContent = re.sub(\"LOCAL\", \"UTC\", content)\n else:\n newContent += \"UTC\\n\"\n elif not \"LOCAL\" in content:\n if \"UTC\" in content:\n newContent = re.sub(\"UTC\", \"LOCAL\", content)\n else:\n newContent += \"LOCAL\\n\"\n\n fd = open(adjtimeFile, \"w\")\n fd.write(newContent)\n fd.close()\n except Exception as e:\n self.__logger.critical(\"Failed to write UTC configuration\")\n raise ZKVMError(\"POSTINSTALL\", \"TIMEZONE\", \"UTC_CONF\")",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def set_utc(df, locale):\n return df.tz_localize('utc').tz_convert(None)",
"def set_timezone(conn, timezone):\n with Tx(conn) as c:\n c.execute('SET timezone = %s', (timezone,))",
"def utcnow():\n return datetime.utcnow().replace(tzinfo=UTC)",
"def _get_tz():\n return 'UTC'",
"def set_timezone():\n tz_name = request.vars.name\n # Validates the name.\n from pytz import all_timezones_set\n if tz_name in all_timezones_set:\n session.user_timezone = tz_name\n # If the user is logged in, sets also the timezone for the user.\n # Otherwise, it can happen that a user expires a cookie, then click on edit.\n # When the user is presented the edit page, the translation is done according to UTC,\n # but when the user is done editing, due to autodetection, the user is then in\n # it's own time zone, and the dates of an assignment change.\n # This really happened.\n if auth.user is not None:\n db.auth_user[auth.user.id] = dict(user_timezone = tz_name)\n logger.info(\"Set timezone to: %r\" % tz_name)\n else:\n logger.warning(\"Invalid timezone received: %r\" % tz_name)",
"def set_timezone():\n tz_name = request.vars.name\n # Validates the name.\n from pytz import all_timezones_set\n if tz_name in all_timezones_set:\n session.user_timezone = tz_name\n # If the user is logged in, sets also the timezone for the user.\n # Otherwise, it can happen that a user expires a cookie, then click on edit.\n # When the user is presented the edit page, the translation is done according to UTC,\n # but when the user is done editing, due to autodetection, the user is then in\n # it's own time zone, and the dates of an assignment change.\n # This really happened.\n if auth.user is not None:\n db.auth_user[auth.user.id] = dict(user_timezone = tz_name)\n logger.info(\"Set timezone to: %r\" % tz_name)\n else:\n logger.warning(\"Invalid timezone received: %r\" % tz_name)",
"def time_utc(self, time_utc):\n\n self._time_utc = time_utc",
"def nowUTC():\n return datetime.datetime.now(pytz.utc)",
"def sync_timezone(self, sync_timezone):\n\n self._sync_timezone = sync_timezone",
"def time_zone(self, time_zone):\n\n self._time_zone = time_zone",
"def _convertTZ(self):\n tz = timezone.get_current_timezone()\n dtstart = self['DTSTART']\n dtend = self['DTEND']\n if dtstart.zone() == \"UTC\":\n dtstart.dt = dtstart.dt.astimezone(tz)\n if dtend.zone() == \"UTC\":\n dtend.dt = dtend.dt.astimezone(tz)",
"def utcnow() -> datetime.datetime:\n return datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)",
"def to_utc(dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=pytz.utc)\n else:\n return dt.astimezone(pytz.utc)",
"def ensure_utc_time(ts: datetime) -> datetime:\n if ts.tzinfo is None:\n return datetime(*ts.timetuple()[:6], tzinfo=UTC_TZ)\n elif str(ts.tzinfo) != str(UTC_TZ):\n return ts.astimezone(UTC_TZ)\n return ts",
"def clear_time_override():\r\n utcnow.override_time = None",
"def set(self, tzone):\n\t\t\n\t\tif self.no_dbus: return\n\t\t\n\t\tself.TimeZone.SetTimezone(\n\t\t\t'(sb)',\n\t\t\ttzone,\n\t\t\tTrue # User interaction\n\t\t)",
"def tz_as_utc(dt: datetime) -> datetime:\n if dt.tzinfo is None:\n return dt.replace(tzinfo=tzutc())\n return dt.astimezone(tzutc())",
"def addTZCleanup(testCase):\n tzIn = environ.get(\"TZ\", None)\n\n @testCase.addCleanup\n def resetTZ():\n setTZ(tzIn)",
"def __init__(self, timezone=None, utc=None):\n\n self._timezone = None\n self._utc = None\n\n if timezone is not None:\n self.timezone = timezone\n if utc is not None:\n self.utc = utc",
"def astimezone(self, tz=LOCAL):\n if tz is None:\n tz = LOCAL\n tz = parser.get_timezone(tz)\n return super(self.__class__, self).astimezone(tz)"
] | [
"0.72103435",
"0.71776843",
"0.7111709",
"0.7052505",
"0.68563294",
"0.68358237",
"0.68358237",
"0.68358237",
"0.68358237",
"0.68358237",
"0.66069865",
"0.65402013",
"0.6403162",
"0.6390136",
"0.6363629",
"0.6363629",
"0.63539517",
"0.6352093",
"0.6342055",
"0.63034976",
"0.62964016",
"0.62624645",
"0.62168837",
"0.62084514",
"0.61955816",
"0.61826164",
"0.6181145",
"0.6159618",
"0.61405176",
"0.61112905"
] | 0.72553647 | 0 |
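A minimal sketch of the tzinfo-replacement pattern used in the preceding row, assuming an arbitrary naive timestamp; replace() tags the value as UTC without shifting the clock time.

from datetime import datetime, timezone

naive = datetime(2021, 6, 1, 8, 30)          # no timezone information
aware = naive.replace(tzinfo=timezone.utc)   # label the timestamp as UTC, clock time unchanged
print(aware.isoformat())                     # 2021-06-01T08:30:00+00:00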
Calculate the speed between consecutive points and add this as a variable to the dataframe. | def calculate_speed(position_df):
print("Calculating speed of track")
earliest_date_time = position_df['date_time'].min()
current_date = earliest_date_time
previous_position = get_location(earliest_date_time, position_df)
#print("Previous position: ", previous_position)
#datetime_previous, latitude_previous, longitude_previous = previous_position
# count_speed_errors = 0
line_number = -1
for position in position_df.itertuples():
line_number += 1
row_index = position[0]
if line_number == 0:
position_df.at[row_index, 'measureland_qualifier_flag_speed'] = 1 # assume good values to begin with
continue
current_position = position[2:5]
# print(current_position)
(position_difference_m, speed_knots) = knots_two_points(previous_position, current_position)
position_df.at[row_index, 'speed'] = speed_knots
#print("calculated speeds")
position_df.at[row_index, 'distance'] = position_difference_m
#print("Checked position difference")
previous_position = current_position
print(position_df.isnull())
return position_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_speed(self):\n return self.__corrds[self.X_SPEED], self.__corrds[self.Y_SPEED]",
"def _calc_speed_features(self):\n if len(self.ball_pos_stamps) >= NUM_STAMPS_CALC_SPEED:\n vx = 0\n vy = 0\n counter = 0\n for i in range(NUM_STAMPS_CALC_SPEED):\n for j in range(i+1, NUM_STAMPS_CALC_SPEED):\n bps_i = self.ball_pos_stamps[i]\n bps_j = self.ball_pos_stamps[j]\n vx += (bps_j[0][0] - bps_i[0][0]) / (bps_j[1] - bps_i[1])\n vy += (bps_j[0][1] - bps_i[0][1]) / (bps_j[1] - bps_i[1])\n counter += 1\n\n vx /= counter\n vy /= counter\n\n self.speed_features = (vx, vy)",
"def get_speed(self):\r\n return self.__x_speed, self.__y_speed",
"def calculate_speed(self, old):\n self.speed[0] = self.center[0] - old.center[0]\n self.speed[1] = self.center[1] - old.center[1]",
"def time_speed(self):\n time_speed = []\n for i in range (len(np.unique(self.pd.objid))):\n trajec = self.dataset.trajec(self.dataset.keys[i])\n times = trajec.time_epoch_secs + trajec.time_epoch_nsecs / 1e9\n time_speedy = np.vstack([times, trajec.speed])\n time_speed.append(time_speedy)\n return time_speed",
"def create_avg_speed_cols(df):\n #create speed column. this should be correlated with the day component\n #and may give additional insight\n df['avg_speed_haversine'] = 1000*df['distance'].values/df['trip_duration']\n df['avg_speed_manhattan'] = 1000*df['manhattan_distance'].values/df['trip_duration']\n\n return df",
"def speed(self) -> int:",
"def speed(self) -> int:",
"def get_speed(self):\n raise NotImplementedError",
"def get_speed(self):\n raise NotImplementedError",
"def analyse_speed(position_df):\n\n print(\"Analysing speed of track\")\n upper_bound = get_stats(position_df, 'speed')\n print(\"Upper bound:\", upper_bound)\n\n # no speed value\n position_df.loc[position_df['speed'].apply(math.isnan), 'measureland_qualifier_flag_speed'] = 9 # missing values\n print(\"Rows where the speed is null: \", position_df.loc[position_df['speed'].apply(math.isnan)])\n # speed greater than upper bound\n position_df.loc[position_df['speed'] > upper_bound, 'measureland_qualifier_flag_speed'] = 3 # probably bad values\n\n # speed within allowed limits (0 <= speed <= upper bound)\n position_df.loc[position_df['speed'] <= upper_bound, 'measureland_qualifier_flag_speed'] = 1 # good values\n\n print(position_df['measureland_qualifier_flag_speed'].isnull())\n\n position_df['measureland_qualifier_flag_speed'] = position_df['measureland_qualifier_flag_speed'].astype(int)\n\n return position_df",
"def get_speed(self):\n raise NotImplementedError()",
"def compute_speeds(self):\n # compute and return the speed as the norm of all velocity vectors\n return norm(self.velocities, axis=1)",
"def create_velocity_column(self, perf):\n perf[\"velocity\"] = perf.index\n perf[\"velocity\"] = perf.moving * np.sqrt(np.power(perf.x.diff(), 2) + np.power(perf.y.diff(), 2)) / perf.velocity.diff()\n perf[\"velocity\"] = perf[\"velocity\"].fillna(0)\n return perf[\"velocity\"]",
"def set_speed(self,speed):\n self.speed_p = speed",
"def speed(self, s=0):",
"def speed(self) -> float:\n return self._speed",
"def speed(self) -> float:\n return self._speed",
"def increment_speed(self):\n self.speed += 0.0004",
"def _pid_control(self, target_speed, current_speed):\n _e = (target_speed - current_speed)\n self._e_buffer.append(_e)\n\n if len(self._e_buffer) >= 2:\n _de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt\n _ie = sum(self._e_buffer) * self._dt\n else:\n _de = 0.0\n _ie = 0.0\n\n return np.clip((self._K_P * _e) + (self._K_D * _de / self._dt) + (self._K_I * _ie * self._dt), 0.0, 1.0)",
"def change_speed(self, speed):\n\n self.__corrds[self.X_SPEED], self.__corrds[self.Y_SPEED] = speed\n if type(speed) != tuple:\n raise ValueError('speed must be a tuple of the form (sp_x, sp_y)')",
"def CalculateSpeedIndex(self):\n time_completeness_list = self.GetTimeCompletenessList()\n prev_completeness = 0.0\n speed_index = 0.0\n prev_time = time_completeness_list[0][0]\n for time, completeness in time_completeness_list:\n # Add the incemental value for the interval just before this event.\n elapsed_time = time - prev_time\n incompleteness = (1.0 - prev_completeness)\n speed_index += elapsed_time * incompleteness\n\n # Update variables for next iteration.\n prev_completeness = completeness\n prev_time = time\n return speed_index",
"def GetSpeed(self):\n pass",
"def speed(self, speed: int, time: int = 0, /) -> None:",
"def head_speed(\n self, combined_speed=None,\n x=None, y=None, z=None, a=None, b=None, c=None):\n pass",
"def accelerate(self):\n x_speed = self.__calc_speed(Ship._X)\n y_speed = self.__calc_speed(Ship._Y)\n self._speed_vect = (x_speed, y_speed)",
"def _add_delta_times_to_df(self, route_df):\n\n \n\n route_df = route_df.assign(delta_times = self.delta_times)\n #route_df = route_df.assign(total_time = self.route_time)\n\n\n return route_df",
"def get_speed(self):\n return self._speed",
"def speed(self):\n return 1 # speed system not implemented yet",
"def calculateDistances(df):\n return"
] | [
"0.6205459",
"0.61389077",
"0.6073118",
"0.6069948",
"0.6020611",
"0.6010716",
"0.59792453",
"0.59792453",
"0.5810725",
"0.5810725",
"0.5795017",
"0.5666431",
"0.5643413",
"0.55845016",
"0.5575384",
"0.5570548",
"0.5568289",
"0.5568289",
"0.5565899",
"0.5557476",
"0.5556018",
"0.5552908",
"0.5530715",
"0.55245036",
"0.55182964",
"0.55149776",
"0.551491",
"0.55076",
"0.5489718",
"0.5486808"
] | 0.63845634 | 0 |
Analyse the speed that has been calculated and flag the data points accordingly. | def analyse_speed(position_df):
print("Analysing speed of track")
upper_bound = get_stats(position_df, 'speed')
print("Upper bound:", upper_bound)
# no speed value
position_df.loc[position_df['speed'].apply(math.isnan), 'measureland_qualifier_flag_speed'] = 9 # missing values
print("Rows where the speed is null: ", position_df.loc[position_df['speed'].apply(math.isnan)])
# speed greater than upper bound
position_df.loc[position_df['speed'] > upper_bound, 'measureland_qualifier_flag_speed'] = 3 # probably bad values
# speed within allowed limits (0 <= speed <= upper bound)
position_df.loc[position_df['speed'] <= upper_bound, 'measureland_qualifier_flag_speed'] = 1 # good values
print(position_df['measureland_qualifier_flag_speed'].isnull())
position_df['measureland_qualifier_flag_speed'] = position_df['measureland_qualifier_flag_speed'].astype(int)
return position_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calc_speed_features(self):\n if len(self.ball_pos_stamps) >= NUM_STAMPS_CALC_SPEED:\n vx = 0\n vy = 0\n counter = 0\n for i in range(NUM_STAMPS_CALC_SPEED):\n for j in range(i+1, NUM_STAMPS_CALC_SPEED):\n bps_i = self.ball_pos_stamps[i]\n bps_j = self.ball_pos_stamps[j]\n vx += (bps_j[0][0] - bps_i[0][0]) / (bps_j[1] - bps_i[1])\n vy += (bps_j[0][1] - bps_i[0][1]) / (bps_j[1] - bps_i[1])\n counter += 1\n\n vx /= counter\n vy /= counter\n\n self.speed_features = (vx, vy)",
"def speed(self) -> int:",
"def speed(self) -> int:",
"def __update_speed_stop(self):\n if self.velocidade > SERVO_DUTY_CYCLE_MEIO:\n self.velocidade -= self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade <= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n elif self.velocidade < SERVO_DUTY_CYCLE_MEIO:\n self.velocidade += self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade >= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n else:\n self.servo.set_duty_cycle(0.0)",
"def calculate_speed(position_df):\n\n print(\"Calculating speed of track\")\n\n earliest_date_time = position_df['date_time'].min()\n\n current_date = earliest_date_time\n\n previous_position = get_location(earliest_date_time, position_df)\n #print(\"Previous position: \", previous_position)\n #datetime_previous, latitude_previous, longitude_previous = previous_position\n\n # count_speed_errors = 0\n\n line_number = -1\n for position in position_df.itertuples():\n line_number += 1\n row_index = position[0]\n\n if line_number == 0:\n position_df.at[row_index, 'measureland_qualifier_flag_speed'] = 1 # assume good values to begin with\n continue\n\n current_position = position[2:5]\n\n # print(current_position)\n (position_difference_m, speed_knots) = knots_two_points(previous_position, current_position)\n position_df.at[row_index, 'speed'] = speed_knots\n\n #print(\"calculated speeds\")\n\n position_df.at[row_index, 'distance'] = position_difference_m\n\n #print(\"Checked position difference\")\n\n previous_position = current_position\n\n print(position_df.isnull())\n\n return position_df",
"def speed(self, speed: int, time: int = 0, /) -> None:",
"def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")",
"def tonfilter(ton,_speed):\r\n global _ton\r\n global _status\r\n global _status_old\r\n min_ton = 13\r\n\r\n if ton <= min_ton and _speed >= 5 :\r\n _ton = 0\r\n _status = 'EMPTY_TRAVEL'\r\n elif ton <= min_ton and _speed < 5 :\r\n _ton = 0\r\n _status = 'EMPTY_STOP'\r\n elif ton > min_ton and _speed < 5 and _slope == 1 and (_status_old == 'EMPTY_STOP' or _status_old == 'LOADING'):\r\n _ton = ton\r\n _status = 'LOADING'\r\n elif ton >= 140 and _speed > 5 :\r\n _ton = ton\r\n _status = 'LOADED_MOVE'\r\n elif ton >= 140 and _speed <= 5 and _slope == 1 :\r\n _ton = ton\r\n _status = 'LOADED_STOP'\r\n elif ton > min_ton and ton <= 135 and _speed < 5 and _slope == -1 and (_status_old == 'LOADED_STOP' or _status_old == 'LOADED_MOVE' or _status_old == 'DUMPING'):\r\n _ton = ton\r\n _status = 'DUMPING'\r\n else :\r\n _ton = ton\r\n _status_old = _status",
"def check_performance(self):\n self.lg.debug('Checking performance.')\n avg_up = (sum(self.results_up)) / len(self.results_up)\n avg_down = (sum(self.results_down)) / len(self.results_down)\n if (\n avg_up < self.tolerance * self.up or\n avg_down < self.tolerance * self.down\n ):\n self.bad_performance = True\n else:\n self.bad_performance = False",
"def check_speed(self, vals: dict) -> None:\r\n if vals['bools']['hyper']:\r\n self._speed = self._dir * self._hyper_speed\r\n elif vals['bools']['fast']:\r\n self._speed = self._dir * self._fast_speed\r\n else:\r\n self._speed = self._dir * self._normal_speed",
"def set_speed():\n pass",
"def speed(self):\n return 1 # speed system not implemented yet",
"def speed_detect(motion_first, motion_second, distance):\n if (GPIO.input(motion_first) == 1):\n # Determines if first motion sensor is triggered first.\n speed(timing(motion_second), distance)\n # Sends speed as message to server when corresponding sensor triggered.\n ensure_low(motion_first, motion_second)\n # Ensure that both motion sensors are low/are not triggered.\n\n if (GPIO.input(motion_second) == 1):\n # Determines if second motion sensor is triggered first.\n speed(timing(motion_first), distance)\n # Sends speed as message to server when corresponding sensor triggered.\n ensure_low(motion_first, motion_second)\n # Ensure that both motion sensors are low/are not triggered.",
"def __call__(self, *args, **kwargs):\n out = super().__call__(*args, **kwargs)\n out **= 2\n self._debug.append(out)\n out = int(out > self.threshold_value)\n if out and self.since_last_peak > self._num_of_taps:\n self.since_last_peak = -1\n else:\n out = 0\n self.since_last_peak += 1\n return out",
"def speed(self, value: int, /) -> None:",
"def CalculateSpeedIndex(self):\n time_completeness_list = self.GetTimeCompletenessList()\n prev_completeness = 0.0\n speed_index = 0.0\n prev_time = time_completeness_list[0][0]\n for time, completeness in time_completeness_list:\n # Add the incemental value for the interval just before this event.\n elapsed_time = time - prev_time\n incompleteness = (1.0 - prev_completeness)\n speed_index += elapsed_time * incompleteness\n\n # Update variables for next iteration.\n prev_completeness = completeness\n prev_time = time\n return speed_index",
"def analyse_course(position_df):\n\n print(\"Analysing course of track\")\n total_data_points = len(position_df)\n\n earliest_date_time = position_df['date_time'].min()\n current_date = earliest_date_time\n\n previous_position = get_location(earliest_date_time, position_df)\n datetime_previous, latitude_previous, longitude_previous = previous_position\n\n previous_speed_knots = 0\n\n count_acceleration_errors = 0\n\n line_number = -1\n for position in position_df.itertuples():\n line_number += 1\n row_index = position[0]\n\n if line_number == 0:\n #position_df.at[row_index, 'measureland_qualifier_flag_course'] = 1 # assume good value\n position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 1 # assume good value\n continue\n\n current_position = position[2:5]\n\n # Calculate acceleration between two points\n current_conditions = knots_two_points(previous_position, current_position)\n current_speed_knots = current_conditions[1] # altered to this because distance and speed are output as a tuple from knots_two_points\n\n time_difference = (current_position[0] - previous_position[0]).total_seconds()\n\n speed_difference_metres_per_sec = (current_speed_knots - previous_speed_knots) * (1852 / 3600) # convert knots to ms-1\n if time_difference > 0:\n acceleration = speed_difference_metres_per_sec / time_difference\n else:\n acceleration = 0\n\n # Print errors where data do not meet requirements\n error_message_acceleration = \"\"\n\n if acceleration == \"N/A\":\n error_message_acceleration = \"No acceleration value calculated\"\n position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 9 # no value\n elif acceleration > 1:\n count_acceleration_errors += 1\n error_message_acceleration = \"** Acceleration too quick **\"\n position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 3 # probably bad value\n elif acceleration <= 1:\n position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 1 # good value\n\n # if error_message_acceleration != \"\":\n # print(\"Error: {} {} ({:.4f}, {:.4f}) acceleration: {} ms-2\".format(error_message_acceleration,\n # current_position[0],\n # current_position[1],\n # current_position[2], acceleration))\n\n previous_position = current_position\n #previous_bearing = current_bearing\n previous_speed_knots = current_speed_knots\n\n #position_df['measureland_qualifier_flag_course'] = position_df['measureland_qualifier_flag_course'].astype(int)\n position_df['measureland_qualifier_flag_acceleration'] = position_df['measureland_qualifier_flag_acceleration'].astype(int)\n\n return (count_acceleration_errors)",
"def do_scan(self):\n scan = self.scan\n laser = self.devices[scan['laser']['name']]\n dev_to_scan = scan['axis']['device']['name']\n output = scan['axis']['device']['property']\n approx_time_to_scan = (laser.params['stop_wavelength']-laser.params['start_wavelength'])/laser.params['wavelength_speed']\n # Scan the laser and the values of the given device\n if output != 'time':\n dev_range = scan['axis']['device']['range']\n start = Q_(dev_range[0])\n units = start.u\n stop = Q_(dev_range[1])\n step = Q_(dev_range[2])\n \n num_points_dev = ((stop-start)/step).to('')\n else:\n dev_range = scan['axis']['device']['range']\n start = 1\n stop = dev_range[1]\n num_points_dev = stop\n\n num_points_dev += 1 # So the last bit of information is true.\n\n for value in np.linspace(start, stop, num_points_dev, endpoint=True):\n if output != 'time':\n self.set_value_to_device(dev_to_scan, {output: value * units})\n dev = self.devices[dev_to_scan]\n time.sleep(0.1)\n while not dev.driver.finished_moving:\n time.sleep(0.2)\n\n self.do_line_scan()\n \n return True",
"def measure(self):\n pass",
"def accelerate(self):\n\n self.log_speed()\n while self.speed_mph < self.maximum_speed:\n time.sleep(1)\n if (self.speed_mph + self.acceleration_rate) > self.maximum_speed:\n self.speed_mph = self.maximum_speed\n self.log_speed()\n break\n else:\n self.speed_mph += self.acceleration_rate\n self.log_speed()",
"def get_speed(self) -> float: \r\n if self.distance < self.distance_stop:\r\n print(\"STOP: Obstacle detected ({} cm)\".format(self.distance))\r\n return 0\r\n elif self.distance < self.distance_slow: \r\n return self.speed * 0.8\r\n else:\r\n return self.speed",
"def task_2():\n\n # To store the list of speeds to plot\n list_of_speeds = []\n list_of_times = []\n list_of_time_difference = [0]\n\n # To go from 1 through 80\n for i in range(LOW_SPEED, HIGH_SPEED + 1, 5):\n list_of_speeds.append(i)\n list_of_times.append(((DISTANCE / i) * 60))\n\n for i in range(1, len(list_of_times)):\n list_of_time_difference.append(list_of_times[i-1] - list_of_times[i])\n\n plt.plot(list_of_speeds, list_of_time_difference)\n plt.xlabel(\"Speed (in mph)\")\n plt.ylabel(\"Time saved (in minutes)\")\n plt.show()",
"def speed(data_array, time=1):\n dst = DataOperation.geo_m(data_array)\n speed_values = np.zeros(dst.size)\n count = 0\n for d in dst:\n speed_values[count] = d/time * 3.6# dystans jest w m, przedzial czasowy 1 s, a chcemy k/h\n count += 1\n return speed_values",
"def step(self):\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass",
"def update(self):\n\n obstVals = self.robot.getDepth(self.startCol, self.startRow,\n self.sampleWidth, self.sampleHeight)\n\n masked_obstVals = numpy.ma.masked_array(obstVals, obstVals == 0)\n\n if numpy.ma.count(masked_obstVals) == 0:\n meanDistance = 500\n else:\n meanDistance = numpy.mean(masked_obstVals)\n if meanDistance < 500:\n meanDistance = 500\n\n if meanDistance < 1200: # Changing this value will change how sensitive robot is to walls\n self.setVector(self.speedMult / meanDistance, 180 - self.angle)\n else:\n self.setVector(0.0, 0.0)",
"def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False",
"def tracking(self) -> None:\n dist, delta_angle, timestamp = self.vision.get_vision_data()\n # collect data only once per loop\n if timestamp is None:\n # self.next_state(\"searching\")\n # print(f\"tracking -> searching {self.vision.get_vision_data()}\")\n self.state = self.searching\n else:\n if abs(delta_angle) > self.find_allowable_angle(dist):\n # print(f\"Telling turret to slew by {delta_angle}\")\n self.turret.slew(delta_angle)\n if self.ready_to_spin():\n # self.next_state(\"firing\")\n # print(f\"tracking -> spining_up {self.vision.get_vision_data()}\")\n self.distance = dist\n self.state = self.spining_up",
"def mark_tap_start_and_end(data: DataFrame, delta_in_ms: int):\n\n lead_file = 'Accelerometer.csv'\n time_col = x_columns[lead_file]\n\n delta = delta_in_ms * 1000000\n\n for tap_file in tap_file_names:\n tap_feature = tap_file_to_feature_name[tap_file]\n # Step 1: Put a 2 at the start and a 3 at the end of each event\n\n indices = data[data[tap_feature] == 1].index\n if len(indices) == 0:\n continue\n for i in range(len(indices)):\n if i == 0 or data[time_col][ indices[i] ] - data[time_col][ indices[i - 1] ] > delta:\n data[tap_feature].loc[ indices[i] ] = 2\n if i > 0:\n if data[tap_feature][ indices[i - 1] ] == 1:\n data[tap_feature].loc[ indices[i - 1] ] = 3\n elif indices[i - 1] + 1 < data.shape[0] and data[tap_feature][ indices[i - 1] + 1 ] == 0:\n # In this case, the tap lasted only one time step,\n # so we call the end of the last tap the reading after\n data[tap_feature].loc[ indices[i - 1] + 1 ] = 3\n else:\n #Hopefully this case will never occur, where two consecutive taps\n #are more than delta apart but with no readings in between\n print(\"Something seems off about this data...\")\n print(data[ indices[i] - 5 : indices[i] + 5][[time_col, tap_feature]])\n return\n\n if i == len(indices) - 1:\n # If we're at the end of the list, that must be the end of the last tap\n if data[tap_feature][ indices[i] ] == 1:\n data[tap_feature].loc[ indices[i] ] = 3\n elif indices[i] + 1 < data.shape[0]:\n data[tap_feature].loc[ indices[i] + 1] = 3\n else:\n data[tap_feature].loc[ indices[i] ] = 0 # Remove the miscreant\n print(\"There's an issue with a tap at the very last point of the data...\")\n\n if sum(data[data[tap_feature] == 2][tap_feature]) * 3 != sum(data[data[tap_feature] == 3][tap_feature]) * 2:\n print(\"Uh oh, we placed an unbalanced number of 2's and 3's. 
Thanos would be disappointed.\")\n \n\n # Step 2: Put a 4 at the start of the \"before\" window\n # and a 5 at the end of the \"after\" window\n\n start_indices = data[data[tap_feature] == 2].index\n end_indices = data[data[tap_feature] == 3].index\n if len(start_indices) != len(end_indices):\n print(\"Impossible.\")\n\n #We should be able to get a half_delta on either side of\n #each window\n half_delta = delta // 2\n\n\n for i in range(len(start_indices)):\n find_index_before = start_indices[i]\n range_min = data[time_col][ start_indices[i] ] - half_delta\n while find_index_before > 0 and data[time_col][find_index_before] > range_min \\\n and data[tap_feature][find_index_before - 1] < 2:\n find_index_before -= 1\n if data[tap_feature][find_index_before] == 0:\n data[tap_feature].loc[find_index_before] = 4\n elif data[tap_feature][find_index_before] == 5 and data[tap_feature][find_index_before + 1] == 0:\n # Keep our windows from overlapping - don't put the start of one on\n # top of the end of the previous\n data[tap_feature].loc[find_index_before + 1] = 4\n elif find_index_after == 0 and data[tap_feature][find_index_after + 1] == 0:\n # If we're at the start of the interval, shift what was there forward one\n data[tap_feature].loc[find_index_after + 1] = data[tap_feature].loc[find_index_after]\n data[tap_feature].loc[find_index_after] = 4\n elif find_index_before == start_indices[i] and data[tap_feature][find_index_before - 1] == 5 \\\n and find_index_before >= 2 and data[tap_feature][find_index_before - 2] < 2:\n data[tap_feature].loc[find_index_before - 2] = 5\n data[tap_feature].loc[find_index_before - 1] = 4\n else:\n # The most likely case is that we hit the beginning or end of the\n # interval, in which case we should probably just throw the point out\n print(\"Oh no, that's pretty weird: \", data[tap_feature][find_index_before], find_index_before, start_indices[i])\n \n\n find_index_after = end_indices[i]\n range_max = data[time_col][ end_indices[i] ] + half_delta\n while find_index_after + 1 < data.shape[0] and data[time_col][find_index_after] < range_max \\\n and data[tap_feature][find_index_after + 1] < 2:\n find_index_after += 1\n if data[tap_feature][find_index_after] == 0:\n data[tap_feature].loc[find_index_after] = 5\n elif find_index_after == data.shape[0] - 1 and data[tap_feature][find_index_after - 1] == 0:\n # If we're at the end of the interval, shift what was there back one\n data[tap_feature].loc[find_index_after - 1] = data[tap_feature].loc[find_index_after]\n data[tap_feature].loc[find_index_after] = 5\n elif find_index_after == end_indices[i] and data[tap_feature][find_index_after + 1] < 2:\n data[tap_feature].loc[find_index_before + 1] = 5\n else:\n # See above comment\n print(\"Oh no, that's REALLY weird\", find_index_after, data[tap_feature])",
"def GetSpeed(self):\n pass",
"def compute_feedback_data(self):\n\n # supposed constants for computing this values\n\n # returned data\n time_spent = []\n fuel_spent = []\n fishing_avg_prob = []\n color = []\n\n path_points_data = [[self.x_list_main, self.y_list_main, 'green'],\n [self.x_list, self.y_list, 'red'],\n [self.x_list_filtered, self.y_list_filtered, 'blue']]\n\n fish_list = [self.fish_prob_list,\n self.fish_prob_list_filtered,\n self.fish_prob_list_main]\n\n # time and fuel spent\n for path in path_points_data:\n fuel = 0\n time = 0\n # we append color for making later file writing easier\n color.append(path[2])\n\n # last point won't have distance nor angle\n for i in range(len(path[0]) - 1):\n # time and fuel consumption\n current_point = np.array([path[0][i], path[1][i]])\n if i == 0:\n # won't work if previous_point = np.array([0,0])\n previous_point = current_point - np.array([1, 0])\n else:\n previous_point = np.array([path[0][i-1], path[1][i-1]])\n\n final_point = np.array([path[0][i+1], path[1][i+1]])\n\n distance = np.linalg.norm(final_point - current_point)\n angle = self.compute_angle(\n previous_point, current_point, final_point)\n\n fuel = self.fuel_consumption * distance * self.avg_speed + \\\n self.fuel_consumption_turning * angle / 360\n\n time = distance / self.avg_speed + self.time_consumption_turning * angle / 360\n\n time_spent.append(time)\n fuel_spent.append(fuel)\n\n # fishing average probabilities\n for fish_probs in fish_list:\n fishing_prob = sum(fish_probs) / float(len(fish_probs))\n fishing_avg_prob.append(fishing_prob)\n\n # generates file with info\n open(os.path.join(self.path_to_results, 'report.txt'), 'w').close()\n with open(os.path.join(self.path_to_results, 'report.txt'), 'w') as file:\n for i in range(len(color)):\n file.write('{} path spends {:.4f} units of time, {:.4f} of fuel and has an average fishing probability of {:.4f} \\n'.format(\n color[i], time_spent[i], fuel_spent[i], fishing_avg_prob[i]))\n\n file.write('\\n')\n file.write('{} path spends the least time \\n'.format(\n color[time_spent.index(min(time_spent))]))\n file.write('{} path spends the least fuel \\n'.format(\n color[fuel_spent.index(min(fuel_spent))]))\n file.write('{} path has the greatest fishing average probability'.format(\n color[fishing_avg_prob.index(max(fishing_avg_prob))]))"
] | [
"0.63325125",
"0.60852146",
"0.60852146",
"0.5910877",
"0.5835467",
"0.5824722",
"0.5761051",
"0.5743983",
"0.57370144",
"0.57069635",
"0.5693888",
"0.56703067",
"0.5669882",
"0.5654955",
"0.56399757",
"0.5597239",
"0.5595384",
"0.5584568",
"0.5552577",
"0.555006",
"0.55467975",
"0.5542729",
"0.5511296",
"0.5493681",
"0.54915243",
"0.54848486",
"0.54822874",
"0.5455402",
"0.5454285",
"0.5449085"
] | 0.65621316 | 0 |
Analyse the distance between the points. Even when the ship is stationary, the lat and long vary slightly, so it is an error for the lat and long to be the same in consecutive points: the distance should be greater than 0 (to 6 dp, which is equivalent to 0.19 cm at the Equator). | def analyse_distance_between_points(position_df):
print("Analysing distance between consecutive points.")
maximum_distance = 0.019 # 1 x 10^-6 of a degree is 0.019 m
# no distance value
position_df.loc[position_df['distance'].apply(math.isnan), 'measureland_qualifier_flag_distance'] = 9 # missing values
print("Rows where the distance is null: ", position_df.loc[position_df['distance'].apply(math.isnan)])
# good data
print("Flagging good data distance")
position_df.loc[abs(position_df['distance']) > maximum_distance, 'measureland_qualifier_flag_distance'] = 1 # good values
# bad data
print("Flagging bad data distance")
position_df.loc[abs(position_df['distance']) <= maximum_distance, 'measureland_qualifier_flag_distance'] = 3 # probably bad values
in_port_periods = get_list_block_time_periods("/home/jen/projects/ace_data_management/wip/cruise_track_data/in_port.csv")
for in_port_period in in_port_periods:
#print("Periods ship was in port: ", in_port_period[0], in_port_period[1])
position_df.loc[(abs(position_df['distance']) <= maximum_distance) & (in_port_period[0] < position_df['date_time']) & (position_df['date_time'] < in_port_period[1]), 'measureland_qualifier_flag_distance'] = 1 # good values
position_df['measureland_qualifier_flag_distance'] = position_df['measureland_qualifier_flag_distance'].astype(int)
return position_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']",
"def road_distance(lat1, lon1, lat2, lon2):\n point1 = lat1, lon1\n point2 = lat2, lon2\n url = \"https://maps.googleapis.com/maps/api/distancematrix/json?origins={0},{1}&destinations={2},{3}&mode=driving&language=en-EN&sensor=false&key={4}\".format(str(lat1),str(lon1),str(lat2),str(lon2), google_api_key)\n response = api_call(url)\n km = response['rows'][0]['elements'][0]['distance']['value']\n return round(km/1000,1)",
"def test_distance_between_points_near_0_longitude(self) -> None:\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=-0.1, elevation_2=0, haversine=True)\n print(distance)\n self.assertTrue(distance < 230000)\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=-0.1, elevation_2=0, haversine=False)\n print(distance)\n self.assertTrue(distance < 230000)\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=360-0.1, elevation_2=0, haversine=True)\n print(distance)\n self.assertTrue(distance < 230000)\n distance = mod_geo.distance(latitude_1=0, longitude_1=0.1, elevation_1=0, latitude_2=0, longitude_2=360-0.1, elevation_2=0, haversine=False)\n print(distance)\n self.assertTrue(distance < 230000)",
"def _getDistance(self, source, dest):\n\n lat1 = source[0]\n lat2 = dest[0]\n lon1 = source[1]\n lon2 = dest[1]\n\n # Formula from https://www.movable-type.co.uk/scripts/latlong.html\n R = 6370000\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n deltaPhi = math.radians(lat2-lat1)\n deltalmb = math.radians(lon2-lon1)\n a = math.sin(deltaPhi/2) * math.sin(deltaPhi/2) + \\\n math.cos(phi1) * math.cos(phi2) * \\\n math.sin(deltalmb/2) * math.sin(deltalmb/2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a));\n d = (R * c)/1000.\n\n return d",
"def test_get_distance_to_same_place() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n )\n\n assert meters == 0",
"def get_distance_between_point(test_long, test_lat, lab_long, lab_lat):\r\n test = (test_lat, test_long)\r\n lab = (lab_lat, lab_long)\r\n return geodesic(test, lab).miles",
"def distance(lat1, lon1, lat2, lon2):\n lon1, lat1 = math.radians(lon1), math.radians(lat1)\n lon2, lat2 = math.radians(lon2), math.radians(lat2)\n a = (math.sin((lat2 - lat1) / 2) ** 2 +\n math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = 6371000 * c\n\n return d",
"def calculate_distance(srcLong, srcLat, dstLong, dstLat):\n return math.sqrt( (srcLong-dstLong) ** 2 + (srcLat - dstLat) ** 2)",
"def calculate_vars(data, lat, lon):\n # Keep track of running distance and time calculations\n distance_to_dest = 0.0\n time_estimate = 0.0\n\n # Calculate from starting dest to first point in data\n user_coords = (lat, lon)\n first_path_coords = (data[0][\"lat\"], data[0][\"lon\"])\n first_distance = geopy.distance.distance(user_coords, first_path_coords).miles\n distance_to_dest += first_distance\n time_estimate += first_distance * 20 # 3mph walking speed\n\n # Calculate for all other points\n for i in range(1, len(data) - 1):\n this_coords = (data[i][\"lat\"], data[i][\"lon\"])\n next_coords = (data[i + 1][\"lat\"], data[i + 1][\"lon\"])\n\n distance = geopy.distance.distance(this_coords, next_coords).miles\n distance_to_dest += distance\n time_estimate += distance * 20 # 3mph walking speed\n\n # Round distance and time estimates\n distance_to_dest = round(distance_to_dest, 1)\n time_estimate = round(time_estimate)\n\n return distance_to_dest, time_estimate",
"def distance_coordinates(lat1: Decimal, lon1: Decimal, lat2: Decimal, lon2: Decimal) -> Decimal:\n lat1 = math.radians(lat1)\n lon1 = math.radians(lon1)\n lat2 = math.radians(lat2)\n lon2 = math.radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = Decimal(R * c)\n\n return distance",
"def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01",
"def coord_distance(lat1, lon1, lat2, lon2):\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n km = 2 * 6367 * math.asin(math.sqrt(a))\n mi = 0.621371 * km\n return mi",
"def distance_gps(point1, point2):\n return haversine_distance(point1.get_latitude(), point1.get_longitude(),\n point2.get_latitude(), point2.get_longitude())",
"def calculate_distance(loc1, loc2):\n matrix = googlemaps.distance_matrix.distance_matrix(client=gmaps_dist, origins=loc2, destinations=loc1)\n # import pdb; pdb.set_trace()\n return matrix['rows'][0]['elements'][0]['distance']['value']",
"def distance(self, lat: float, lon: float) -> float:\n return distance((self.lat, self.lon), (lat, lon))",
"def coord_distance(lat1, lon1, lat2, lon2):\n\tlon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n\tdlon = lon2 - lon1\n\tdlat = lat2 - lat1\n\ta = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n\tc = 2 * math.asin(math.sqrt(a))\n\tkm = 6367 * c \n\treturn km",
"def gpx_distance(lat1, lon1, lat2, lon2):\n theta = lon1 - lon2\n rads = sin(radians(lat1)) * sin(radians(lat2)) + cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(theta))\n\n # make sure rads is [-1, 1]\n rads = 1 if rads > 1 else rads\n rads = -1 if rads < -1 else rads\n\n rads = acos(rads)\n\n # multiply by radius of the earth to get distance\n return rads * 6367",
"def measure_gps(lat1, lon1, lat2, lon2):\n R = 6378.137; # Radius of earth in KM\n dLat = radians(lat2) - radians(lat1)\n dLon = radians(lon2) - radians(lon1)\n a = sin(dLat/2) * sin(dLat/2) + cos(radians(lat1)) * cos(radians(lat2)) * sin(dLon/2) * sin(dLon/2)\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n d = R * c\n return d * 1000 # meters",
"def points2distance(start, end):\r\n start_long = math.radians(recalculate_coordinate(start[0], 'deg'))\r\n #print 'dzcx ',start_long\r\n start_latt = math.radians(recalculate_coordinate(start[1], 'deg'))\r\n\r\n end_long = math.radians(recalculate_coordinate(end[0], 'deg'))\r\n end_latt = math.radians(recalculate_coordinate(end[1], 'deg'))\r\n \r\n d_latt = end_latt - start_latt\r\n d_long = end_long - start_long\r\n \r\n r = 6371\r\n hav = math.sin(d_latt/2)**2 + math.cos(start_latt) * math.cos(end_latt) * math.sin(d_long/2)**2\r\n c = 2 * r * math.asin(math.sqrt(hav))\r\n return c",
"def get_distance(lat1, lon1, lat2, lon2):\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n d_phi = math.radians(lat2 - lat1)\n d_lam = math.radians(lon2 - lon1)\n a = math.sin(d_phi/2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lam/2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n return 6371000 * c",
"def calculate_distance(x: float, y: float) -> float:\n # return geopy.distance.vincenty(x, y).km\n R = 6370\n lat1 = radians(x[0]) #insert value\n lon1 = radians(x[1])\n lat2 = radians(y[0])\n lon2 = radians(y[1])\n\n dlon = lon2 - lon1\n dlat = lat2- lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n distance = R * c\n return distance",
"def calculate_distance(long_x, lat_x, long_y, lat_y):\n if not (long_x and lat_x and long_y and lat_y):\n return 0\n\n dlon = long_y - long_x if long_y != long_x else long_y\n dlat = lat_y - lat_x if lat_y != lat_x else lat_y\n\n a = (sin(dlat / 2) ** 2) + cos(lat_x) * cos(lat_y) * (sin(dlon / 2) ** 2)\n c = 2 * asin(sqrt(a))\n r = 6371 # radius of earth in kms.\n return (c * r)",
"def calcDistanceOptimized(lat1, lon1, lat2, lon2):\n rad = 0.017453292519943\n yDistance = (lat2 - lat1) * 60.00721\n xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * 30.053965\n distance = math.sqrt( yDistance**2 + xDistance**2 )\n return distance * 1852.00088832",
"def distance(lat0, lng0, lat1, lng1):\n # convert decimal degrees to radians \n lat0, lng0, lat1, lng1 = map(radians, [lat0, lng0, lat1, lng1])\n # haversine formula \n dlng = lng1 - lng0 \n dlat = lat1 - lat0 \n a = sin(dlat/2)**2 + cos(lat0) * cos(lat1) * sin(dlng/2)**2\n c = 2 * asin(sqrt(a)) \n m = 6367000 * c\n return m",
"def test_get_distance(self):\n meters = location_util.distance(COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1])\n self.assertAlmostEqual(meters / 1000, DISTANCE_KM, places=2)",
"def distance(gps1, gps2):\n return haversine(gps1.lng, gps1.lat, gps2.lng, gps2.lat)",
"def get_distance_meters(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def getDist(lat1,long1,lat2,long2):\n\tlat1 = math.radians(lat1)\n\tlong1 = math.radians(long1)\n\tlat2 = math.radians(lat2)\n\tlong2 = math.radians(long2)\n\tR = 6371 # km\n\td = cmath.acos(cmath.sin(lat1) * cmath.sin(lat2) + \\\n\tcmath.cos(lat1) * cmath.cos(lat2) *\n\tcmath.cos(long2 - long1)) * R\n\treturn abs(d) # cast to float",
"def get_distance(lat1, long1, lat2, long2):\n x = 69.1*(lat2 - lat1)\n y = 69.1*(long2 - long1) * math.cos(lat1/57.3)\n dist = math.sqrt(x*x + y*y)\n return dist",
"def get_distance_meters(location1, location2):\n dlat = location2.lat - location1.lat\n dlong = location2.lon - location1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5"
] | [
"0.7262544",
"0.66239125",
"0.6588583",
"0.6551321",
"0.6533308",
"0.6515686",
"0.64724946",
"0.64680743",
"0.64418674",
"0.6419749",
"0.63746256",
"0.6371783",
"0.6368408",
"0.6347524",
"0.631554",
"0.6312636",
"0.6309692",
"0.62915975",
"0.62739503",
"0.626852",
"0.62599057",
"0.623383",
"0.6229413",
"0.6225661",
"0.62251055",
"0.6223188",
"0.6218255",
"0.62176406",
"0.62089497",
"0.61837864"
] | 0.6638487 | 1 |
Calculate the difference between two bearings, based on bearings between 0 and 360. | def calculate_bearing_difference(current_bearing, previous_bearing):
difference = current_bearing - previous_bearing
while difference < -180:
difference += 360
while difference > 180:
difference -= 360
return difference | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CalculateCompassDifference(a, b):\n delta = NormalizeAngle(a - b)\n return delta",
"def heading_difference(self, other_heading):\n diff = abs(self.heading - other_heading)\n if diff > 180:\n diff = 360 - diff\n return diff",
"def angle_diff(a1, a2):\n a = a1 - a2\n if abs(a) > 180:\n return np.sign(a)*360 - a\n else:\n return a",
"def direction_diff(direction_a, direction_b):\n diff = abs(direction_a - direction_b)\n return diff if diff < math.pi else 2*math.pi - diff",
"def bearing(a: Point, b: Point) -> float:\n x_north = a.y - b.y\n y_east = b.x - a.x\n _bearing = math.atan2(y_east, x_north)\n bearing_degrees = math.degrees(_bearing)\n if bearing_degrees < 0:\n bearing_degrees += 360\n return bearing_degrees",
"def bearing(start,finish):\n\n s = math.pi * np.squeeze(np.array(start)) / 180\n f = math.pi * np.squeeze(np.array(finish)) / 180\n\n y = math.sin(f[1] - s[1]) * math.cos(f[0])\n x = math.cos(s[0])*math.sin(f[0]) - math.sin(s[0])*math.cos(f[0])*math.cos(f[1] - s[1])\n\n return math.atan2(y,x)/math.pi * 180 % 360",
"def get_bearing(aLocation1, aLocation2):\n [off_y, off_x] = get_position_error(aLocation1, aLocation2)\n \n # bearing is clockwise, offset_y is north, offset_x is east\n bearing = 90.00 + math.degrees(math.atan2(-off_y, off_x))\n if bearing < 0: bearing += 360.00\n \n return bearing",
"def angle_diff(self,a,b):\n self.a = self.angle_normalize(a)\n self.b = self.angle_normalize(b)\n self.d1 = a-b\n self.d2 = 2*math.pi - math.fabs(self.d1)\n if self.d1 > 0:\n self.d2 *= -1.0\n if math.fabs(self.d1) < math.fabs(self.d2):\n return self.d1\n else:\n return self.d2",
"def angle_diff(self,a,b):\n self.a = self.angle_normalize(a)\n self.b = self.angle_normalize(b)\n self.d1 = a-b\n self.d2 = 2*math.pi - math.fabs(self.d1)\n if self.d1 > 0:\n self.d2 *= -1.0\n if math.fabs(self.d1) < math.fabs(self.d2):\n return self.d1\n else:\n return self.d2",
"def angle_diff(self, a, b):\n a = self.angle_normalize(a)\n b = self.angle_normalize(b)\n d1 = a-b\n d2 = 2*math.pi - math.fabs(d1)\n if d1 > 0:\n d2 *= -1.0\n if math.fabs(d1) < math.fabs(d2):\n return d1\n else:\n return d2",
"def angle_diff(self, a, b):\n\n\t\td1 = a-b\n\t\td2 = 2*math.pi - math.fabs(d1)\n\t\tif d1 > 0:\n\t\t\td2 *= -1.0\n\t\tif math.fabs(d1) < math.fabs(d2):\n\t\t\treturn d1\n\t\telse:\n\t\t\treturn d2",
"def angle_diff(self, a, b):\n\n\t\td1 = a-b\n\t\td2 = 2*math.pi - math.fabs(d1)\n\t\tif d1 > 0:\n\t\t\td2 *= -1.0\n\t\tif math.fabs(d1) < math.fabs(d2):\n\t\t\treturn d1\n\t\telse:\n\t\t\treturn d2",
"def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360",
"def rhumb_bearing(start,finish):\n s = math.pi * np.squeeze(np.array(start)) / 180\n f = math.pi * np.squeeze(np.array(finish)) / 180\n\n delta_lat = math.log(math.tan(math.pi/4 + f[0]/2)/\n math.tan(math.pi/4 + s[0]/2))\n delta_lon = f[1]-s[1]\n\n if abs(delta_lon) > math.pi:\n if delta_lon > 0:\n delta_lon = -2*math.pi + delta_lon\n else:\n delta_lon = 2*math.pi + delta_lon\n\n res = 180*math.atan2(delta_lon,delta_lat)/math.pi\n\n return (res + 360) % 360",
"def angle_difference(x, y):\n return 180 - abs(abs(x - y) - 180)",
"def angle_difference(self, x, y):\n return 180 - abs(abs(x - y) - 180)",
"def angle_difference(θ1, θ2):\n ordinary_diff = (θ2 - θ1) % np.pi\n return (np.pi / 2) - np.abs(ordinary_diff - (np.pi / 2))",
"def angle_difference(a1, a2, deg=True, abs_val=False):\n\n if deg is False:\n a1 = rad2deg(a1)\n a2 = rad2deg(a2)\n\n d = (a2-a1+180.0)%360.0-180.0\n\n if abs_val:\n d = numpy.abs(d)\n\n if deg is False:\n return deg2rad(d)\n else:\n return d",
"def bearing(self) -> int:\n return self._bearing",
"def ang_diff(self, theta1, theta2):\n\n return (theta1 - theta2 + np.pi) % (2 * np.pi) - np.pi",
"def angle_diff(ang):\n while ang > math.pi:\n ang -= 2*math.pi\n while ang < -math.pi:\n ang += 2*math.pi\n\n return ang",
"def calculate_bearing(origin, destination):\n\n datetime1, lat1, lon1 = origin\n datetime2, lat2, lon2 = destination\n\n dlon = math.radians(lon2 - lon1)\n\n bearing = math.atan2(math.sin(dlon) * math.cos(math.radians(lat2)),\n math.cos(math.radians(lat1)) * math.sin(math.radians(lat2))\n - math.sin(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.cos(dlon))\n\n bearing_degrees = math.degrees(bearing)\n\n return bearing_degrees",
"def angle_diff(self, i):\n (h0, k0, l0) = [int(np.rint(x)) for x in self.hkl(i)]\n polar0 = self.unit_cell.two_theta((h0, k0, l0), self.wavelength)\n return np.abs(self.polar(i) - polar0)",
"def calculate_angle(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.atan2(dy, dx) * 180.0 / math.pi",
"def angle_difference(ang1,ang2,units):\n ang1r = angle_to_radians(ang1,units)\n ang2r = angle_to_radians(ang2,units)\n y = np.sin(ang2r-ang1r)\n x = np.cos(ang2r-ang1r)\n angdiffr = np.arctan2(y,x)\n return radians_to_angle(angdiffr,units)",
"def bearing(self):\n return self['bearing']",
"def get_bearing(lat1, lon1, lat2, lon2):\n lat1 = math.radians(lat1)\n lon1 = math.radians(lon1)\n lat2 = math.radians(lat2)\n lon2 = math.radians(lon2)\n y = math.sin(lon2 - lon1) * math.cos(lat2)\n x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)\n return (math.degrees(math.atan2(y, x)) + 360) % 360",
"def angle_to(self, other):\n return other.angle - self.angle",
"def coord_bearing_degrees(lat2, long2, lat1, long1):\n lat1 = math.radians(lat1)\n lat2 = math.radians(lat2)\n\n diffLong = math.radians(long1 - long2)\n\n x = math.sin(diffLong) * math.cos(lat2)\n y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)\n * math.cos(lat2) * math.cos(diffLong))\n\n initial_bearing = math.atan2(x, y)\n\n # We now have the initial beating but math.atan2 returns\n # values from -180 to 180 which isn't what we want for a compass\n # beating, solution is to normalize it\n initial_bearing = math.degrees(initial_bearing)\n compass_bearing = 180-initial_bearing\n\n return compass_bearing",
"def get_angle_degrees_between(self, other):\n return math.degrees(self.get_angle_between(other))"
] | [
"0.73599833",
"0.6880248",
"0.68224984",
"0.671869",
"0.67171574",
"0.66473395",
"0.66381854",
"0.66117483",
"0.66117483",
"0.6520156",
"0.64502794",
"0.64502794",
"0.64371663",
"0.62577987",
"0.6250261",
"0.6221615",
"0.6191899",
"0.6185481",
"0.61676335",
"0.61280036",
"0.6099457",
"0.60968316",
"0.60786355",
"0.60450983",
"0.60296124",
"0.59612286",
"0.59524894",
"0.59253824",
"0.5835207",
"0.5779095"
] | 0.80572766 | 0 |
Analyse the change in the course between two points regarding the bearing and acceleration; these features need information from previous points. | def analyse_course(position_df):
print("Analysing course of track")
total_data_points = len(position_df)
earliest_date_time = position_df['date_time'].min()
current_date = earliest_date_time
previous_position = get_location(earliest_date_time, position_df)
datetime_previous, latitude_previous, longitude_previous = previous_position
previous_speed_knots = 0
count_acceleration_errors = 0
line_number = -1
for position in position_df.itertuples():
line_number += 1
row_index = position[0]
if line_number == 0:
#position_df.at[row_index, 'measureland_qualifier_flag_course'] = 1 # assume good value
position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 1 # assume good value
continue
current_position = position[2:5]
# Calculate acceleration between two points
current_conditions = knots_two_points(previous_position, current_position)
current_speed_knots = current_conditions[1] # altered to this because distance and speed are output as a tuple from knots_two_points
time_difference = (current_position[0] - previous_position[0]).total_seconds()
speed_difference_metres_per_sec = (current_speed_knots - previous_speed_knots) * (1852 / 3600) # convert knots to ms-1
if time_difference > 0:
acceleration = speed_difference_metres_per_sec / time_difference
else:
acceleration = 0
# Print errors where data do not meet requirements
error_message_acceleration = ""
if acceleration == "N/A":
error_message_acceleration = "No acceleration value calculated"
position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 9 # no value
elif acceleration > 1:
count_acceleration_errors += 1
error_message_acceleration = "** Acceleration too quick **"
position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 3 # probably bad value
elif acceleration <= 1:
position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 1 # good value
# if error_message_acceleration != "":
# print("Error: {} {} ({:.4f}, {:.4f}) acceleration: {} ms-2".format(error_message_acceleration,
# current_position[0],
# current_position[1],
# current_position[2], acceleration))
previous_position = current_position
#previous_bearing = current_bearing
previous_speed_knots = current_speed_knots
#position_df['measureland_qualifier_flag_course'] = position_df['measureland_qualifier_flag_course'].astype(int)
position_df['measureland_qualifier_flag_acceleration'] = position_df['measureland_qualifier_flag_acceleration'].astype(int)
return (count_acceleration_errors) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calc_speed_features(self):\n if len(self.ball_pos_stamps) >= NUM_STAMPS_CALC_SPEED:\n vx = 0\n vy = 0\n counter = 0\n for i in range(NUM_STAMPS_CALC_SPEED):\n for j in range(i+1, NUM_STAMPS_CALC_SPEED):\n bps_i = self.ball_pos_stamps[i]\n bps_j = self.ball_pos_stamps[j]\n vx += (bps_j[0][0] - bps_i[0][0]) / (bps_j[1] - bps_i[1])\n vy += (bps_j[0][1] - bps_i[0][1]) / (bps_j[1] - bps_i[1])\n counter += 1\n\n vx /= counter\n vy /= counter\n\n self.speed_features = (vx, vy)",
"def compute_trajectory():\n pass",
"def track(self, old_frame, new_frame):\n \n global redetect\n self.old_points = np.reshape(self.old_points, (-1,1,2))\n \n # forward detection\n self.new_points, st, err = cv2.calcOpticalFlowPyrLK(old_frame, \n new_frame, \n self.old_points, \n None, \n **self.lk_params)\n # backward redetection\n old_points_recon, st, err = cv2.calcOpticalFlowPyrLK(new_frame, \n old_frame, \n self.new_points, \n None, \n **self.lk_params)\n \n # discard the points which have even a single pixel displacement\n # after the forward-backward error detection\n d = abs(self.old_points - old_points_recon).reshape(-1,2).max(-1)\n good_points = d < 1\n self.new_points = np.array([pt for pt in itertools.compress(self.new_points,\n good_points)])\n \n # at least two keypoints are neede for tracking\n if len(self.new_points.shape) < 2:\n redetect = True\n return (0,0,0,0)\n\n #self.remove_outliers()\n\n # update the new points\n self.old_points = self.new_points\n\n # get the updated bounding box\n x,y,w,h = cv2.boundingRect(self.new_points)\n self.bounding_box = (x,y,w,h)\n \n return (x,y,w,h)",
"def update_apc11(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n pos = self.pos+delta_t*self.vel\n vel = self.vel+delta_t*kap[1]\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n\n force = self.force(pos,\n vel,\n self.time+delta_t, drag=False)\n\n pos = self.pos+delta_t*vel\n vel = self.vel+delta_t*force\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n try:\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n vel = self.vel+col.delta_t*kap[1]\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest = True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n \n self.time += delta_t\n\n return (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)",
"def track_features(self):\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Compute a rough relative rotation which takes a vector \r\n # from the previous frame to the current frame.\r\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\r\n\r\n # Organize the features in the previous image.\r\n prev_ids = []\r\n prev_lifetime = []\r\n prev_cam0_points = []\r\n prev_cam1_points = []\r\n\r\n for feature in chain.from_iterable(self.prev_features):\r\n prev_ids.append(feature.id)\r\n prev_lifetime.append(feature.lifetime)\r\n prev_cam0_points.append(feature.cam0_point)\r\n prev_cam1_points.append(feature.cam1_point)\r\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\r\n\r\n # Number of the features before tracking.\r\n self.num_features['before_tracking'] = len(prev_cam0_points)\r\n\r\n # Abort tracking if there is no features in the previous frame.\r\n if len(prev_cam0_points) == 0:\r\n return\r\n\r\n # Track features using LK optical flow method.\r\n curr_cam0_points = self.predict_feature_tracking(\r\n prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)\r\n\r\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.prev_cam0_pyramid, self.curr_cam0_pyramid,\r\n prev_cam0_points.astype(np.float32), \r\n curr_cam0_points.astype(np.float32), \r\n **self.config.lk_params)\r\n \r\n # Mark those tracked points out of the image region as untracked.\r\n for i, point in enumerate(curr_cam0_points):\r\n if not track_inliers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n track_inliers[i] = 0\r\n\r\n # Collect the tracked points.\r\n prev_tracked_ids = select(prev_ids, track_inliers)\r\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\r\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\r\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\r\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\r\n\r\n # Number of features left after tracking.\r\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\r\n\r\n # Outlier removal involves three steps, which forms a close\r\n # loop between the previous and current frames of cam0 (left)\r\n # and cam1 (right). 
Assuming the stereo matching between the\r\n # previous cam0 and cam1 images are correct, the three steps are:\r\n #\r\n # prev frames cam0 ----------> cam1\r\n # | |\r\n # |ransac |ransac\r\n # | stereo match |\r\n # curr frames cam0 ----------> cam1\r\n #\r\n # 1) Stereo matching between current images of cam0 and cam1.\r\n # 2) RANSAC between previous and current images of cam0.\r\n # 3) RANSAC between previous and current images of cam1.\r\n #\r\n # For Step 3, tracking between the images is no longer needed.\r\n # The stereo matching results are directly used in the RANSAC.\r\n\r\n # Step 1: stereo matching.\r\n curr_cam1_points, match_inliers = self.stereo_match(\r\n curr_tracked_cam0_points)\r\n\r\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\r\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\r\n prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)\r\n prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)\r\n curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)\r\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\r\n\r\n # Number of features left after stereo matching.\r\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\r\n\r\n # Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.\r\n # cam0_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam0_points, curr_matched_cam0_points,\r\n # cam0_R_p_c, self.cam0_intrinsics, \r\n # self.cam0_distortion_model, self.cam0_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n\r\n # cam1_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam1_points, curr_matched_cam1_points,\r\n # cam1_R_p_c, self.cam1_intrinsics, \r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\r\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\r\n\r\n # Number of features after ransac.\r\n after_ransac = 0\r\n for i in range(len(cam0_ransac_inliers)):\r\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\r\n continue \r\n row = int(curr_matched_cam0_points[i][1] / grid_height)\r\n col = int(curr_matched_cam0_points[i][0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n\r\n grid_new_feature = FeatureMetaData()\r\n grid_new_feature.id = prev_matched_ids[i]\r\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\r\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\r\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\r\n prev_matched_lifetime[i] += 1\r\n\r\n self.curr_features[code].append(grid_new_feature)\r\n after_ransac += 1\r\n self.num_features['after_ransac'] = after_ransac\r\n\r\n # Compute the tracking rate.\r\n # prev_feature_num = sum([len(x) for x in self.prev_features])\r\n # curr_feature_num = sum([len(x) for x in self.curr_features])\r",
"def update_curr_acc_points(self, new_points, ef_pose, step):\n new_points = se3_transform_pc(ef_pose, new_points) \n # the number below can be adjusted for efficiency and robustness\n aggr_sample_point_num = min(int(CONFIG.pt_accumulate_ratio**step * CONFIG.uniform_num_pts), new_points.shape[1])\n index = np.random.choice(range(new_points.shape[1]), size=aggr_sample_point_num, replace=False).astype(np.int)\n\n new_points = new_points[:,index]\n print('new points before filtering with table height', new_points.shape)\n index = new_points[2, :] > self.table_height\n new_points = new_points[:, index]\n print('new points {} total point {}'.format(new_points.shape, self.acc_points.shape))\n\n self.acc_points = np.concatenate((new_points, self.acc_points), axis=1) #\n self.acc_points = regularize_pc_point_count(self.acc_points.T, 4096, use_farthest_point=True).T\n # if it still grows too much, can limit points by call regularize pc point count\n # self.planner.expert_plan can also be called with these dense points directly",
"def getETA():",
"def getETA():",
"def update_apc22(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n if len(self._old) >= 1:\n\n\n try:\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n beta = 0.5*self.delta_t/(self.time-self.get_old(0, 2))\n\n pos = self.pos+delta_t*((1+beta)*self.vel-beta*self.get_old(0, 0))\n vel = self.vel+delta_t*((1+beta)*kap[1]-beta*self.get_old(0, 1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n pos = self.pos+delta_t/2.0*(self.vel+vel)\n vel = self.vel+delta_t/2.0*(self.force(pos, vel, self.time+delta_t,\n drag=False)+kap[1])\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n beta = 0.5*col.delta_t/(self.time-self.get_old(0, 2))\n vel = self.vel+col.delta_t*(1+beta)*kap[1]-beta*self.get_old(0, 1)\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest=True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n\n self.time += delta_t\n\n else:\n ## reduced to using the Adams first order method for the first timestep:\n\n kap = update_apc1(self)\n\n self.set_old(kap, 1)\n\n return kap",
"def analyze_orbit_corrector(OC1, OC2, beamline, phase_beg):\n\n M = np.identity(4)\n OC_parameters = np.zeros(4)\n\n for element in beamline:\n M = np.dot(element.M1, M)\n\n # Since the X and Y are decoupled, we can treat them separately.\n M_x = M[0:2, 0:2]\n M_y = M[2:4, 2:4]\n\n L1 = [[OC1.length/2], [1]]\n L2 = [[OC2.length/2], [1]]\n\n M_OC1 = np.array(OC1.M1)[0:2, 0:2]\n M_OC2 = np.array(OC2.M1)[0:2, 0:2]\n\n # The following part solve the cx_1 and cx_2\n M1_x = np.linalg.multi_dot([M_OC2, M_x, L1])\n M2_x = np.linalg.multi_dot([M_OC2, M_x, M_OC1])\n M_OC_x = np.hstack((M1_x, L2))\n\n OC_parameters[0:2] = -np.linalg.multi_dot([np.linalg.inv(M_OC_x), M2_x, phase_beg[0:2]])\n # The end of the X-part\n\n # The following part solve the cy_1 and cy_2\n M1_y = np.linalg.multi_dot([M_OC2, M_y, L1])\n M2_y = np.linalg.multi_dot([M_OC2, M_y, M_OC1])\n M_OC_y = np.hstack((M1_y, L2))\n\n OC_parameters[2:4] = -np.linalg.multi_dot([np.linalg.inv(M_OC_y), M2_y, phase_beg[2:4]])\n # The end of the Y-part\n\n\n return OC_parameters",
"def main():\n \n def get_x_input():\n \"\"\"\n This gets the initial x position and velocity values\n Param:none\n Return:Tuple with x pos and vel\n \"\"\"\n # Ask for and validate user input for x pos and vel\n while True:\n try:\n posx = float(input(\"Please enter the initial x position in m: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n\n while True:\n try:\n velx = float(input(\"Please enter the initial x velocity in m/s: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n \n #return tuple\n xinput = (posx, velx)\n return xinput\n\n def get_y_input():\n \"\"\"\n This gets the initial y position and velocity values\n Param:none\n Return:Tuple with y pos and vel\n \"\"\" \n # Ask for and validate user input for y pos and vel\n while True:\n try:\n posy = float(input(\"Please enter the initial y position in m: \"))\n\n #start at ground\n if posy < 0:\n print(\"Please enter a positive value.\")\n continue\n\n except ValueError:\n print(\"Invalid input\")\n continue\n else:\n break\n\n while True:\n try:\n vely = float(input(\"Please enter the initial y velocity in m/s: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n \n # Return tuple\n yinput = (posy, vely)\n return yinput\n\n #Inital position and velocity of user input x and y\n posx0, velx0 = get_x_input()\n posy0, vely0 = get_y_input()\n \n #acceleration y acceleration is gravity\n accelx = 0.0\n GRAVITY = -9.8 \n \n #Initial time of 0s, time intervals of .01 s\n deltat = .01\n t = 0.0\n \n #lists of all x and y positions in the motion \n x = [posx0]\n y = [posy0]\n \n #limit of time intervals to calculate\n intervals = 4000\n\n for i in range(0, intervals):\n #increment time, add xy positions at that time\n t = t + deltat\n x.append(position(posx0, velx0, t, accelx))\n y.append(position(posy0, vely0, t, GRAVITY))\n \n #if the projectile has hit the ground, break\n if y[i+1] <= 0:\n break\n\n plot_motion(x, y)",
"def approach_gps(g_lat,g_lon,emily_lat_start, emily_lon_start, pose_rad, Parameters): #approach a gps position using potential fields\r\n\tx_goal,y_goal = latlongtoxy(g_lat,g_lon,g_lat)\r\n\tx_e_start,y_e_start = latlongtoxy(emily_lat_start,emily_lon_start,g_lat)\r\n\r\n\tprint (\"\\n HERE I AM\\n\\n\")\r\n\r\n\tdist = haver_distance(g_lat, g_lon, emily_lat_start, emily_lon_start)\r\n\tinitial_dist = dist\r\n\r\n\tprint ('Distance: ',dist)\r\n\theading = get_heading(emily_lat_start, emily_lon_start, g_lat, g_lon)\r\n print ('After get heading')\r\n\t# Eric: I'm not sure if turn_towards is necessary for a successful run.\r\n\t#turn_towards(heading)\r\n\tprint ('After Turn towards')\r\n\t#turn towards the goal initially\r\n\r\n\tstart_time = time.time()\r\n\tcurrent_time = 0\r\n\tdstore = []\r\n\thstore = []\r\n\twhile(dist >= goal_radius):\r\n\r\n\t\t#------------ code for reading gps location of emily and its orientation ------\r\n\t\te_lat = vehicle.location.global_frame.lat\r\n\t\te_lon = vehicle.location.global_frame.lon\r\n\t\te_heading = vehicle.heading * pi/180\t\t# convert heading to radians\r\n\t\t#------------------ get e_lat,e_lon, e_orient ---------------------\r\n\r\n\r\n\t\tx_e,y_e = latlongtoxy(e_lat,e_lon,g_lat)\t\t\t#change latitude and longitude to xy\r\n\r\n\t\t#x,y are given to approach victim function as y,x to algin the north heading and direction in x,y\r\n\r\n\t\tdx,dy = approach_victim_behaviour(y_goal,x_goal, y_e,x_e, pose_rad, Parameters)\t#get potential field vector\r\n\t\trc1, rc3 = dxdytorc(dx,dy, e_heading,g_lon)\t\t\t\t\t#get rc parameters\r\n\t\tdist = haver_distance(g_lat, g_lon, e_lat, e_lon)\t\t\t\t#haversine distance\r\n\r\n\t\tcurrent_time = time.time() - start_time\r\n\t\tprint (\"Time, Heading, Distance\")\r\n\t\tprint (current_time, e_heading*180/pi, dist)\r\n\t\tdstore.append(dist)\r\n\t\thstore.append(e_heading*180/pi)\r\n\t\t#code for sending the writing the rc commands\r\n\t\t# 3 is the thrust control\r\n\t\t#vehicle.channels.overrides = {'3':rc3}\r\n\t\tsendThrottleCommand(rc3, enableThrottle)\r\n\t\ttime.sleep(0.5)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\tprint (\"Rudder: \",rc1)\r\n\t\tprint (\"Throttle: \",rc3)\r\n\t\tsaveToLog(e_lat, e_lon,dist,rc1,rc3)\r\n\t\ttime.sleep(0.5)\r\n\tprint(initial_dist)\r\n\tprint(\"intial \", emily_lat_start,emily_lon_start)\r\n\tprint(\"final \",e_lat,e_lon)\r\n\tplt.plot(dstore)\r\n\t#plt.title('Distance form home vs time')\r\n\tplt.xlabel(\"Time\")\r\n\tplt.ylabel('Distance')\r\n\tplt.show()\r\n\tplt.plot(hstore)\r\n\tplt.show()",
"def prep_data(filename):\n column_name = ['time', 'x_accel', 'y_accel', 'z_accel', 'total_accel']\n raw_data = pd.read_csv(filename, names=column_name, header=None) \n raw_data = raw_data.drop(0)\n\n raw_data['time'] = raw_data['time'].apply(to_float)\n raw_data['total_accel'] = raw_data['total_accel'].apply(to_float)\n raw_data['x_accel'] = raw_data['x_accel'].apply(to_float)\n raw_data['y_accel'] = raw_data['y_accel'].apply(to_float)\n raw_data['z_accel'] = raw_data['z_accel'].apply(to_float)\n \n accel = raw_data['total_accel'].tolist()\n time = raw_data['time'].tolist()\n x = raw_data['x_accel'].tolist()\n y = raw_data['y_accel'].tolist()\n z = raw_data['z_accel'].tolist()\n\n # Lowess accelerations \n x_lowess = lowess(x, time, frac=0.09)\n y_lowess = lowess(y, time, frac=0.09)\n z_lowess = lowess(z, time, frac=0.09)\n\n x_vel = np.trapz(x_lowess[:,1], time)\n y_vel = np.trapz(y_lowess[:,1], time)\n z_vel = np.trapz(z_lowess[:,1], time)\n \n print(x_vel, y_vel, z_vel)\n print(calc_distance((0,0), (x_vel, y_vel))) \n lowess_columns = {'time':time,\n 'x_lowess':x_lowess[:,1],\n 'y_lowess':y_lowess[:,1],\n 'z_lowess':z_lowess[:,1]\n }\n data_lowess = pd.DataFrame(lowess_columns)\n plt.plot(time, x_lowess[:,1], \"r-\", linewidth=2, alpha=0.2)\n plt.plot(time, y_lowess[:,1], \"g-\", linewidth=2, alpha=0.2)\n plt.plot(time, z_lowess[:,1], \"b-\", linewidth=2, alpha=0.2)\n plt.title('LOWESS smoothed acceleration')\n plt.xlabel('time')\n plt.ylabel('acceleration')\n plt.legend(['x', 'y', 'z'])\n plt.show()\n \n # Kalman Filter accelerations\n \n \"\"\"\n # Kalman Filter\n std = np.std(accel)\n print(std)\n initial_guess = [0]\n observation_covariance = std**2#np.diag([std, std]) ** 2\n kf = KalmanFilter(\n initial_state_mean= initial_guess,\n initial_state_covariance=observation_covariance,\n observation_covariance=observation_covariance,\n )\n pred_state, state_cov = kf.smooth(accel)\n \n plt.plot(time, pred_state[:,0], 'g-', linewidth=2, alpha=0.5)\n plt.plot(time, accel, 'b.', alpha = 0.1)\n plt.show()\n \"\"\"",
"def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()",
"def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]",
"def g(point, contact_point, force_direction, ball_loc, t):\n # line equation = ball_loc + t*direction\n # distance to the ooi\n #distance = ( np.linalg.norm( np.cross((ball_loc[:2] - point[:2]), force_direction[:2], 0, 0) ) / \n # np.linalg.norm(force_direction[:2]))\n direction = force_direction\n force_direction = force_direction + contact_point\n print force_direction\n distance = np.linalg.norm(np.cross(point[:2] - contact_point[:2], point[:2] -\n force_direction[:2], 0 , 0)) / np.linalg.norm(abs(force_direction[:2] -\n contact_point[:2]))\n #the smaller the distance, the bigger the number\n distance = 100 / distance\n\n global accuracy_point \n accuracy_point= accuracy_point + [distance]\n\n retract_distance_x = math.sqrt(np.vdot(contact_point[0] - point[0],\n contact_point[0] - point[0]))\n retract_distance_y = math.sqrt(np.vdot(contact_point[1] - point[1],\n contact_point[1] - point[1]))\n retract_distance_z = math.sqrt(np.vdot(contact_point[2] - point[2], contact_point[2] - point[2]))\n global xy\n xy = xy + [retract_distance_x + retract_distance_y]\n global z \n z = z + [retract_distance_z * 0.3]\n\n retract_distance = 0\n # the retraction distance gets favored in the x and y directions\n retract_distance = (direction[0] * retract_distance_x +\n direction[1] *\n retract_distance_y + 0.3 * retract_distance_z)\n #force_direction[1] * retract_distance_y + force_direction[2] * retract_distance_z)\n global distance_point \n print retract_distance\n distance_point = distance_point + [np.ndarray.tolist(retract_distance)[0][0]]\n return (retract_distance, distance)",
"def detect_features(self):\n # P.S. the features and descriptors of frame A are calculated beforehand\n self.featureFrameB, self.featureDesB = self.orb.detectAndCompute(self.frameB, None)",
"def collect(self):\n self.prev_position = {'lefthand':kinect.get_coord('lefthand'), \n 'righthand':kinect.get_coord('righthand')}\n \n while True:\n for hand in 'lefthand','righthand':\n position = kinect.get_coord(hand)\n displacement = vector.Subtract(position, self.prev_position[hand])\n acceleration = vector.Distance(displacement, self.prev_displacement[hand])\n self.prev_position[hand] = position\n self.prev_displacement[hand] = displacement\n # We square the acceleration so the variance is exaggerated\n self.accelerations[hand].append(acceleration**2) \n \n # Limit to latest self.sample_limit samples\n self.accelerations[hand] = self.accelerations[hand][-self.sample_limit:] \n self.velocities[hand].append(vector.Magnitude(displacement))\n self.velocities[hand] = self.velocities[hand][-self.sample_limit:]\n time.sleep(self.rate)",
"def optimize(self, acceleration=True, plot=False, fileExtension=None):\n \n x = np.zeros(self.nUnknowns+self.nLambda)\n x[0:self.nUnknowns] = self.physicalToTransformed(self.initial)\n\n chi2 = 1e10\n chi2Old = 1e20\n relchi2 = np.abs((chi2 - chi2Old) / chi2Old)\n xnew = np.copy(x)\n \n loop = 0 \n loopInt = 0\n \n lambdaLM = 1e-3\n chi2Best = 1e10\n chi2Old = 1e10\n nWorstChi2 = 0\n\n # dChi2Old = 0\n\n self.chi2 = []\n self.l0 = []\n self.l1 = []\n \n while ((relchi2 > 1e-6) & (loop < 20) & (nWorstChi2 < 8)):\n\n chi2, chi2NW, dChi2, ddChi2, stokes = self.computeFunctionAndGradient(x[0:self.nUnknowns], x[self.nUnknowns:])\n\n chi2Old = np.copy(chi2) \n \n H = 0.5 * ddChi2 \n H += np.diag(lambdaLM * np.diag(H))\n gradF = 0.5 * dChi2\n\n# First deal with the Hazel part\n U, w, VT = np.linalg.svd(H[0:self.nUnknowns,0:self.nUnknowns], full_matrices=True)\n\n wmax = np.max(w)\n wInv = 1.0 / w\n wInv[w < 1e-6*wmax] = 0.0\n\n# xnew = xold - H^-1 * grad F\n deltaxnew = -VT.T.dot(np.diag(wInv)).dot(U.T).dot(gradF[0:self.nUnknowns])\n xnew[0:self.nUnknowns] = x[0:self.nUnknowns] + deltaxnew\n \n chi2, chi2NW, stokes = self.meritFunction(xnew[0:self.nUnknowns], xnew[self.nUnknowns:])\n \n if ((loop + 1) % 5 == 0):\n thr = self.lambdaL1 \n thr = self.lambdaL1 + 500.*self.lambdaL1*np.exp(-loop)\n print(thr)\n\n if (self.wavelet == 'iuwt'):\n tmp = (self.obs[0,:] - stokes[0,:])[:,None]\n if (self.innerIterations == 1):\n res = ps.sparse.iuwt_decomposition(tmp, self.nLevelsIUWT, 0, True)\n res[0][np.abs(res[0]) < thr] = 0.0 \n xnew[self.nUnknowns:] = ps.sparse.iuwt_recomposition(res[0], 0, res[1])[:,0]\n else: \n xnew[self.nUnknowns:] = ps.sparse.proxes.prox_l1General(tmp, self.forwardIUWT, self.backwardIUWT, \n thr, threshold=self.thresholdIUWT, verbose=False)[:,0]\n\n if (self.wavelet == 'wavelet'):\n xnew[self.nUnknowns:] = ps.sparse.proxes.prox_l1General(self.obs[0,:] - stokes[0,:], self.wavedec, self.waverec, thr, threshold='hard', verbose=False)\n\n xnew[self.nUnknowns:] /= (1.0+xnew[self.nUnknowns])\n xnew[self.nUnknowns:] = upLimitThr(xnew[self.nUnknowns:], 0.0)\n\n #x = np.copy(xnew)\n\n chi2, chi2NW, stokes = self.meritFunction(xnew[0:self.nUnknowns], xnew[self.nUnknowns:])\n \n if (chi2NW < chi2Best):\n if (lambdaLM >= 1e4):\n lambdaLM /= 100.0\n elif ((lambdaLM >= 1e-4) or (lambdaLM < 1e4)):\n lambdaLM /= 10.0\n elif(lambdaLM < 1e-4):\n lambdaLM /= 5.0\n if (lambdaLM < 1e-6):\n lambdaLM = 1e-6\n\n chi2Best = np.copy(chi2NW)\n x = np.copy(xnew)\n nWorstChi2 = 0\n else:\n if (lambdaLM > 1e4):\n lambdaLM *= 100.0\n elif ((lambdaLM >= 1e-4) or (lambdaLM < 1e4)):\n lambdaLM *= 10.0\n elif(lambdaLM < 1e-4):\n lambdaLM *= 5.0\n nWorstChi2 += 1\n\n relchi2 = np.abs((chi2 - chi2Old) / chi2Old)\n l1Norm = np.linalg.norm(x[self.nUnknowns:], 1)\n\n if (self.wavelet == 'iuwt'):\n tmp = self.forwardIUWT(xnew[self.nUnknowns:][:,None])\n l0Norm = np.sum(np.abs(tmp) > 1e-6)\n if (self.wavelet == 'wavelet'): \n l0Norm = np.sum(np.abs(self.wavedec(x[self.nUnknowns:])) > 1e-10)\n \n print(\"Iteration {0} - chi2={1:10.4f} - l1={2} - l0={3} - relchi2={4} - lambda={5}\".format(loop, chi2NW, l1Norm, l0Norm, relchi2, lambdaLM))\n self.printNodes(x[0:self.nUnknowns])\n\n self.chi2.append(chi2NW)\n self.l0.append(l0Norm)\n self.l1.append(l1Norm)\n\n \n loop += 1\n\n xPhysical = self.transformedToPhysical(x[0:self.nNodesTotal])\n nodes = self.vector2Nodes(xPhysical)\n stokes, cont, atmosNew = ps.radtran.synthLTENodes(self.referenceAtmos, nodes)\n stokes /= self.contHSRA\n\n sys = x[self.nUnknowns:]\n\n np.savez( 
\"results/lte_{0}_lambda_{1}_inner_{2}.npz\".format(self.wavelet,fileExtension,self.innerIterations), self.obs, stokes, sys, self.chi2, x, self.wavelength, \n self.l1, self.l0, self.maskChi2)\n\n\n # pl.close('all')\n\n \n # f, ax = pl.subplots(nrows=2, ncols=2, figsize=(12,9))\n # ax = ax.flatten()\n # labelStokes = ['I/Ic','Q/Ic','U/Ic','V/Ic']\n # ax[0].plot(self.obs[0,:], label='obs')\n # ax[0].plot(stokes[0,:] + sys, label='stokes+sys')\n # ax[0].plot(1.0+sys, label='sys')\n # ax[0].plot(stokes[0,:], label='stokes')\n\n # ax[0].legend()\n\n # ax[1].plot(self.obs[0,:] / (1.0+sys))\n # ax[1].plot(stokes[0,:])\n \n # pl.tight_layout()\n if (plot):\n pl.savefig('/scratch/Dropbox/CONGRESOS/2015/Hinode9/code/systematicsExampleWithFit.png')\n\n \n print(\"--------\")\n print(\"l1 norm of systematics : {0}\".format(np.linalg.norm(x[self.nUnknowns:], 1)))\n\n return x",
"def update_apc23(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n if len(self._old) >= 1:\n\n\n try:\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n beta = 0.5*self.delta_t/(self.time-self.get_old(0, 2))\n\n pos = self.pos+delta_t*((1+beta)*self.vel-beta*self.get_old(0, 0))\n vel = self.vel+delta_t*((1+beta)*kap[1]-beta*self.get_old(0, 1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n\n beta1 = (3.0*(self.time-self.get_old(0,2))+delta_t)/(6.0*self.time-self.get_old(0,2))\n beta2 = -delta_t**2/(6.0*(self.time+delta_t-self.get_old(0,2))*(self.time-self.get_old(0,2)))\n\n print self.force(pos, vel, self.time+delta_t), kap[1], self.get_old(0,1)\n\n pos = self.pos+delta_t*((1.0-beta1-beta2)*vel+beta1*self.vel+beta2*self.get_old(0, 0))\n vel = self.vel+delta_t*((1.0-beta1-beta2)*self.force(pos, vel, self.time+delta_t,\n drag=False)+beta1*kap[1]+beta2*self.get_old(0,1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n beta = 0.5*col.delta_t/(self.time-self.get_old(0, 2))\n vel = self.vel+col.delta_t*(1+beta)*kap[1]-beta*self.get_old(0, 1)\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest=True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n\n self.time += delta_t\n\n else:\n ## reduced to using the Adams first order method for the first timestep:\n\n kap = update_apc1(self)\n\n self.set_old(kap, 1)\n\n return kap",
"def proz(): \r\n print(\"processing: \",CURDATA()[0]) \r\n Check_180turn(left_boundary,right_boundary)\r\n EF() #exponential window multiplication + fourier\r\n APK0() #1. Phase correction 0th Ordnung\r\n APK1() #1. Phase correction 1st Ordnung\r\n ABS() #Baseline correction\r\n APK()\r\n ABS() #Baseline correction\r\n Check_180turn(left_boundary,right_boundary)",
"def acc_or_brake(name_of_track, value):\n\n df = pd.read_csv(name_of_track, index_col=0)\n\n x = df[\"x\"] # x, y track data and corresponding acceleration in that point\n y = df[\"y\"]\n a = df[value]\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n fig, axs = plt.subplots(2, 1, sharex=True, sharey=True)\n\n mean = np.mean(a) # defining the median and standard deviation of the acceleration\n std = np.std(a)\n\n norm = plt.Normalize(mean - std, mean + std)\n lc = LineCollection(segments, cmap='viridis', norm=norm)\n\n lc.set_array(a)\n lc.set_linewidth(2)\n line = axs[0].add_collection(lc)\n fig.colorbar(line, ax=axs[0])\n\n cmap = ListedColormap(['r', 'g', 'b'])\n norm = BoundaryNorm([-10, -1, 1, 10], cmap.N) # your set of boundaries\n lc = LineCollection(segments, cmap=cmap, norm=norm)\n lc.set_array(a)\n lc.set_linewidth(2)\n line = axs[1].add_collection(lc)\n fig.colorbar(line, ax=axs[1])\n\n axs[0].set_xlim(-20, 50)\n axs[0].set_ylim(-5, 45)\n plt.xlabel(\"x [m]\")\n plt.ylabel(\"y [m]\")\n plt.show()",
"def tcs2(self):\n S = self.M.allocState({})\n self.M.propagate(S, 0, 1)\n\n # set initial beam data\n S.ref_IonZ = self.refIonZ\n S.IonZ = self.IonZ\n\n S.moment0 = self.BC0\n S.moment1 = self.ENV0\n\n S.ref_IonEk = self.refIonEk\n \n S.phis = S.moment0[PS_S,:]\n S.IonEk = S.moment0[PS_PS,:]*MeVtoeV + S.ref_IonEk\n\n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n \n U,T = self.save(U, T, S)\n\n\n #S.clng = self.clng\n\n fin = len(self.M)\n\n H = self.M.propagate(S, 1, fin, observe=range(fin))\n \n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n\n for i in range(fin-1):\n U,T = self.save(U, T, H[i][1])\n\n return U,T",
"def processAlgorithm(self, parameters, context, feedback):\n NO2_present_raster = self.parameterAsRasterLayer(parameters, self.INPUTNP, context)\n NO2_present_data_source = gdal.Open(NO2_present_raster.dataProvider().dataSourceUri())\n arr_NO2_present = NO2_present_data_source.GetRasterBand(1).ReadAsArray()\n\n PM10_present_raster = self.parameterAsRasterLayer(parameters, self.INPUTPP, context)\n PM10_present_data_source = gdal.Open(PM10_present_raster.dataProvider().dataSourceUri())\n arr_PM10_present = PM10_present_data_source.GetRasterBand(1).ReadAsArray()\n\n ozono_present_raster = self.parameterAsRasterLayer(parameters, self.INPUTOP, context)\n ozono_present_data_source = gdal.Open(ozono_present_raster.dataProvider().dataSourceUri())\n arr_ozono_present = ozono_present_data_source.GetRasterBand(1).ReadAsArray()\n\n arr_present = arr_ozono_present + arr_PM10_present + arr_NO2_present\n\n NO2_future_raster = self.parameterAsRasterLayer(parameters, self.INPUTNF, context)\n NO2_future_data_source = gdal.Open(NO2_future_raster.dataProvider().dataSourceUri())\n arr_NO2_future = NO2_future_data_source.GetRasterBand(1).ReadAsArray()\n\n PM10_future_raster = self.parameterAsRasterLayer(parameters, self.INPUTPF, context)\n PM10_future_data_source = gdal.Open(PM10_future_raster.dataProvider().dataSourceUri())\n arr_PM10_future = PM10_future_data_source.GetRasterBand(1).ReadAsArray()\n\n ozono_future_raster = self.parameterAsRasterLayer(parameters, self.INPUTOF, context)\n ozono_future_data_source = gdal.Open(ozono_future_raster.dataProvider().dataSourceUri())\n arr_ozono_future = ozono_future_data_source.GetRasterBand(1).ReadAsArray()\n\n arr_future = arr_ozono_future + arr_PM10_future + arr_NO2_future\n\n area_pixel = self.parameterAsInt(parameters, self.PIXEL_RES, context) * self.parameterAsInt(\n parameters, self.PIXEL_RES, context)\n\n NO2_euro_coeff = 77641.89\n ozono_euro_coeff = 14658.11\n PM10_euro_coeff = 17132.56\n\n arr_euro_present_NO2 = arr_NO2_present * NO2_euro_coeff\n arr_euro_present_ozono = arr_ozono_present * ozono_euro_coeff\n arr_euro_present_PM10 = arr_PM10_present * PM10_euro_coeff\n arr_value_present = arr_euro_present_PM10 + arr_euro_present_ozono + arr_euro_present_NO2\n\n arr_euro_future_NO2 = arr_NO2_future * NO2_euro_coeff\n arr_euro_future_ozono = arr_ozono_future * ozono_euro_coeff\n arr_euro_future_PM10 = arr_PM10_future * PM10_euro_coeff\n arr_value_future = arr_euro_future_PM10 + arr_euro_future_ozono + arr_euro_future_NO2\n\n arr_diff_NO2 = arr_euro_future_NO2 - arr_euro_present_NO2\n arr_diff_PM10 = arr_euro_future_PM10 - arr_euro_present_PM10\n arr_diff_ozono = arr_euro_future_ozono - arr_euro_present_ozono\n\n arr_diff_tot = arr_diff_NO2 + arr_diff_PM10 + arr_diff_ozono\n\n # Initialize and write on output raster\n path_output = self.parameterAsString(parameters, self.OUTPUT, context)\n file_output = path_output + '/SE_02_rimozione_inquinanti_delta_euro.tiff'\n driver = gdal.GetDriverByName(\"GTiff\")\n [cols, rows] = arr_NO2_present.shape\n diff_tot = np.sum(arr_diff_tot) / (cols * rows )\n outdata = driver.Create(file_output, rows, cols, 1, gdal.GDT_Float64)\n outdata.SetGeoTransform(NO2_present_data_source.GetGeoTransform()) ##sets same geotransform as input\n outdata.SetProjection(NO2_present_data_source.GetProjection()) ##sets same projection as input\n outdata.GetRasterBand(1).WriteArray(arr_diff_tot)\n print(np.max(outdata.GetRasterBand(1).ReadAsArray()))\n outdata.FlushCache()\n\n # Years\n present = self.parameterAsInt(parameters, self.INPUTPRE, 
context)\n future = self.parameterAsInt(parameters, self.INPUTFUT, context)\n report_output = path_output + '/SE_rimozione_inquinanti.txt'\n f = open(report_output, \"w+\")\n today = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')\n f.write(\"Sommario dell'analisi della rimozione inquinanti\\n\")\n f.write(\"Data: \" + today +\"\\n\\n\\n\")\n f.write(\"Analisi stato di fatto\\n\\n\")\n f.write(\"Anno corrente: %i \\n\" % (present))\n f.write(\"Rimozione NO2 Stato attuale (ton): %f \\n\" % (np.sum(arr_NO2_present)))\n f.write(\"Rimozione PM10 Stato attuale (ton): %f \\n\" % (np.sum(arr_PM10_present)))\n f.write(\"Rimozione ozono Stato attuale (ton): %f \\n\" % (np.sum(arr_ozono_present)))\n f.write(\"Valore totale della rimozione inquinanti (€): %f \\n\\n\\n\" % (np.sum(arr_value_present)))\n f.write(\"Analisi stato di progetto\\n\\n\")\n f.write(\"Anno progetto: %i \\n\" % (future))\n f.write(\"Rimozione NO2 Stato di progetto (ton): %f \\n\" % (np.sum(arr_NO2_future)))\n f.write(\"Rimozione PM10 Stato di progetto (ton): %f \\n\" % (np.sum(arr_PM10_future)))\n f.write(\"Rimozione ozono Stato di progetto (ton): %f \\n\" % (np.sum(arr_ozono_future)))\n f.write(\"Valore totale della rimozione inquinanti (€): %f \\n\\n\\n\" % (np.sum(arr_value_future)))\n f.write(\"Differenze tra stato di progetto e stato attuale\\n\\n\")\n f.write(\"Anno progetto: %i - %i\\n\" % (present, future))\n f.write(\"Differenza della rimozione inquinanti (ton):: %f \\n\" % (np.sum(arr_future - arr_present)))\n f.write(\"Differenza sequestro inquinanti per unità di superficie (ton/ha): %f \\n\" % (\n np.sum(arr_future - arr_present) / (cols * rows * area_pixel) * 10000))\n f.write(\"Differenza in termini economici del SE Rimozione inquinanti (stato di progetto – stato attuale) (€):%d \\n\" % (\n np.sum(arr_diff_tot))) \n return {self.OUTPUT: 'Completed'}\n\n \n # ----------------------------------------------------------------------------------- \n # Copyright (c) 2021 Città di Torino.\n # \n # This material is free software: you can redistribute it and/or modify\n # it under the terms of the GNU General Public License as published by\n # the Free Software Foundation, either version 2 of the License, or\n # (at your option) any later version.\n # \n # This program is distributed in the hope that it will be useful,\n # but WITHOUT ANY WARRANTY; without even the implied warranty of\n # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n # GNU General Public License for more details.\n # \n # You should have received a copy of the GNU General Public License\n # along with this program. If not, see http://www.gnu.org/licenses.\n # ----------------------------------------------------------------------------------- ",
"def at_b (self):\n self.argc = int((len(n.coord[0]))/2)\n self.pts_con = np.array(self.coord[:,self.argc:len(n.coord[0])])\n\n self.xd = self.xdi\n self.zd = self.zdi \n \n for i, x in enumerate(self.xdi):\n self.aux_con = self.pts_con[0] - x \n self.arg1 = np.argmin(abs(self.aux_con)) \n \n if (self.aux_con[self.arg1] < 0 and self.arg1 == 0) or (self.aux_con[self.arg1] > 0 and self.arg1 == len(self.aux_con)-1):\n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif (self.aux_con[self.arg1] > 0 and self.aux_con[self.arg1+1] > self.aux_con[self.arg1]): #(self.aux_con[self.arg1] < 0 and self.aux_con[self.arg1-1] > self.aux_con[self.arg1]) or \n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif self.aux_con[self.arg1] < 0:\n #print(self.arg1)\n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 - 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1])\n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n\n elif self.aux_con[self.arg1] > 0:\n #print(self.arg1) \n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 + 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1]) \n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n \n #print('Defensa {0}\\n{1}: {2}\\n{3}: {4}'.format(i,self.arg1,self.aux_con[self.arg1],self.arg2,self.aux_con[self.arg2])) \n \n #self.yd = self.yd\n self.b = np.array([self.xd,self.yd,self.zd])\n #self.b.loc[:,('y')] = self.b.loc[:,('y')] ",
"def compute_acceleration(self, x, y):\n a_x = G * self.m / (x*x + y*y) * -x/(np.sqrt(x*x + y*y))\n a_y = G * self.m / (x*x + y*y) * -y/(np.sqrt(x*x + y*y))\n return np.array([a_x, a_y])",
"def compute_vel(self, state, goal):\n\n \"\"\"\n Unicycle model control law:\n [v;w] = [kp 0 0; 0 ka kb]*[p;a;b]\n v = commanded linear velocity of robot\n w = commanded rotational velcoity of robot\n kp = gain parameter where kp > 0\n ka = gain parameter where ka - kp > 0\n kb = gain parameter where kb < 0\n p = distance from robot to goal\n a = angle between current robot heading and heading to goal\n b = error between current heading to goal and target end heading\n \"\"\"\n \n #print('state,goal,v,w')\n #print(state)\n #print(goal)\n\n xr = state[0][0] # m in world frame\n yr = state[1][0] # m in world frame\n thetar = state[2][0] #rads\n\n xg = goal[0] # m in world frame\n yg = goal[1] # m in world frame\n\n dy = yg - yr\n dx = xg - xr\n\n #print('')\n #print(state)\n #print(goal)\n \n # Calculate a\n a = -1*thetar + math.atan2(dy,dx)\n\n #print(a)\n\n if a > math.pi:\n a = a - 2*math.pi\n\n if a < -1*math.pi:\n a = a + 2*math.pi\n\n #print(a)\n\n # Set omega according to control law\n omega = self.ka*a\n if math.fabs(omega) > self.MAX_OMEGA:\n if omega > 0:\n omega = self.MAX_OMEGA\n else:\n omega = -1*self.MAX_OMEGA\n\n # Calculate P\n p = math.sqrt(dy*dy + dx*dx)\n\n # Set v \n v = self.kp*p\n if v > self.MAX_SPEED:\n v = self.MAX_SPEED\n\n # set the done value\n done = (p <= self.done_distance)\n\n #print(v)\n #print(omega)\n\n out_tuple = (v, omega, done)\n \n return out_tuple",
"def control(self, distance, angle): #return [acceleration, turn]\n # matrix representation\n # activations of input concepts\n self.C[0] = self.MFgoalLeft(angle)\n self.C[1] = self.MFgoalCenter(angle)\n self.C[2] = self.MFgoalRight(angle)\n self.C[3] = self.MFgoalClose(distance)\n self.C[4] = self.MFgoalMiddle(distance)\n self.C[5] = self.MFgoalFar(distance)\n # activations of output concepts\n for i in range(6):\n self.C[6] += self.matrix[6][i] * self.C[i]\n for i in range(6):\n self.C[7] += self.matrix[7][i] * self.C[i]\n # limit outputs\n if self.C[6] < -1: self.C[6] = -1.0\n elif self.C[6] > 1: self.C[6] = 1.0\n if self.C[7] < -1: self.C[7] = -1.0\n elif self.C[7] > 1: self.C[7] = 1.0\n # return outputs\n acceleration = self.MFacceleration(self.C[6])\n turn = self.MFturn(self.C[7])\n #print 'input = ' + str(distance) + ' ' + str(angle)\n #print 'output = ' + str(acceleration) + ' ' + str(turn)\n return [acceleration,turn]",
"def front_column_model_p_gain():",
"def report_result(force_a_before, force_b_before, force_a_after, force_b_after):\n damage_a = 0.0\n damage_b = 0.0\n ################################# YOUR CODE HERE #################################\n damage_a = calculate_training_cost(force_a_before) - calculate_training_cost(force_a_after)\n damage_b = calculate_training_cost(force_b_before) - calculate_training_cost(force_b_after)\n ##################################################################################\n return damage_a, damage_b"
] | [
"0.5762477",
"0.56302744",
"0.5575331",
"0.5492774",
"0.5490755",
"0.54605937",
"0.5433181",
"0.5433181",
"0.53979737",
"0.53446215",
"0.53392273",
"0.5332665",
"0.53279614",
"0.53275424",
"0.53230864",
"0.5313829",
"0.5306313",
"0.5281994",
"0.52609384",
"0.5243571",
"0.5234441",
"0.5229717",
"0.5228305",
"0.52079254",
"0.5199777",
"0.51897687",
"0.5183016",
"0.51570016",
"0.51386434",
"0.51139957"
] | 0.6443012 | 0 |
Calculate the overall data quality flag taking into account the others that have been assigned. | def calculate_measureland_qualifier_flag_overall(row):
mqf_tuple = (row['measureland_qualifier_flag_speed'],
row['measureland_qualifier_flag_distance'],
row['measureland_qualifier_flag_acceleration'],
row['measureland_qualifier_flag_visual'])
if mqf_tuple.count(3) >= 1:
return 3 # probably bad value
elif mqf_tuple.count(1) == len(mqf_tuple):
return 1 # good value
elif (mqf_tuple.count(9) >= 1) and (mqf_tuple.count(1) == (len(mqf_tuple) - mqf_tuple.count(9))):
return 2 # probably good value
elif (mqf_tuple.count(2) >= 1) and (mqf_tuple.count(1) == (len(mqf_tuple) - mqf_tuple.count(2))):
return 2 # probably good value
else:
return 2 # values that have passed the quality check are likely to be of good quality according to the criteria used, so assign as probably good value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quality_data(self, s):\n known_symbols = np.mod(range(176),48)>=32\n print('quality_data',np.sum(np.real(s[known_symbols])<0))\n success = np.sum(np.real(s[known_symbols])<0) < 20\n return success,0 ## no doppler estimate for data frames",
"def quality_fis(self,fis):\n if fis.dimension() == self.training_data.shape[1]:\n last_res = 0.0\n count = 0\n for i in range(self.check_data.shape[0]):\n last_res = fis.evaluate(np.hstack((self.check_data[i],last_res)))\n if abs(last_res - self.id) < 0.5:\n count = count + 1\n return (count,self.check_data.shape[0])\n else:\n rvec = fis.evaluates(self.check_data) - self.id\n rvec = ma.masked_inside(rvec,-0.5,0.5)\n return (ma.count_masked(rvec),self.check_data.shape[0])\n \n if fis.dimension() == self.training_data.shape[1]:\n dat = np.hstack((self.check_data,self.id*np.ones((self.check_data.shape[0],1))))\n else:\n dat = self.check_data\n #if self.check_data.shape[1] == self.training_data.shape[1]:\n # dat = self.check_data\n #else:\n # dat = np.hstack((self.check_data,np.zeros((self.check_data.shape[0],1))))\n rvec = fis.evaluates(dat) - self.id\n rvec = ma.masked_inside(rvec,-0.5,0.5)\n return (ma.count_masked(rvec),self.check_data.shape[0])",
"def quality(self) -> float:\n if self.get_cover_size() == 0:\n return 0\n else:\n if self.baseline == Baseline.COMPLEMENT:\n return self.__complement_quality()\n else:\n return self.__population_quality()",
"def determine_quality(self, function):\n if self.ground_truth_annotation_select.value is None:\n return None\n if self.segmentation_result_select.value is None:\n return None\n if self.segmentation_result_select.value is self.ground_truth_annotation_select.value:\n return None\n\n if self.ground_truth_annotation_select.value.data.max() == 0:\n return\n if self.segmentation_result_select.value.data.max() == 0:\n return\n\n quality = function(self.ground_truth_annotation_select.value.data, self.segmentation_result_select.value.data)\n\n return quality",
"def quality(self):\n return self.plays * self.number",
"def quality(self): \n\n subsetInt = [int(s) for s in self.subset.split() if s.isdigit()]\n columnNames = [] \n for i in range(len(subsetInt)):\n if subsetInt[i] == 1:\n columnNames.append(self.varNames[i])\n\n #qualityBand number of subset\n q = columnNames.index('Quality') \n\n if subsetInt[self.qualityBand] == 1:\n dataCount = self.subset.count('1')\n QC = np.repeat(self.DC[:,q].reshape((self.DC.shape[0],1)), dataCount-1, axis = 1)\n if self.dataset == 'MOD09A1.005' or self.dataset == 'MOD13Q1.005':\n QC = np.uint16(QC)\n else:\n QC = np.uint8(QC)\n\n QCm = QC & 1 #flips DCm mask\n DCm = np.delete(self.DC, q, 1) #looks good\n \n DCm = np.ma.masked_where(QCm == 1, DCm)\n DCm = np.ma.masked_where(DCm == 9999.0, DCm) \n \n if len(self.tiles) > 1:\n obs = self.observations/len(self.tiles)\n if len(self.tiles) == 1:\n obs = self.observations/2\n \n outArray = np.empty(shape = (self.rows*self.columns*obs, 0))\n for b in range(0, self.DC.shape[1]-1):\n cfull = DCm[:,b].reshape((self.observations, self.rows, self.columns))\n b16 = np.empty(shape = (self.rows*self.columns*obs, 0))\n for band in range(0,cfull.shape[0],2):\n c16 = np.ma.mean(cfull[band:band+1,:,:], axis=0)\n c16f = np.ma.filled(c16, 9999.0).astype(float).reshape((self.rows*self.columns))\n b16 = np.append(b16, c16f)\n outArray = np.append(outArray, b16.reshape((obs*self.rows*self.columns, 1)), axis = 1)\n \n self.finalDC = outArray\n \n np.save(str(self.directory) + '/' + self.dataset + '.npy', self.finalDC)\n del outArray, QC, DCm\n\n outfile = str(self.directory) + '/' + self.dataset + '.txt'\n f = open(outfile, 'w')\n for name in columnNames:\n if name != 'Quality':\n f.write(name + '\\n')\n var = [a for a in columnNames if not a.startswith('Quality')]\n logger.log('SUCCESS', 'The final 16-day interval quality-masked matrix was created successfully. This matrix has dimensions %d rows by %d columns. Datasets included in the matrix are %s' % (self.finalDC.shape[0], self.finalDC.shape[1], var))\n \n \n if subsetInt[self.qualityBand] != 1:\n cleanDC = np.delete(self.DC, q, 1)\n \n \n if len(self.tiles) > 1:\n obs = self.observations/len(self.tiles)\n if len(self.tiles) == 1:\n obs = self.observations/2\n \n outArray = np.empty(shape = (self.rows*self.columns*obs, 0))\n for b in range(cleanDC.shape[1]):\n cfull = cleanDC[:,b].reshape((self.observations, self.rows, self.columns))\n b16 = np.empty(shape=(self.rows*self.columns*obs))\n for band in range(cfull.shape[0]):\n c16 = np.mean(cfull[band:band+1,:,:], axis=0)\n band16 = np.append(b16, c16, axis=0)\n outArray = np.append(outArray, b16.reshape((obs*self.rows*self.columns, 1)), axis = 1)\n\n np.save(self.directory + '/' + self.dataset + '.npy', self.finalDC)\n del cleanDC, outArray\n \n outfile = self.directory + '/' + self.dataset + '.txt'\n f = open(outfile, 'w')\n for name in columnNames:\n if name != 'Quality':\n f.write(str(name) + ' \\n')\n var = [a for a in columnNames if not a.startswith('Quality')]\n logger.log('SUCCESS', 'The final 16-day interval matrix was created successfully. A quality mask was not applied, though remaining no data values are set at 9999. This matrix has dimensions %d rows by %d columns. Datasets included in the matrix are %s' % (self.finalDC.shape[0], self.finalDC.shape[1], var))",
"def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]:\n freq_increment = np.mean(np.diff(fit_data.x_data))\n\n fit_a = fit_data.ufloat_params[\"a\"]\n fit_b = fit_data.ufloat_params[\"b\"]\n fit_freq = fit_data.ufloat_params[\"freq\"]\n fit_kappa = fit_data.ufloat_params[\"kappa\"]\n\n snr = abs(fit_a.n) / np.sqrt(abs(np.median(fit_data.y_data) - fit_b.n))\n fit_width_ratio = fit_kappa.n / np.ptp(fit_data.x_data)\n\n criteria = [\n fit_data.x_range[0] <= fit_freq.n <= fit_data.x_range[1],\n 1.5 * freq_increment < fit_kappa.n,\n fit_width_ratio < 0.25,\n fit_data.reduced_chisq < 3,\n curve.utils.is_error_not_significant(fit_kappa),\n snr > 2,\n ]\n\n if all(criteria):\n return \"good\"\n\n return \"bad\"",
"def __population_quality(self) -> float:\n population_identifier = np.zeros(shape=self.Dataset.size)\n subgroup_identifier = np.ones(shape=len(self.get_cover()))\n group = np.concatenate((population_identifier,\n subgroup_identifier))\n\n subgroup_times = self.Dataset.survival[self.get_cover()]\n subgroup_status = self.Dataset.status[self.get_cover()]\n\n time = np.concatenate((self.Dataset.survival, subgroup_times))\n status = np.concatenate((self.Dataset.status, subgroup_status))\n\n _, pvalue = sm.duration.survdiff(time, status, group)\n return 1 - pvalue",
"def _evaluate_quality(self, fit_data: FitData) -> Union[str, None]:\n return None",
"def quality_fis(self,fis):\n correct = 0\n count = 0\n for cl_state in self.classes:\n r,c = cl_state.quality_fis(fis)\n print \"For\",cl_state.name,r,\"/\",c\n correct += r\n count += c\n return (correct,count)",
"def set_quality(self):\n p = self.suitability + 1.15 * self.fono\n self.quality = np.exp(p) / (1 + np.exp(p))",
"def quality_checks(ds):\n parameters = ['barometric_pressure', 'relative_humidity', 'air_temperature', 'longwave_irradiance',\n 'precipitation', 'shortwave_irradiance', 'sea_surface_temperature', 'sea_surface_conductivity',\n 'sea_surface_salinity', 'eastward_wind_velocity', 'northward_wind_velocity']\n for p in parameters:\n # The primary failure mode of the METBK is to repeat the last value it received from a sensor.\n # Use the IOOS QARTOD flat line test to identify these cases (consider it suspect if it repeats\n # for 20+ minutes and failed if it repeats for 35+ minutes).\n flags = qartod.flat_line_test(ds[p].values, ds['time'].values, 1200, 2100, 0.00001)\n\n # The secondary failure mode occurs when the METBK logger sets values to a NaN if no sensor data is available.\n # In the case of the sea surface conductivity and temperature data, different values are used to represent\n # missing data. Specifically, the values are set to a 0.0 and -5.0, respectively. In either case, (NaNs or\n # 0.0 and -5.0) set the QC flag to 9 to indicate \"Missing\" data, and then convert the 0.0 and -5.0 values to\n # a NaN to avoid propagating false numbers into subsequent calculations (e.g. salinity or heat flux).\n if p == 'sea_surface_temperature':\n m = ds[p] < -4.0 # use a floating point value just above -5\n flags[m] = 9\n ds[p][m] = np.nan\n ds['sea_surface_salinity'][m] = np.nan\n elif p == 'sea_surface_conductivity':\n m = ds[p] < 0.5 # use a floating point value just above 0\n flags[m] = 9\n ds[p][m] = np.nan\n ds['sea_surface_salinity'][m] = np.nan\n else:\n m = np.isnan(ds[p])\n flags[m] = 9\n\n # add the qc_flags to the dataset, rolling up the results into a single value\n qc_summary = p + '_qc_summary_flag'\n if qc_summary in ds.variables:\n # add the new test results to the existing QC summary results\n qc = ds[qc_summary]\n flags = np.array([flags, qc.values])\n ds[qc_summary] = ('time', flags.max(axis=0, initial=1))\n else:\n # create a new QC summary variable\n ds[qc_summary] = ('time', flags)\n\n # set up the attributes for the new variable\n ds[qc_summary].attrs = dict({\n 'long_name': '%s QC Summary Flag' % ds[p].attrs['long_name'],\n 'standard_name': 'aggregate_quality_flag',\n 'comment': ('Summary quality flag combining the results of the instrument-specific quality tests with '\n 'existing OOI QC tests, if available, to create a single QARTOD style aggregate quality flag'),\n 'flag_values': np.array([1, 2, 3, 4, 9]),\n 'flag_meanings': 'pass not_evaluated suspect_or_of_high_interest fail missing'\n })",
"def is_reducing(self):\n return bool(set(self.kind) & set(\"XYZ\"))",
"def __complement_quality(self) -> float:\n group = np.zeros(shape=self.Dataset.size)\n np.put(group, self.get_cover(), 1)\n\n time = self.Dataset.survival\n status = self.Dataset.status\n\n _, pvalue = sm.duration.survdiff(time, status, group)\n return 1 - pvalue",
"def flag_samples(self,counts):\n counts = self.fov_qc(counts)\n counts = self.binding_density_qc(counts)\n counts = self.pos_control_linearity_qc(counts)\n counts = self.pos_control_detection_limit_qc(counts)\n return(counts)",
"def data_quality(dset):\n dq_threshold = config.tech[_SECTION].threshold.int\n return np.nan_to_num(dset.data_quality) <= dq_threshold",
"def get_Flagging(flagging_file, n_Rec, nChan, exp_count):\n\n line = subprocess.check_output(['grep','Flagged', flagging_file]) # grab the summary line\n str_line = line.decode('utf-8')\n TOKS = str_line.split()\n total_flagged_pct = float(TOKS[-2]) #data+autocorrelation\n total_uv = float(TOKS[7])\n\n # Getting data flagged percentage\n \n autocorr_flagged_pct = (36 * n_Rec * n_Chan / total_uv)*100.0\n data_flagged_pct = round(total_flagged_pct - autocorr_flagged_pct, 3)\n\n # Finding out which antenna has been flagged completely.\n ANT1, ANT2, FLAG = [], [], [] \n with open(flagging_file, 'r') as f:\n for line in f:\n if \"#\" not in line: # grep -v \"#\"\n if \"Flagged\" not in line: # grep -v \"Flagged\"\n if len(line.split())>2: # avoid new channel-wise summaries at end of flagSummary file\n TOKS=line.split()\n ant1 = int(TOKS[3])\n ant2 = int(TOKS[4])\n flag = float(TOKS[6])\n if (ant1 < ant2) and (flag == 100): # extract non-correlated antenna pairs with 100 percent flagging\n ANT1.append(ant1)\n ANT2.append(ant2)\n FLAG.append(flag)\n\n ant1, ant2, flag = np.asarray(ANT1), np.asarray(ANT2), np.asarray(FLAG)\n \n ANT_NAME = []\n for x in range(0,36):\n count1 = np.count_nonzero(ant1 == x)\n count2 = np.count_nonzero(ant2 == x)\n total_count = count1 + count2\n if total_count == exp_count:\n ant_num = x+1\n ant_name = 'ak'+ str(ant_num)\n ANT_NAME.append(ant_name)\n\n total_flagged_ant = len(ANT_NAME)\n \n flag_ant_file = 'flagged_antenna.txt'\n ffile = open(fig_dir + '/'+ flag_ant_file,'a')\n \n if total_flagged_ant > 1:\n ffile.write(flagging_file[-24:-18])\n ffile.write('\\n')\n for item in ANT_NAME:\n ffile.write(item)\n ffile.write('\\n')\n else:\n ffile.write(flagging_file[-24:-18])\n ffile.write('\\n none \\n')\n\n ffile.close()\n \n return data_flagged_pct, total_flagged_ant, flag_ant_file",
"def get_fare(self):\r\n return super().get_fare()*self.fanciness_rating+self.flagfall",
"def is_good_qualtiative_example(iaa_score, ann1_total, ann2_total):\n return iaa_score > .3 and iaa_score < 1 and ann1_total > 3 and ann2_total > 3",
"def calc_resources(self):\n self.popula = self.energy = self.popula_used = self.energy_used = 0\n self.cnt_public = self.cnt_shop = self.cnt_1 = self.cnt_2 = self.cnt_3 = self.cnt_4 = self.cnt_5 = self.cnt_office = 0\n self.popula += self.extra_pop\n for i in range(20):\n b = self.b[i]\n if b == 'T':\n self.popula += self.f[i] * 2\n self.energy_used += 1\n elif b == 'O':\n self.popula_used += 1\n self.energy_used += 1\n self.cnt_office += self.f[i]\n elif b == 'U':\n self.popula_used += 1\n self.cnt_public += 1\n elif b == 'S':\n self.energy_used += 1\n self.cnt_shop += 1\n elif b == '1':\n self.popula += 1\n self.energy += 1\n self.popula_used += 1\n self.cnt_1 += 1\n elif b == '2':\n self.popula_used += 1\n self.cnt_2 += 1\n elif b == '3':\n self.popula_used += 1\n self.cnt_3 += 1\n elif b == '4':\n self.popula += 2\n self.popula_used += 1\n self.cnt_4 += 1\n elif b == '5':\n self.energy += 2\n self.popula_used += 1\n self.cnt_5 += 1\n elif b == 'A':\n self.energy += 2\n self.popula_used += 1\n elif b == 'F':\n self.energy += 3\n self.popula_used += 1\n elif b == 'G':\n self.popula += 1\n if 'tvst' in args.exp:\n self.popula += self.cnt_shop\n if 'ward' in args.exp:\n self.popula += 3\n if 'elec' in args.exp:\n self.energy += 3\n if 'capi' in args.exp:\n self.popula_used += 2\n if 'fire' in args.exp:\n self.popula_used += 1\n if 'park' in args.exp:\n self.popula_used += 1",
"def total_organic_compound(self):\n return self.indoor_air_quality[1]",
"def set_flags(data):\n flag_status_bit = {\n \"flagfield_rf1\": np.array([1, 1, 2, 1, 2, 0, 0, 0]),\n \"flagfield_rf2\": np.array([2, 2, 0, 0, 0, 0, 0, 0]),\n \"flagfield_pl\": np.array([2, 2, 2, 2, 0, 0, 0, 0]),\n \"flagfield_gen1\": np.array([0, 2, 0, 0, 0, 0, 0, 0]),\n \"flagfield_gen2\": np.array([1, 0, 2, 0, 0, 0, 0, 0])\n }\n\n f_usable = np.zeros(data[\"flagfield_rf1\"].size, dtype=np.uint8)\n\n for flagfield, bitmask in flag_status_bit.items():\n subset = np.nonzero(data[flagfield])[0]\n\n if subset.size > 0:\n unpacked_bits = np.fliplr(\n np.unpackbits(data[flagfield][subset]).reshape(-1,\n 8).astype(bool))\n\n flag = np.ma.array(np.tile(bitmask,\n unpacked_bits.shape[0]).reshape(-1, 8),\n mask=~unpacked_bits,\n fill_value=0)\n\n f_usable[subset] = np.max(np.vstack(\n (f_usable[subset], flag.filled().max(axis=1))),\n axis=0)\n\n return f_usable",
"def main():\n logging.info(\"Executing data quality module\")\n\n calculate_quality()",
"def testQualityDictinary(self):\n for qual in ['bq', 'hq', 'uq']:\n for res in ['1080', '720', '480']:\n try:\n int(self.quality[qual][res])\n except ValueError:\n self.assertNotEqual(\n self.quality[qual][res],\n self.config.quality[qual][res]\n )\n self.assertEqual(\n tools.QUALITY_DEFAULT,\n self.config.quality[qual][res]\n )\n else:\n self.assertEqual(\n int(self.quality[qual][res]),\n self.config.quality[qual][res]\n )",
"def getSupport(self,data):\n ans=np.ones(5822)\n num_of_trans=5822\n for i in range(len(data)):\n arr=np.array(self._dataTable[data[i][0]])\n num=(arr==data[i][1])\n ans=np.logical_and(ans,num)\n return(sum(ans)/num_of_trans)",
"def quality(self):\n try:\n qid = int((self.tool_metadata or {}).get(\"quality\", 0))\n except:\n qid = 0\n\n # We might be able to get the quality strings from the item's tags\n internal_name, name = \"normal\", \"Normal\"\n if self.tags:\n tags = {x.get('category'): x for x in self.tags}\n if 'Quality' in tags:\n internal_name, name = tags['Quality'].get('internal_name'), tags['Quality'].get('name')\n\n return qid, internal_name, name",
"def mask_evaluation(annotation_mask, result_mask, idx):\n\n true_positive = np.sum(np.logical_and(annotation_mask == 255, result_mask == 255)) \n false_positive = np.sum(np.logical_and(result_mask == 255, annotation_mask != result_mask))\n false_negative = np.sum(np.logical_and(annotation_mask == 255, annotation_mask != result_mask))\n\n precision = true_positive / (true_positive + false_positive)\n recall = true_positive / (true_positive + false_negative)\n f1_measure = 2 * ((precision * recall) / (precision + recall))\n\n return recall, precision, f1_measure",
"def is_maximizing_liberties(self):\r\n if self.for_color == 1:\r\n new_positions = self.result_board.black_positions\r\n else:\r\n new_positions = self.result_board.white_positions\r\n\r\n new_sum = 0\r\n for pos in new_positions:\r\n new_sum += self.result_board.board[pos[0]][pos[1]].liberty\r\n self.priority += (new_sum * 0.1)",
"def analyze(self):\n try:\n self.options[self.multi_image][1]()\n except:\n raise Exception(\"Multi Image Option not defined.\")\n\n self.image = self.data / self.exposure\n\n background = self.min_val = np.min(self.image[:511,:511])\n self.max_val = np.max(self.image[:511,:511])\n # stats.mode returns modal value = value that occours most often\n #background = stats.mode(im[:50,:50].ravel())[0][0]\n\n intensity = self.image.sum() - background*np.size(self.image)\n\n #results.append((self.index, intensity, background))\n self.index =+ 1",
"def coverage(self):\r\n return 0, 1"
] | [
"0.6451386",
"0.60514176",
"0.595479",
"0.5936915",
"0.57833517",
"0.5775925",
"0.57434183",
"0.5740497",
"0.56944513",
"0.5693227",
"0.56801933",
"0.56720245",
"0.5591802",
"0.55496436",
"0.55253345",
"0.54495364",
"0.5421906",
"0.54183483",
"0.54179144",
"0.5415607",
"0.5385671",
"0.5378227",
"0.53645533",
"0.53508353",
"0.533561",
"0.53120685",
"0.5306044",
"0.52905095",
"0.5283728",
"0.5280215"
] | 0.6428443 | 1 |
Remove the intermediate step qualifier flag columns that are not required in the final output data set. | def remove_intermediate_columns(dataframe):
combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',
'measureland_qualifier_flag_distance',
'measureland_qualifier_flag_acceleration',
'measureland_qualifier_flag_visual'])
print("Dimensions of combined dataframe after dropping columns:", combined_dataframe_dropped_cols.shape)
print("Combined dataframe after dropping columns: ", combined_dataframe_dropped_cols.sample(10))
return combined_dataframe_dropped_cols | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated",
"def cleanStep(idict):\n for step in ['input', 'output']:\n data = idict.get(step, {})\n for key, values in data.items():\n for elem in values:\n for skip in ['pfn', 'InputPFN', 'OutputPFN', 'inputpfns']:\n if skip in elem:\n del elem[skip]\n data[key] = values\n return idict",
"def delete_intermediate_csvs(wk_dir):\n # Remove intermediate csv tables\n out_files = os.listdir(wk_dir)\n delete_keys = [\"int_metrics\",\"region_dims\"]\n delete_list = [f for f in out_files if any(x in f for x in delete_keys)]\n for f in delete_list:\n os.remove(f)",
"def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)",
"def get_cols_drop():",
"def delete_all_gap(self):\n # pdb.set_trace()\n\n rem = set(self.get_all_gap_cols())\n subset = [x for x in range(0, self.get_length()) if x not in rem]\n self.remove_columns(set(rem))\n #_LOG.debug(\"Alignment length reduced to %d\" % len(subset))\n return subset",
"def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()",
"def trim_features():\n pass",
"def drop_extra_columns(self):\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)",
"def clean(self):\n\n if (self.clean_level == 'dusty') | (self.clean_level == 'clean'):\n idx, = np.where(self['B_flag'] == 0)\n self.data = self[idx, :]\n\n return",
"def nonphysicalxs_remotion(a2_data,res_nufi_removal):\n for i in a2_data['I'].keys():\n if i=='MACR' and res_nufi_removal==True:\n if 'nufi' in a2_data['I'][i]['R'].keys():\n a2_data['I'][i]['R'].pop('nufi')\n for r in a2_data['I'][i]['R'].keys():\n if any(x in r for x in ['111', '112', '122', '212', '222', '211', '322',\n '321', '312', '311', '221', '121']):\n a2_data['I'][i]['R'].pop(r)\n return a2_data",
"def _these_columns_cannot_annotate_exp_cons(self):\n _cols = set([]) #\n for param_name, req_cols in self.required_columns.items():\n _cols |= req_cols\n\n return _cols | self.other_useful_columns",
"def test_removed_step(raw_frame):\n data = DataSteps(raw_frame)\n\n @data.step\n def inc_col1(frame):\n return frame.assign(Col1=lambda df: df[\"Col1\"] + 1)\n\n @data.step(active=False) # noqa: F811\n def inc_col1(frame):\n return frame.assign(Col1=lambda df: df[\"Col1\"] + 1)\n\n assert len(data.steps) == 0",
"def _common_preprocess(self, data):\n\n data = data.drop('id', axis=1) \n data = data.drop(['17', '488', 'B01AF', 'H01AB'], axis=1, errors='ignore')\n\n # drop age outliers\n idx = data[(data['age'] > 99)].index\n data = data.drop(idx)\n\n # drop rows with CKD\n idx = data[((data['585'] != 0) | (data['586'] != 0)) &\n (data['ckd'] == 0)].index\n data = data.drop(idx)\n data = data.drop(['585', '586'], axis=1)\n\n return data",
"def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return",
"def _fix_uniq_col(self):\n # subgradient; for two boolean arrays, multiplication seems to be the best way \n # (equivalent to logical_and)\n n_covered_col = self.a_csr.dot(np.ones(self.ncols)) \n ifix = np.zeros(self.ncols, dtype=bool)\n if (np.count_nonzero(n_covered_col) != self.mrows):\n raise ValueError(\"There are uncovered rows! Please check your input!\")\n if (np.any(n_covered_col==1)):\n inonzero = self.a_csr[n_covered_col==1,:].nonzero()\n ifix[inonzero[1]] = True\n\n return ifix",
"def removeCols(self) -> List['StateNode']:\n cols = self.state[1]\n states: List[StateNode] = []\n for i in range(len(cols)):\n for j in range(i + 1, len(cols) + 1):\n # for j in range(i + 1, i + 2):\n new_cols = cols[:i] + cols[j:]\n if len(new_cols) == 0:\n continue\n states.append(StateNode(self.table, \n (self.state[0], new_cols),\n ([], cols[i:j]),\n self.cost + j - i + self.count_pairs(self.state[0], cols[i:j]),\n self))\n return states",
"def remove_data():\n # Removing the existing data\n col_answer_given.remove()\n col_answer_not_given.remove()\n col_q_not_given.remove()\n col_to_summarize.remove()",
"def cleanup(self):\n self.final_params = self.final_params_expected[self.count]\n self.flag = self.flag_expected[self.count]\n self.count += 1\n self.count = self.count % len(self.flag_expected)",
"def _strip_excess_data(recipe):\n for key in list(recipe.keys()):\n if key == \"ingredients\" or key == \"steps\":\n continue\n elif \"ingredient\" in key or \"step\" in key:\n del recipe[key]\n\n return recipe",
"def prune_gbm_features(schema: Dict):\n gbm_feature_types = ['binary', 'category', 'number']\n pruned_all_of = []\n for cond in schema['items']['allOf']:\n if_type = cond['if']['properties']['type']['const']\n if if_type in gbm_feature_types:\n pruned_all_of += [cond]\n schema['items']['allOf'] = pruned_all_of",
"def preprocess_dataset(dataset=None, remove_missing=60, remove_empty_rows=True):\n print('feature size before dropping:{}'.format(dataset.shape[1]))\n dataset_after_drop = dataset.dropna(thresh=dataset.shape[0]*remove_missing/100, how='all',axis=1)\n print('feature size after dropping:{}'.format(dataset_after_drop.shape[1]))\n print('row size before dropping:{}'.format(dataset_after_drop.shape[0]))\n if remove_empty_rows is True:\n df_final = dataset_after_drop.dropna(inplace=False).reset_index (drop=True)\n print('row size after dropping:{}'.format(df_final.shape[0]))\n print('---------------')\n print('final shape:{}'.format(df_final.shape))\n return df_final\n else:\n return dataset_after_drop",
"def cleanup(adata, del_prediction=False, del_2nd_moments=False):\n\n if \"pca_fit\" in adata.uns_keys():\n adata.uns[\"pca_fit\"] = None\n if \"velocyto_SVR\" in adata.uns_keys():\n adata.uns[\"velocyto_SVR\"][\"SVR\"] = None\n if \"umap_fit\" in adata.uns_keys():\n adata.uns[\"umap_fit\"][\"fit\"] = None\n if \"velocity_pca_fit\" in adata.uns_keys():\n adata.uns[\"velocity_pca_fit\"] = None\n if \"kmc\" in adata.uns_keys():\n adata.uns[\"kmc\"] = None\n if \"kinetics_heatmap\" in adata.uns_keys():\n adata.uns.pop(\"kinetics_heatmap\")\n if \"hdbscan\" in adata.uns_keys():\n adata.uns.pop(\"hdbscan\")\n\n VF_keys = [i if i.startswith(\"VecFld\") else None for i in adata.uns_keys()]\n for i in VF_keys:\n if i is not None and \"VecFld2D\" in adata.uns[i].keys():\n del adata.uns[i][\"VecFld2D\"]\n\n fate_keys = [i if i.startswith(\"fate\") else None for i in adata.uns_keys()]\n for i in fate_keys:\n if i is not None:\n if adata.uns[i][\"init_cells\"] is not None:\n adata.uns[i][\"init_cells\"] = list(adata.uns[i][\"init_cells\"])\n if \"prediction\" in adata.uns[i].keys():\n if del_prediction:\n del adata.uns[i][\"prediction\"]\n if \"VecFld_true\" in adata.uns[i].keys():\n if adata.uns[i][\"VecFld_true\"] is not None:\n del adata.uns[i][\"VecFld_true\"]\n\n if del_2nd_moments:\n from .tools.utils import remove_2nd_moments\n\n remove_2nd_moments(adata)\n\n return adata",
"def finalize(self):\n # this forces only some filters will be used for feature computation\n # this is not ideal, but a necessary stop-gap while we revise\n # the PropertyTable SQL\n self._good_filters = ['u', 'g', 'r', 'i', 'z', 'Y']\n self._good_filter_wave = np.array([3569.5, 4766.5, 6214.5, 7544.5, 8707.5, 10039.5])\n\n use_filters = set(self._good_filters) & self.filters\n if not self.filters.issubset(use_filters):\n message = 'Number of useful filters ({}) does not equal number available filters ({}) - some filters will not be used'.format(\n ''.join(use_filters), ''.join(self.filters))\n warnings.warn(message, RuntimeWarning)\n self.filters = set(use_filters)\n mask = np.array([True if x in self.filters else False for x in self.passband])\n\n if mask.size: # Not empty arrays\n self.time = self.time[mask]\n self.flux = self.flux[mask]\n self.fluxErr = self.fluxErr[mask]\n self.obsId = self.obsId[mask]\n self.passband = self.passband[mask]\n self.zeropoint = self.zeropoint[mask]\n for key in self._extra_cols:\n val = getattr(self, key)\n setattr(self, key, val[mask])\n\n self.nobs = len(self.time)\n if self.nobs == 0:\n message = 'Object {} with locus ID {} has no good observations.'.format(self.objectId, self.locusId)\n raise ValueError(message)\n\n return self._remove_flux_extinction()",
"def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]",
"def _drop_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n res = res.select(op.columns_produced())\n return res",
"def drop_attributes(df, cutoff=25, extra_add=[]):\n\n df_copy = df.copy()\n\n attributs_drop = []\n for var in sorted(df.columns):\n series = df[var]\n perc_missing = 100 - series.count() / len(series) * 100\n\n if perc_missing > cutoff:\n attributs_drop.append(var)\n else:\n continue\n\n if len(extra_add) == 0:\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n else:\n attributs_drop = attributs_drop + extra_add\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n return df_copy",
"def unusedFromKDOTDataPreparation():",
"def reduce_data_to_necessary_columns(filtered_df):\n hist_df = filtered_df[\n [\n \"UniqueName\",\n \"Joins\",\n \"Projection_Attributes\",\n \"Selection_Attributes\",\n \"GroupBy\",\n \"OrderBy\",\n \"Strings\",\n \"Tables\",\n ]\n ].set_index(\"UniqueName\")\n return hist_df",
"def remove_urequired_columns(self, unrequired_columns):\n self.df = self.df.drop(columns=unrequired_columns)"
] | [
"0.5762991",
"0.54455286",
"0.54203403",
"0.53003746",
"0.5283763",
"0.5281597",
"0.52428263",
"0.52249265",
"0.52244925",
"0.51974267",
"0.5169289",
"0.51507044",
"0.51137793",
"0.5095008",
"0.50759375",
"0.5040321",
"0.5026872",
"0.5004204",
"0.4998381",
"0.49654576",
"0.4939483",
"0.49386102",
"0.49368465",
"0.4934985",
"0.49169937",
"0.48920166",
"0.48875684",
"0.48821336",
"0.48415262",
"0.4828639"
] | 0.6163486 | 0 |
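The record that closes above pairs a column-dropping document (its return value is combined_dataframe_dropped_cols) with negatives that all remove redundant columns. As a rough, hedged illustration of that shared pattern — dropping unwanted columns from a pandas DataFrame — here is a minimal sketch; the drop_redundant_columns helper and the column names are invented for illustration and are not taken from any record.

import pandas as pd

def drop_redundant_columns(df: pd.DataFrame, redundant: list) -> pd.DataFrame:
    # Keep only the names that actually exist so missing columns do not raise.
    present = [col for col in redundant if col in df.columns]
    return df.drop(columns=present)

# Example usage with invented column names.
combined = pd.DataFrame({"lat": [47.1, 47.2], "lon": [8.5, 8.6], "scratch": [0, 0]})
print(drop_redundant_columns(combined, ["scratch", "not_there"]).columns.tolist())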
Choose rows from the dataframe according to values in one of the columns. | def choose_rows(rows):
# Ensure that the object is not empty.
assert(len(rows) > 0)
# The following rows preferentially select data where the device_id=64 (i.e the GLONASS over the Trimble).
# Also select by data quality (1 = good value, 2 = probably good value).
# If the data quality is not good, then do not select, even if there is no other point for that time.
# We are only interested in the good data at this point.
if len(rows) >= 1 and rows[0]['device_id'] == 64 and rows[0]['measureland_qualifier_flag_overall'] == 1:
return rows[0]
elif len(rows) >=2 and rows[1]['device_id'] == 64 and rows[1]['measureland_qualifier_flag_overall'] == 1:
return rows[1]
elif len(rows) >= 3 and rows[2]['device_id'] == 64 and rows[2]['measureland_qualifier_flag_overall'] == 1:
return rows[2]
elif len(rows) >= 1 and rows[0]['device_id'] == 63 and rows[0]['measureland_qualifier_flag_overall'] == 1:
return rows[0]
elif len(rows) >=2 and rows[1]['device_id'] == 63 and rows[1]['measureland_qualifier_flag_overall'] == 1:
return rows[1]
elif len(rows) >= 3 and rows[2]['device_id'] == 63 and rows[2]['measureland_qualifier_flag_overall'] == 1:
return rows[2]
elif len(rows) == 1 and rows[0]['measureland_qualifier_flag_overall'] == 2: # for the first row which has a value
# of 3, because QC was not able to tell otherwise
return rows[0]
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_relevant_rows(df, column_name, column_value, not_equal=False):\n\n if not_equal:\n return df.loc[df[column_name] != column_value]\n\n return df.loc[df[column_name] == column_value]",
"def _filter(self, col: str, val: Any) -> pd.DataFrame:\n return self._df[self._df[col] == val]",
"def create(df,column,list_):\n return df[df[column].isin(list_)]",
"def sample_rows(df, nrows):",
"def column_select(df,returnList = [\"x\",\"y\"]):\n df = df.sort_values(by = 'frame_id')\n return [ list(df[k]) for k in returnList]",
"def filter_rows(self, **kwargs):\n filtered = self._data.copy()\n for colname, values in kwargs.items():\n values = [values] if type(values) == str else values\n filtered = filtered[filtered[colname].isin(values)]\n return self._copy(filtered)",
"def _select(self, row):\n if not self._head:\n self._head = self._create_head(row)\n\n if 'indices' not in self._state:\n fields = self._args.fields.split()\n self._state['indices'] = [self._get_index(f) for f in fields]\n\n return [row[i] for i in self._state['indices']]",
"def chose_only_hypothesis_colums(df):\n lst = ['abv', 'ibu', 'gravity', 'abv_min', 'abv_max', 'ibu_min',\n 'ibu_max', 'srm_min', 'srm_max', 'og_min', 'fg_min', 'fg_max']\n return df[lst]",
"def select_rows_by_condition_on_columns(df: pd.DataFrame, cols: List[str],\n condition: str = \"== 1\", logic: str = \"or\"):\n # First eliminate spaces in columns, this method will not work with spaces\n new_cols = [col.replace(\" \", \"_\").replace(\".\", \"_\") for col in cols]\n df.rename(\n columns={col: new_col for col, new_col in zip(cols, new_cols)}, inplace=True\n )\n\n # Now create a string to query the dataframe with\n logic_spaces = \" \" + logic + \" \"\n query_str = logic_spaces.join(\n [str(col) + condition for col in new_cols]\n ) #'col1 == 1, col2 == 1', etc.\n\n # Query dataframe\n outdf = df.query(query_str).copy()\n\n # Rename columns back to original\n outdf.rename(\n columns={new_col: col for col, new_col in zip(cols, new_cols)}, inplace=True\n )\n\n return outdf",
"def index_selecting():\n df = pd.read_csv('data/ad_feature.csv',header=0) #type:pd.DataFrame\n print df[:2] , df[2: ] #前两行\n\n df.iloc[:2 , :]\n df.iloc[:2, [2,3] ] # 第 2 列和 第3列\n\n # df.loc[row_indexer,column_indexer]\n df.loc[3, ['cate_id','price']]\n\n df[['cate_id', 'price']]\n\n #boolean index\n df[ df['price'] > 1000]\n df[ (df['price'] > 1000) & (df['price'] < 2000)]\n\n\n\n df[df['cate_id'].isin([6261])]\n\n #select by callable\n\n\n # .loc, .iloc, and also [] indexing can accept a callable as indexer\n\n\n df.loc[lambda d: d.price > 2000, :]",
"def filter_row(col, rw):\n return rw == row",
"def get_df_by_set(my_df, my_set):\n return my_df.iloc[list(my_set), :]",
"def get_subtable(df, col, val) -> pd.DataFrame:\r\n return df[df[col] == val].drop(columns=col)",
"def get_row_indices(df, col, vals):\n\n return list(df[df[col].isin(vals)].index)",
"def _select(self,X,y=None):\n return X.loc[:,self.columns], y",
"def get_candidate_row_from_df(cls):\n recs = df.groupby('class')\n recs = recs.filter(lambda t: t['class'].tolist()[0] == cls)\n list_of_candidate_rows = []\n for i, row in recs.iterrows():\n width = row['x2'] - row['x1']\n height = row['y2'] - row['y1']\n if width < self.thres or height < self.thres: continue\n list_of_candidate_rows.append(row)\n candidate = random.choice(list_of_candidate_rows)\n return candidate",
"def subset(df, items, axis=1):\n if axis == 1:\n columns = list(set(items).intersection(df.columns))\n return df[columns]\n elif axis == 0:\n indices = list(set(items).intersection(df.index))\n return df.loc[indices, :]\n else:\n raise ValueError(\"Invalid axis: %d\" % axis)",
"def filter_by_isin(df: pd.DataFrame, column: str, values: Iterable) -> pd.DataFrame:\n # First, create a \"map\" series from all possible values in the column => whether they should pass the filter\n all_ids = df[column].unique()\n is_id_relevant = pd.Series(np.zeros(len(all_ids)), index=all_ids).astype('bool') # Default false\n is_id_relevant.loc[values] = True\n\n # Create a boolean mask for column, based on the mapping above. Grab the raw array.\n mask = is_id_relevant[df[column]].values\n # Apply mask\n return df[mask]",
"def filter_by(df, constraints):\n indexer = [constraints[name] if name in constraints else slice(None)\n for name in df.index.names]\n return df.loc[tuple(indexer)] if len(df.shape) == 1 else df.loc[tuple(indexer),]",
"def single_column_iterator(df, colidx):\n for _, values in df.iterrows():\n yield values.iloc[colidx]",
"def get_train_data(df):\n\n srch_order = []\n cat0 = df[df.category == 5].index\n cat1 = df[df.category == 1].index\n cat2 = df[df.category == 0].index\n amount = int(len(df) * .04)\n print(\"amount of rows selected: \", amount)\n\n cat2_selec = np.random.choice(cat2, amount, replace=False)\n\n cat012 = np.concatenate((cat0, cat1, cat2_selec))\n\n df_selection = df.loc[cat012]\n\n return df_selection",
"def select_one_product(self, table, column_1, value_1, column_2, value_2):\n select_row = \"SELECT * FROM {} WHERE {}='{}' AND {}='{}';\".format(table, column_1, value_1, column_2, value_2)\n self.cursor.execute(select_row)\n row = self.cursor.fetchone()\n return row",
"def process_dataframe(self, dataframe):\n return dataframe[dataframe.ix[:,0] < dataframe.ix[:,1]]",
"def process_dataframe(self, dataframe):\n return dataframe[dataframe.ix[:,0] < dataframe.ix[:,1]]",
"def get_sample(df, col_name, n=100, seed=42):\n np.random.seed(seed)\n \n random_idx = np.random.choice(df[col_name].dropna().index, size=n, replace=False)\n \n return df.loc[random_idx, col_name]",
"def same_as(self, rows: List[Row], column: Column) -> List[Row]:\n return_list: List[Row] = []\n if not rows:\n return return_list\n cell_value = rows[0].values[column.name]\n for table_row in self.table_data:\n new_cell_value = table_row.values[column.name]\n if new_cell_value is None or not isinstance(new_cell_value, type(cell_value)):\n continue\n if new_cell_value == cell_value:\n return_list.append(table_row)\n return return_list",
"def instances_based_condition(df,col1,val1,col2,val2):\r\n instance=df[(df[col1]>val1) & (df[col2]==val2)]\r\n return instance",
"def _select_rows_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"SelectRowsNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.SelectRowsNode\"\n )\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n temp_v_columns = []\n # pre-scan expressions\n er = ExpressionRequirementsCollector()\n for opk in op.ops.values():\n opk.act_on(None, expr_walker=er)\n er.add_in_temp_columns(temp_v_columns)\n value_to_send_to_act = None\n if er.collect_required:\n if isinstance(res, pl.LazyFrame):\n res = res.collect()\n value_to_send_to_act = res\n # work on expression\n if len(temp_v_columns) > 0:\n res = res.with_columns(temp_v_columns)\n selection = op.expr.act_on(\n value_to_send_to_act, \n expr_walker=PolarsExpressionActor(polars_model=self, extend_context=True)) # PolarsTerm\n assert isinstance(selection, PolarsTerm)\n res = res.filter(selection.polars_term)\n if len(temp_v_columns) > 0:\n res = res.select(op.columns_produced())\n if self.use_lazy_eval and isinstance(res, pl.DataFrame):\n res = res.lazy()\n return res",
"def get_subset(df, constraints):\n for constraint in constraints:\n subset = df.loc[df[constraint[0]].isin(constraint[1])]\n df = subset\n return subset",
"def select_columns(data, columns):\n return data.loc[:, columns]"
] | [
"0.60818195",
"0.5826822",
"0.5779508",
"0.573627",
"0.5658017",
"0.558915",
"0.55783916",
"0.5562325",
"0.5559441",
"0.55420333",
"0.5506508",
"0.55038416",
"0.5499845",
"0.54789746",
"0.54606724",
"0.5433628",
"0.54250365",
"0.53969055",
"0.53790253",
"0.5374446",
"0.53492814",
"0.53313106",
"0.53194076",
"0.53194076",
"0.5312662",
"0.53058594",
"0.5301662",
"0.52807546",
"0.52409184",
"0.5236443"
] | 0.6122471 | 0 |
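The choose_rows record above prioritises GPS fixes by device_id and quality flag using per-row checks. A minimal sketch of the same underlying idea — boolean row selection on column values in pandas — follows; the DataFrame contents are invented for illustration and are not part of the dataset.

import pandas as pd

rows = pd.DataFrame({
    "device_id": [64, 63, 64],
    "measureland_qualifier_flag_overall": [2, 1, 1],
})

# Prefer device 64 with a good quality flag (1); fall back to device 63.
preferred = rows[(rows["device_id"] == 64) &
                 (rows["measureland_qualifier_flag_overall"] == 1)]
if preferred.empty:
    preferred = rows[(rows["device_id"] == 63) &
                     (rows["measureland_qualifier_flag_overall"] == 1)]
print(preferred)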
Create a new dataframe from the prioritised points according to the conditions required. Rows are chosen from small groups which occur at the same time (to seconds). | def prioritise_data_points(dataframe, output_filepath, output_filename):
# Beginning to prioritise data points. Firstly ensure that the data are sorted by date and time.
dataframe = dataframe.sort_values(['date_time'])
last_processed_datetime_secs = None
rows_pending_decision = []
progress_count = 0
# open output file
output_file = output_filepath + output_filename
print("Output selected rows to ", output_file)
f = open(output_file, "a+")
if f:
print("Output file opened")
writer = csv.writer(f, delimiter=',')
writer.writerow(['date_time','latitude','longitude','fix_quality','number_satellites','horiz_dilution_of_position',
'altitude','altitude_units','geoid_height','geoid_height_units','device_id','speed','measureland_qualifier_flag_overall'])
selected_count = 0
non_selected_count = 0
for row_id, row in dataframe.iterrows():
row_datetime_secs = row['date_time'].strftime('%Y-%m-%d %H:%M:%S')
progress_count += 1
# do batches of 1000 rows at a time to avoid overloading memory
if progress_count == 1000:
print("Prioritising data points. Processing:", row_datetime_secs)
progress_count = 0
if row_datetime_secs != last_processed_datetime_secs and last_processed_datetime_secs is not None:
#print("Type:", rows_pending_decision())
count_rows = 0
length_selection = len(rows_pending_decision)
#print("-------------------Rows to choose from--------------: (Total = ", length_selection, ")")
selected_row = choose_rows(rows_pending_decision)
for item in rows_pending_decision:
#print("Counting rows: ", count_rows)
#print("Row number: ", count_rows, " \n --Here is the row--\n", rows_pending_decision[count_rows][[1, 2, 3, 14, 15, 16]])
#print("Row length: ", len(rows_pending_decision[count_rows]))
count_rows += 1
if selected_row is not None:
# write the selected row out to the file rather than appending it to the dataframe
# print(type(selected_row))
convert_float_nan_to_string_NaN(selected_row)
# selected_row['date_time'] = selected_row['date_time'].strftime('%Y-%m-%d %H:%M:%S.%f')
selected_row['date_time'] = '{}.{}+00:00'.format(selected_row['date_time'].strftime('%Y-%m-%dT%H:%M:%S'), selected_row['date_time'].strftime('%f')[0:2])
writer.writerow([*selected_row[1:12], selected_row[14], selected_row[16]])
#print("-----selected row: \n", selected_row)
#print("-----Selected row: \n", selected_row[[1, 2, 3, 13, 14, 15, 16]])
selected_count += 1
else:
#print("Rows of poor quality data, nothing selected")
non_selected_count += 1
rows_pending_decision = []
rows_pending_decision.append(row)
last_processed_datetime_secs = row_datetime_secs
f.close()
    if f.closed:
        print("Output file closed")
print("Number of rows selected: ", selected_count)
print("Number of rows where no selection is made: ", non_selected_count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pre_df(temp_pre_df):\n \n event_time_max = temp_pre_df['event_time'].max()\n cat_dfs = []\n for num in np.arange(0,(1080/2)+1,30)[1:]:\n # making <= null i.e keeping >\n temp_pre_df.loc[temp_pre_df['event_time'] <= int(num), 'event_time'] = np.nan\n for col in ['event_name', 'specialty', 'plan_type']:\n cat_df = temp_pre_df.groupby([\"id\", col]).agg({\"event_time\": 'count'}).unstack(level=col)\n cat_df = cat_df/(event_time_max-num)\n cat_df.columns = ['__'.join(['normChange', col, name, str(int(num))]) for name in cat_df.columns.droplevel()]\n cat_dfs.append(cat_df)\n pre_df = pd.concat(cat_dfs, axis = 1) \n return pre_df.fillna(0)",
"def same_extremes(df, train, test):\n\n # Merge together the data\n lookup = df.join(train.append(test)[[\"group_1\", \"date_act\"]])\n\n # Caluculate the easy ones\n grp = pd.DataFrame()\n grp[\"count\"] = lookup.groupby([\"group_1\", \"date_act\"])[\"outcome\"].count()\n grp[\"min\"] = lookup.groupby([\"group_1\", \"date_act\"])[\"outcome\"].min()\n grp[\"max\"] = lookup.groupby([\"group_1\", \"date_act\"])[\"outcome\"].max()\n grp = grp[grp[\"count\"] > 1]\n grp[\"value\"] = None\n grp.loc[(grp[\"max\"] < 0.5), \"value\"] = grp[\"min\"]\n grp.loc[(grp[\"min\"] > 0.5), \"value\"] = grp[\"max\"]\n\n # Do the remaining ones by loop\n for index, row in grp[grp[\"value\"].isnull()].iterrows():\n if 0.5-row[\"min\"] > row[\"max\"]-0.5:\n grp.loc[index, \"value\"] = row[\"min\"]\n else:\n grp.loc[index, \"value\"] = row[\"max\"]\n\n # Merge to lookup for indexing and filling\n lookup = lookup.reset_index().merge(grp[[\"value\"]].reset_index(), how=\"left\", on=[\"group_1\", \"date_act\"]\n ).set_index(\"activity_id\")\n lookup[\"value\"] = lookup[\"value\"].fillna(lookup[\"outcome\"])\n\n df[\"outcome\"] = lookup[\"outcome\"]\n\n return df",
"def split(interactions: pd.DataFrame, p: float = 0.25) -> Tuple[pd.DataFrame, pd.DataFrame]:\n test = interactions.groupby('track_id').sample(frac=p)\n rows = set((a, b) for _, (a, b, _) in test.iterrows())\n train_mask = [i for i, (_, (a, b, _)) in tqdm(enumerate(interactions.iterrows()), desc=\"Constructing train-set\",\n total=len(interactions)) if (a, b) not in rows]\n train = interactions.iloc[train_mask]\n\n return train, test",
"def example_staypoints_merge():\n p1 = Point(8.5067847, 47.4)\n\n t1 = pd.Timestamp(\"1971-01-01 00:00:00\", tz=\"utc\")\n t2 = pd.Timestamp(\"1971-01-02 05:00:00\", tz=\"utc\")\n t3 = pd.Timestamp(\"1971-01-02 06:45:00\", tz=\"utc\")\n t4 = pd.Timestamp(\"1971-01-02 08:55:00\", tz=\"utc\")\n t45 = pd.Timestamp(\"1971-01-02 08:57:00\", tz=\"utc\")\n t5 = pd.Timestamp(\"1971-01-02 09:00:00\", tz=\"utc\")\n t6 = pd.Timestamp(\"1971-01-02 09:20:00\", tz=\"utc\")\n\n list_dict = [\n {\"id\": 1, \"user_id\": 0, \"started_at\": t1, \"finished_at\": t2, \"geom\": p1, \"location_id\": 1},\n {\"id\": 5, \"user_id\": 0, \"started_at\": t2, \"finished_at\": t2, \"geom\": p1, \"location_id\": 2},\n {\"id\": 2, \"user_id\": 0, \"started_at\": t3, \"finished_at\": t4, \"geom\": p1, \"location_id\": 2},\n {\"id\": 6, \"user_id\": 0, \"started_at\": t4, \"finished_at\": t45, \"geom\": p1, \"location_id\": 2},\n {\"id\": 15, \"user_id\": 0, \"started_at\": t5, \"finished_at\": t6, \"geom\": p1, \"location_id\": 2},\n {\"id\": 7, \"user_id\": 1, \"started_at\": t3, \"finished_at\": t4, \"geom\": p1, \"location_id\": 2},\n {\"id\": 80, \"user_id\": 1, \"started_at\": t45, \"finished_at\": t5, \"geom\": p1, \"location_id\": 2},\n {\"id\": 3, \"user_id\": 1, \"started_at\": t5, \"finished_at\": t6, \"geom\": p1, \"location_id\": 4},\n ]\n sp = gpd.GeoDataFrame(data=list_dict, geometry=\"geom\", crs=\"EPSG:4326\")\n sp = sp.set_index(\"id\")\n sp.as_staypoints\n\n # generate empty triplegs for the merge function\n tpls = pd.DataFrame([], columns=[\"user_id\", \"started_at\", \"finished_at\"])\n return sp, tpls",
"def data_filter(\n df, CondTempRange=[float('-inf'), float('inf')],\n EvapTempRange=[float('-inf'), float('inf')],\n RemovalPoint=[OperatingPoint()],\n AddPoint=[OperatingPoint()]\n ):\n\n # copy new dataframe\n df_new = copy.deepcopy(df)\n\n # condition list\n cond = []\n cond.append(df.CondTempInF >= CondTempRange[0])\n cond.append(df.CondTempInF <= CondTempRange[1])\n cond.append(df.EvapTempInF >= EvapTempRange[0])\n cond.append(df.EvapTempInF <= EvapTempRange[1])\n for point in RemovalPoint:\n cond.append(df.OperatingPoint != point)\n addcond = []\n for point in AddPoint:\n addcond.append(df.OperatingPoint == point)\n\n # Apply AND to all conditions\n final_condition = cond[0]\n for ii in xrange(1, len(cond)):\n final_condition = final_condition*cond[ii]\n\n # Apply OR to all conditions\n for ii in xrange(0, len(addcond)):\n final_condition = final_condition+addcond[ii]\n\n # Return the data that satisfy all conditions\n return df_new[final_condition]",
"def filter_dataframe(self, df, d, hour):\n no_G = False\n\n df_p = df.copy()\n total_length = len(df_p)\n df_y = df_p.groupby(df_p.index.year).sum()\n\n d['ppt'] = np.mean(df_y.Precip.values)\n\n\n\n # NB, I'm not slicing the df here, setting this to a copy\n\n # The values for this flag are (0–3): _F_MDS_QC = 0 (measured);\n # _F_MDS_QC = 1 (filled with high confidence);\n #df_bal = df[( (df['Qle_qc'] == 0) | (df['Qle_qc'] == 1) ) &\n # ( (df['Qh_qc'] == 0) | (df['Qh_qc'] == 1) ) &\n # #( (df['Rnet_qc'] == 0) | (df['Rnet_qc'] == 1) ) &\n # ( (df['Qg_qc'] == 0) | (df['Qg_qc'] == 1) )]\n\n #top = np.sum(df_bal.Qle + df_bal.Qh)\n #bot = np.sum(df_bal.Rnet - df_bal.Qg)\n\n # filter daylight hours, good LE data, GPP, CO2\n #\n # If we have no ground heat flux, just use Rn\n if len(df[(df['Qg_qc'] == 0) | (df['Qg_qc'] == 1)]) == 0:\n df = df[(df.index.hour >= 7) &\n (df.index.hour <= 18) &\n ( (df['Qle_qc'] == 0) | (df['Qle_qc'] == 1) ) &\n (df['ET'] > 0.01 / 1000.) & # check in mmol, but units are mol\n (df['VPD'] > 0.05)]\n no_G = True\n else:\n df = df[(df.index.hour >= 7) &\n (df.index.hour <= 18) &\n ( (df['Qle_qc'] == 0) | (df['Qle_qc'] == 1) ) &\n ( (df['Qg_qc'] == 0) | (df['Qg_qc'] == 1) ) &\n (df['ET'] > 0.01 / 1000.) & # check in mmol, but units are mol\n (df['VPD'] > 0.05)]\n\n # Turn on if EB correcting - i.e. Fig A2\n # Correct based on method 4 from Wohlfahrt et al. Agricultural and\n # Forest Meteorology 149\n #if top > 0.0:\n # corection_factor = bot/top\n # df.Qle *= corection_factor\n # df.Qh *= corection_factor\n #\n # df.loc[:, 'Qle'] = df['Qle'] * corection_factor\n # df.loc[:, 'Qh'] = df['Qh'] * corection_factor\n #\n # lhv = self.latent_heat_vapourisation(df['Tair'])\n # df.loc[:, 'ET'] = df['Qle'] / lhv\n # df.loc[:, 'ET'] = df['Qle'] / lhv\n #\n #\n # # kg m-2 s-1 to mol m-2 s-1\n # conv = c.KG_TO_G * c.G_WATER_TO_MOL_WATER\n # df.loc[:, 'ET'] = df['ET'].copy() * conv\n #\n #\n # if bot > 0.0:\n # d['EBR'] = top / bot\n #else:\n # d['EBR'] = -999.9\n\n # Filter events after rain ...\n idx = df[df.Precip > 0.0].index.tolist()\n\n if hour:\n # hour gap i.e. Tumba\n bad_dates = []\n for rain_idx in idx:\n bad_dates.append(rain_idx)\n for i in range(24):\n new_idx = rain_idx + dt.timedelta(minutes=60)\n bad_dates.append(new_idx)\n rain_idx = new_idx\n\n df2 = df.copy()\n df2.loc[:, 'GPP_umol_m2_s'] = df2.loc[:, 'GPP'] #* c.MOL_C_TO_GRAMS_C * c.UMOL_TO_MOL\n df2.loc[:, 'GPP'] *= c.MOL_C_TO_GRAMS_C * c.UMOL_TO_MOL * \\\n c.SEC_TO_HR\n\n\n df = df2\n else:\n\n # 30 min gap\n bad_dates = []\n for rain_idx in idx:\n bad_dates.append(rain_idx)\n for i in range(48):\n new_idx = rain_idx + dt.timedelta(minutes=30)\n bad_dates.append(new_idx)\n rain_idx = new_idx\n\n df2 = df.copy()\n df2.loc[:, 'GPP_umol_m2_s'] = df2.loc[:, 'GPP'] #* c.MOL_C_TO_GRAMS_C * c.UMOL_TO_MOL\n df2.loc[:, 'GPP'] *= c.MOL_C_TO_GRAMS_C * c.UMOL_TO_MOL * \\\n c.SEC_TO_HLFHR\n\n\n df = df2\n\n # There will be duplicate dates most likely so remove these.\n bad_dates = np.unique(bad_dates)\n\n # remove rain days...\n df = df[~df.index.isin(bad_dates)]\n\n return (df, d, no_G)",
"def time_split_dataset(df, train_start_date, train_end_date, holdout_end_date, date_col):\n\n train_set = df.copy()[\n (df[date_col] >= train_start_date) & (df[date_col] <= train_end_date)]\n\n test_set = df.copy()[\n (df[date_col] > train_end_date) & (df[date_col] <= holdout_end_date)]\n\n return train_set, test_set",
"def example_staypoints():\n p1 = Point(8.5067847, 47.4)\n p2 = Point(8.5067847, 47.5)\n p3 = Point(8.5067847, 47.6)\n p4 = Point(8.5067847, 47.7)\n\n t1 = pd.Timestamp(\"1971-01-01 00:00:00\", tz=\"utc\")\n t2 = pd.Timestamp(\"1971-01-01 05:00:00\", tz=\"utc\")\n t3 = pd.Timestamp(\"1971-01-02 07:00:00\", tz=\"utc\")\n t4 = pd.Timestamp(\"1971-01-02 08:00:00\", tz=\"utc\")\n t5 = pd.Timestamp(\"1971-01-02 09:00:00\", tz=\"utc\")\n t6 = pd.Timestamp(\"1971-01-02 10:00:00\", tz=\"utc\")\n\n list_dict = [\n {\"id\": 1, \"user_id\": 0, \"started_at\": t1, \"finished_at\": t2, \"geom\": p1},\n {\"id\": 5, \"user_id\": 0, \"started_at\": t2, \"finished_at\": t3, \"geom\": p2},\n {\"id\": 2, \"user_id\": 0, \"started_at\": t3, \"finished_at\": t4, \"geom\": p3},\n {\"id\": 6, \"user_id\": 0, \"started_at\": t4, \"finished_at\": t5, \"geom\": p2},\n {\"id\": 15, \"user_id\": 0, \"started_at\": t5, \"finished_at\": t6, \"geom\": p1},\n {\"id\": 7, \"user_id\": 1, \"started_at\": t3, \"finished_at\": t4, \"geom\": p4},\n {\"id\": 80, \"user_id\": 1, \"started_at\": t4, \"finished_at\": t5, \"geom\": p2},\n {\"id\": 3, \"user_id\": 1, \"started_at\": t5, \"finished_at\": t6, \"geom\": p2},\n ]\n sp = gpd.GeoDataFrame(data=list_dict, geometry=\"geom\", crs=\"EPSG:4326\")\n sp = sp.set_index(\"id\")\n sp.as_staypoints\n return sp",
"def filtering(self, Z):\n coord_x_est = [0]\n coord_y_est = [0]\n time_est = [Z[['time']].iloc[0][0]]\n for i in range(1, len(Z.x)):\n new_dt = Z[['time']].iloc[i][0] - time_est[-1]\n while (new_dt > 150):\n new_state = [coord_x_est[-1], coord_y_est[-1], time_est[-1]]\n new_state = self._restore(new_state)\n coord_x_est.append(new_state[0])\n coord_y_est.append(new_state[1])\n time_est.append(new_state[2])\n new_dt -= 100\n\n# self.configure(new_dt)\n self._predict()\n mes = Z[['x', 'y']].iloc[i].to_numpy()\n self._update(np.resize(mes, (2, 1)))\n # save for latter plotting\n coord_x_est.append(self.x[0])\n coord_y_est.append(self.x[4])\n time_est.append(int(Z[['time']].iloc[i]))\n\n return pd.DataFrame({'X_f': coord_x_est,\n 'Y_f': coord_y_est,\n 'time': time_est\n })",
"def samples_timesteps_features(dataframe, columns, start_date, timesteps=72, \n steps_ahead=24, window_days=100, train_percent=80.):\n \n def overlap_windows(dataset, timesteps, steps_ahead):\n \"\"\" Create overlaping window of time-series data\n \n Parameters\n ----------\n dataset: pd.DataFrame\n time-series pandas dataset\n timesteps: int\n number of time steps from the past for creating output arrays\n steps_ahead: int\n number of time steps into the future for making predictions\n \n Returns\n -------\n X, y: np.array\n input and output 3-d arrays of overlaping time windows\n \"\"\"\n X = []; y = []\n \n start = 0\n for i in range(len(dataset)):\n # Define the end of the input sequence\n in_end = start + timesteps\n out_end = in_end + steps_ahead\n # Ensure that there is enough data\n if out_end <= len(dataset):\n X.append(dataset[start:in_end, :])\n # First column holds load values\n y.append(dataset[in_end:out_end, 0])\n # Move along one time step\n start += 1\n \n # Convert list to np.array\n X = np.asarray(X)\n y = np.asarray(y)\n \n return X, y\n\n\n data = dataframe.copy()\n \n if window_days*24 > data.values.shape[0]:\n raise ValueError('Variable window_days has too large value: {}*24h = {} > {}, which is more than there is data!'.format(window_days, window_days*24, \n data.values.shape[0]))\n \n # Training period\n # ---------------\n train_percent = train_percent/100.\n st = pd.to_datetime(start_date) # start date\n et = st + dt.timedelta(days=int(train_percent*window_days)) # end date\n train = data.loc[st:et].values\n \n # Standardize and transform training data set\n mean_std_values = {}\n for i, column in enumerate(columns):\n # Calculate mean and standard deviation only\n # from the training data set values\n mu = train[:,i].mean() # axis=0\n sd = train[:,i].std()\n mean_std_values[column] = (mu, sd)\n # Standardize training data\n train[:,i] = (train[:,i] - mu)/sd\n \n # Create overlapping windows with training data\n X_train, y_train = overlap_windows(train, timesteps, steps_ahead)\n \n # Testing / Validation period\n # ---------------------------\n sv = et \n ev = sv + dt.timedelta(days=int((1-train_percent)*window_days)+1)\n test = data.loc[sv:ev].values\n \n # Transform testing/validation data set\n for i, column in enumerate(columns):\n # Use mean and standard deviation from the\n # training data set\n mu = mean_std_values[column][0]\n sd = mean_std_values[column][1]\n # Standardize test data\n test[:,i] = (test[:,i] - mu)/sd\n \n # Create overlaping windows with test data\n X_test, y_test = overlap_windows(test, timesteps, steps_ahead)\n \n return mean_std_values, X_train, y_train, X_test, y_test",
"def oversample_minority(df, ratio=1.0, random_state=3):\n count_class_0, count_class_1 = df[\"Status\"].value_counts()\n df_class_0 = df[df[\"Status\"] == \"paid\"]\n df_class_1 = df[df[\"Status\"] == \"defaulted\"]\n # print(count_class_0)\n # print(count_class_1)\n df_class_1_over = df_class_1.sample(\n int(ratio * count_class_0), replace=True, random_state=random_state\n )\n df_train_over = pd.concat([df_class_0, df_class_1_over], axis=0)\n # print(df_train_over['Status'].value_counts())\n return df_train_over",
"def sfp_prior_preparation(portshow_sfp_aggregated_df, pattern_dct):\n\n sfp_aggregated_modified_df = portshow_sfp_aggregated_df.copy()\n # drop duplicated port rows\n sfp_aggregated_modified_df.drop_duplicates(subset=['configname', 'chassis_name', 'chassis_wwn', \n 'switchName', 'switchWwn', 'slot', 'port'], inplace=True)\n # extract transceiver speed\n sfp_aggregated_modified_df['Transceiver_speed_extracted'] = \\\n sfp_aggregated_modified_df['Transceiver_mode'].str.extract(pattern_dct['transceiver_speed'])\n # extract transceiver mode\n sfp_aggregated_modified_df['Transceiver_mode_extracted'] = \\\n sfp_aggregated_modified_df['Transceiver_mode'].str.extract(pattern_dct['transceiver_mode'])\n # merge sfp speed and mode (lw, sw)\n sfp_aggregated_modified_df = dfop.merge_columns(sfp_aggregated_modified_df, summary_column='Transceiver_speed_mode_extracted', \n merge_columns=['Transceiver_speed_extracted', 'Transceiver_mode_extracted'], \n sep=' ', drop_merge_columns=False, sort_summary=False)\n # merge port state with transceiver details\n # add 'No_SFP_module' tag for cu media in blade switches to mark portPhys status\n # mask_vendor_no_sfp_module = sfp_aggregated_modified_df['Transceiver_Name'] == 'No SFP module'\n # mask_portphys_no_module = sfp_aggregated_modified_df['portPhys'] == 'No_Module'\n # sfp_aggregated_modified_df.loc[~mask_portphys_no_module & mask_vendor_no_sfp_module, 'No_SFP_module'] = 'No_SFP_module'\n sfp_aggregated_modified_df = dfop.merge_columns(sfp_aggregated_modified_df, summary_column='PortPhys_transceiver', \n merge_columns=['portPhys', 'Transceiver_speed_mode_extracted'], \n sep=' ', drop_merge_columns=False, sort_summary=False)\n # add annotation to the intervals\n comment_sfp_readings_interval(sfp_aggregated_modified_df) \n # transceiver support\n comment_sfp_support(sfp_aggregated_modified_df)\n # transceiver form factor (qsfp, dsfp)\n comment_specific_sfp(sfp_aggregated_modified_df, sfp_specification_column='Transceiver_form_factor', sfp_specification_name='Form factor', normal_value='sfp', upper_case_spec=True)\n # long distance sfps\n comment_specific_sfp(sfp_aggregated_modified_df, sfp_specification_column='Transceiver_distanceMax', sfp_specification_name='Distance', normal_value='normal')\n # merge vendor, part number and transcever details (speed and mode) \n sfp_aggregated_modified_df = dfop.merge_columns(\n sfp_aggregated_modified_df, summary_column='Transceiver_Name_PN', \n merge_columns=['Transceiver_Name', 'Transceiver_PN', 'Transceiver_speed_mode_extracted'], \n sep=' ', drop_merge_columns=False)\n # port_quantity column\n sfp_aggregated_modified_df['Port_quantity'] = 'Port_quantity'\n # transceiver quantity column\n mask_sfp_pn_notna = sfp_aggregated_modified_df['Transceiver_PN'].notna()\n sfp_aggregated_modified_df.loc[mask_sfp_pn_notna, 'Transceiver_quantity'] = 'Transceiver_quantity'\n return sfp_aggregated_modified_df",
"def filter_samples(df, normal_samples, damaged_samples, assembly_samples, missing_samples, damaged_thread_samples,\n loosening_samples, move_samples):\n # Count the sample types\n count_df = df.groupby(['sample_nr'])['label'].median()\n unique, counts = np.unique(count_df, return_counts=True)\n labels_count_dict = {A: B for A, B in zip(unique, counts)}\n\n # Take only the amount of samples that's needed to fill the requirement\n sampled_list = []\n for label in labels_count_dict:\n subindex = list(np.unique(df.loc[df['label'] == label].index.get_level_values(0)))\n\n if label == 0:\n to_take = normal_samples * labels_count_dict[0]\n elif label == 1:\n to_take = damaged_samples * labels_count_dict[1]\n elif label == 2:\n to_take = assembly_samples * labels_count_dict[2]\n elif label == 3:\n to_take = missing_samples * labels_count_dict[3]\n elif label == 4:\n to_take = damaged_thread_samples * labels_count_dict[4]\n elif label == 5:\n to_take = loosening_samples * labels_count_dict[5]\n elif label == 6:\n to_take = move_samples * labels_count_dict[6]\n\n sample_ids = np.random.choice(subindex, int(to_take), replace=False)\n sampled_df = df[df.index.get_level_values(0).isin(sample_ids)]\n sampled_list.append(sampled_df)\n\n taken_data = pd.concat(sampled_list, ignore_index=False).sort_values(['sample_nr', 'event'])\n\n # Reset the sample numbers\n taken_data = taken_data.reset_index()\n taken_data['sample_nr'] = (taken_data['sample_nr'] != taken_data['sample_nr'].shift(1)).astype(int).cumsum()\n taken_data['event'] = taken_data.index\n taken_data = taken_data.set_index(['sample_nr', 'event'])\n taken_data = taken_data.reset_index('event', drop=True)\n taken_data = taken_data.set_index(taken_data.groupby(level=0).cumcount().rename('event'), append=True)\n taken_data = taken_data.sort_index()\n\n return taken_data",
"def split_orders(df, ts_val, ts_test, column=TIMESTAMP):\n df_trn = df.loc[df[column] < ts_val, :]\n df_val = df.loc[df[column] < ts_test, :]\n df_test = df.copy()\n\n df_trn.reset_index(drop=True, inplace=True)\n df_val.reset_index(drop=True, inplace=True)\n df_test.reset_index(drop=True, inplace=True)\n\n return df_trn, df_val, df_test",
"def assign_to_time_window_depth(df_liquidity, sta, end, hours_s, hours_e):\n\n df_liquidity_sel = df_liquidity[(hours_s >= sta)\n & (hours_e <= end)\n ]\n\n # filter for ice-berg order indication\n df_liquidity_sel_ = df_liquidity_sel[df_liquidity_sel['Volume'] != 0]\n\n # check if df_liquidity_sel_ is not empty and return average in the rare case that now orders are submitted in a specific time frame\n if df_liquidity_sel_.shape[0] > 0:\n return [df_liquidity_sel_['Price'][df_liquidity_sel_['Side'] == 'S'].values.tolist(),\n df_liquidity_sel_['Volume'][df_liquidity_sel_[\n 'Side'] == 'S'].values.tolist(),\n df_liquidity_sel_['Price'][df_liquidity_sel_[\n 'Side'] == 'B'].values.tolist(),\n df_liquidity_sel_['Volume'][df_liquidity_sel_['Side'] == 'B'].values.tolist()]\n\n else:\n return None",
"def execQ8():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n filtered_frame = frame.sort_values(by='Price', ascending=False).drop_duplicates('Product').head(10)\n return filtered_frame",
"def sleepTime(df, lev1=20, lev2=120, lev3=30, init_state=True):\n REQ_PASSIVE_RATE = 0.75 # at least 75% of values\n \n sleep = pd.Series(np.full(len(df), -1, dtype=int), index=df.index)\n\n dfmed = df.value.rolling(lev1, center=True).median()\n pstart = []\n \n lev1_pre = lev1/2\n lev1_pos = lev1-lev1_pre\n \n for i in range (lev1, len(dfmed)-lev1_pos):\n # if lev1 size window is 0, add to possible start of sleep list\n # and step over to next possible sleep start\n if dfmed[i-lev1_pre:i].sum() == 0 and dfmed[i+1:i+lev1_pos].sum() == 0: \n pstart.append(i-lev1)\n i += lev1_pos\n \n # alternative:\n # dfmed.rolling(lev1).sum()\n #pend = dfmed.index[(dfmed.rolling(lev3).min() > 0)]\n \n laststate = -lev2\n for i in pstart:\n # ignore possible start state earlier flagged as active sleep states\n if i - laststate < lev2:\n continue\n # if given percentage is 0, we have active sleep state\n if dfmed[i:i+lev2].quantile(REQ_PASSIVE_RATE) == 0:\n laststate = i\n sleep[i] = 0 \n \n for i in range (lev1_pre, len(dfmed)-lev3):\n # if lev3 size window is non 0, then end of active asleep\n if dfmed[i:i+lev3].min() > 0: \n i += lev3\n sleep[i] = 1 \n \n laststate = init_state\n for i in range(len(sleep)):\n if sleep[i] != -1:\n laststate = sleep[i]\n else:\n sleep[i] = laststate\n \n return sleep",
"def groupTrajectories(self, dt = 100)->None:#4 * 30)->None:\r\n for i, p1 in enumerate(self._analyzer.activePeople):\r\n for j, p2 in enumerate(self._analyzer.activePeople):\r\n if (i > j) and (p1 not in p2.inGroupWith):\r\n if ((len(p1.coordinates) >= dt) and (len(p2.coordinates) >= dt)):\r\n in_group = True\r\n for k in range(dt):\r\n if ((p1.coordinates[-k] != None) and (p2.coordinates[-k] != None) and (p1.coordinates[-k].DistanceFrom(p2.coordinates[-k]) > self._minDist)):\r\n in_group = False\r\n if in_group:\r\n p1.inGroupWith.append(p2)\r\n p2.inGroupWith.append(p1)",
"def get_post_df(temp_post_df):\n \n cat_dfs = []\n for num in np.arange(1080/2,0,-30):\n # making > null i.e keeping <=\n temp_post_df.loc[temp_post_df['event_time'] > int(num), 'event_time'] = np.nan\n for col in ['event_name', 'specialty', 'plan_type']:\n cat_df = temp_post_df.groupby([\"id\", col]).agg({\"event_time\": 'count'}).unstack(level=col)\n cat_df = cat_df/num\n cat_df.columns = ['__'.join(['normChange', col, name, str(int(num))]) for name in cat_df.columns.droplevel()]\n cat_dfs.append(cat_df) \n post_df = pd.concat(cat_dfs, axis = 1)\n return post_df.fillna(0)",
"def run_strategy(data, sort_by='price', sort_mode=True, limit=5, strategy=None):\n grouped = data.dropna().groupby('timestamp')\n collect = []\n for name, group in grouped:\n if strategy:\n temp = group.apply(strategy)\n else:\n temp = group.sort_values(by=sort_by, ascending=sort_mode).iloc[:limit]\n collect.append(temp)\n return pd.concat(collect)",
"def assign_priorities_and_dp(self):\r\n taskset_copy = copy(self.taskset)\r\n tasks = taskset_copy.sorted_by_crit()",
"def get_reliability(df, pos):\n weights = {\n \"F\": {\n 'gs60': {\"n-1\": .62, \"n-2\": .22, \"n-3\": .16},\n 'toi/gp': {\"n-1\": .9, \"n-2\": .1, \"n-3\": 0}\n },\n \"D\": {\n 'gs60': {\"n-1\": .6, \"n-2\": .25, \"n-3\": .15},\n 'toi/gp': {\"n-1\": .85, \"n-2\": .15, \"n-3\": 0}\n }\n }\n\n # Only essential columns\n df = df[[\"player\", \"player_id\", \"season\", 'toi_on', 'gs60', 'toi/gp']]\n\n # Copy over ALL Data\n predict_df = df.copy()\n\n # To get fours years in a row (3 predicting 1).. we run 'get_previous_year' 3 times\n # Each time we get n-_ by using predict_col\n # NOTE: I'm writing over df here!!!!\n for seasons in range(1, 4):\n df = get_previous_yr(df, predict_df, seasons)\n # Instead of dropping we just fill it\n df = df.fillna(0)\n\n # Filter for minimum toi over the previous 3 years\n # I'll do 1200 over three years to qualify (because b4 we did at least 400 for each 3)\n # Also need 800 in year 4\n df = df[(df['toi_on'] >= 800) & (df['toi_on_n-1'] + df['toi_on_n-2'] + df['toi_on_n-3'] >= 1200)]\n\n print(\"\\nPlayers: {}\".format(df.shape[0]))\n\n # Apply weights and predict\n for col in ['gs60', 'toi/gp']:\n df['weighted_{}'.format(col)] = df['{}_n-1'.format(col)] * weights[pos][col][\"n-1\"]\\\n + df['{}_n-2'.format(col)] * weights[pos][col][\"n-2\"]\\\n + df['{}_n-3'.format(col)] * weights[pos][col][\"n-3\"]\n\n df['weighted_sample_{}'.format(col)] = df['toi_on_n-1'] * weights[pos][col][\"n-1\"]\\\n + df['toi_on_n-2'] * weights[pos][col][\"n-2\"]\\\n + df['toi_on_n-3'] * weights[pos][col][\"n-3\"]\n\n # Prepare shit\n model_features = df['weighted_{}'.format(col)].values.tolist()\n model_target = df[col].values.tolist()\n model_features, model_target = np.array(model_features), np.array(model_target).ravel()\n corr = pearsonr(model_features, model_target)[0]\n\n print(\"The Correlation for {}:\".format(col), round(corr, 2))\n print(\"The Constant for {}:\".format(col), round((1-corr)/corr * df['weighted_sample_{}'.format(col)].mean(), 0))",
"def split_data(df):\n\n df['ranked_latest'] = df.groupby(['userId'])['timestamp'].rank(method='first', ascending=False)\n train_df = df[df['ranked_latest'] != 1]\n test_df = df[df['ranked_latest'] == 1]\n\n train_df = train_df[['userId', 'movieId', 'rating']]\n test_df = test_df[['userId', 'movieId', 'rating']]\n\n return train_df, test_df",
"def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')",
"def create_dataframe(euctr_cond):\n def f(x):\n d = {}\n d['number_of_countries'] = x.eudract_number_with_country.nunique()\n d['min_end_date'] = x.date_of_the_global_end_of_the_trial.min()\n d['max_end_date'] = x.date_of_the_global_end_of_the_trial.max()\n d['comp_date'] = np.where(pd.notnull(x.date_of_the_global_end_of_the_trial),1,0).sum()\n d['has_results'] = x.trial_results.sum()\n d['includes_pip'] = x.trial_is_part_of_a_paediatric_investigation_plan.sum()\n d['single_blind'] = x.trial_single_blind.sum()\n d['not_single_blind'] = x.not_single_blind.sum()\n d['rare_disease'] = x.trial_condition_being_studied_is_a_rare_disease.sum()\n d['not_rare_disease'] = x.not_rare_disease.sum()\n d['rare_disease_blank'] = x.rare_disease_blank.sum()\n d['completed'] = np.where(x.end_of_trial_status == 'Completed', 1, 0).sum()\n d['ongoing'] = np.where((x.end_of_trial_status == 'Ongoing') | (x.end_of_trial_status == 'Restarted'), 1, 0).sum()\n d['terminated'] = np.where(x.end_of_trial_status == 'Prematurely Ended', 1, 0).sum()\n d['suspended'] = np.where((x.end_of_trial_status == 'Temporarily Halted') | (x.end_of_trial_status == 'Suspended by CA'), 1, 0).sum()\n d['other_status'] = np.where((x.end_of_trial_status == 'Not Authorised') | (x.end_of_trial_status == 'Prohibited by CA'), 1, 0).sum()\n d['no_status'] = np.where(pd.isnull(x.end_of_trial_status),1,0).sum()\n d['phase_1'] = x.trial_human_pharmacology_phase_i.sum()\n d['phase_2'] = x.trial_therapeutic_exploratory_phase_ii.sum()\n d['phase_3'] = x.trial_therapeutic_confirmatory_phase_iii.sum()\n d['phase_4'] = x.trial_therapeutic_use_phase_iv.sum()\n d['bioequivalence'] = x.trial_bioequivalence_study.sum()\n d['not_bioequivalence'] = x.not_bioequivalence_study.sum()\n d['healthy_volunteers'] = x.subject_healthy_volunteers.sum()\n d['not_healthy_volunteers'] = x.not_healthy_volunteers.sum()\n d['full_title'] = x.full_title.astype('str').min()\n d['abbreviated_title'] = x.abbreviated_title.astype('str').max()\n d['non_eu'] = x.non_eu.sum()\n return pd.Series(d)\n\n return euctr_cond.groupby('eudract_number').apply(f).reset_index()",
"def create_sample_dataframe():\n ax_readings = []\n ay_readings = []\n az_readings = []\n mx_readings = []\n my_readings = []\n mz_readings = []\n gx_readings = []\n gy_readings = []\n gz_readings = []\n activity_list = [LABELS_NAMES[0] for _ in range(SEGMENT_TIME_SIZE)]\n\n\n for _ in range(SEGMENT_TIME_SIZE):\n ax_readings.append(random.uniform(-10,10))\n ay_readings.append(random.uniform(-10,10))\n az_readings.append(random.uniform(-10,10))\n mx_readings.append(random.uniform(-10,10))\n my_readings.append(random.uniform(-10,10))\n mz_readings.append(random.uniform(-10,10))\n gx_readings.append(random.uniform(-10,10))\n gy_readings.append(random.uniform(-10,10))\n gz_readings.append(random.uniform(-10,10))\n\n data_dict = {\n COLUMN_NAMES[0]: activity_list, COLUMN_NAMES[1]: ax_readings,\n COLUMN_NAMES[2]: ay_readings, COLUMN_NAMES[3]: az_readings,\n COLUMN_NAMES[4]: gx_readings, COLUMN_NAMES[5]: gy_readings,\n COLUMN_NAMES[6]: gz_readings, COLUMN_NAMES[7]: mx_readings,\n COLUMN_NAMES[8]: my_readings, COLUMN_NAMES[9]: mz_readings\n }\n\n df = pd.DataFrame(data=data_dict)\n return df",
"def choose_split(data,treshold):\n n_features = len(data[0]) - 1 # number of columns\n quest_gain = [] # keep track of the gains and questions\n\n for col in range(1,n_features): # for each feature\n values = set([row[col] for row in data]) # unique values in the column\n for val in values: # for each value\n question = Question(col, val)\n \n # try splitting the dataset\n true_rows, false_rows = partition(data, question)\n\n # Skip this split if it doesn't divide the dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(data, true_rows, false_rows)\n quest_gain.append(Question_gain(gain,question))\n\n possible_question = [] # possible questions to ask\n n_quest_gain = len(quest_gain)\n\n if n_quest_gain == 0:\n return float('Inf'), float('NaN') #\n\n for x in range(n_quest_gain):\n if (quest_gain[x].gain >= treshold):\n possible_question.append(Question_gain(quest_gain[x].gain,quest_gain[x].question))\n \n n_possible_question = len(possible_question)\n if n_possible_question == 0:\n return float('Inf'), float('NaN')\n\n if n_possible_question>=2:\n [i, j] = random.sample(range(0, n_possible_question), 2)\n else:\n i = j = random.randint(0,n_possible_question-1)\n\n if possible_question[i].gain>=possible_question[j].gain:\n return possible_question[i].gain, possible_question[i].question\n else:\n return possible_question[j].gain, possible_question[j].question",
"def generate_example() -> pd.DataFrame:\n rng = np.random.RandomState(1234)\n\n df = generate_test_dataframe(n_dims=2, size=2000)\n df[\"date\"] = pd.Timestamp(\"2000-01-01\") + pd.to_timedelta(df[\"dim_0\"], unit=\"D\")\n df[\"month\"] = df[\"date\"].dt.month.astype(np.int8)\n df[\"year\"] = df[\"date\"].dt.year.astype(np.int16)\n df[\"city\"] = \"city_\" + df[\"dim_1\"].astype(\"str\")\n df[\"country\"] = \"country_\" + (df[\"dim_1\"] // 500).astype(\"str\")\n df[\"avg_temp\"] = (\n rng.normal(loc=10.0, scale=5.0, size=len(df))\n .round(decimals=1)\n .astype(np.float32)\n )\n df[\"rain\"] = rng.rand(len(df)) > 0.9\n df[\"mood\"] = \"ok\"\n df.loc[(~df[\"rain\"]) & (df[\"avg_temp\"] > 15), \"mood\"] = \"great\"\n df.loc[(df[\"rain\"]) & (df[\"avg_temp\"] < 5), \"mood\"] = \"sad\"\n return df[[\"date\", \"month\", \"year\", \"city\", \"country\", \"avg_temp\", \"rain\", \"mood\"]]",
"def first_featurizer(group_key, order_key, condition = None):\n \n tempdf = pd.DataFrame({'gkey':group_key, 'okey':order_key})\n \n if condition is None:\n d = tempdf.groupby('gkey').okey.min().to_dict()\n else:\n d = tempdf[condition].groupby('gkey').okey.min().to_dict()\n \n return tempdf.apply(lambda x: x.okey == d[x.gkey] if x.gkey in d else False, 1)",
"def sample_tissues(df,size):\n tissues_df = pd.DataFrame(df,columns=['tissueid','time','n']).drop_duplicates()\n sample = tissues_df.groupby('n').apply(lambda x: x.sample(size)) \n index = pd.MultiIndex.from_frame(sample) \n df = df.assign(i=[i for n in df.n for i in range(n)]) \n df.set_index(['tissueid','time','i'])[index]"
] | [
"0.567459",
"0.5266208",
"0.5183847",
"0.51282346",
"0.5121264",
"0.50491226",
"0.50445664",
"0.49976778",
"0.49790773",
"0.4944259",
"0.4943229",
"0.49305478",
"0.48992634",
"0.48914003",
"0.4884616",
"0.48839062",
"0.48819488",
"0.48802802",
"0.48760328",
"0.486246",
"0.48617962",
"0.4860878",
"0.4842715",
"0.48272887",
"0.482362",
"0.48228398",
"0.48219126",
"0.48218927",
"0.47984168",
"0.47957304"
] | 0.6297207 | 0 |
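The prioritise_data_points record above walks the DataFrame row by row, batching rows that share the same timestamp truncated to seconds and writing one chosen row per batch. A vectorised pandas equivalent of just that grouping step (not the full selection logic) might look like the sketch below; the data and the use of .first() as the per-group rule are assumptions for illustration only.

import pandas as pd

df = pd.DataFrame({
    "date_time": pd.to_datetime(["2017-01-01 00:00:00.10",
                                 "2017-01-01 00:00:00.60",
                                 "2017-01-01 00:00:01.20"]),
    "device_id": [64, 63, 64],
})

# Truncate timestamps to whole seconds and keep one row per one-second group;
# .first() stands in for the real per-group priority rule.
df["second"] = df["date_time"].dt.floor("s")
selected = df.sort_values("date_time").groupby("second", as_index=False).first()
print(selected)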
Get number of points from each device in prioritised dataframe | def get_device_summary(dataframe):
print("Total number of points: ", len(dataframe))
print("The number of rows from each device are as follows: ", dataframe.groupby(['device_id']).size()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dist_count(df):\n df_dist = df.apply(lambda r: r/sum(r) if sum(r)>0 else r, axis=1)\n df_count = pd.DataFrame()\n df_count['count'] = df.apply(lambda r: sum(r), axis=1)\n # we can't really distinguish class 1/4 and 2/7,\n # maybe this will help to seperate 14/27\n df_count['count_14'] = df_dist.apply(lambda r: r[0] + r[3], axis=1)\n df_count['count_27'] = df_dist.apply(lambda r: r[1] + r[6], axis=1)\n \n return df_dist, df_count",
"def get_num_instances(df):\n non_nan = df.dropna(axis='columns') # nan cols would not have valid counts\n classes = non_nan.groupby('class_label')\n counts = classes.count() # count instances in each group (class)\n first_column = counts.iloc[:, 1] # we could get any column instead\n return first_column",
"def pos_conserved(df, conservation):\n nb_rows, nb_cols = df.shape\n\n value_counts = df.apply(pd.Series.value_counts, axis=0).max(axis=0).ge(conservation * nb_rows)\n\n ge = [i for i, x in enumerate(value_counts) if x]\n return ge",
"def get_number_of_components(df):\n n_components = 6 # since there a 6 numeric features\n pipe = _build_model(df, use_pca=True, n_components=n_components, use_kmeans=False, n_clusters=99)\n explained_variances = pipe.named_steps['pca'].explained_variance_ratio_\n plt.figure(7, figsize=(12, 6))\n plt.plot(range(1, 7), np.cumsum(explained_variances), 'o')\n plt.plot(range(1, 7), np.cumsum(explained_variances), '-', alpha=0.5)\n plt.xlabel('number of components')\n plt.ylabel('cumulative explained variance')\n plt.show()",
"def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])",
"def get_coop_coop_neighbour_dist(df):\n j_dist = df.groupby(['n','k'])['j'].value_counts(normalize=True).sort_index()\n return j_dist.reset_index(name='j_freq')",
"def count_neighbors_within_distance_groups(\n grouped_distances: pd.core.groupby.DataFrameGroupBy,\n) -> pd.DataFrame:\n return (\n grouped_distances.apply(\n lambda x: pd.to_numeric(arg=x[\"distance_ij\"].count(), downcast=\"integer\")\n )\n .rename(\"n\")\n .reset_index()\n )",
"def get_num_classes(df):\n classes = df.groupby('class_label')\n return classes.ngroups",
"def get_class_count(df):\r\n \r\n return df[\"class\"].value_counts()",
"def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])",
"def n_points(self) -> int:\n return len(self.all_df)",
"def n_points(self) -> int:\n return len(self.df)",
"def getCounts(training_data, test_row, k):\n neighbors = getNeighbors(training_data, test_row, k)\n output_vals = [row[-1] for row in neighbors]\n\n counts = dict()\n\n for i in output_vals:\n counts[i] = counts.get(i, 0) + 1\n \n return counts",
"def custom_numpy_count(df, weights=None):\n val = df.values\n un = np.unique(val.reshape(-1))\n if weights:\n pass\n r = {u: np.einsum('i, ij->j', weights, (val == u)) if weights is not None else np.einsum('ij->j', (val == u).astype(int)) for u in un}\n return pd.DataFrame(r).transpose()",
"def label_counts(rows):\n counts = rows.iloc[:, -1].value_counts()\n return counts",
"def get_number_of_data_points(self):\n\n log.warning(\n \"get_number_of_data_points not implemented, values for statistical measurements such as AIC or BIC are \"\n \"unreliable\",\n )\n\n return 1.0",
"def get_num_pos_neg_kmers(st: Subtype, df: DataFrame) -> Tuple[int, int]:\n dfst = df[(df['subtype'] == str(st.subtype))]\n return dfst[dfst['is_pos_kmer']].shape[0], dfst[~dfst['is_pos_kmer']].shape[0]",
"def get_num_points(self):\n dimensions = self.data.shape\n return dimensions[0]",
"def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0",
"def grid_point_count(self):\n return pytools.product(self.grid_point_counts())",
"def numpoints(self):\n return len(self.pars) + 1 # so dof is 1",
"def compute(self, df):\n df = df.drop_duplicates()\\\n .copy() # We force the copy to avoid raising a SettingWithCopyWarning when creating the 'team-id' column\n\n df['team-id'] = np.where(df.league.isna(), df.team, df.team + '-' + df.league)\n\n df = df.groupby('player-id', as_index=False) \\\n .agg(team_count=('team-id', 'count'), teams=('team-id', list))\n\n df = df[df.team_count >= 3]\n\n team_triple_counter = dict()\n for player_teams in df.teams:\n player_team_triples = itertools.combinations(player_teams, r=3)\n for team_triple in player_team_triples:\n key = frozenset(team_triple)\n current_triple_count = team_triple_counter.get(key, 0)\n team_triple_counter[key] = current_triple_count + 1\n\n return [(triple, count) for triple, count in team_triple_counter.items() if count >= self.min_player_count]",
"def count_elements_in_dataset(dataset):\n return dataset.count()",
"def count_quadrants(target, predictions, threshold=0, return_quadrants_df=False) :\n\n comparison = pd.concat([target, predictions], axis=1)\n comparison.columns = ['Target', 'Prediction']\n\n\n quadrant1 = comparison.loc[(comparison.Target < threshold ) & (comparison.Prediction > threshold)]\n quadrant2 = comparison.loc[(comparison.Target > threshold ) & (comparison.Prediction > threshold)]\n quadrant3 = comparison.loc[(comparison.Target < threshold ) & (comparison.Prediction < threshold)]\n quadrant4 = comparison.loc[(comparison.Target > threshold ) & (comparison.Prediction < threshold)]\n\n quadrants = [quadrant1,\n quadrant2,\n quadrant3,\n quadrant4]\n\n sizes = [len(q) for q in quadrants]\n total_size = np.sum(sizes)\n sizes.insert(0, total_size)\n if return_quadrants_df:\n return sizes, quadrants\n return sizes",
"def number_of_values(df,value=0,axis=0):\n \n return (df == value).astype(int).sum(axis=1-axis)",
"def Points_Counting(self):\n return len(self.__traectory_list)",
"def get_correct_lap_count(self):",
"def get_degree_distribution(df):\n degree_dist = df.k.value_counts(normalize=True).sort_index()\n return degree_dist",
"def ranking(availability_info,mapds):\n rank=Counter(dict())\n for key in availability_info.keys():\n rank[mapds[key]]=len(availability_info[key])\n #print rank\n return rank",
"def number_performers(self):\n return len(self.touches['device_id'].unique().tolist())"
] | [
"0.6276028",
"0.6060957",
"0.6040219",
"0.5901118",
"0.58417374",
"0.58276767",
"0.582397",
"0.5809163",
"0.57872033",
"0.57382846",
"0.5704486",
"0.5683693",
"0.56827277",
"0.56554943",
"0.5646682",
"0.56145674",
"0.5590394",
"0.55788803",
"0.55454695",
"0.5504899",
"0.5490605",
"0.54853404",
"0.5480914",
"0.5464016",
"0.54541713",
"0.5403382",
"0.53882927",
"0.5384614",
"0.5363342",
"0.5354762"
] | 0.6771763 | 0 |
Parse the display lines and return as much as we can infer about the state of the amp. Note that the maximum length of sources is 8 characters. Display line 2 is an informational display with multiple purposes, so we decode what we can, but it's up to the caller to decide what to do when one item disappears as another appears. It is copied out verbatim in the 'info' field and it is probably safest to just display that to the user and leave it at that. | def parse_display_lines(self):
is_on = None
source_name = None
volume = None
mute_on = None
party_mode_on = None
info = None
rec_source = None
zone2_source = None
zone2_volume = None
zone3_source = None
zone3_volume = None
zone4_source = None
zone4_volume = None
line0 = self.lines[0]
if len(line0) != 21:
_LOGGER.error("Display line 1 must be exactly 21 bytes")
if (
line0
== "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
):
is_on = False
else:
is_on = True
source_name = line0[:8].rstrip()
party_mode_on = line0[10:13] == "pty"
vol_str = line0[14:]
if (vol_str == "MUTE ON") or (vol_str == " "):
mute_on = True
volume = None
elif vol_str[0:3] != "VOL":
_LOGGER.error("Could not verify VOL string: %s", vol_str)
else:
mute_on = False
volume = int(vol_str[3:])
line1 = self.lines[1]
if len(line1) != 21:
_LOGGER.error("Display line 2 must be exactly 21 bytes")
if (
line1
== "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
):
pass
else:
info = line1.strip().replace("\x19", "II")
if line1[:9] == " REC ":
rec_source = line1[9:].rstrip()
elif line1[:9] == " ZONE2 ":
zone2_source = line1[9:].rstrip()
elif line1[:14] == " ZONE2 VOL ":
zone2_volume = int(line1[14:16])
elif line1[:9] == " ZONE3 ":
zone3_source = line1[9:].rstrip()
elif line1[:14] == " ZONE3 VOL ":
zone3_volume = int(line1[14:16])
elif line1[:9] == " ZONE4 ":
zone4_source = line1[9:].rstrip()
elif line1[:14] == " ZONE4 VOL ":
zone4_volume = int(line1[14:16])
return {
"is_on": is_on,
"source_name": source_name,
"volume": volume,
"mute_on": mute_on,
"party_mode_on": party_mode_on,
"info": info,
"rec_source": rec_source,
"zone2_source": zone2_source,
"zone2_volume": zone2_volume,
"zone3_source": zone3_source,
"zone3_volume": zone3_volume,
"zone4_source": zone4_source,
"zone4_volume": zone4_volume,
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_log(self, info_stream):\n fields={'name': \"nameshort:\\s+(?P<name>\\S[\\s\\S]+)$\",\n \"title\": \"episodeshort:\\s+(?P<title>\\S[\\s\\S]+)$\",\n \"full title\": \"title:\\s+(?P<full_title>\\S[\\s\\S]+)$\",\n \"description\":\"desc:\\s+(?P<description>\\S+.*)$\",\n \"episode name\":\"episodeshort:\\s+(?P<episode_name>\\S+.*)$\"}\n optional_fields={\n \"senum\": \"senum:\\s+(?P<senum>s\\d+e\\d+)$\",}\n fields_re={}\n [fields_re.update({key: re.compile(fields[key])}) for key in fields]\n [fields_re.update({key: re.compile(optional_fields[key])}) for key in optional_fields]\n \n blank_re=re.compile(\"^\\s+$\")\n show_info=[]\n current_show={}\n for line in info_stream:\n for key in fields_re:\n a_match=fields_re[key].match(line)\n if a_match:\n matched_field=a_match.groupdict()\n # insert field with leading and trailing whitespace removed\n [current_show.update({k:matched_field[k].strip()}) for k in matched_field]\n \n # end when all optional and mandatory fields are read *or* when all mandatory fields have been read and a blank line is found:\n if ( len(current_show.keys())==(len(fields.keys())+len(optional_fields))) or blank_re.match(line) and len(current_show.keys())== len(fields.keys()):\n show_info+=[current_show]\n #self.shows+=[episode_factory(current_show)]\n current_show={}\n return show_info",
"def _parse_ip_stats_link_show(raw_result):\n\n show_re = (\n r'.+?RX:.*?\\n'\n r'\\s*(?P<rx_bytes>\\d+)\\s+(?P<rx_packets>\\d+)\\s+(?P<rx_errors>\\d+)\\s+'\n r'(?P<rx_dropped>\\d+)\\s+(?P<rx_overrun>\\d+)\\s+(?P<rx_mcast>\\d+)'\n r'.+?TX:.*?\\n'\n r'\\s*(?P<tx_bytes>\\d+)\\s+(?P<tx_packets>\\d+)\\s+(?P<tx_errors>\\d+)\\s+'\n r'(?P<tx_dropped>\\d+)\\s+(?P<tx_carrier>\\d+)\\s+(?P<tx_collisions>\\d+)'\n )\n\n re_result = match(show_re, raw_result, DOTALL)\n result = None\n\n if (re_result):\n result = re_result.groupdict()\n for key, value in result.items():\n if value is not None:\n if value.isdigit():\n result[key] = int(value)\n\n return result",
"def parse_illumina_line(l, barcode_length, rev_comp_barcode,\r\n barcode_in_sequence=False):\r\n fields = l.strip().split(':')\r\n\r\n y_position_subfields = fields[4].split('#')\r\n y_position = int(y_position_subfields[0])\r\n sequence = fields[5]\r\n qual_string = fields[6]\r\n\r\n if barcode_in_sequence:\r\n barcode = sequence[:barcode_length]\r\n sequence = sequence[barcode_length:]\r\n qual_string = qual_string[barcode_length:]\r\n else:\r\n barcode = y_position_subfields[1][:barcode_length]\r\n\r\n if rev_comp_barcode:\r\n barcode = str(DNA(barcode).rc())\r\n\r\n result = {\r\n 'Full description': ':'.join(fields[:5]),\r\n 'Machine Name': fields[0],\r\n 'Channel Number': int(fields[1]),\r\n 'Tile Number': int(fields[2]),\r\n 'X Position': int(fields[3]),\r\n 'Y Position': y_position,\r\n 'Barcode': barcode,\r\n 'Full Y Position Field': fields[4],\r\n 'Sequence': sequence,\r\n 'Quality Score': qual_string}\r\n\r\n return result",
"def ExtractTrackInformation(lines):\n\n # The starting line should be something like ' TRACK 01 AUDIO'\n # and we want to create ``data = {'track': '1'}``\n # NB: Cue format has a 99 track limit\n data = {\"track\": CueMetadata.ExtractProperty(lines[0], \"TRACK\")[0:2].lstrip(\"0\")}\n\n # Parse the remaining lines for this track to find the track starting time\n # which is typically, but not necessarily, a line starting with ' INDEX 01'\n # Also want to pick up any extra tags in the block and store it in ``data``,\n # eg, the 'TITLE' field. Since not all fields are valid but remarks are\n # it's necessary to \"un-remark\" the lines starting with 'REM '\n times = {}\n for line in lines[1:]:\n if not line.startswith(' ' * 4):\n break\n line = line.strip()\n # Don't consider multi-artist albums\n if line.startswith(\"PERFORMER\"):\n continue\n line = line.replace(\"INDEX \", \"INDEX\") # Turn 'INDEX 01' into 'INDEX01', etc.\n line = line.replace(\"REM \", \"\") # Make remarks appear as valid tags\n name = line.split(\" \")[0]\n info = CueMetadata.ExtractProperty(line, name)\n if not info:\n continue\n name = name.lower()\n if \"INDEX\" in line:\n # Handle these time codes separately since there may be more than one\n times[name] = time.CueTimeToMKATime(info)\n else:\n data[name] = info\n # In CUE files, 'INDEX 00' is (typically) used for pre-gap and 'INDEX 01' denotes\n # the start of the actual track. Higher indices are possible, but rarely used,\n # typically for access to portions of songs. Here we want to prefer 'INDEX 01'\n # and use 'INDEX 00' if there is no 'INDEX 01' while ignoring higher indices.\n for idx in [\"index01\", \"index00\"]:\n if idx in times:\n time_code = idx\n break\n else:\n raise CueFormatError(f\"No valid time codes found for track {data['track']}\")\n data[\"start_time\"] = times[time_code]\n return data",
"def parse_show(self, raw_output):\n\n items = []\n table_ = output_parser.table(raw_output)\n for row in table_['values']:\n item = {}\n item[row[0]] = row[1]\n items.append(item)\n return items",
"def _parseMediaInfo(self):\n\t\t# the program path to MediaInfo should be set otherwise\n\t\tenv = {'path': env_mediainfo_dir}\n\t\t# the command for MediaInfo is a fixed command\n\t\tcom = [com_mediainfo, '-f', self.name]\n\t\t# invoke the external program\n\t\tproc = externalProcess(com, env)\n\t\t# read the programs output line by line and parse the output to a dictionary, obtaining all information\n\t\tinfo = {}\n\t\tstate = 'start'\n\t\tstream = 0\n\t\tfor line in proc.execute():\n\t\t\tlist = line.split(\":\")\n\t\t\t# recognize the sections ('General','Video','Audio','Text')\n\t\t\tif len(list) == 1 and list[0] != '':\n\t\t\t\tstate = str(list[0].lstrip().rstrip())\n\t\t\t\t# print \"New state: \", state\n\t\t\telif len(list) >= 2 and list[0] != '' and list[1] != '':\n\t\t\t\t# recognize several stream identifier\n\t\t\t\tif str(list[0].lstrip().rstrip()) == 'Stream identifier':\n\t\t\t\t\tstream = int(str(list[1].lstrip().rstrip()))\n\t\t\t\t\tcontinue\n\t\t\t\t# save the information to the dictionary\n\t\t\t\tkey = state + \"_\" + str(stream) + \"_\" + str(list[0].lstrip().rstrip())\n\t\t\t\twhile key in info.keys():\n\t\t\t\t\tkey += \"_\"\n\t\t\t\tinfo[key] = str(list[1].lstrip().rstrip())\n\t\treturn info",
"def _cfgshow_process(state, buf):\n global _cfgshow_state_eff, _cfgshow_state_exit\n\n operand, rl, next_state, t_buf, key = None, list(), None, buf, None\n\n # Clean up the line for processing\n for tl in _cfgshow_clean_buf:\n t_buf = t_buf.replace(tl[0], tl[1])\n tl = [b.strip() for b in gen_util.remove_duplicate_char(t_buf.strip(), ' ').split(' ') if len(b.strip()) > 0]\n\n # Figure out what the key, operand, and content is\n k = tl[0] if len(tl) > 0 else None\n if k is not None and k in _cfgshow_operand_tbl:\n operand = tl[1] if len(tl) > 1 else None\n rl = tl[2:] if len(tl) > 2 else list()\n else:\n k, operand, rl = None, None, tl\n\n # Figure out what the next state is\n if len(tl) == 0:\n next_state = _cfgshow_state_exit if state == _cfgshow_state_eff else _cfgshow_state_eff\n elif 'no configuration defined' in buf:\n next_state = _cfgshow_state_eff\n elif 'no configuration in effect' in buf:\n next_state = _cfgshow_state_exit\n elif operand is not None and operand in _cfgshow_operand_tbl:\n next_state = _cfgshow_operand_tbl[operand]['state']\n\n return next_state, k, operand, rl",
"def display_fields(self):\r\n\r\n field_text = self.show_fields()\r\n field_text_list = field_text.split(EOL)[0:-1]\r\n\r\n def fld_format (x_temp):\r\n\r\n x_temp = x_temp.split(COLON)[0], x_temp.split(COLON)[1]\r\n\r\n \"\"\"formats output of the list of search results\"\"\"\r\n\r\n if not isinstance(x_temp[1],str):\r\n shown_indexes = rangelist.range_find([int(Index(a_temp))\r\n for a_temp in x_temp[1]],reduce=True)\r\n else:\r\n shown_indexes = x_temp[1]\r\n\r\n if len(shown_indexes) < 20:\r\n return (abridge(x_temp[0]).replace(VERTLINE,SLASH)\r\n +VERTLINE\r\n +shown_indexes)\r\n\r\n\r\n returnlist = []\r\n sp_temp = rangelist.split_up_range(shown_indexes)\r\n\r\n\r\n returnlist.append(x_temp[0].replace(VERTLINE,SLASH)[0:min([60,len(x_temp[0])])]\r\n +VERTLINE+sp_temp[0])\r\n for s_temp in sp_temp[1:]:\r\n returnlist.append(VERTLINE+s_temp)\r\n\r\n return returnlist\r\n\r\n show_list(field_text_list,\r\n alerts.FIELDS[3:],0,40,\r\n func=fld_format,\r\n present=True,\r\n display=display)",
"def read_uef_details(chunks):\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0x0])\n\n\tif pos == None:\n\n\t\toriginator = 'Unknown'\n\n\telif chunk[1] == '':\n\n\t\toriginator = 'Unknown'\n\telse:\n\t\toriginator = chunk[1]\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0x5])\n\n\tif pos == None:\n\n\t\tmachine, keyboard = 'Unknown', 'Unknown'\n\n\telse:\n\n\t\tmachines = ('BBC Model A', 'Electron', 'BBC Model B', 'BBC Master')\n\t\tkeyboards = ('Any layout', 'Physical layout', 'Remapped')\n\n\t\tmachine = ord(chunk[1][0]) & 0x0f\n\t\tkeyboard = (ord(chunk[1][0]) & 0xf0) >> 4\n\n\t\tif machine < len(machines):\n\t\t\tmachine = machines[machine]\n\t\telse:\n\t\t\tmachine = 'Unknown'\n\n\t\tif keyboard < len(keyboards):\n\t\t\tkeyboard = keyboards[keyboard]\n\t\telse:\n\t\t\tkeyboard = 'Unknown'\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0xff00])\n\n\tif pos == None:\n\n\t\temulator = 'Unknown'\n\n\telif chunk[1] == '':\n\n\t\temulator = 'Unknown'\n\telse:\n\t\temulator = chunk[1]\n\n\n\t# Remove trailing null bytes\n\twhile originator[-1] == '\\000':\n\n\t\toriginator = originator[:-1]\n\n\twhile emulator[-1] == '\\000':\n\n\t\temulator = emulator[:-1]\n\n\tfeatures = ''\n\tif find_next_chunk(chunks, 0, [0x1])[0] != None:\n\t\tfeatures = features + '\\n' + 'Instructions'\n\tif find_next_chunk(chunks, 0, [0x2])[0] != None:\n\t\tfeatures = features + '\\n' + 'Credits'\n\tif find_next_chunk(chunks, 0, [0x3])[0] != None:\n\t\tfeatures = features + '\\n' + 'Inlay'\n\n\treturn originator, machine, keyboard, emulator, features",
"def _parse_output_status_details(lines):\n details = list()\n detail_indicator = re.compile(\"^--\")\n for line in lines:\n line = line.rstrip()\n if re.match(detail_indicator, line):\n details.append(line)\n else:\n break\n return details",
"def info(self):\n unparsed = [x for x in self.run_command('info') if x != '|']\n try:\n streams = [x.split(' ')[2] for x in [x for x in unparsed if x[0] == '+'][:-1]]\n except:\n raise ParseError(\"Could not get streams.\")\n out_list = []\n start = 1\n for stream in streams:\n cur_stream = {'Stream': stream}\n first_char = '|'\n while first_char == '|':\n cur_stream[unparsed[start].split(': ')[0][2:]] = ''.join(unparsed[start].split(': ')[1:])\n start += 1\n first_char = unparsed[start][0]\n start += 1\n out_list.append(cur_stream)\n return out_list",
"def get_display_info(self):\n return self.display_info",
"def show(self, update, context):\n\n # TODO: add show how long till bus\n message = update.message.text.lower().split(\" \")\n user = self.User(update)\n if len(message) == 1:\n output = \"hey looks like you still don't know how to use this command\\n\" \\\n \"don't worry, I'll teach you :)\\n\" \\\n \"here's a list of what you can do:\\n\" \\\n \"/show requests - this will show you your pending requests\\n\" \\\n \"/show lines - this will show you the lines that are available\\n\" \\\n \"/show buses for line {number} - this will show you the locations of the buses in the line you've specified\"\n elif message[1] == \"lines\":\n print(\"showing lines\")\n available_lines = self.bus_controller.show_available_lines()\n if available_lines == \"None\":\n output = \"there are currently no available lines\"\n else:\n output = f\"the currently available lines are: {str(available_lines)}\"\n elif message[1] == \"requests\":\n user = self.__find_matching_user(user)\n if len(user.stations) == 0:\n output = \"You don't have any pending requests\"\n else:\n output = \"Your pending requests:\\n\"\n for station in user.stations:\n output += f\"{station}\\n\"\n output = output[:-1:]\n elif message[1:-1:] == ['buses', 'for', 'line']:\n if not message[4].isnumeric(): # checks that the value is a number\n output = f\"{message[4]} isn't a number i support, therefor I can't help you.\"\n elif not (int(message[4]) > 0 and int(message[4]) <= 999): # checks that the number is within limits\n output = f\"sorry, {message[4]} is out of range, we only have lines within the range 1-999\"\n else: # gets here if the number is legit and everything is good\n line_num = int(message[4])\n if not self.bus_controller.check_line(line_num):\n output = \"there are no available buses for that line\"\n else:\n output = f\"the locations of the buses that are available for that line are: \\n\" \\\n f\"{self.bus_controller.show_buses_for_line(line_num)}\"\n else:\n print(message[1:-1:])\n output = \"couldn't recognise this command, try /show for the list of options you have for this command\"\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)",
"def split_show_channel(line):\n show,channel=line.split(\",\")\n return (show, channel)",
"def parseMISPreamble(lines, flight, summarize=False):\n # Attempt to parse stuff from the Flight Plan ID bit. Fancy logic for\n # grabbing the fancy name, which didn't always exist\n try:\n flightid = regExper(lines, 'Flight Plan ID', howmany=1,\n keytype='key:val')\n fid = keyValuePair(flightid.group(), \"Flight Plan ID\", dtype=str)\n fid = fid.strip().split(\"_\")\n if fid[1] != '':\n try:\n flight.instrument = flight.instdict[fid[1].strip()]\n except:\n flight.instrument = ''\n if fid[2] != '':\n flight.fancyname = fid[2]\n except:\n fid = ['', '', '']\n\n # Grab the filename and date of MIS file creation\n filename = regExper(lines, 'Filename', howmany=1, keytype='key:val')\n flight.filename = keyValuePair(filename.group(), \"Filename\", dtype=str)\n\n # Note: the saved key is a timestamp, with a space in between stuff.\n saved = regExper(lines, 'Saved', howmany=1, keytype='key:dtime')\n flight.saved = keyValuePairDT(saved.group(), \"Saved\")\n\n # Search for two airports; first is takeoff, second is landing\n airports = regExper(lines, 'Airport', howmany=2, keytype='key:val')\n if airports is not None and len(airports) == 2:\n flight.origin = keyValuePair(airports[0].group(),\n \"Airport\", dtype=str)\n flight.destination = keyValuePair(airports[1].group(),\n \"Airport\", dtype=str)\n elif len(airports) != 2 or airports is None:\n print(\"WARNING: Couldn't find departure/arrival information!\")\n flight.origin = \"Unknown\"\n flight.destination = \"Unknown\"\n\n runway = regExper(lines, 'Runway', howmany=1, keytype='key:val')\n flight.drunway = keyValuePair(runway.group(), \"Runway\", dtype=str)\n\n legs = regExper(lines, 'Legs', howmany=1, keytype='key:val')\n flight.nlegs = keyValuePair(legs.group(), \"Legs\", dtype=int)\n\n mach = regExper(lines, 'Mach', howmany=1, keytype='key:val')\n flight.mach = keyValuePair(mach.group(), \"Mach\", dtype=float)\n\n takeoff = regExper(lines, 'Takeoff', howmany=1, keytype='key:dtime')\n flight.takeoff = keyValuePairDT(takeoff.group(), \"Takeoff\")\n\n obstime = regExper(lines, 'Obs Time', howmany=1, keytype='key:val')\n flight.obstime = keyValuePairTD(obstime.group(), \"Obs Time\")\n\n flttime = regExper(lines, 'Flt Time', howmany=1, keytype='key:val')\n flight.flighttime = keyValuePairTD(flttime.group(), \"Flt Time\")\n\n landing = regExper(lines, 'Landing', howmany=1, keytype='key:dtime')\n flight.landing = keyValuePairDT(landing.group(), \"Landing\")\n\n # NOTE: I hate fp. It sometimes doesn't write sunrise info.\n sunset = regExper(lines, 'Sunset', howmany=1, keytype='key:val')\n try:\n flight.sunset = keyValuePairTD(sunset.group(), \"Sunset\")\n except:\n flight.sunset = \"NONE\"\n\n sunrise = regExper(lines, 'Sunrise', howmany=1, keytype='key:val')\n try:\n flight.sunrise = keyValuePairTD(sunrise.group(), \"Sunrise\")\n except:\n flight.sunrise = \"NONE\"\n\n if summarize is True:\n print(flight.summarize())\n\n return flight",
"def adjust_display(self, display: typing.List[typing.List[str]]):",
"def _extract(self, sync=None, chmap=None, video_path=None,\n display=False, extrapolate_missing=True):\n fpga_times = extract_camera_sync(sync=sync, chmap=chmap)\n count, (*_, gpio) = raw.load_embedded_frame_data(self.session_path, self.label)\n\n if gpio is not None and gpio['indices'].size > 1:\n _logger.info('Aligning to audio TTLs')\n # Extract audio TTLs\n audio = _get_sync_fronts(sync, chmap['audio'])\n _, ts = raw.load_camera_ssv_times(self.session_path, self.label)\n \"\"\"\n NB: Some of the audio TTLs occur very close together, and are therefore not\n reflected in the pin state. This function removes those. Also converts frame times to\n FPGA time.\n \"\"\"\n gpio, audio, ts = groom_pin_state(gpio, audio, ts, display=display)\n \"\"\"\n The length of the count and pin state are regularly longer than the length of\n the video file. Here we assert that the video is either shorter or the same\n length as the arrays, and we make an assumption that the missing frames are\n right at the end of the video. We therefore simply shorten the arrays to match\n the length of the video.\n \"\"\"\n if video_path is None:\n filename = f'_iblrig_{self.label}Camera.raw.mp4'\n video_path = self.session_path.joinpath('raw_video_data', filename)\n # Permit the video path to be the length for development and debugging purposes\n length = video_path if isinstance(video_path, int) else get_video_length(video_path)\n _logger.debug(f'Number of video frames = {length}')\n if count.size > length:\n count = count[:length]\n else:\n assert length == count.size, 'fewer counts than frames'\n raw_ts = fpga_times[self.label]\n timestamps = align_with_audio(raw_ts, audio, gpio, count,\n display=display, extrapolate_missing=extrapolate_missing)\n else:\n _logger.warning('Alignment by wheel data not yet implemented')\n timestamps = fpga_times[self.label]\n\n return timestamps",
"def displayInfo(self):\n # clear stdout for a smoother display\n # os.system('cls' if os.name=='nt' else 'clear')\n\n #print(\"=========== Status ============\")\n # print(\n # \"speed: \" + str(self.speed) +\n # \"\\nangle: \" + str(self.steering_angle) +\n # \"\\nsign: \" + str(self.detected_sign) +\n # \"\\nlane lines: \" + str(self.lane_lines) +\n # \"\\nintersection line flag: \" + str(self.intersection_line) +\n # \"\\ncurrent state label: \" + str(self.currentStateLabel) +\n # \"\\ncurrent states: \" + str(self.currentState)\n #)",
"def __str__(self):\n lines = []\n # set hsp info line\n statline = []\n # evalue\n evalue = getattr_str(self, \"evalue\", fmt=\"%.2g\")\n statline.append(\"evalue \" + evalue)\n # bitscore\n bitscore = getattr_str(self, \"bitscore\", fmt=\"%.2f\")\n statline.append(\"bitscore \" + bitscore)\n lines.append(\"Quick stats: \" + \"; \".join(statline))\n\n if len(self.fragments) == 1:\n return \"\\n\".join(\n [self._str_hsp_header(), \"\\n\".join(lines), self.fragments[0]._str_aln()]\n )\n else:\n lines.append(\n \" Fragments: %s %s %s %s\" % (\"-\" * 3, \"-\" * 14, \"-\" * 22, \"-\" * 22)\n )\n pattern = \"%16s %14s %22s %22s\"\n lines.append(pattern % (\"#\", \"Span\", \"Query range\", \"Hit range\"))\n lines.append(pattern % (\"-\" * 3, \"-\" * 14, \"-\" * 22, \"-\" * 22))\n for idx, block in enumerate(self.fragments):\n # set hsp line and table\n # alignment span\n aln_span = getattr_str(block, \"aln_span\")\n # query region\n query_start = getattr_str(block, \"query_start\")\n query_end = getattr_str(block, \"query_end\")\n query_range = \"[%s:%s]\" % (query_start, query_end)\n # max column length is 20\n query_range = (\n query_range[:20] + \"~]\" if len(query_range) > 22 else query_range\n )\n # hit region\n hit_start = getattr_str(block, \"hit_start\")\n hit_end = getattr_str(block, \"hit_end\")\n hit_range = \"[%s:%s]\" % (hit_start, hit_end)\n hit_range = hit_range[:20] + \"~]\" if len(hit_range) > 22 else hit_range\n # append the hsp row\n lines.append(pattern % (str(idx), aln_span, query_range, hit_range))\n\n return self._str_hsp_header() + \"\\n\" + \"\\n\".join(lines)",
"def get_metadata(diagnostics_dir, verbose=False):\n metafile = find_metadata_file(diagnostics_dir, 'mslist-2*txt', verbose=False)\n\n with open(metafile, 'r') as mslist_file:\n lines = mslist_file.readlines()\n\n nBlocks = 6 # these are the number of correlator cards (PILOT survey value)\n \n obs_metadata = ObservationMetadata()\n\n obs_date = 'Observed from'\n fields = 'Fields'\n code = 'Code'\n duration = 'Total elapsed time'\n antenna = 'antennas'\n frame = 'Frame'\n \n field_list = []\n\n for i in range(len(lines)):\n line = lines[i]\n if line.find(antenna) >=0:\n toks = line.split()\n obs_metadata.n_ant = toks[5][-2:]\n if line.find(obs_date) >=0:\n toks = line.split()\n obs_metadata.start_obs_date = toks[6]\n obs_metadata.end_obs_date = toks[8]\n if line.find(duration) >=0:\n toks = line.split()\n obs_metadata.tobs = float(toks[10]) # in second\n\n # Field details\n if line.find(fields) >=0:\n toks = line.split()\n obs_metadata.num_fields = int(toks[-1])\n\n if line.find(code) >= 0:\n for j in range(obs_metadata.num_fields):\n field_metadata = FieldMetadata()\n field_line = lines[i+j+1]\n toks = field_line.split()\n field_metadata.name = toks[5]\n field_metadata.ra = toks[6][:-5]\n field_metadata.dec = toks[7][:-4]\n field_metadata.num_rows = int(toks[9])\n obs_metadata.fields.append(field_metadata)\n\n if line.find(frame) >= 0:\n next_line = lines[i+1]\n toks = next_line.split()\n obs_metadata.total_obs_bw = float(toks[10])*nBlocks/1000.0 # kHz to MHz \n \n return obs_metadata #n_ant, start_obs_date, end_obs_date, tobs, field, ra, dec, total_obs_bw",
"def analyze_line(line, show_pin=False):\n line = line.rstrip()\n\n # Filter PIN code if it was not asked to display it\n text_line = line\n if not show_pin:\n m = re.match(r'([0-9]* APDU: [0-9A-F]{2} 20 [0-9A-F]{2} (80|81|82))', line)\n if m:\n text_line = m.group(1) + ' ********'\n print(\"{}{}{}\".format(COLOR_YELLOW, text_line, COLOR_NORM))\n\n # Match an APDU request from software to hardware\n m = re.match(r'[0-9]* APDU: ([0-9A-F ]+)$', line)\n if m is not None:\n apdu = binascii.unhexlify(m.group(1).replace(' ', ''))\n if len(apdu) < 4:\n return\n cl, ins, p1, p2 = apdu[:4]\n param = (p1 << 8) | p2\n\n try:\n oins = INS(ins)\n except ValueError:\n print(\" Unknown instruction {:#x}\".format(ins))\n return\n\n if cl == 0x10: # Command chaining\n print(\" => Command chaining {} (0x{:04x})[{}] {}\".format(oins.name, param, len(apdu) - 4, repr(apdu[4:])))\n return\n if cl != 0:\n print(\" Unknown class\")\n return\n\n if oins == INS.ISO_SELECT_FILE and param == 0x400:\n length = apdu[4]\n if len(apdu) == 5 + length or (len(apdu) == 6 + length and apdu[-1] == 0):\n prefix = apdu[5:5 + length]\n selected_aids = sorted([aid for aid in KNOWN_APPLETS.keys() if aid[:length] == prefix[:len(aid)]])\n if selected_aids:\n for aid in selected_aids:\n print(\" => SELECT Application {} ({} for {})\".format(\n KNOWN_APPLETS[aid], bin2aid(aid), bin2aid(prefix)))\n else:\n print(\" => SELECT unknown AID {}\".format(bin2aid(prefix)))\n return\n elif oins == INS.ISO_VERIFY:\n if len(apdu) > 4:\n length = apdu[4]\n if len(apdu) == 5 + length and param in (0x81, 0x82):\n print(\" => OpenPGP VERIFY PIN {}: {}\".format(\n param - 0x80, repr(apdu[5:]) if show_pin else '********'))\n return\n if len(apdu) == 5 + length and param == 0x80:\n print(\" => PIV VERIFY PIN {}: {}\".format(\n param - 0x80, repr(apdu[5:]) if show_pin else '********'))\n return\n elif len(apdu) == 4 and param == 0x80:\n print(\" => PIV VERIFY: is PIN {} ok?\".format(param - 0x80))\n return\n elif oins == INS.OPGP_GET_DATA:\n if param == 0x4f:\n print(\" => OpenPGP GET DATA: Application Identifier (D276:0001:2401:...)\")\n return\n if param == 0x5e:\n print(\" => OpenPGP GET DATA: Login data\")\n return\n if param == 0x65:\n print(\" => OpenPGP GET DATA: Cardholder Related Data (TLV, 5B=name, 5F2D=Language, 5F35=Sex)\")\n return\n if param == 0x6e:\n print(\" => OpenPGP GET DATA: Application Related Data (TLV)\")\n return\n if param == 0x7a:\n print(\" => OpenPGP GET DATA: Security support template (TLV, 93=Digital signature counter)\")\n return\n if param == 0xc4:\n print(\" => OpenPGP GET DATA: PW Status Bytes (pw1 status, {pw1, rc, pw3} max length, {pw1, rc, pw3} Pin tries)\") # noqa\n return\n if param == 0x0101:\n print(\" => OpenPGP GET DATA: DO 0101\")\n return\n if param == 0x0102:\n print(\" => OpenPGP GET DATA: DO 0102\")\n return\n if param == 0x5f50:\n print(\" => OpenPGP GET DATA: URL\")\n return\n if param == 0x5f52:\n print(\" => OpenPGP GET DATA: Historical bytes\")\n return\n if param == 0x7f21:\n print(\" => OpenPGP GET DATA: Cardholder Certificate\")\n return\n elif oins == INS.OPGP_GENERATE_ASYMMETRIC_KEY_PAIR and param == 0x8100 and len(apdu) > 4:\n length = apdu[4]\n if len(apdu) == 6 + length:\n print(\" => OpenPGP Get public key 0x{}\".format(binascii.hexlify(apdu[5:5 + length]).decode('ascii')))\n return\n elif oins == INS.OPGP_PERFORM_SECURITY_OPERATION:\n if param == 0x8086:\n print(\" => OpenPGP Decipher (...)\")\n return\n if param == 0x9E9A:\n print(\" => OpenPGP Compute Digital Signature (...)\")\n return\n 
elif oins == INS.PIV_GET_DATA and param == 0x3fff:\n length = apdu[4]\n if len(apdu) == 6 + length:\n data_mark = apdu[5:5 + length]\n # https://github.com/aosm/Tokend/blob/master/PIV/PIVDefines.h\n # OpenSC: card-piv.c\n desc = 'TLV {}'.format(binascii.hexlify(data_mark))\n if data_mark == b'\\x5c\\x03\\x5f\\xc1\\x01':\n desc = 'X.509 Certificate for Card Authentication (2.16.840.1.101.3.7.2.5.0)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x02':\n desc = 'Card Holder Unique Identifier (2.16.840.1.101.3.7.2.48.0)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x03':\n desc = 'Card Holder Fingerprints (2.16.840.1.101.3.7.2.96.16)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x05':\n desc = 'X.509 Certificate for PIV Authentication (2.16.840.1.101.3.7.2.1.1)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x06':\n desc = 'Security Object (2.16.840.1.101.3.7.2.144.0)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x07':\n desc = 'Card Capability Container (2.16.840.1.101.3.7.1.219.0)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x08':\n desc = 'Cardholder Facial Images (2.16.840.1.101.3.7.2.96.48)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x09':\n desc = 'Printed Information (2.16.840.1.101.3.7.2.48.1)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x0a':\n desc = 'X.509 Certificate for Digital Signature (2.16.840.1.101.3.7.2.1.0)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x0b':\n desc = 'X.509 Certificate for Key Management (2.16.840.1.101.3.7.2.1.2)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x0c':\n desc = 'Key History Object (2.16.840.1.101.3.7.2.96.96)'\n elif data_mark == b'\\x5c\\x03\\x5f\\xc1\\x21':\n desc = 'Cardholder Iris Images (2.16.840.1.101.3.7.2.16.21)'\n elif data_mark == b'\\x5c\\x01\\x7e':\n desc = 'Discovery Object (2.16.840.1.101.3.7.2.96.80)'\n print(\" => PIV GET DATA (retsize {}) {}\".format(apdu[-1], desc))\n return\n elif oins == INS.PIV_GENERAL_AUTHENTICATE and p1 == 0:\n length = apdu[4]\n # magic value from https://github.com/OpenSC/OpenSC/blob/645780e6d4c4e70fbd75eec0c5cd2c3a7bd81879/src/libopensc/card-piv.c#L1894 # noqa\n if len(apdu) == 6 + length and apdu[5:5 + length] == b'\\x7c\\x02\\x81\\x00':\n # reqdata = apdu[5:5 + length]\n print(\" => PIV Get challenge (keyref {:#x})\".format(p2))\n # Response: 7C 0A 81 08 79 B5 E9 B1 57 F9 81 89\n # =>\n # 7C Tag \"Dynamic Authentication Template\"\n # 0A Length (10 bytes)\n # 81 08 Challenge, 8 bytes\n # 79 B5 E9 B1 57 F9 81 89 : challenge\n return\n elif oins == INS.PIV_GENERAL_AUTHENTICATE and p1 == 0x07:\n length = apdu[4]\n if len(apdu) == 6 + length:\n print(\" => PIV GENERAL AUTHENTICATE RSA 2048 (keyref {:#x}) (challenge...)\".format(p2))\n return\n print(\" => {} ({:#x}, {:#x} = {:#x}) {}\".format(oins.name, p1, p2, param, repr(apdu[4:])))\n\n # Match an Status Word response from hardware to software\n m = re.match(r'[0-9]* SW: ([0-9A-F ]+)$', line)\n if m is not None:\n swdata = binascii.unhexlify(m.group(1).replace(' ', ''))\n if len(swdata) < 2:\n return\n sw1, sw2 = swdata[-2:]\n sw = (sw1 << 8) | sw2\n swdata = swdata[:-2]\n if sw == 0x9000:\n if swdata:\n print(\"{} <= OK: {}{}\".format(COLOR_GREEN, repr(swdata), COLOR_NORM))\n else:\n print(\"{} <= OK{}\".format(COLOR_GREEN, COLOR_NORM))\n elif sw1 == 0x61:\n print(\"{} <= SW Bytes remaining {}: {}{}\".format(COLOR_GREEN, sw2, repr(swdata), COLOR_NORM))\n elif (sw & 0xfff0) == 0x63c0 and not swdata:\n print(\" <= SW: PIN remaining tries: {}\".format(sw & 0xf))\n elif sw == 0x6700 and not swdata:\n print(\"{} <= SW: Wrong length{}\".format(COLOR_RED, 
COLOR_NORM))\n elif sw == 0x6982 and not swdata:\n print(\"{} <= SW: Security condition not satisfied{}\".format(COLOR_RED, COLOR_NORM))\n elif sw == 0x6a81 and not swdata:\n print(\"{} <= SW: Function not supported{}\".format(COLOR_RED, COLOR_NORM))\n elif sw == 0x6a82 and not swdata:\n print(\"{} <= SW: File not found{}\".format(COLOR_RED, COLOR_NORM))\n elif sw == 0x6b00 and not swdata:\n print(\"{} <= SW: Wrong parameters P1-P2{}\".format(COLOR_RED, COLOR_NORM))\n elif sw == 0x6d00 and not swdata:\n print(\"{} <= SW: Instruction not supported or invalid{}\".format(COLOR_RED, COLOR_NORM))\n elif sw == 0x6e00 and not swdata:\n print(\"{} <= SW: Class not supported{}\".format(COLOR_RED, COLOR_NORM))\n else:\n print(\"{} <= SW {:04x}: {}{}\".format(COLOR_RED, sw, repr(swdata), COLOR_NORM))",
"def display_line(self):\n line = self.line\n hosts = self.hosts\n if not self.check_line():\n return\n self.msg(\"|wThis line is hosted by:|n %s\" % \", \".join(str(ob) for ob in hosts))\n self.msg(\"|wCurrent line order:|n %s\" % \", \".join(str(ob) for ob in line))",
"def __format_display(file_contents: str) -> str:\n\n new_file_contents = file_contents\n\n for match in re.finditer(COBOL_FORMAT_DISPLAY_REGEX, file_contents):\n match_str = match_to_str(match)\n\n # Skip \"DISPLAY\" statements within \"IF-ELSE\" blocks\n if re.search(re.compile(r'\\s+ELSE\\s+'), match_str) is not None:\n continue\n\n new_str = match_str.replace('\\n', ' ')\n new_file_contents = new_file_contents.replace(match_str, new_str)\n\n return new_file_contents",
"def parse_mochad_line(self, line):\n # bail out unless it's an incoming RFSEC message\n if line[15:23] == 'Rx RFSEC':\n\n # decode receive RFSEC message. format is either:\n # 09/22 15:39:07 Rx RFSEC Addr: 21:26:80 Func: Contact_alert_min_DS10A\n # ~ or ~\n # 09/22 15:39:07 Rx RFSEC Addr: 0x80 Func: Motion_alert_SP554A\n line_list = line.split(' ')\n addr = line_list[5]\n func = line_list[7]\n\n func_dict = self.decode_func(func)\n\n return addr, {'func': func_dict}, 'security'\n\n# elif line[15:23] == 'Tx RFSEC':\n\n # decode send RFSEC message. format is either:\n # 09/22 15:39:07 Tx RFSEC Addr: 21:26:80 Func: Contact_alert_min_DS10A\n # ~ or ~\n # 09/22 15:39:07 Tx RFSEC Addr: 0x80 Func: Motion_alert_SP554A\n# line_list = line.split(' ')\n# addr = line_list[5]\n# func = line_list[7]\n#\n# func_dict = self.decode_func(func)\n#\n# return addr, {'func': func_dict}, 'trigger'\n\n elif line[15:20] == 'Rx RF':\n\n # decode receive RF message. format is:\n # 02/13 23:54:28 Rx RF HouseUnit: B1 Func: On\n line_list = line.split(' ')\n house_code = line_list[5];\n house_func = line_list[7]\n\n return house_code, {'func': house_func}, 'radio'\n\n elif line[15:20] == 'Rx PL':\n \n # decode receive PL message. format is:\n # 02/13 23:54:28 Rx PL HouseUnit: A1\n # 02/13 23:54:28 Rx PL House: A Func: On\n line_list = line.split(' ')\n if line[21:27] == 'HouseU':\n house_code = line_list[5]\n with open ('/root/.house_code', 'wb') as f:\n pickle.dump(house_code, f)\n else:\n house_func = line_list[7]\n with open ('/root/.house_code', 'rb') as f:\n house_code = pickle.load(f)\n return house_code, {'func': house_func}, 'powerline'\n \n elif line[15:20] == 'Tx PL':\n \n # decode send RF/PL message. format is:\n # 02/13 23:54:28 Tx PL HouseUnit: A1\n # 02/13 23:54:28 Tx PL House: A Func: On\n line_list = line.split(' ')\n if line[21:27] == 'HouseU':\n house_code = line_list[5]\n with open ('/root/.house_code', 'wb') as f:\n pickle.dump(house_code, f)\n else:\n house_func = line_list[7]\n with open ('/root/.house_code', 'rb') as f:\n house_code = pickle.load(f)\n return house_code, {'func': house_func}, 'button'\n \n return '', ''",
"def get_info(line, bit_thresh):\n if len(line) >= 18: # output is from cmsearch\n id, model, bit, inc = line[0].split()[0], line[2], float(line[14]), line[16]\n sstart, send, strand = int(line[7]), int(line[8]), line[9]\n mstart, mend = int(line[5]), int(line[6])\n elif len(line) == 9: # output is from ssu-cmsearch\n if bit_thresh == 0:\n print('# ssu-cmsearch does not include a model-specific inclusion threshold, ', file=sys.stderr)\n print('# please specify a bit score threshold', file=sys.stderr)\n exit()\n id, model, bit = line[1].split()[0], line[0], float(line[6])\n inc = '!' # this is not a feature of ssu-cmsearch\n sstart, send = int(line[2]), int(line[3])\n mstart, mend = int(4), int(5)\n if send >= sstart:\n strand = '+'\n else:\n strand = '-'\n else:\n print('# unsupported hmm format:', file=sys.stderr)\n print('# provide tabular output from ssu-cmsearch and cmsearch supported', file=sys.stderr)\n exit()\n coords = [sstart, send]\n sstart, send = min(coords), max(coords)\n mcoords = [mstart, mend]\n mstart, mend = min(mcoords), max(mcoords)\n return id, model, bit, sstart, send, mstart, mend, strand, inc",
"def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info",
"def _parse_track_line(self, inp):\n self.metadata = {}\n ltmp = shlex.split(inp.strip(\"\\n\"))\n for item in ltmp:\n k, v = item.split(\"=\")\n self.metadata[k] = v\n\n track_type = self.metadata.get(\"type\", None)\n if track_type is not None:\n if track_type in bed_x_formats:\n self.printer.write(\n \"Found track type '%s' in track definition line. Assuming extra columns follow UCSC definitions.\"\n % track_type\n )\n if self.extra_columns == 0:\n self.extra_columns = bed_x_formats[track_type]\n elif self.extra_columns != bed_x_formats[track_type]:\n my_columns = self._get_extra_column_names()\n track_format_columns = \",\".join([X[0] for X in bed_x_formats[track_type]])\n warn(\"Extra columns specified by %s track type declaration (%s) don't match those specified by user (%s). Using those specified by user.\" %\\\n (track_type,track_format_columns,my_columns),FileFormatWarning)\n self.metadata[\"type\"] = \"custom\"\n else:\n self.printer.write(\"Found track type '%s' in track definition line.\" % track_type)",
"def parse(lines): \n replied = len(lines)\n avg_delay, lost = 0, 0\n qos = 1.0\n \n if replied != 0:\n for line in lines:\n line.strip() #remove leading and trailing spaces\n \"\"\"\n Each line has the following fields:\n [status code] [reply time (seconds since epoch)] [source IP] [source url] [source query] [serving delay]\n \n e.g.:\n 200 1296756182 192.168.10.2 /home.php ?N=192 11045\n 200 1296756183 192.168.10.2 /home.php ?N=192 230036\n 200 1296756183 192.168.10.2 /home.php ?N=192 230684\n \"\"\"\n status, time, sourceIP, url, query, delay = line.split()\n \n time = int(time)\n delay = int(delay)\n \n if delay > DEADLINE:\n lost += 1\n avg_delay += delay\n avg_delay /= replied\n qos = (replied - lost) / replied\n\n return {'replied': replied, 'delay' : avg_delay, 'qos' : qos, 'lost': lost}",
"def parseLinkInfoData(self, confStr):\n\n data = []\n if not confStr:\n return data\n\n specs = confStr.split(';')\n for spec in specs:\n if not spec:\n continue\n splits = spec.split(',')\n splits = [s.strip() for s in splits]\n ls = {}\n ls['src'] = splits[0]\n ls['dst'] = splits[1]\n ls['bw'] = splits[2]\n ls['delay'] = splits[3]\n ls['loss'] = splits[4]\n ls['max_queue_size'] = splits[5]\n ls['use_htb'] = splits[6]\n data.append(ls)\n return data",
"def parse_cutadapt_logs(self, f):\n fh = f['f']\n regexes = {\n 'bp_processed': \"Total basepairs processed:\\s*([\\d,]+) bp\",\n 'bp_written': \"Total written \\(filtered\\):\\s*([\\d,]+) bp\",\n 'quality_trimmed': \"Quality-trimmed:\\s*([\\d,]+) bp\",\n 'r_processed': \"Total reads processed:\\s*([\\d,]+)\",\n 'r_with_adapters': \"Reads with adapters:\\s*([\\d,]+)\"\n }\n s_name = None\n for l in fh:\n # New log starting\n if l.startswith('This is cutadapt'):\n s_name = None\n \n # Get sample name from end of command line params\n if l.startswith('Command line parameters'):\n s_name = l.split()[-1]\n s_name = self.clean_s_name(s_name, f['root'])\n if s_name in self.cutadapt_data:\n log.debug(\"Duplicate sample name found! Overwriting: {}\".format(s_name))\n self.cutadapt_data[s_name] = dict()\n self.cutadapt_length_counts[s_name] = dict()\n self.cutadapt_length_exp[s_name] = dict()\n self.cutadapt_length_obsexp[s_name] = dict()\n \n if s_name is not None:\n # Search regexes for overview stats\n for k, r in regexes.items():\n match = re.search(r, l)\n if match:\n self.cutadapt_data[s_name][k] = int(match.group(1).replace(',', ''))\n\n if 'length' in l and 'count' in l and 'expect' in l:\n # Nested loop to read this section while the regex matches\n for l in fh:\n r_seqs = re.search(\"^(\\d+)\\s+(\\d+)\\s+([\\d\\.]+)\", l)\n if r_seqs:\n a_len = int(r_seqs.group(1))\n self.cutadapt_length_counts[s_name][a_len] = int(r_seqs.group(2))\n self.cutadapt_length_exp[s_name][a_len] = float(r_seqs.group(3))\n if float(r_seqs.group(3)) > 0:\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2)) / float(r_seqs.group(3))\n else:\n # Cheating, I know. Infinity is difficult to plot.\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2))\n else:\n break\n \n # Calculate a few extra numbers of our own\n for s_name in self.cutadapt_data.keys():\n if 'bp_processed' in self.cutadapt_data[s_name] and 'bp_written' in self.cutadapt_data[s_name]:\n self.cutadapt_data[s_name]['percent_trimmed'] = (float(self.cutadapt_data[s_name]['bp_processed'] - self.cutadapt_data[s_name]['bp_written']) / self.cutadapt_data[s_name]['bp_processed']) * 100"
] | [
"0.5667453",
"0.5577402",
"0.55093837",
"0.539945",
"0.5389532",
"0.5278722",
"0.52660465",
"0.5256899",
"0.52339995",
"0.513899",
"0.5048906",
"0.5005927",
"0.49827233",
"0.49649656",
"0.4956714",
"0.49489126",
"0.49478963",
"0.49371415",
"0.49156973",
"0.4906018",
"0.48803487",
"0.48572907",
"0.48564512",
"0.48416102",
"0.4838143",
"0.48327172",
"0.4815201",
"0.47992685",
"0.47952208",
"0.4782918"
] | 0.7945108 | 0 |
Returns the update info saved in the update_info.json file. | def get_update_info():
update_info_path = os.path.join(_default_basedir("OctoPrint"), "update_info.json")
try:
with open(update_info_path, "r") as f:
update_info = json.load(f)
except IOError:
raise RuntimeError("Could not load update info")
except ValueError as e:
raise RuntimeError("update info not valid json - {}".format(e))
return update_info | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def last_update(self):\r\n request = http.Request('GET', '/metadata/last_update.json')\r\n return request, parsers.parse_json",
"def updates(self):\n return self._get_page('updates').json()",
"def get_updates(self) -> dict:\n url = self.URL + \"getUpdates\"\n js = self.get_json_from_url(url)\n return js",
"def updated_data():\n with open(UPDATED_JSON, 'r') as f:\n updated_data = json.load(f)\n return updated_data",
"def info(self, update=None):\n if update:\n update_dict = {key: value for key, value in update.items()\n if key in self.updatable_keys}\n self.update(update_dict)\n\n return {\n self.ID: self.dataset_id,\n self.LABEL: self.label,\n self.DESCRIPTION: self.description,\n self.SCHEMA: self.schema,\n self.LICENSE: self.license,\n self.ATTRIBUTION: self.attribution,\n self.CREATED_AT: self.record.get(self.CREATED_AT),\n self.UPDATED_AT: self.record.get(self.UPDATED_AT),\n self.NUM_COLUMNS: self.num_columns,\n self.NUM_ROWS: self.num_rows,\n self.STATE: self.state,\n }",
"def next_update(self):\r\n request = http.Request('GET', '/metadata/next_update.json')\r\n return request, parsers.parse_json",
"def getUpdates(self):\n # execute the query\n ret = self._er.execQuery(self)\n\n if ret and ret.get(\"recentActivity\") and ret[\"recentActivity\"].get(\"events\"):\n # return the updated information\n return ret[\"recentActivity\"][\"events\"]\n # or empty\n return {}",
"def info(self):\r\n return self._get('info', {})",
"def get_buildinfo():\n path = os.path.join(SCRIPT_DIR, \"..\", DISTRIBUTION, \"buildinfo.json\")\n return json.load(open(path, \"rb\"))",
"def info(self):\n return self._fetch_json('/api/info')",
"def _make_update_dict(update):\r\n return {\r\n \"id\": update[\"id\"],\r\n \"date\": update[\"date\"],\r\n \"content\": update[\"content\"],\r\n }",
"def get_updated_plugin_data(self, update={}):\n form = self.get_form()\n data = {}\n\n for field, default_value in form.plugin_data_fields:\n data.update({field: getattr(self.data, field, '')})\n\n for prop, value in update.items():\n data.update({prop: value})\n\n return json.dumps(data)",
"def extract_user_data_from_update(update: Update) -> Dict:\n user = update.effective_user.to_dict()\n\n return dict(\n user_id=user[\"id\"],\n is_blocked_bot=False,\n **{\n k: user[k]\n for k in [\"username\", \"first_name\", \"last_name\", \"language_code\"]\n if k in user and user[k] is not None\n },\n )",
"def get_updates():\n url = TELEGRAM_URL + TELEGRAM_TOKEN + '/getUpdates'\n response = requests.get(url).json()\n last_object = response['result'][-1] # -1 = last update\n\n chat_id = last_object['message']['chat']['id']\n message_text = last_object['message']['text']\n message = {\n 'chat_id': chat_id,\n 'text': message_text\n }\n return message",
"def info(self):\n resp = requests.get(\"%s/api/info\"%self.urlbase, verify=False)\n return resp.json",
"def getInfo(self):\n return self.info",
"def extract_user_data_from_update(update):\n if update.message is not None:\n user = update.message.from_user.to_dict()\n elif update.inline_query is not None:\n user = update.inline_query.from_user.to_dict()\n elif update.chosen_inline_result is not None:\n user = update.chosen_inline_result.from_user.to_dict()\n elif update.callback_query is not None and update.callback_query.from_user is not None:\n user = update.callback_query.from_user.to_dict()\n elif update.callback_query is not None and update.callback_query.message is not None:\n user = update.callback_query.message.chat.to_dict()\n else:\n raise Exception(f\"Can't extract user data from update: {update}\")\n\n return dict(\n user_id=user[\"id\"],\n is_blocked_bot=False,\n **{\n k: user[k]\n for k in [\"username\", \"first_name\", \"last_name\", \"language_code\"]\n if k in user and user[k] is not None\n },\n )",
"def info(self):\n return self.client.call('GET', self.name + 'info')",
"def updater(self) -> str:\n return pulumi.get(self, \"updater\")",
"def build_info(self) -> Dict[str, Union[str, dict]]:\n self._assert_build_info()\n logger.info(f'loading build info from {self.path}')\n if not hasattr(self, '_build_info'):\n with open(self.path) as f:\n self._build_info = json.load(f)\n return self._build_info",
"def get_data_to_update_object(self):\n return {}",
"def updateData(self):\n return self._updateData",
"def save_info(self):\n json.dump(self.info, open(os.path.join(self.dstore_dir, \"info.json\"), \"w\"),\n sort_keys=True, indent=4, ensure_ascii=False)",
"def get_info(self):\n return \"TODO !\"",
"def last_update(self): # TOFIX model the job and return an object instead of dictionary\n return self._data.get('summary_fields', {}).get('last_update')",
"def get_info(self) -> str:\n return self.info",
"def getInfo(self):\n return self._info",
"def last_update(self):\n # get modification time of QWC2 themes config file\n config_updated_at = None\n if os.path.isfile(self.themes_config_path):\n config_updated_at = datetime.utcfromtimestamp(\n os.path.getmtime(self.themes_config_path)\n )\n\n # create session for ConfigDB\n session = self.config_models.session()\n\n # query timestamp\n LastUpdate = self.config_models.model('last_update')\n query = session.query(LastUpdate.updated_at)\n last_update = query.first()\n if last_update is not None:\n if config_updated_at is not None:\n # use latest of both timestamps\n updated_at = max(last_update.updated_at, config_updated_at)\n else:\n # use timestamp from ConfigDB\n updated_at = last_update.updated_at\n else:\n # no entry in ConfigDB, use config timestamp or now\n updated_at = config_updated_at or datetime.utcnow()\n\n # close session\n session.close()\n\n return {\n 'permissions_updated_at': updated_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n }",
"def getInfo():",
"def return_info(self):\n\t\treturn self.info"
] | [
"0.6842781",
"0.6825544",
"0.6760401",
"0.6695358",
"0.6490341",
"0.62664664",
"0.61488295",
"0.6123632",
"0.6107774",
"0.60368365",
"0.60149324",
"0.59930795",
"0.5889623",
"0.58664274",
"0.5860153",
"0.58081394",
"0.576707",
"0.575865",
"0.57361627",
"0.57331353",
"0.570364",
"0.5700775",
"0.5696303",
"0.56524694",
"0.56516695",
"0.56481874",
"0.5645794",
"0.5623082",
"0.56194586",
"0.56147426"
] | 0.87182504 | 0 |
Build the wheels of the packages in the queue. | def build_wheels(build_queue):
try:
if not os.path.isdir(PIP_WHEEL_TEMP_FOLDER):
os.mkdir(PIP_WHEEL_TEMP_FOLDER)
except OSError as e:
raise RuntimeError("can't create wheel tmp folder {} - {}".format(PIP_WHEEL_TEMP_FOLDER, e))
for venv, packages in build_queue.items():
tmp_folder = os.path.join(PIP_WHEEL_TEMP_FOLDER, re.search(r"\w+((?=\/venv)|(?=\/bin))", venv).group(0))
if os.path.isdir(tmp_folder):
try:
os.system("sudo rm -r {}".format(tmp_folder))
except Exception as e:
raise RuntimeError("can't delete pip wheel temp folder {} - {}".format(tmp_folder, e))
pip_args = [
"wheel",
"--disable-pip-version-check",
"--wheel-dir={}".format(
tmp_folder
), # Build wheels into <dir>, where the default is the current working directory.
"--no-dependencies", # Don't install package dependencies.
]
for package in packages:
if package.get("archive"):
pip_args.append(package.get("archive"))
else:
raise RuntimeError("Archive not found for package {}".format(package))
returncode, exec_stdout, exec_stderr = get_pip_caller(venv, _logger).execute(
*pip_args
)
if returncode != 0:
raise exceptions.UpdateError(
"Error while executing pip wheel", (exec_stdout, exec_stderr)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def install_wheels(install_queue):\n if not isinstance(install_queue, dict):\n raise RuntimeError(\"install queue is not a dict\")\n\n for venv, packages in install_queue.items():\n tmp_folder = os.path.join(PIP_WHEEL_TEMP_FOLDER, re.search(r\"\\w+((?=\\/venv)|(?=\\/bin))\", venv).group(0))\n pip_args = [\n \"install\",\n \"--disable-pip-version-check\",\n \"--upgrade\", # Upgrade all specified packages to the newest available version. The handling of dependencies depends on the upgrade-strategy used.\n \"--find-links={}\".format(\n tmp_folder\n ), # If a URL or path to an html file, then parse for links to archives such as sdist (.tar.gz) or wheel (.whl) files. If a local path or file:// URL that's a directory, then look for archives in the directory listing. Links to VCS project URLs are not supported.\n ]\n for package in packages:\n pip_args.append(\n \"{package}\".format(\n package=package[\"name\"]\n )\n )\n\n returncode, exec_stdout, exec_stderr = get_pip_caller(venv, _logger).execute(\n *pip_args\n )\n if returncode != 0:\n raise exceptions.UpdateError(\n \"Error while executing pip install\", (exec_stdout, exec_stderr)\n )",
"def build_wheel_cache():\n pkg_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n pyvers = ['2.6', '2.7', '3.3', '3.4', '3.5']\n old_python_path = os.environ['PYTHONPATH']\n for pyver in pyvers:\n pycmd = which('python{0}'.format(pyver))\n if not pycmd:\n print('Python {0} not found'.format(pyver))\n continue\n pipcmd = which('pip{0}'.format(pyver))\n if not pipcmd:\n print('pip {0} not found'.format(pyver))\n continue\n os.environ['PYTHONPATH'] = ''\n lines = load_requirements(pkg_dir, pyver)\n for line in lines:\n if 'numpy' in line:\n numpy_line = line\n break\n else:\n raise RuntimeError('Numpy dependency could not be found')\n for line in lines:\n print(\n _pcolor(\n 'Building {0} wheel cache for Python {1}'.format(\n line,\n pyver\n ),\n 'cyan'\n )\n )\n if 'scipy' in line:\n # Install numpy before scipy otherwise pip throws an exception\n pobj = subprocess.Popen(\n [\n 'pip{0}'.format(pyver),\n 'install',\n '--upgrade',\n '--force-reinstall',\n numpy_line.strip()\n ],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n stdout, _ = pobj.communicate()\n print(stdout)\n pobj = subprocess.Popen(\n ['pip{0}'.format(pyver), 'wheel', line],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n stdout, _ = pobj.communicate()\n print(stdout)\n os.environ['PYTHONPATH'] = old_python_path",
"def wheels():\n with lcd(env.local_path):\n put('./requirements.txt', '/srv/build/wheel_requirements.txt')\n put('./etc/base_image/image_requirements.txt',\n '/srv/build/requirements.txt')\n\n with cd('/srv/build/wheelhouse'):\n run('rm -rf *.whl')\n\n compose(cmd='-f service.yml -p %s run --rm wheel-factory' %\n env.project_name, path='/srv/build')",
"def build_packages(paths, threads=4):\n from threading import Thread, Event\n from time import sleep\n\n paths = paths.copy()\n workers = []\n for i in range(threads):\n event = Event()\n event.set()\n workers.append(dict(done=event, path=None))\n\n def build(done, path):\n logger.info('building %s', path)\n build_package(path)\n done.set()\n\n while False in [w['done'].is_set() for w in workers] or paths:\n for w in workers:\n if w['done'].is_set() and paths:\n w['done'].clear()\n w['path'] = paths.pop()\n Thread(target=build, kwargs=w).start()\n sleep(1)",
"def upload_wheels():\n build()\n sh(\"%s -m twine upload dist/*.whl\" % PYTHON)",
"def build_packages(self, board, args=None, **kwargs):\n self.cros_sdk('build packages',\n ['./build_packages', '--board', board],\n args, **kwargs)",
"def build():\n click.secho('Creating package ...')\n pkg = utils.create_package()\n click.secho('Package created: {}'.format(pkg), fg='green')\n click.secho('Creating wheel...')\n wheel_path = utils.create_wheel()\n click.secho('Wheel created in {}'.format(wheel_path), fg='green')",
"def build_wheel( # noqa:C901\n req=None, # type: Optional[TInstallRequirement]\n reqset=None, # type: Optional[Union[TReqSet, Iterable[TInstallRequirement]]]\n output_dir=None, # type: Optional[str]\n preparer=None, # type: Optional[TPreparer]\n wheel_cache=None, # type: Optional[TWheelCache]\n build_options=None, # type: Optional[List[str]]\n global_options=None, # type: Optional[List[str]]\n check_binary_allowed=None, # type: Optional[Callable[TInstallRequirement, bool]]\n no_clean=False, # type: bool\n session=None, # type: Optional[TSession]\n finder=None, # type: Optional[TFinder]\n install_command=None, # type: Optional[TCommand]\n req_tracker=None, # type: Optional[TReqTracker]\n build_dir=None, # type: Optional[str]\n src_dir=None, # type: Optional[str]\n download_dir=None, # type: Optional[str]\n wheel_download_dir=None, # type: Optional[str]\n cache_dir=None, # type: Optional[str]\n use_user_site=False, # type: bool\n use_pep517=None, # type: Optional[bool]\n verify=False, # type: bool\n editable=False, # type: bool\n format_control_provider=None, # type: Optional[TShimmedFunc]\n wheel_cache_provider=None, # type: Optional[TShimmedFunc]\n preparer_provider=None, # type: Optional[TShimmedFunc]\n wheel_builder_provider=None, # type: Optional[TShimmedFunc]\n build_one_provider=None, # type: Optional[TShimmedFunc]\n build_one_inside_env_provider=None, # type: Optional[TShimmedFunc]\n build_many_provider=None, # type: Optional[TShimmedFunc]\n install_command_provider=None, # type: Optional[TShimmedFunc]\n finder_provider=None, # type: Optional[TShimmedFunc]\n reqset_provider=None, # type: Optional[TShimmedFunc]\n):\n # type: (...) -> Generator[Union[str, Tuple[List[TInstallRequirement], ...]], None, None]\n wheel_cache_provider = resolve_possible_shim(wheel_cache_provider)\n preparer_provider = resolve_possible_shim(preparer_provider)\n wheel_builder_provider = resolve_possible_shim(wheel_builder_provider)\n build_one_provider = resolve_possible_shim(build_one_provider)\n build_one_inside_env_provider = resolve_possible_shim(build_one_inside_env_provider)\n build_many_provider = resolve_possible_shim(build_many_provider)\n install_cmd_provider = resolve_possible_shim(install_command_provider)\n format_control_provider = resolve_possible_shim(format_control_provider)\n finder_provider = resolve_possible_shim(finder_provider) or get_package_finder\n reqset_provider = resolve_possible_shim(reqset_provider)\n global_options = [] if global_options is None else global_options\n build_options = [] if build_options is None else build_options\n options = None\n kwarg_map = {\n \"cache_dir\": cache_dir,\n \"src_dir\": src_dir,\n \"download_dir\": download_dir,\n \"wheel_download_dir\": wheel_download_dir,\n \"build_dir\": build_dir,\n \"use_user_site\": use_user_site,\n }\n if not req and not reqset:\n raise TypeError(\"Must provide either a requirement or requirement set to build\")\n with contextlib.ExitStack() as ctx:\n kwargs = kwarg_map.copy()\n if wheel_cache is None and (reqset is not None or output_dir is None):\n if install_command is None:\n assert isinstance(install_cmd_provider, (type, functools.partial))\n install_command = install_cmd_provider()\n kwargs, options = populate_options(install_command, options, **kwarg_map)\n format_control = getattr(options, \"format_control\", None)\n if not format_control:\n format_control = format_control_provider(None, None) # type: ignore\n wheel_cache = ctx.enter_context(\n wheel_cache_provider(options.cache_dir, format_control)\n )\n if req 
and not reqset and not output_dir:\n output_dir = get_ireq_output_path(wheel_cache, req)\n if not reqset and build_one_provider:\n build_one_kwargs = {\n \"req\": req,\n \"output_dir\": output_dir,\n \"verify\": verify,\n \"editable\": editable,\n \"build_options\": build_options,\n \"global_options\": global_options,\n }\n yield call_function_with_correct_args(build_one_provider, **build_one_kwargs)\n elif build_many_provider:\n yield build_many_provider(\n reqset, wheel_cache, build_options, global_options, check_binary_allowed\n )\n else:\n builder_args, builder_kwargs = get_allowed_args(wheel_builder_provider)\n if \"requirement_set\" in builder_args and not reqset:\n reqset = reqset_provider()\n if session is None and finder is None:\n session = get_session(install_cmd=install_command, options=options)\n finder = finder_provider(\n install_command, options=options, session=session\n )\n if preparer is None:\n preparer_kwargs = {\n \"build_dir\": kwargs[\"build_dir\"],\n \"src_dir\": kwargs[\"src_dir\"],\n \"download_dir\": kwargs[\"download_dir\"],\n \"wheel_download_dir\": kwargs[\"wheel_download_dir\"],\n \"finder\": finder,\n \"session\": session\n if session\n else get_session(install_cmd=install_command, options=options),\n \"install_cmd\": install_command,\n \"options\": options,\n \"use_user_site\": use_user_site,\n \"req_tracker\": req_tracker,\n }\n preparer = ctx.enter_context(preparer_provider(**preparer_kwargs))\n check_bin = check_binary_allowed if check_binary_allowed else lambda x: True\n builder_kwargs = {\n \"requirement_set\": reqset,\n \"finder\": finder,\n \"preparer\": preparer,\n \"wheel_cache\": wheel_cache,\n \"no_clean\": no_clean,\n \"build_options\": build_options,\n \"global_options\": global_options,\n \"check_binary_allowed\": check_bin,\n }\n builder = call_function_with_correct_args(\n wheel_builder_provider, **builder_kwargs\n )\n if req and not reqset:\n if not output_dir:\n output_dir = get_ireq_output_path(wheel_cache, req)\n if use_pep517 is not None:\n req.use_pep517 = use_pep517\n yield builder._build_one(req, output_dir)\n else:\n yield builder.build(reqset)",
"def build_env_wheels() -> Iterable[Path]:\n return []",
"def run ( self ):\n q = self.write_queue\n write_kw = self.write_kw\n stats = self.stats\n catref = self.catref\n additions_dir = self.additions_dir\n\n while not q.empty() and not hasattr ( catref, 'RERAISE' ):\n try:\n pkg = q.get_nowait()\n # remove manifest writing from threaded writing since it's\n # single-threaded\n pkg.write (\n additions_dir = additions_dir.get_obj_subdir ( pkg ),\n stats = stats,\n **write_kw\n )\n stats.ebuild_count.inc (\n len ( list ( pkg.iter_packages_with_efile() ) )\n )\n except queue.Empty:\n break\n except ( Exception, KeyboardInterrupt ) as err:\n catref.logger.exception ( err )\n catref.RERAISE = sys.exc_info()",
"def buildAll(self, packages):\r\n return [self.build(package) for package in packages]",
"def build_queue(update_info, dependencies, plugin_archive):\n install_queue = {}\n\n install_queue.setdefault(\n update_info.get(UPDATE_CONFIG_NAME).get(\"pip_command\", DEFAULT_OPRINT_VENV), []\n ).append(\n {\n \"name\": PLUGIN_NAME,\n \"archive\": plugin_archive,\n \"target\": '',\n }\n )\n print(\"dependencies - {}\".format(dependencies))\n if dependencies:\n for dependency in dependencies:\n plugin_config = update_info.get(UPDATE_CONFIG_NAME)\n plugin_dependencies_config = plugin_config.get(\"dependencies\")\n dependency_config = plugin_dependencies_config.get(dependency[\"name\"])\n\n # fail if requirements file contains dependencies but cloud config not\n if dependency_config == None:\n raise RuntimeError(\n \"no update info for dependency {}\".format(dependency[\"name\"])\n )\n\n # override the dependency version from the dependencies files with the one from the cloud config\n if dependency_config.get(\"version\"):\n version_needed = dependency_config.get(\"version\")\n else:\n version_needed = dependency.get(\"version\")\n\n if dependency_config.get(\"pip\"):\n archive = dependency_config[\"pip\"].format(\n target_version=\"v{version}\".format(version=version_needed),\n )\n else:\n raise RuntimeError(\n \"pip not configured for {}\".format(dependency[\"name\"])\n )\n\n installed_version = get_version_of_pip_module(\n dependency[\"name\"],\n dependency_config.get(\"pip_command\", DEFAULT_OPRINT_VENV),\n )\n\n if installed_version != version_needed:\n install_queue.setdefault(\n dependency_config.get(\"pip_command\", DEFAULT_OPRINT_VENV), []\n ).append(\n {\n \"name\": dependency[\"name\"],\n \"archive\": archive,\n \"target\": version_needed,\n }\n )\n else:\n print(\n \"skip dependency {} as the target version {} is already installed\".format(\n dependency[\"name\"], version_needed\n )\n )\n return install_queue",
"def install_packages(self):\n\n logging.info(\"installing packages...\")\n\n # Create the target directory. DFT files will be installed under this\n # directory.\n try:\n logging.debug(\"copying DFT toolkit...\")\n\n # Create the target directory in the rootfs\n dft_target_path = self.project.rootfs_mountpoint + \"/dft_bootstrap/\"\n if not os.path.exists(dft_target_path):\n os.makedirs(dft_target_path)\n\n # Copy the DFT toolkit content to the target rootfs\n for copy_target in os.listdir(self.project.project_definition[\"configuration\"][\"dft-base\"]):\n logging.debug(\"Copy the DFT toolkit : preparing to copy \" + copy_target)\n copy_target_path = os.path.join(self.project.project_definition[\"configuration\"][\"dft-base\"], copy_target)\n if os.path.isfile(copy_target_path):\n logging.debug(\"copying file \" + copy_target_path + \" => \" + dft_target_path)\n file_util.copy_file(copy_target_path, dft_target_path)\n else:\n logging.debug(\"copying tree \" + copy_target_path + \" => \" + dft_target_path)\n dir_util.copy_tree(copy_target_path, os.path.join(dft_target_path, copy_target))\n\n # Copy the additional toolkit content to the target rootfs\n if \"additional-roles\" in self.project.project_definition[\"configuration\"]:\n for additional_path in self.project.project_definition[\"configuration\"][\"additional-roles\"]:\n logging.debug(\"Copy the additional toolkit : preparing to copy from additional path \" + additional_path)\n for copy_target in os.listdir(additional_path):\n logging.debug(\"Copy the additional toolkit : preparing to copy \" + copy_target)\n copy_target_path = os.path.join(additional_path, copy_target)\n if os.path.isfile(copy_target_path):\n logging.debug(\"copying file \" + copy_target_path + \" => \" + dft_target_path)\n file_util.copy_file(copy_target_path, dft_target_path)\n else:\n logging.debug(\"copying tree \" + copy_target_path + \" => \" + dft_target_path)\n dir_util.copy_tree(copy_target_path, os.path.join(dft_target_path, copy_target))\n\n except OSError as exception:\n # Call clean up to umount /proc and /dev\n self.cleanup_installation_files()\n logging.critical(\"Error: %s - %s.\", exception.filename, exception.strerror)\n exit(1)\n\n except shutil.Error as exception:\n self.cleanup_installation_files()\n logging.critical(\"Error: %s - %s.\", exception.filename, exception.strerror)\n exit(1)\n\n # Flag if someroles has been foundand added to site.yml\n role_has_been_found = False\n\n # Generate the site file including all the roles from baseos\n # configuration, then move roles to the target rootfs\n with tempfile.NamedTemporaryFile(mode='w+', delete=False) as working_file:\n # Generate file header\n working_file.write(\"# Defines the role associated to the rootfs being generated\\n\")\n working_file.write(\"---\\n\")\n working_file.write(\"- hosts: local\\n\")\n working_file.write(\"\\n\")\n\n # Test if some variable files have to be included\n if \"variables\" in self.project.project_definition[\"project-definition\"]:\n # Yes, then output the vars_files marker\n working_file.write(\" vars_files:\\n\")\n\n # And iterate the list of files containing variables\n for vars_file in self.project.project_definition[\"project-definition\"][\"variables\"]:\n # Append the file to the site.yml file\n working_file.write(\" - \" + vars_file + \"\\n\")\n logging.debug(\"Adding variables file \" + vars_file)\n\n # Completethe path to have a full path on disk (in case of path local\n # to where is located the project file)\n vars_file = 
self.project.genereate_definition_file_path(vars_file)\n\n # Copy the variabes fies to the bootstrap directory\n logging.debug(\"Copy the variables file : preparing to copy \" + vars_file)\n if os.path.isfile(vars_file):\n logging.debug(\"copying file \" + vars_file + \" => \" + dft_target_path)\n file_util.copy_file(vars_file, dft_target_path)\n else:\n logging.error(\"Variable files \" + vars_file + \" is not a file\")\n logging.error(\"Skipping this file\")\n\n # Just some spacing for pretty printing\n working_file.write(\"\\n\")\n\n working_file.write(\" roles:\\n\")\n\n # Iterate the list of distributions loaded from the file\n for role in self.project.baseos_definition[\"roles\"]:\n # At least one role has beenfound, flag it\n role_has_been_found = True\n logging.debug(\"Adding role \" + role)\n working_file.write(\" - \" + role + \"\\n\")\n\n # We are done with file generation, close it now\n working_file.close()\n\n # Generate the file path\n filepath = self.project.rootfs_mountpoint + \"/dft_bootstrap/site.yml\"\n\n # Finally move the temporary file under the rootfs tree\n sudo_command = \"sudo mv -f \" + working_file.name + \" \" + filepath\n self.execute_command(sudo_command)\n\n # Warn the user if no role is found. In such case baseos will be same\n # debotstrap, which is certainly not what is expected\n if not role_has_been_found:\n logging.warning(\"No role has been found in baseos definiion. Rootfs is same as debootstrap output\")\n logging.error(\"You may wish to have a look to : \" + self.project.genereate_definition_file_path(self.project.project_definition[\"project-definition\"][\"baseos\"][0]))\n\n # Execute Ansible\n # TODO : multiple target ? not sure...\n logging.info(\"running ansible...\")\n sudo_command = \"LANG=C sudo chroot \" + self.project.rootfs_mountpoint + \" /bin/bash -c \\\"cd /dft_bootstrap && /usr/bin/ansible-playbook -i inventory.yml -c local site.yml\\\"\"\n self.execute_command(sudo_command)\n logging.info(\"ansible stage successfull\")",
"def add_packages ( self, add_method ):\n addstats = self.repo_stats.queue_time\n for repo in self.repos:\n addstats.begin ( repo.name )\n self._queue_packages_from_repo ( repo, add_method )\n addstats.end ( repo.name )",
"def ensure_wheel():\n wheels = sorted(DIST.glob(\"*.whl\"))\n if not wheels:\n subprocess.check_call([\"pyproject-build\", \".\", \"--wheel\", \"--no-isolation\"], cwd=ROOT)\n wheels = sorted(DIST.glob(\"*.whl\"))\n return wheels[-1]",
"def packages():",
"def build() -> List[asyncio.Task]:",
"def build():\n os.makedirs(DIST_DIR, exist_ok=True)\n\n if \"WHEEL\" in os.environ:\n whl = build_wheel()\n else:\n click.echo(\"Not building wheels.\")\n\n if \"WHEEL\" in os.environ and \"DOCKER\" in os.environ:\n # Docker image requires wheels\n build_docker_image(whl)\n else:\n click.echo(\"Not building Docker image.\")\n\n if \"PYINSTALLER\" in os.environ:\n build_pyinstaller()\n else:\n click.echo(\"Not building PyInstaller packages.\")",
"def build():",
"def package():\n call([sys.executable, \"setup.py\", \"clean\", \"--all\", \"bdist_egg\"], cwd=\"src\")\n call([sys.executable, \"setup.py\", \"clean\", \"--all\", \"bdist_wheel\"], cwd=\"src\")",
"def install(self):\n if self._skip_dependency_check:\n self._flatten_dependency_tree()\n else:\n self._build_dependency_tree()\n with ThreadPoolExecutor(max_workers=self._threads) as executor:\n for level in self._dependency_tree:\n for dependency in level:\n executor.submit(self._install_package, (dependency))",
"def build_dependencies(self, options):\n for i in self.dependencies(options):\n i[0].build(i[1])",
"def task_build(argv):\n pytaskmaster.generator(\"setup.py.in\", \"setup.py\", config)\n pytaskmaster.generator(\"pytaskmaster/version.py.in\", \"pytaskmaster/version.py\", config)\n shell(\"python setup.py bdist_wheel\")\n if \"--sign\" in argv:\n for file in os.listdir(\"dist\"):\n asc_file = \"dist/\" + file + \".asc\"\n if file.endswith(\".whl\") and not os.path.isfile(asc_file):\n shell(\"gpg --detach-sign -a dist/{}\".format(file))",
"def run(self):\n\n # NOTE : protect against race condition under empty lists\n self.cond.acquire()\n if len(self) == 0 :\n self.cond.wait()\n self.cond.release()\n pkginfo = None\n __iter = self.__iter__()\n self.started = True\n while self.started:\n self.cond.acquire()\n if not self :\n break\n elif self.started :\n # NOTE : protect against StopIteration on open lists\n if self.index == len(self) :\n self.cond.wait()\n pkginfo = __iter.next()\n self.cond.release()\n if pkginfo :\n self.download_pkg( pkginfo )\n pkginfo = None\n self.index += 1",
"def base():\n wheels()\n build_base()\n push_base()",
"def run_update():\n\n args = _parse_arguments()\n\n # get dependencies\n dependencies = get_dependencies(args.folder)\n\n # get update config of dependencies\n update_info = get_update_info()\n\n install_queue = build_queue(\n update_info, dependencies, args.archive\n )\n\n print(\"install_queue\", install_queue)\n if install_queue is not None:\n build_wheels(install_queue)\n install_wheels(install_queue)",
"def build(self):\n for component, type in self.__get_data(\"comps\").items():\n self.add_comp(component, type)\n\n self.logger.info('Build of {} finished'.format(self.name))",
"def build():\n\timport subprocess\n\tfrom os import listdir, getcwd\n\tfrom os.path import isfile, join\n\tonlyfiles = [f for f in listdir(getcwd()) if isfile(join(getcwd(), f))]\n\n\tif not 'requirements.txt' in onlyfiles:\n\t\traise SystemExit('File including depencencies not found. You will have to install them manually.')\n\n\tsubprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])\n\n\tprint('All dependencies installed successfully.\\nYou can run Simplex now!')",
"async def install(self) -> None:\n tasks = [asyncio.create_task(self.miners[miner].main_loop()) for miner in self.miners]\n await asyncio.gather(*tasks)",
"def run():\n ftpd_thread = mini_buildd.misc.run_as_thread(\n mini_buildd.ftpd.run,\n name=\"ftpd\",\n bind=get().model.ftpd_bind,\n queue=get().incoming_queue)\n\n builder_thread = mini_buildd.misc.run_as_thread(\n mini_buildd.builder.run,\n name=\"builder\",\n daemon_=get())\n\n while True:\n event = get().incoming_queue.get()\n if event == \"SHUTDOWN\":\n break\n\n try:\n LOG.info(\"Status: {0} active packages, {1} changes waiting in incoming.\".\n format(len(get().packages), get().incoming_queue.qsize()))\n\n changes = None\n changes = mini_buildd.changes.Changes(event)\n\n if changes.type == changes.TYPE_BREQ:\n # Build request: builder\n\n def queue_buildrequest(event):\n \"\"\"Queue in extra thread so we don't block here in case builder is busy.\"\"\"\n get().build_queue.put(event)\n mini_buildd.misc.run_as_thread(queue_buildrequest, name=\"build queuer\", daemon=True, event=event)\n\n else:\n # User upload or build result: packager\n mini_buildd.packager.run(\n daemon=get(),\n changes=changes)\n\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes file\", e)\n\n # Try to notify\n try:\n with mini_buildd.misc.open_utf8(event, \"r\") as body:\n subject = \"INVALID CHANGES: {c}: {e}\".format(c=event, e=e)\n get().model.mbd_notify(subject, body.read())\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes notify failed\", e)\n\n # Try to clean up\n try:\n if changes:\n changes.remove()\n else:\n os.remove(event)\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes cleanup failed\", e)\n\n finally:\n get().incoming_queue.task_done()\n\n get().build_queue.put(\"SHUTDOWN\")\n mini_buildd.ftpd.shutdown()\n builder_thread.join()\n ftpd_thread.join()\n\n # keyrings.close() is not called implicitly; this leaves tmp files around.\n # There should be a nicer way, really...\n try:\n get().keyrings.close()\n except BaseException:\n pass"
] | [
"0.67037266",
"0.6448939",
"0.63929045",
"0.6327373",
"0.6008243",
"0.59799784",
"0.59722793",
"0.5940384",
"0.5918518",
"0.5817111",
"0.5769088",
"0.5679094",
"0.5668807",
"0.5619912",
"0.5609576",
"0.56073725",
"0.5605971",
"0.5604782",
"0.5592366",
"0.5588438",
"0.5583262",
"0.5580692",
"0.5511525",
"0.54918236",
"0.5484189",
"0.54351574",
"0.5376068",
"0.53714436",
"0.534535",
"0.53370863"
] | 0.7769356 | 0 |
installs the wheels into the venvs given by the install queue. | def install_wheels(install_queue):
if not isinstance(install_queue, dict):
raise RuntimeError("install queue is not a dict")
for venv, packages in install_queue.items():
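        # derive a per-venv temp folder name from the path segment directly before "/venv" or "/bin" in the pip command path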
tmp_folder = os.path.join(PIP_WHEEL_TEMP_FOLDER, re.search(r"\w+((?=\/venv)|(?=\/bin))", venv).group(0))
pip_args = [
"install",
"--disable-pip-version-check",
"--upgrade", # Upgrade all specified packages to the newest available version. The handling of dependencies depends on the upgrade-strategy used.
"--find-links={}".format(
tmp_folder
), # If a URL or path to an html file, then parse for links to archives such as sdist (.tar.gz) or wheel (.whl) files. If a local path or file:// URL that's a directory, then look for archives in the directory listing. Links to VCS project URLs are not supported.
]
for package in packages:
            pip_args.append(package["name"])
returncode, exec_stdout, exec_stderr = get_pip_caller(venv, _logger).execute(
*pip_args
)
if returncode != 0:
raise exceptions.UpdateError(
"Error while executing pip install", (exec_stdout, exec_stderr)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_wheels(build_queue):\n try:\n if not os.path.isdir(PIP_WHEEL_TEMP_FOLDER):\n os.mkdir(PIP_WHEEL_TEMP_FOLDER)\n except OSError as e:\n raise RuntimeError(\"can't create wheel tmp folder {} - {}\".format(PIP_WHEEL_TEMP_FOLDER, e))\n\n for venv, packages in build_queue.items():\n tmp_folder = os.path.join(PIP_WHEEL_TEMP_FOLDER, re.search(r\"\\w+((?=\\/venv)|(?=\\/bin))\", venv).group(0))\n if os.path.isdir(tmp_folder):\n try:\n os.system(\"sudo rm -r {}\".format(tmp_folder))\n except Exception as e:\n raise RuntimeError(\"can't delete pip wheel temp folder {} - {}\".format(tmp_folder, e))\n\n pip_args = [\n \"wheel\",\n \"--disable-pip-version-check\",\n \"--wheel-dir={}\".format(\n tmp_folder\n ), # Build wheels into <dir>, where the default is the current working directory.\n \"--no-dependencies\", # Don't install package dependencies.\n ]\n for package in packages:\n if package.get(\"archive\"):\n pip_args.append(package.get(\"archive\"))\n else:\n raise RuntimeError(\"Archive not found for package {}\".format(package))\n\n returncode, exec_stdout, exec_stderr = get_pip_caller(venv, _logger).execute(\n *pip_args\n )\n if returncode != 0:\n raise exceptions.UpdateError(\n \"Error while executing pip wheel\", (exec_stdout, exec_stderr)\n )",
"def install_requirements():\n require(\"release\", provided_by=[deploy])\n with cd(\"%(path)s\" % env):\n sudo(\"./bin/pip install -r ./releases/%(release)s/requirements.txt\" % env)",
"def install():\n verun('pip install -r {0}'.format(requirements))",
"def install_requirements():\r\n if env.hosts:\r\n run ('cd %(path)s %(command_join)s env/bin/pip install -r current-release/requirements.txt' % env)\r\n else:\r\n local('%spip install -r requirements.txt' % virtualenv_bin, capture=False)",
"def install(name):\n base = '/home/{}/venvs/{}/base.txt'.format(env.user, name)\n prod = '/home/{}/venvs/{}/prod.txt'.format(env.user, name)\n\n # Upload requirements file.\n put(utils.file_path('requirements', 'base.txt'), base)\n put(utils.file_path('requirements', 'prod.txt'), prod)\n\n # Activate the virtual environment.\n with prefix('source /home/{}/venvs/{}/bin/activate'.format(env.user, name)):\n run('pip install -r {}'.format(prod))",
"def conda_install_requirements(venv):\n # Upload the requirements file.\n put(utils.files('requirements', 'base.txt'), utils.home('base.txt'))\n put(utils.files('requirements', 'prod.txt'), utils.home('prod.txt'))\n\n # Activate the virtual environment.\n activate = '{0}/bin/activate'.format(utils.home('apps', 'miniconda'))\n\n with prefix('source {activate} {venv}'.format(venv=venv, activate=activate)):\n run('pip install -r {0}'.format(utils.home('prod.txt')))\n\n # Remove the uploaded files.\n with cd(utils.home()):\n run('rm {0}'.format(utils.home('base.txt')))\n run('rm {0}'.format(utils.home('prod.txt')))",
"def install_packages():\n with open(\"requirements.txt\", \"w\") as requirements_file:\n subprocess.run([\"pipenv\", \"lock\", \"-r\"], stdout=requirements_file)\n\n subprocess.run(\n [\"pip\", \"install\", \"-r\", \"requirements.txt\", \"--no-deps\", \"-t\", BUILD_DIR]\n )",
"def detect_install_queue(env, install_queue=[]):\n\n _env = env()\n if _env.done():\n print \"[info]\", env.__name__, \"is already done.\"\n return install_queue\n else:\n if env not in install_queue:\n install_queue.insert(0, env)\n\n requires = _env.requires()\n if not isinstance(requires, list):\n requires = [requires]\n for required_env in requires:\n if isinstance(required_env, basestring):\n required_env = lui[required_env]\n detect_install_queue(required_env, install_queue)\n\n return install_queue",
"def pip_install(*args):\n call(WITH_VENV, '.venv', 'pip', 'install', *args)",
"def install_requirements():\n req_path = os.path.join(vlogger_dir, \"requirements.txt\")\n subprocess.call([\"pip\", \"install\", \"-r\", req_path])",
"def install_requirements():\n with cd(env.code_dir):\n with _virtualenv():\n sudo('pip install -r requirements.txt', pty=True)",
"def install_requirements():\n local('. fabric_factory/ve/bin/activate; easy_install pip')\n local('. fabric_factory/ve/bin/activate; pip install -r requirements.txt')",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def pip_install():\n _require_environment()\n remote(PIP_INSTALL_PREFIX)",
"def install():\n return {\n \"actions\": [TaskCreator.get_pip() + \" install --upgrade dist/*.whl\"],\n \"verbosity\": 2,\n \"setup\": [\"make_distribution\"],\n }",
"def requires(*requirements, **kwargs):\n if '/.tox/' in sys.executable:\n venv = os.path.dirname(os.path.dirname(sys.executable))\n elif env.virtual_env: # pragma: no cover\n venv = env.chut_virtualenv = env.virtual_env\n else: # pragma: no cover\n venv = os.path.expanduser(kwargs.get('venv', '~/.chut/venv'))\n if not env.pip_download_cache: # pragma: no cover\n env.pip_download_cache = os.path.expanduser('~/.chut/cache')\n sh.mkdir('-p', env.pip_download_cache)\n bin_dir = os.path.join(venv, 'bin')\n if bin_dir not in env.path: # pragma: no cover\n env.path = [bin_dir] + env.path\n requirements = list(requirements)\n if 'chut' not in requirements:\n requirements.insert(0, 'chut')\n if not test.d(venv): # pragma: no cover\n import urllib\n url = 'https://raw.github.com/pypa/virtualenv/master/virtualenv.py'\n urllib.urlretrieve(url, '/tmp/_virtualenv.py')\n sh[sys.executable]('-S /tmp/_virtualenv.py', venv) > 1\n sh.rm('/tmp/_virtualenv*', shell=True)\n info('Installing %s...' % ', '.join(requirements))\n sh.pip('install -qM', *requirements) > 1\n elif env.chut_virtualenv:\n upgrade = '--upgrade' in sys.argv\n if (env.chut_upgrade or upgrade): # pragma: no cover\n installed = ''\n else:\n installed = str(sh.pip('freeze')).lower()\n requirements = [r for r in requirements if r.lower() not in installed]\n if requirements: # pragma: no cover\n info('Updating %s...' % ', '.join(requirements))\n sh.pip('install -qM --upgrade', *requirements) > 1\n executable = os.path.join(bin_dir, 'python')\n if not env.chut_virtualenv: # pragma: no cover\n env.chut_virtualenv = venv\n os.execve(executable, [executable] + sys.argv, env)",
"def setup(ctx):\r\n ctx.run('pip3 install -r requirements.txt')",
"def install(env, requirements, args, quiet=False):\n if os.path.isfile(requirements):\n args += ('-r', requirements)\n label = 'project'\n else:\n args += ('-U', '-e', '.')\n label = 'library'\n\n if not quiet:\n print('== Step 2. Install {0} =='.format(label))\n\n pip_cmd(env, ('install', ) + args, echo=not quiet)\n\n if not quiet:\n print()\n\n return True",
"def install_requirements():\n run('source %(env_path)s/bin/activate; pip install -U -r %(repo_path)s/requirements.txt' % env)",
"def install_requirements():\n run_commands('pip install -r ./requirements/dev.txt')",
"def wheels():\n with lcd(env.local_path):\n put('./requirements.txt', '/srv/build/wheel_requirements.txt')\n put('./etc/base_image/image_requirements.txt',\n '/srv/build/requirements.txt')\n\n with cd('/srv/build/wheelhouse'):\n run('rm -rf *.whl')\n\n compose(cmd='-f service.yml -p %s run --rm wheel-factory' %\n env.project_name, path='/srv/build')",
"def install_deps():\n pipenv_dev = run('pipenv install --dev'.split(), check=True)\n print('Installed dependencies and virtual environment. Type `pipenv shell` to activate later.')",
"def build_env_wheels() -> Iterable[Path]:\n return []",
"def upload_wheels():\n build()\n sh(\"%s -m twine upload dist/*.whl\" % PYTHON)",
"def YumInstall(vm):\n _Install(vm)",
"def YumInstall(vm):\n _Install(vm)",
"def install_packages():\n\n require('environment', provided_by=env.environments)\n packages_file = os.path.join(PROJECT_ROOT, 'requirements', 'packages.txt')\n system.install_packages_from_file(packages_file)",
"def install_wheels(wheel_directory=None):\n\n if wheel_directory is None:\n wheel_directory = os.path.dirname(os.path.realpath(__file__))\n\n print(wheel_directory)\n\n wheels = [fname for fname in os.listdir(wheel_directory) if \".whl\" in fname]\n for wheel in wheels:\n pip.main([\"install\", \"--upgrade\", wheel])",
"def apt_install(pkgs):\n return run([\"apt-get\", \"install\", \"-y\"] + list(pkgs))",
"def venv():\n path = '/srv/addok/venv/'\n if not exists(path):\n with sudo(user='addok'):\n run(f'python3 -m venv {path}')\n pip('install pip -U')"
] | [
"0.70450777",
"0.64840436",
"0.6472613",
"0.6374757",
"0.63113153",
"0.62699497",
"0.625346",
"0.6180112",
"0.6106747",
"0.6089126",
"0.60671085",
"0.60669553",
"0.6066162",
"0.6043353",
"0.6011987",
"0.60108835",
"0.60072094",
"0.5946527",
"0.5939804",
"0.5906296",
"0.5902279",
"0.58537614",
"0.576895",
"0.575587",
"0.57303405",
"0.57303405",
"0.5728873",
"0.5706467",
"0.5652925",
"0.564788"
] | 0.7810384 | 0 |
build the queue of packages to install. | def build_queue(update_info, dependencies, plugin_archive):
install_queue = {}
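    # maps each target venv's pip command to the list of package entries to (re)install there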
install_queue.setdefault(
update_info.get(UPDATE_CONFIG_NAME).get("pip_command", DEFAULT_OPRINT_VENV), []
).append(
{
"name": PLUGIN_NAME,
"archive": plugin_archive,
"target": '',
}
)
print("dependencies - {}".format(dependencies))
if dependencies:
for dependency in dependencies:
plugin_config = update_info.get(UPDATE_CONFIG_NAME)
plugin_dependencies_config = plugin_config.get("dependencies")
dependency_config = plugin_dependencies_config.get(dependency["name"])
            # fail if the requirements file lists a dependency that the cloud config does not know about
            if dependency_config is None:
raise RuntimeError(
"no update info for dependency {}".format(dependency["name"])
)
            # override the dependency version from the dependencies file with the one from the cloud config
if dependency_config.get("version"):
version_needed = dependency_config.get("version")
else:
version_needed = dependency.get("version")
if dependency_config.get("pip"):
archive = dependency_config["pip"].format(
target_version="v{version}".format(version=version_needed),
)
else:
raise RuntimeError(
"pip not configured for {}".format(dependency["name"])
)
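            # only queue the dependency if the installed version differs from the one we need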
installed_version = get_version_of_pip_module(
dependency["name"],
dependency_config.get("pip_command", DEFAULT_OPRINT_VENV),
)
if installed_version != version_needed:
install_queue.setdefault(
dependency_config.get("pip_command", DEFAULT_OPRINT_VENV), []
).append(
{
"name": dependency["name"],
"archive": archive,
"target": version_needed,
}
)
else:
print(
"skip dependency {} as the target version {} is already installed".format(
dependency["name"], version_needed
)
)
return install_queue | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detect_install_queue(env, install_queue=[]):\n\n _env = env()\n if _env.done():\n print \"[info]\", env.__name__, \"is already done.\"\n return install_queue\n else:\n if env not in install_queue:\n install_queue.insert(0, env)\n\n requires = _env.requires()\n if not isinstance(requires, list):\n requires = [requires]\n for required_env in requires:\n if isinstance(required_env, basestring):\n required_env = lui[required_env]\n detect_install_queue(required_env, install_queue)\n\n return install_queue",
"def install(self):\n if self._skip_dependency_check:\n self._flatten_dependency_tree()\n else:\n self._build_dependency_tree()\n with ThreadPoolExecutor(max_workers=self._threads) as executor:\n for level in self._dependency_tree:\n for dependency in level:\n executor.submit(self._install_package, (dependency))",
"def install_wheels(install_queue):\n if not isinstance(install_queue, dict):\n raise RuntimeError(\"install queue is not a dict\")\n\n for venv, packages in install_queue.items():\n tmp_folder = os.path.join(PIP_WHEEL_TEMP_FOLDER, re.search(r\"\\w+((?=\\/venv)|(?=\\/bin))\", venv).group(0))\n pip_args = [\n \"install\",\n \"--disable-pip-version-check\",\n \"--upgrade\", # Upgrade all specified packages to the newest available version. The handling of dependencies depends on the upgrade-strategy used.\n \"--find-links={}\".format(\n tmp_folder\n ), # If a URL or path to an html file, then parse for links to archives such as sdist (.tar.gz) or wheel (.whl) files. If a local path or file:// URL that's a directory, then look for archives in the directory listing. Links to VCS project URLs are not supported.\n ]\n for package in packages:\n pip_args.append(\n \"{package}\".format(\n package=package[\"name\"]\n )\n )\n\n returncode, exec_stdout, exec_stderr = get_pip_caller(venv, _logger).execute(\n *pip_args\n )\n if returncode != 0:\n raise exceptions.UpdateError(\n \"Error while executing pip install\", (exec_stdout, exec_stderr)\n )",
"def build_wheels(build_queue):\n try:\n if not os.path.isdir(PIP_WHEEL_TEMP_FOLDER):\n os.mkdir(PIP_WHEEL_TEMP_FOLDER)\n except OSError as e:\n raise RuntimeError(\"can't create wheel tmp folder {} - {}\".format(PIP_WHEEL_TEMP_FOLDER, e))\n\n for venv, packages in build_queue.items():\n tmp_folder = os.path.join(PIP_WHEEL_TEMP_FOLDER, re.search(r\"\\w+((?=\\/venv)|(?=\\/bin))\", venv).group(0))\n if os.path.isdir(tmp_folder):\n try:\n os.system(\"sudo rm -r {}\".format(tmp_folder))\n except Exception as e:\n raise RuntimeError(\"can't delete pip wheel temp folder {} - {}\".format(tmp_folder, e))\n\n pip_args = [\n \"wheel\",\n \"--disable-pip-version-check\",\n \"--wheel-dir={}\".format(\n tmp_folder\n ), # Build wheels into <dir>, where the default is the current working directory.\n \"--no-dependencies\", # Don't install package dependencies.\n ]\n for package in packages:\n if package.get(\"archive\"):\n pip_args.append(package.get(\"archive\"))\n else:\n raise RuntimeError(\"Archive not found for package {}\".format(package))\n\n returncode, exec_stdout, exec_stderr = get_pip_caller(venv, _logger).execute(\n *pip_args\n )\n if returncode != 0:\n raise exceptions.UpdateError(\n \"Error while executing pip wheel\", (exec_stdout, exec_stderr)\n )",
"def add_packages ( self, add_method ):\n addstats = self.repo_stats.queue_time\n for repo in self.repos:\n addstats.begin ( repo.name )\n self._queue_packages_from_repo ( repo, add_method )\n addstats.end ( repo.name )",
"def _push_queue(self):\n\n self.add_cons_vars(self._var_queue, sloppy=self.sloppy)\n self.add_cons_vars(self._cons_queue, sloppy = self.sloppy)\n\n if len(self._var_queue) > 0:\n self.regenerate_variables()\n if len(self._cons_queue) > 0:\n self.regenerate_constraints()\n\n self._var_queue = list()\n self._cons_queue = list()",
"def build_packages(paths, threads=4):\n from threading import Thread, Event\n from time import sleep\n\n paths = paths.copy()\n workers = []\n for i in range(threads):\n event = Event()\n event.set()\n workers.append(dict(done=event, path=None))\n\n def build(done, path):\n logger.info('building %s', path)\n build_package(path)\n done.set()\n\n while False in [w['done'].is_set() for w in workers] or paths:\n for w in workers:\n if w['done'].is_set() and paths:\n w['done'].clear()\n w['path'] = paths.pop()\n Thread(target=build, kwargs=w).start()\n sleep(1)",
"def install_deps_temp(self):\n if self.distribution.install_requires:\n self.distribution.fetch_build_eggs(\n self.distribution.install_requires)\n if self.distribution.tests_require:\n self.distribution.fetch_build_eggs(self.distribution.tests_require)",
"def add_package(self, package):\n if package in config[\"ignore_packages\"]:\n return\n\n # Worker for dependenant builds\n worker = False\n needed = self.resolve(package, self.dependencies, graph=[])\n\n # If we want to build against a dependency, and it's not inn the list; abort\n if self.getProperty(\"build_with_dependency\") and \\\n self.getProperty(\"build_with_dependency\") not in needed:\n return\n\n if needed:\n worker = random.choice(config[\"localworkers\"])\n\n # Run over all needed packages and find their deps\n for i in needed:\n _needed = self.resolve(i, self.dependencies, graph=[])\n self.add_build(i, _needed, worker=worker)\n self.add_build(package, needed, worker=worker)",
"def fetch_build_queue(self, planet=None):\n print(\"Not implemented yet!\")",
"def buildAll(self, packages):\r\n return [self.build(package) for package in packages]",
"async def install(self) -> None:\n tasks = [asyncio.create_task(self.miners[miner].main_loop()) for miner in self.miners]\n await asyncio.gather(*tasks)",
"def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next",
"def build():\n\timport subprocess\n\tfrom os import listdir, getcwd\n\tfrom os.path import isfile, join\n\tonlyfiles = [f for f in listdir(getcwd()) if isfile(join(getcwd(), f))]\n\n\tif not 'requirements.txt' in onlyfiles:\n\t\traise SystemExit('File including depencencies not found. You will have to install them manually.')\n\n\tsubprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])\n\n\tprint('All dependencies installed successfully.\\nYou can run Simplex now!')",
"def run ( self ):\n q = self.write_queue\n write_kw = self.write_kw\n stats = self.stats\n catref = self.catref\n additions_dir = self.additions_dir\n\n while not q.empty() and not hasattr ( catref, 'RERAISE' ):\n try:\n pkg = q.get_nowait()\n # remove manifest writing from threaded writing since it's\n # single-threaded\n pkg.write (\n additions_dir = additions_dir.get_obj_subdir ( pkg ),\n stats = stats,\n **write_kw\n )\n stats.ebuild_count.inc (\n len ( list ( pkg.iter_packages_with_efile() ) )\n )\n except queue.Empty:\n break\n except ( Exception, KeyboardInterrupt ) as err:\n catref.logger.exception ( err )\n catref.RERAISE = sys.exc_info()",
"def packages():",
"def build_packages(self, board, args=None, **kwargs):\n self.cros_sdk('build packages',\n ['./build_packages', '--board', board],\n args, **kwargs)",
"def _install_packages(self, package_list):\n installed_count = 0\n for package in package_list:\n install = [\n self.mock,\n '-q',\n '--root=%s' % self.root,\n '--arch=%s' % self.arch,\n '--install',\n '%s' % package\n ]\n \"\"\"\n Lots of useless debugging\n @TODO: Remove\n \"\"\"\n print \"Installing Package %s\" % package\n output, errors = self._run_command(install)\n print output, errors\n installed_count += 1\n \"\"\"\n Lots of useless debugging\n @TODO: Remove\n \"\"\"\n print output\n print errors",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def _queue_packages_from_repo ( self, repo, add_method ):\n if not repo.ready():\n if self.use_broken_repos:\n # warn and continue\n pass\n else:\n # repo cannot be used\n self.logger.warning (\n \"ignoring repo {!r} (sync_status={:d}).\".format (\n repo.name, repo.sync_status\n )\n )\n return False\n\n for p in repo.scan_distdir ( is_package=self._pkg_filter ):\n self.logger.debug (\n \"adding package {p} from repo {r}\".format ( p=p, r=repo )\n )\n self.repo_stats.package_file_found ( repo )\n add_method ( p )",
"def _setup():\n for item in dominos:\n queue.append([item])",
"def sort_packages(self) -> None:\n self.recommended_packages = []\n self.required_packages = []\n for package in self.repository_packages:\n try:\n output = self.guest.execute(Command('rpm', '-q', package), silent=True)\n assert output.stdout\n self.debug(f\"Package '{output.stdout.strip()}' already installed.\")\n except tmt.utils.RunError:\n if self.skip_missing:\n self.recommended_packages.append(package)\n else:\n self.required_packages.append(package)",
"def pre_install(self, installable_pkgs):\n pass",
"def fill_packages(layer, prereqs):\n # Create an origin string to record notices\n origin_str = \"Inventory Results\"\n # For every indicator that exists on the filesystem, inventory the packages\n for binary in dcom.get_existing_bins(prereqs.host_path):\n set_layer_os(layer, origin_str, binary)\n prereqs.binary = binary\n listing = command_lib.get_base_listing(binary)\n pkg_dict, invoke_msg, warnings = collect.collect_list_metadata(\n listing, prereqs, True)\n # processing for debian copyrights\n if listing.get(\"pkg_format\") == \"deb\":\n pkg_dict[\"pkg_licenses\"] = com.get_deb_package_licenses(\n pkg_dict[\"copyrights\"])\n if invoke_msg:\n logger.error(\"Script invocation error. Unable to collect some\"\n \"metadata.\")\n if warnings:\n logger.warning(\"Some metadata may be missing.\")\n bundle.fill_pkg_results(layer, pkg_dict)\n com.remove_duplicate_layer_files(layer)",
"def packages(self):\n return []",
"def run():\n ftpd_thread = mini_buildd.misc.run_as_thread(\n mini_buildd.ftpd.run,\n name=\"ftpd\",\n bind=get().model.ftpd_bind,\n queue=get().incoming_queue)\n\n builder_thread = mini_buildd.misc.run_as_thread(\n mini_buildd.builder.run,\n name=\"builder\",\n daemon_=get())\n\n while True:\n event = get().incoming_queue.get()\n if event == \"SHUTDOWN\":\n break\n\n try:\n LOG.info(\"Status: {0} active packages, {1} changes waiting in incoming.\".\n format(len(get().packages), get().incoming_queue.qsize()))\n\n changes = None\n changes = mini_buildd.changes.Changes(event)\n\n if changes.type == changes.TYPE_BREQ:\n # Build request: builder\n\n def queue_buildrequest(event):\n \"\"\"Queue in extra thread so we don't block here in case builder is busy.\"\"\"\n get().build_queue.put(event)\n mini_buildd.misc.run_as_thread(queue_buildrequest, name=\"build queuer\", daemon=True, event=event)\n\n else:\n # User upload or build result: packager\n mini_buildd.packager.run(\n daemon=get(),\n changes=changes)\n\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes file\", e)\n\n # Try to notify\n try:\n with mini_buildd.misc.open_utf8(event, \"r\") as body:\n subject = \"INVALID CHANGES: {c}: {e}\".format(c=event, e=e)\n get().model.mbd_notify(subject, body.read())\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes notify failed\", e)\n\n # Try to clean up\n try:\n if changes:\n changes.remove()\n else:\n os.remove(event)\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes cleanup failed\", e)\n\n finally:\n get().incoming_queue.task_done()\n\n get().build_queue.put(\"SHUTDOWN\")\n mini_buildd.ftpd.shutdown()\n builder_thread.join()\n ftpd_thread.join()\n\n # keyrings.close() is not called implicitly; this leaves tmp files around.\n # There should be a nicer way, really...\n try:\n get().keyrings.close()\n except BaseException:\n pass",
"def run_update():\n\n args = _parse_arguments()\n\n # get dependencies\n dependencies = get_dependencies(args.folder)\n\n # get update config of dependencies\n update_info = get_update_info()\n\n install_queue = build_queue(\n update_info, dependencies, args.archive\n )\n\n print(\"install_queue\", install_queue)\n if install_queue is not None:\n build_wheels(install_queue)\n install_wheels(install_queue)",
"def install_packages():\n with open(\"requirements.txt\", \"w\") as requirements_file:\n subprocess.run([\"pipenv\", \"lock\", \"-r\"], stdout=requirements_file)\n\n subprocess.run(\n [\"pip\", \"install\", \"-r\", \"requirements.txt\", \"--no-deps\", \"-t\", BUILD_DIR]\n )",
"def _install_dependencies(self):\n\n requirements_file = self.app_directory.joinpath('requirements.txt')\n\n package_copy_required = False\n if requirements_file.exists():\n cmd = [\n sys.executable,\n '-m',\n 'pip',\n 'install',\n '-r',\n str(requirements_file),\n '-t',\n str(self.build_directory),\n ]\n package_copy_required = True\n else:\n cmd = [\n sys.executable,\n '-m',\n 'pip',\n 'install',\n '.',\n '-t',\n str(self.build_directory),\n ]\n\n logger.debug('Running subprocess cmds: %s', cmd)\n\n try:\n _ = subprocess.run(cmd, check=True)\n except Exception:\n logger.error('Pip failed to install the app using cmd=[%s].', cmd)\n raise\n\n if package_copy_required:\n shutil.copytree(\n self.package_dir, self.build_directory.joinpath(self.package_name)\n )",
"def get_build_queue(self, limit=1000):\n # NOTE: This method is not exposed on TheOracle as it is only used by\n # TheArchitect task\n with self._conn.begin():\n return {\n abi_tag: [\n (row.package, row.version)\n for row in rows\n ]\n for abi_tag, rows in groupby(\n self._conn.execution_options(stream_results=True).\\\n execute(\"SELECT abi_tag, package, version \"\n \"FROM get_build_queue(%s)\", (limit,)),\n key=attrgetter('abi_tag')\n )\n }"
] | [
"0.64300483",
"0.6317907",
"0.60502356",
"0.60038203",
"0.5953069",
"0.5949427",
"0.5947298",
"0.5918809",
"0.5888587",
"0.58734226",
"0.58508694",
"0.5847698",
"0.5825394",
"0.577823",
"0.5763402",
"0.5737082",
"0.5720837",
"0.5693326",
"0.5660723",
"0.5659705",
"0.56068695",
"0.55902475",
"0.5580416",
"0.5561654",
"0.5555687",
"0.5532733",
"0.552701",
"0.55008143",
"0.54993165",
"0.5496072"
] | 0.6485311 | 0 |
download the archive of the Plugin and copy the dependencies and update script into the working directory. | def loadPluginTarget(archive, folder):
# download target repo zip
req = retryget(archive)
filename = archive.split("/")[-1]
zip_file_path = os.path.join(folder, filename)
try:
with open(zip_file_path, "wb") as output_file:
output_file.write(req.content)
except IOError:
raise RuntimeError(
"Could not save the zip file to the working directory {}".format(folder)
)
# unzip repo
plugin_extracted_path = os.path.join(folder, UPDATE_CONFIG_NAME)
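    # the downloaded archive is expected to extract into a "<repo>-<version>" folder; strip a leading "v" from the tag to rebuild that name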
plugin_extracted_path_folder = os.path.join(
plugin_extracted_path,
"{repo_name}-{target}".format(
repo_name=REPO_NAME, target=re.sub(r"^v", "", filename.split(".zip")[0])
),
)
try:
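        # extract directly from the in-memory download instead of re-reading the zip file saved above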
plugin_zipfile = zipfile.ZipFile(BytesIO(req.content))
plugin_zipfile.extractall(plugin_extracted_path)
plugin_zipfile.close()
except (zipfile.BadZipfile, zipfile.LargeZipFile) as e:
raise RuntimeError("Could not unzip plugin repo - error: {}".format(e))
# copy new dependencies to working directory
try:
shutil.copy2(
os.path.join(
plugin_extracted_path_folder, MAIN_SRC_FOLDER_NAME, "dependencies.txt"
),
os.path.join(folder, "dependencies.txt"),
)
except IOError:
raise RuntimeError("Could not copy dependencies to working directory")
# copy new update script to working directory
try:
shutil.copy2(
os.path.join(
plugin_extracted_path_folder,
MAIN_SRC_FOLDER_NAME,
"scripts/update_script.py",
),
os.path.join(folder, "update_script.py"),
)
except IOError:
raise RuntimeError("Could not copy update_script to working directory")
return zip_file_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download(self):\n opsys, machine = get_platform()\n _platform = f\"{opsys}_{machine}\"\n\n plugin_dir = f\"{self._temp_dir}/terraform-plugins\"\n\n if not os.path.isdir(plugin_dir):\n os.mkdir(plugin_dir)\n for name, details in self._plugins.items():\n uri = get_url(name, details)\n file_name = uri.split(\"/\")[-1]\n\n click.secho(\n f\"getting plugin: {name} version {details['version']} from {uri}\",\n fg=\"yellow\",\n )\n\n with urllib.request.urlopen(uri) as response, open(\n f\"{plugin_dir}/{file_name}\", \"wb\"\n ) as plug_file:\n shutil.copyfileobj(response, plug_file)\n with zipfile.ZipFile(f\"{plugin_dir}/{file_name}\") as zip_file:\n zip_file.extractall(f\"{plugin_dir}/{_platform}\")\n os.remove(f\"{plugin_dir}/{file_name}\")\n\n files = glob.glob(f\"{plugin_dir}/{_platform}/terraform-provider*\")\n for afile in files:\n os.chmod(afile, 0o755)\n filename = os.path.basename(afile)\n if self._tf_version_major >= 13:\n source = PluginSource(name, details)\n host_dir = os.path.join(plugin_dir, source.host)\n namespace_dir = os.path.join(host_dir, source.namespace)\n provider_dir = os.path.join(namespace_dir, name)\n version_dir = os.path.join(provider_dir, details[\"version\"])\n platform_dir = os.path.join(version_dir, _platform)\n os.makedirs(platform_dir, exist_ok=True)\n os.rename(afile, os.path.join(platform_dir, filename))\n else:\n os.rename(afile, f\"{plugin_dir}/{filename}\")\n\n click.secho(f\"plugin installed to: {plugin_dir}/{_platform}/\", fg=\"yellow\")",
"def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)",
"def do_install(self, url, name, show_install_notes=True):\n data = self.get_json(url)\n if name in data:\n utils.makedirs(self.output_dir)\n url = data[name]\n LOGGER.info(\"Downloading '{0}'\".format(url))\n try:\n zip_data = requests.get(url).content\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n zip_data = requests.get(url).content\n\n zip_file = io.BytesIO()\n zip_file.write(zip_data)\n LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir))\n utils.extract_all(zip_file, self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n else:\n LOGGER.error(\"Can't find plugin \" + name)\n return 1\n\n reqpath = os.path.join(dest_path, 'requirements.txt')\n if os.path.exists(reqpath):\n LOGGER.warning('This plugin has Python dependencies.')\n LOGGER.info('Installing dependencies with pip...')\n try:\n subprocess.check_call((sys.executable, '-m', 'pip', 'install', '-r', reqpath))\n except subprocess.CalledProcessError:\n LOGGER.error('Could not install the dependencies.')\n print('Contents of the requirements.txt file:\\n')\n with io.open(reqpath, 'r', encoding='utf-8-sig') as fh:\n print(utils.indent(fh.read(), 4 * ' '))\n print('You have to install those yourself or through a '\n 'package manager.')\n else:\n LOGGER.info('Dependency installation succeeded.')\n\n reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')\n if os.path.exists(reqnpypath):\n LOGGER.warning('This plugin has third-party '\n 'dependencies you need to install '\n 'manually.')\n print('Contents of the requirements-nonpy.txt file:\\n')\n with io.open(reqnpypath, 'r', encoding='utf-8-sig') as fh:\n for l in fh.readlines():\n i, j = l.split('::')\n print(utils.indent(i.strip(), 4 * ' '))\n print(utils.indent(j.strip(), 8 * ' '))\n print()\n\n print('You have to install those yourself or through a package '\n 'manager.')\n\n req_plug_path = os.path.join(dest_path, 'requirements-plugins.txt')\n if os.path.exists(req_plug_path):\n LOGGER.info('This plugin requires other Nikola plugins.')\n LOGGER.info('Installing plugins...')\n plugin_failure = False\n try:\n with io.open(req_plug_path, 'r', encoding='utf-8-sig') as inf:\n for plugname in inf.readlines():\n plugin_failure = self.do_install(url, plugname.strip(), show_install_notes) != 0\n except Exception:\n plugin_failure = True\n if plugin_failure:\n LOGGER.error('Could not install a plugin.')\n print('Contents of the requirements-plugins.txt file:\\n')\n with io.open(req_plug_path, 'r', encoding='utf-8-sig') as fh:\n print(utils.indent(fh.read(), 4 * ' '))\n print('You have to install those yourself manually.')\n else:\n LOGGER.info('Dependency installation succeeded.')\n\n confpypath = os.path.join(dest_path, 'conf.py.sample')\n if os.path.exists(confpypath) and show_install_notes:\n LOGGER.warning('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')\n print('Contents of the conf.py.sample file:\\n')\n with io.open(confpypath, 'r', encoding='utf-8-sig') as fh:\n if self.site.colorful:\n print(pygments.highlight(fh.read(), PythonLexer(), TerminalFormatter()))\n else:\n print(fh.read())\n return 0",
"def download(plugin_id, output_path, logger, client, tenant_name):\n utils.explicit_tenant_name_message(tenant_name, logger)\n logger.info('Downloading plugin %s...', plugin_id)\n plugin_name = output_path if output_path else plugin_id\n progress_handler = utils.generate_progress_handler(plugin_name, '')\n target_file = client.plugins.download(plugin_id,\n output_path,\n progress_handler)\n logger.info('Plugin downloaded as %s', target_file)",
"def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"[email protected]:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return",
"def do_install(self, args):\n if args:\n try:\n plugin_name, file_path = args.split()[0], args.split()[1]\n except Exception as e:\n return print(display_messages(\"the argument is invalid please type ?install for more information\", error=True))\n if not path.isfile(file_path):\n return print(\n display_messages(\n \"the file {} not found \".format(file_path), error=True\n )\n )\n head, tail = os.path.split(file_path)\n dest = copyfile(file_path, \"{}/{}\".format(self.temp_path, tail))\n print(display_messages(\"copy content file .zip to {}\".format(dest), info=True))\n \n path_to_zip_file = tempfile.gettempdir() + \"/{}\".format(tail)\n with ZipFile(path_to_zip_file, \"r\") as zip_ref:\n zip_ref.extractall(tempfile.gettempdir())\n temp_path_file_extracted = \"{}/{}.py\".format(self.temp_path, plugin_name)\n print(\n display_messages(\n \"extracted files on : {}\".format(temp_path_file_extracted), info=True\n )\n )\n if not path.isfile(temp_path_file_extracted):\n return print(\n display_messages(\n \"the file {} not found \".format(temp_path_file_extracted), error=True\n )\n )\n temp_templates_path = \"{}/{}\".format(self.temp_path, plugin_name)\n if not path.isdir(temp_templates_path):\n return print(\n display_messages(\n \"the directory template {} not found \".format(temp_templates_path), error=True\n )\n )\n source = temp_path_file_extracted\n destination = \"{}/{}.py\".format(self.captiveflask_setup_path, plugin_name)\n dest = copyfile(source, destination)\n print(display_messages(\"copy content file to {}\".format(dest), info=True))\n\n copy_tree(\n temp_templates_path, C.user_config_dir + \"/config/templates/{}\".format(plugin_name)\n )\n print(\n display_messages(\n \"plugin {} install {}\".format( plugin_name,setcolor(\"sucessful\", color=\"green\")),\n info=True,\n )\n )\n return \n print(\n display_messages(\"unknown command: {} \".format(args), error=True)\n )",
"def download_plugin(name):\n uri = 'https://github.com/drupdates/' + name + '.git'\n plugins_dir = Utils.check_dir(os.path.join('~', '.drupdates', 'plugins'))\n if not bool(urlparse(uri).netloc):\n msg = (\"Error: {0} url, {1}, is not a valid url\").format(name, uri)\n raise DrupdatesError(20, msg)\n response = requests.get(uri)\n if response.status_code not in [200, 201]:\n msg = \"Plugin url {0} returned an invalid HTTP response code {1}\".format(uri, response.status_code)\n raise DrupdatesError(20, msg)\n try:\n Repo.clone_from(uri, os.path.join(plugins_dir, name.title()))\n except git.exc.GitCommandError as git_error:\n msg = \"Failed to clone the plugin repo\\n Error: {0}\".format(git_error)\n raise DrupdatesError(20, msg)\n else:\n plugins = Plugin.get_plugins()\n return plugins[name]",
"def run_update():\n\n args = _parse_arguments()\n\n # get dependencies\n dependencies = get_dependencies(args.folder)\n\n # get update config of dependencies\n update_info = get_update_info()\n\n install_queue = build_queue(\n update_info, dependencies, args.archive\n )\n\n print(\"install_queue\", install_queue)\n if install_queue is not None:\n build_wheels(install_queue)\n install_wheels(install_queue)",
"def __download(self):\n\n # Use the default repository if set to True\n if self.repository is True:\n self.repository = self.__default_repository\n\n if not self.repository and not self.url:\n tarball = 'ucx-{}.tar.gz'.format(self.__version)\n self.url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version,\n tarball)",
"def download_and_prepare(self):\n self._download_and_prepare()",
"def main():\n get_obofoundry(force_download=True)",
"def install():\n execute(generate)\n execute(upload)",
"def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()",
"def download(self, target: PluginJar, *, force: bool = False) -> bool:\n pass",
"def download_and_extract(self, package_name):\n self.download(package_name)\n self.extract(package_name)",
"def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()",
"def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None",
"def _install():\n download_file='http://www.ipol.im/pub/art/2015/136/inpaint_8.tgz'\n tools.download_and_extract(download_file) \n this_file_path=os.path.dirname(__file__)\n subprocess.call(' mkdir build; cd build; cmake ..; make', shell=True,cwd=exec_folder)",
"def test_remote_plugin(self):\n plugin_name = 'Slack'\n Plugin.download_plugin(plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')",
"def main():\n\n args = _parse_arguments()\n if args.call:\n if args.archive is None:\n raise RuntimeError(\n \"Could not run update archive is missing\"\n )\n run_update()\n else:\n\n folder = args.folder\n\n import os\n\n if not os.access(folder, os.W_OK):\n raise RuntimeError(\"Could not update, base folder is not writable\")\n\n update_info = get_update_info()\n archive = loadPluginTarget(\n update_info.get(UPDATE_CONFIG_NAME)\n .get(\"pip\")\n .format(target_version=args.target),\n folder,\n )\n\n # call new update script with args\n sys.argv = [\n \"--call=true\",\n \"--archive={}\".format(archive)\n ] + sys.argv[1:]\n try:\n result = subprocess.call(\n [sys.executable, os.path.join(folder, \"update_script.py\")] + sys.argv,\n stderr=subprocess.STDOUT,\n )\n except subprocess.CalledProcessError as e:\n print(e.output)\n raise RuntimeError(\"error code %s\", (e.returncode, e.output))\n\n if result != 0:\n raise RuntimeError(\"Error Could not update returncode - {}\".format(result))",
"def init(self):\n\n # init logfile\n self.logfile = self.parent.logfile\n \n if( self.mode == \"install\" ):\n # initialize download data\n if( not self.download.supportHEAD and self.version == \"HEAD\" ):\n self.abort( \"sorry, HEAD version of this package cannot be installed!! \" \\\n + \"Please choose a release version...\" )\n if( not self.download.type in self.download.supportedTypes ):\n #if len(self.download.supportedTypes) != 1:\n # print \"*** WARNING: \"+self.name+\" download.type forced from \\'\"+self.download.type \\\n # + \"\\' to \\'\" + self.download.supportedTypes[0] + \"\\'\"\n self.download.type=self.download.supportedTypes[0]\n if( self.download.type == \"cvs\" or self.download.type == \"ccvssh\" ):\n if( not isinPath(\"cvs\") ):\n self.abort( \"cvs not found!!\" )\n if( self.download.type == \"cvs\" ):\n self.download.accessmode = \"pserver\"\n if( self.download.type == \"ccvssh\" ):\n if( not isinPath(\"ccvssh\") ):\n self.abort( \"ccvssh not found!!\" )\n self.download.env.setdefault('CVS_RSH', 'ccvssh')\n self.download.accessmode = \"ext\"\n\n # if CVSROOT not set by user generate a default one\n self.download.env.setdefault('CVSROOT', \":\" + self.download.accessmode + \":\" \\\n + self.download.username + \":\" + self.download.password \\\n + \"@\" + self.download.server + \":/\" + self.download.root )\n\n elif( self.download.type == \"wget\" ):\n if( not isinPath(self.download.cmd) ):\n self.abort( self.download.cmd + \" not found on your system!!\" )\n if( not isinPath(\"tar\") ):\n self.abort( \"tar not found on your system!!\" )\n\n # if download url not set by user generate a default one\n if( len(self.download.url) == 0 ):\n if Version( self.version ) == 'HEAD':\n dir='trunk'\n elif '-pre' in self.version or '-dev' in self.version or '-exp' in self.version:\n dir='branches/%s' % self.version\n else:\n dir='tags/%s' % self.version\n \n self.download.url = \"http://svnsrv.desy.de/viewvc/%s/%s/%s?view=tar\" % ( self.download.root, self.download.project, dir )\n\n elif ( self.download.type[:3] == \"svn\" ):\n if( not isinPath(\"svn\") ):\n self.abort( \"svn not found on your system!!\" )\n\n # if svnurl not set by user generate a default one\n if( len(self.download.svnurl) == 0 ):\n # initialize svn settings for desy\n self.download.accessmode = \"https\"\n self.download.server = \"svnsrv.desy.de\"\n #if( self.download.username == \"anonymous\" ):\n # self.download.root = \"public/\" + self.download.root\n if( self.download.type == 'svn-desy' ):\n self.download.root = \"desy/\" + self.download.root\n elif( self.download.type == 'svn-p12cert' ):\n self.download.root = \"svn/\" + self.download.root\n else:\n self.download.root = \"public/\" + self.download.root\n\n self.download.svnurl = \"%s://%s/%s/%s/\" % (self.download.accessmode,self.download.server,self.download.root,self.download.project)\n\n if Version( self.version ) == 'HEAD': \n self.download.svnurl += 'trunk'\n elif '-pre' in self.version or '-dev' in self.version or '-exp' in self.version:\n self.download.svnurl += 'branches/'+self.version\n else:\n self.download.svnurl += 'tags/'+self.version\n # git access to repository\n elif ( self.download.type[:3] == \"git\" ):\n if( not isinPath(\"git\") ):\n self.abort( \"git not found on your system!!\" )\n\n # if svnurl not set by user generate a default one\n if( len(self.download.svnurl) == 0 ):\n # initialize svn settings for github\n self.download.accessmode = \"https\"\n self.download.server = \"github.com\"\n self.download.root = 
self.download.project\n self.download.svnurl = \"%s://%s/%s/%s/\" % (self.download.accessmode,self.download.server,self.download.root,self.download.project)\n else:\n self.abort( \"download type \" + self.download.type + \" not recognized!!\" )",
"def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)",
"def cli(date, path, mission):\n download.main(path, mission, date)",
"def download(self, args):\n\n\t\t\"\"\" Default argument for Architecture \"\"\"\n\t\tif len(args) >= 4:\n\t\t\tarch = args[3]\n\t\telse:\n\t\t\tarch = platform.processor()\n\n\t\t\"\"\" Default argument for Version \"\"\"\n\t\tif len(args) >= 3:\n\t\t\tif args[2] == \"latest\":\n\t\t\t\tversion = \"Latest\"\n\t\t\telse:\n\t\t\t\tversion = args[2]\n\t\telse:\n\t\t\tversion = \"Latest\"\n\n\t\t\"\"\" Find package path from package list, based on prev. arguments \"\"\"\n\t\tif len(args) >= 2:\n\t\t\tpackage = args[1]\n\t\t\tfilename = False\n\t\t\t\n\t\t\tversions = self.master.Dump(package)\n\t\t\tfor d in versions:\n\t\t\t\tif d[\"Version\"] == version:\n\t\t\t\t\tif d[\"Version\"] != \"Latest\" and d[\"Architecture\"] == arch:\n\t\t\t\t\t\tfilename = d[\"Filename\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor e in versions:\n\t\t\t\t\t\t\tif e[\"Version\"] == d[\"LatestVersion\"] and e[\"Architecture\"] == arch:\n\t\t\t\t\t\t\t\tfilename = e[\"Filename\"]\n\t\t\t\t\t\t\t\tversion = d[\"LatestVersion\"];\n\t\t\tif not filename:\n\t\t\t\tself.write_line(\"ERROR XXX: Package not found.\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Find chunks to download \"\"\"\n\t\t\tid = 0\n\t\t\tto_download = False\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tprint(f.path.replace(\"packages/\", \"\") + \" = \" + filename);\n\t\t\t\tif f.path.replace(\"packages/\", \"\") == filename:\n\t\t\t\t\tto_download = f\n\t\t\t\t\tbreak;\n\t\t\t\tid += 1\n\t\t\tif not to_download:\n\t\t\t\tprint(\"ERROR XXX: dunno\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Set chunks priority to 7? (download max priority) \"\"\"\n\t\t\tpr = self.torrent_info.map_file(id, 0, to_download.size);\n\t\t\tn_pieces = math.ceil(pr.length / self.torrent_info.piece_length() + 1);\n\n\t\t\tfor i in range(self.torrent_info.num_pieces()):\n\t\t\t\tif i in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\tself.handler.piece_priority(i, 7)\n\n\n\t\t\t\"\"\" Print download of package status \"\"\"\n\t\t\tself.print_status(id, pr, package, version, filename)\n\t\t\t\t\n\t\t\t\"\"\" Check the server for hash validation \"\"\"\n\t\t\tif self.valid_tpkg_file(to_download.path):\n\t\t\t\tself.write_line(\"DONE {0} {1} {2} {3}\".format(package, version, arch, self.config[\"daemon\"][\"rootdir\"] + \"/\" + to_download.path).replace('//', '/'))\n\t\t\telse:\n\t\t\t\tself.write_line(\"ERROR XXX: Hash verification failed.\")\n\t\telse:\n\t\t\tself.write_line(\"INVALID ARGUMENTS\");",
"def download_and_write_script(server_script, install_dir, SERVER_URL):\n script_url = \"%s/preflight-v2/get-script/%s/%s/\" % (SERVER_URL, server_script['plugin'], server_script['filename'])\n stdout, stderr = curl(script_url)\n if stderr:\n print 'Error received downloading script %s:' % script_url\n print stderr\n sys.exit(1)\n\n script = open (os.path.join(install_dir, server_script['plugin'], server_script['filename']), 'w')\n try:\n data = json.loads(stdout)\n except:\n print 'Did not receive valid JSON when requesting script content.'\n sys.exit(1)\n\n script.write(data[0]['content'])\n script.close()\n os.chmod(os.path.join(install_dir, server_script['plugin'], server_script['filename']), 0755)",
"def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))",
"def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))",
"def update(self, purge = True): # need to complete\n\t\tif purge:\n\t\t\tself.__download()\n\t\t\tself.__write()\n\t\telse:\n\t\t\tpass",
"def download_http(self, url):\n\n # Set things up.\n # ==============\n\n out = None\n headers = {}\n if (url.username is not None) and (url.password is not None):\n tmp = base64.b64encode(':'.join([url.username, url.password]))\n headers['Authorization'] = \"Basic %s\" % tmp\n\n\n # Toe the waters.\n # ===============\n # We start with an HTTP HEAD request to check the status.\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"HEAD\", url.path, '', headers)\n r = conn.getresponse()\n conn.close()\n if self.verbose:\n print >> sys.stderr, url, r.status, ''\n\n\n # Bail.\n # =====\n # Short-cut when we just care whether it's a package.\n\n if url.path.endswith('/'):\n out = r.status == 200\n\n\n elif r.status == 200:\n\n # Wade in.\n # ========\n # If the status is positive we check to see if we've already\n # downloaded the latest copy.\n\n etag = r.getheader('etag', '')\n lm = r.getheader('last-modified', '')\n key = sha.new(str(url) + etag + lm).hexdigest()\n\n if not self.cachedir:\n raise ValueError(\"netimp.importer.cachedir not set\")\n if not os.path.isdir(self.cachedir):\n raise IOError( \"netimp.importer.cachedir not found \"\n + \"(%s)\" % self.cachedir\n )\n\n path = join(self.cachedir, key)\n if os.path.isfile(path):\n out = open(path, 'rb')\n else:\n\n # Dive in!\n # ========\n # We don't have this module locally yet: download it for real.\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"GET\", url.path, '', headers)\n r = conn.getresponse()\n if r.status == 200: # just in case!\n fp = open(path, 'w+b')\n fp.write(r.read())\n fp.flush()\n fp.close()\n out = open(path, 'rb')\n conn.close()\n\n return out",
"def deploy():\n build()\n copy()\n install()"
] | [
"0.65189177",
"0.6304876",
"0.6303991",
"0.62665415",
"0.62513655",
"0.62227494",
"0.62170863",
"0.6120098",
"0.61069363",
"0.6081916",
"0.5874516",
"0.5856715",
"0.5817603",
"0.5802318",
"0.5788798",
"0.57426304",
"0.57387817",
"0.566455",
"0.5655254",
"0.5639781",
"0.56358075",
"0.5624523",
"0.5565075",
"0.5544118",
"0.552666",
"0.5513817",
"0.5502642",
"0.5498721",
"0.54791015",
"0.5464725"
] | 0.7154758 | 0 |
Loads the dependencies.txt and the update_script of the given target and executes the new update_script. | def main():
args = _parse_arguments()
if args.call:
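        # child invocation: the downloaded archive is already in place, so just apply it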
if args.archive is None:
raise RuntimeError(
"Could not run update archive is missing"
)
run_update()
else:
folder = args.folder
import os
if not os.access(folder, os.W_OK):
raise RuntimeError("Could not update, base folder is not writable")
update_info = get_update_info()
archive = loadPluginTarget(
update_info.get(UPDATE_CONFIG_NAME)
.get("pip")
.format(target_version=args.target),
folder,
)
# call new update script with args
sys.argv = [
"--call=true",
"--archive={}".format(archive)
] + sys.argv[1:]
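        # re-run this script through the freshly downloaded update_script.py in a child
        # interpreter; the child takes the --call branch and performs the actual update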
try:
result = subprocess.call(
[sys.executable, os.path.join(folder, "update_script.py")] + sys.argv,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as e:
print(e.output)
raise RuntimeError("error code %s", (e.returncode, e.output))
if result != 0:
raise RuntimeError("Error Could not update returncode - {}".format(result)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_update():\n\n args = _parse_arguments()\n\n # get dependencies\n dependencies = get_dependencies(args.folder)\n\n # get update config of dependencies\n update_info = get_update_info()\n\n install_queue = build_queue(\n update_info, dependencies, args.archive\n )\n\n print(\"install_queue\", install_queue)\n if install_queue is not None:\n build_wheels(install_queue)\n install_wheels(install_queue)",
"def up_to_date(self, gyp_file, target=None, **kw):\n raise NotImplementedError",
"def update_target(self):\n pass",
"def run(self):\n USER.info('%s: Checking For Updates', self.recipe.name)\n cur_hash = pakit.conf.IDB[self.recipe.name]['hash']\n if cur_hash == self.recipe.repo.src_hash:\n return\n\n try:\n self.save_old_install()\n InstallTask(self.recipe).run()\n USER.info('%s: Deleting Old Install', self.recipe.name)\n Command('rm -rf ' + self.back_dir).wait()\n except Exception as exc: # pylint: disable=broad-except\n logging.error(exc)\n self.restore_old_install()",
"def update_data(update_method):\n log.debug('Starting update')\n cmd = ['/usr/bin/python', wf.workflowfile('update.py')]\n if update_method == 'force':\n cmd.append('--update')\n cmd.append('force')\n\n # Update projects data\n log.debug('Run update command : {}'.format(cmd))\n run_in_background('update', cmd)\n\n return 0",
"def _remoteScript(self, source_script):",
"def update_deps(self, **kwargs):\n enzi = kwargs.get('enzi', self.enzi)\n if not isinstance(enzi, Enzi):\n return\n self.info('start updating')\n enzi.init(update=True)\n self.info('updating finished')",
"def run_update():\n parser = ArgumentParser()\n subparsers = parser.add_subparsers(title=\"Commands\",\n help=\"Use <command> --help for more information about command.\")\n\n parser_result = subparsers.add_parser('result',\n description=\"Changes metadata of result file(s).\",\n help=\"Change result file metadata.\")\n parser_result.add_argument('name',nargs='?',default=None,help=\"Results file or directory with result files\")\n parser_result.add_argument('-a','--arch',help=\"Update result(s): set ARCH\")\n parser_result.add_argument('-p','--person',help=\"Update result(s): set PERSON\")\n parser_result.add_argument('-s','--sequence',type=int,help=\"Update result(s): set SEQUENCE NUMBER\")\n parser_result.set_defaults(func=script_runner.cmd_update_results)\n\n parser_repository = subparsers.add_parser('repository',\n description=\"Update local test repository from Firebird project Subversion repository.\",\n help=\"Update test repository.\")\n parser_repository.set_defaults(func=script_runner.cmd_update_repository)\n\n args = parser.parse_args()\n args.func(args)",
"def load(ctx):\n if not is_owner(ctx.update):\n return\n global cmds\n cmds.load_ext(ctx.args[0], ctx.update)",
"def execute(self, targets):",
"def update(self):\n self.content = self.get_content()\n self.dependencies = self.content['requirements']['run']\n self.pythonversion = self.content['extra']['pythonversion']\n self.package_name = self.content['package']['name']",
"def _main(args):\n if args.files:\n _update_files()\n\n if args.templates:\n _update_template(args.template_definition)",
"def test_update_software_asset_install_script(self):\n pass",
"def run(self, target, filename, callback):\n self.target = target # import or export\n self.callback = callback # function to call when config gui is done\n\n # save file name\n # (the key where to store the file name depends\n # on the target)\n if self.target == self.TARGET_IMPORT:\n self.config[\"IMPORT_FILE\"] = filename\n elif self.target == self.TARGET_EXPORT:\n self.config[\"EXPORT_FILE\"] = filename\n self.save()\n\n # prepare and run gui\n self.texpathIndex = 0\n self.update_texpath_current()\n Draw.Register(self.gui_draw, self.gui_event, self.gui_button_event)",
"def run(self):\n self.update_repos()",
"def update_target(\n self,\n ) -> Callable[[cloud_deploy.UpdateTargetRequest], operations_pb2.Operation]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"update_target\" not in self._stubs:\n self._stubs[\"update_target\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/UpdateTarget\",\n request_serializer=cloud_deploy.UpdateTargetRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"update_target\"]",
"def _do_automatic_updates(self):\n from .updates import Updates\n\n for update_name in Updates.check_automatic_updates():\n print(\"Applying automatic update: {}\".format(update_name))\n Updates.do_update(update_name)",
"def _post_update_paths(self, **kwargs):\n\n files_updated = kwargs.get('files_updated', list())\n if not files_updated:\n return\n\n maya_utils.reload_textures(files_updated)\n\n # Dependencies are already reloaded during update paths process\n # maya_utils.reload_dependencies(files_updated)",
"def force_update():\n # TODO: IS THERE A WAY TO ONLY REFRESH FOR A GIVEN YEAR?\n # TODO: FIND A WAY TO DO THIS ASYNCHRONOUSLY\n print('Starting update...')\n # TODO: THIS IS A PRETTY BAD WORKAROUND. WE SHOULD FIND A WAY TO PROVIDE THE SCRIPTS WITH THE 'LANDTAGSWAHLDB' PACKAGE\n sql_path = pathlib.Path(current_app.instance_path).parent.parent / 'sql-scripts' / 'UpdateViews.sql'\n with open(sql_path) as sql_file:\n script = sql_file.read()\n db = db_context.get_db()\n db.run_script(script)\n db.commit()\n return 'Success'",
"def main():\n\n args = parse_args()\n if args.target:\n # Update on repo\n print('Processing: %s' % args.target)\n\n target_link = _target_link\n if 'forks' in args.target:\n target_link = _target_link_forks\n\n path = os.path.join(_base_path, args.target)\n if not path.endswith('.git'):\n path += '.git'\n\n if not os.path.isdir(path):\n print('Git repo: %s not found on disk' % path)\n\n hook = os.path.join(path, 'hooks', 'post-receive')\n if not is_valid_hook(hook, target_link) and not args.check:\n fix_link(hook, target_link)\n\n elif args.namespace:\n walk = False\n if args.namespace == 'forks':\n walk = True\n process_namespace(args.namespace, args.check, walk=walk)\n else:\n # Check all repos\n for namespace in namespaces:\n walk = False\n if namespace == 'forks':\n walk = True\n process_namespace(namespace, args.check, walk=walk)",
"def do__relative_load(self, arg=None):\n if arg:\n arg = arg.split(None, 1)\n targetname, args = arg[0], (arg[1:] or [''])[0]\n targetname = os.path.join(self.current_script_dir or '', targetname)\n self.do_load('%s %s' % (targetname, args))",
"def update_21(db, filename_persist, snapshots_dir, snapshots_reference_dir):\n text = \"\"\"\n test/test_label_changing.py\n test/test_batch2.py\n test/test_scalexy.py\n test/test_shader_examples.py\n \"\"\"\n candidates = doers.scripts_names_from_text(text, end_mark=':')\n checked_in, unknown, move_failed = hl.update_testrun__pass(db,\n filename_persist, candidates,\n snapshots_dir, snapshots_reference_dir) \n\n return checked_in, unknown, move_failed",
"def post_build_target(target_data, toolchain):\n ListenerManager.call(_target_post_build_manager, target_data, toolchain)",
"def test_make_update_script_direction(self):\n\n self.setup_model_params()\n self.write_file(self.first_model_path, self.base_source)\n self.write_file(self.second_model_path, self.base_source + self.model_source)\n\n source_script = self.pyscript.make_update_script_for_model(\n engine=self.engine,\n oldmodel=load_model('testmodel_first:meta'),\n model=load_model('testmodel_second:meta'),\n repository=self.repo_path,\n )\n\n self.assertTrue(0\n < source_script.find('upgrade')\n < source_script.find(\"['User'].create()\")\n < source_script.find('downgrade')\n < source_script.find(\"['User'].drop()\"))",
"def main():\n updater = VersionUpdater('PowerDNS-Admin')\n updater.run()",
"def refresh(self):\n self.update_from_file()\n self.update_from_env()",
"def loadPluginTarget(archive, folder):\n\n # download target repo zip\n req = retryget(archive)\n filename = archive.split(\"/\")[-1]\n zip_file_path = os.path.join(folder, filename)\n try:\n with open(zip_file_path, \"wb\") as output_file:\n output_file.write(req.content)\n except IOError:\n raise RuntimeError(\n \"Could not save the zip file to the working directory {}\".format(folder)\n )\n\n # unzip repo\n plugin_extracted_path = os.path.join(folder, UPDATE_CONFIG_NAME)\n plugin_extracted_path_folder = os.path.join(\n plugin_extracted_path,\n \"{repo_name}-{target}\".format(\n repo_name=REPO_NAME, target=re.sub(r\"^v\", \"\", filename.split(\".zip\")[0])\n ),\n )\n try:\n plugin_zipfile = zipfile.ZipFile(BytesIO(req.content))\n plugin_zipfile.extractall(plugin_extracted_path)\n plugin_zipfile.close()\n except (zipfile.BadZipfile, zipfile.LargeZipFile) as e:\n raise RuntimeError(\"Could not unzip plugin repo - error: {}\".format(e))\n\n # copy new dependencies to working directory\n try:\n shutil.copy2(\n os.path.join(\n plugin_extracted_path_folder, MAIN_SRC_FOLDER_NAME, \"dependencies.txt\"\n ),\n os.path.join(folder, \"dependencies.txt\"),\n )\n except IOError:\n raise RuntimeError(\"Could not copy dependencies to working directory\")\n\n # copy new update script to working directory\n try:\n shutil.copy2(\n os.path.join(\n plugin_extracted_path_folder,\n MAIN_SRC_FOLDER_NAME,\n \"scripts/update_script.py\",\n ),\n os.path.join(folder, \"update_script.py\"),\n )\n except IOError:\n raise RuntimeError(\"Could not copy update_script to working directory\")\n\n return zip_file_path",
"def update_rundoc(rundoc):\n from SoudanDB.utilities.utilities import get_hash_of_file\n rundoc_was_modified = False\n list_to_check = [ ( rundoc.raw_data_file_tier_0,\n make_tier1_from_tier0,\n rundoc.root_data_file_tier_1.pfn ), \n ( rundoc.root_data_file_tier_1, \n make_tier2_from_tier1,\n rundoc.output_data_file_tier_2.pfn ), \n ( rundoc.output_data_file_tier_2,\"\", \"\")]\n\n FNULL = None#open(\"/dev/null\",'w')\n for dict, program, dest in list_to_check:\n if not os.path.exists(dict.pfn): continue\n if not (dict.last_mod_time and \\\n os.path.getmtime(dict.pfn) <= time.mktime(dict.last_mod_time.timetuple())):\n rundoc_was_modified = True\n dict.last_mod_time = datetime.datetime.fromtimestamp(os.path.getmtime(dict.pfn))\n dict.md5hash = get_hash_of_file(dict.pfn)\n if program and (rundoc_was_modified and not os.path.exists(dest)):\n # Only run this if the file doesn't exist.\n # Check to see if is a py script, if so load the module\n basename = os.path.basename(program)\n if re.match(\".*\\.py\\Z\", basename):\n print \"Using python module %s, executing main(%s, %s) from module\" % \\\n (program, dict.pfn, dest) \n new_module = imp.load_source(basename[0:-3], program)\n if not hasattr(new_module, 'main'):\n print \"Imported module not well constructed, exiting\"\n break\n #new_module.main(dict.pfn, dest)\n \n else:\n print \"Running: %s %s %s\" % (program, dict.pfn, dest)\n return_value = 0\n #return_value = subprocess.call([program, dict.pfn, dest], \\\n # stdout=FNULL, stderr=FNULL, env=environment_vars) \n if return_value < 0:\n # interrupt was called, delete the processed file.\n print \"Interrupted on file %s, removing processed file: %s\" % (dict.pfn, dest) \n if os.path.exists(dest):\n os.unlink(dest)\n break\n rundoc_was_modified = True\n\n\n return (rundoc, rundoc_was_modified)",
"def run_script(self):\n pass",
"def update_target(target_info, temp_dir, images_dir, inventory, args):\n target_name = target_info.get(\"target\")\n target_sha256 = target_info.get(\"sha256_hash\")\n filename = target_info.get(\"filename\")\n temp_path = os.path.join(temp_dir, filename)\n # Add a trailing slash to make sure that urljoin handles things properly\n full_url = urljoin(args.base_url+'/', target_info.get(\"url\"))\n _, downloaded_size, downloaded_sha256 = download(\n images_url=full_url,\n filename=temp_path,\n buffer_size=args.buffer_size,\n print_progress=(_LOG_LEVEL <= _LOG_LEVELS.get(\"INFO\", 3))\n )\n if downloaded_size == 0:\n log(\"INFO\", \"Skipping target: {}\".format(target_name))\n return\n log(\"TRACE\", \"{} successfully downloaded ({} Bytes)\"\n .format(temp_path, downloaded_size))\n # If the SHA256 in the manifest has the value '0', this is a special case\n # and we just skip the verification step\n if target_sha256 == '0':\n log(\"DEBUG\", \"Skipping SHA256 check for {}.\".format(full_url))\n # If the check fails, print an error and don't unzip the file\n elif downloaded_sha256 != target_sha256:\n log(\"ERROR\", \"Downloaded SHA256 does not match manifest for {}!\"\n .format(full_url))\n return\n # Note: this skips the --keep option, so we'll never keep image packages\n # that fail the SHA256 checksum\n ## Now copy the contents to the final destination (the images directory)\n delete_from_inv(target_info, inventory, images_dir)\n if os.path.splitext(temp_path)[1].lower() == '.zip':\n archive_namelist = extract(\n temp_path,\n images_dir,\n args.test)\n if args.keep:\n # If the user wants to keep the downloaded archive,\n # save it to the images directory and add it to the inventory\n shutil.copy(temp_path, images_dir)\n archive_namelist.append(filename)\n else:\n archive_namelist = []\n shutil.copy(temp_path, images_dir)\n ## Update inventory\n inventory[target_name] = {\"repo_hash\": target_info.get(\"repo_hash\"),\n \"contents\": archive_namelist,\n \"filename\": filename}"
] | [
"0.6054727",
"0.5840034",
"0.5559024",
"0.546411",
"0.5432003",
"0.5430221",
"0.5367228",
"0.5366944",
"0.53578764",
"0.53544587",
"0.5335627",
"0.5335201",
"0.5312373",
"0.5283287",
"0.52739245",
"0.5267964",
"0.52657044",
"0.5254667",
"0.52486074",
"0.52236706",
"0.52010816",
"0.5200225",
"0.5192251",
"0.5163069",
"0.51595426",
"0.51592475",
"0.5152597",
"0.51510876",
"0.5133978",
"0.5119712"
] | 0.611467 | 0 |
Check if the content of a cell only has numbers. | def is_number(self, cell):
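        # a single NAME (word) token means the cell is not purely numeric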
for token in self._cell_tokenizer.tokenize(cell.get_text()):
if self._get_token_type(token) == 'NAME':
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def must_contain_digit(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n return not bool(re.search(\"\\d\", str(cell)))",
"def must_be_numeric(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n # If it's not nan, check it's a number\n return pd.isna(pd.to_numeric(str(cell), errors=\"coerce\"))",
"def is_numeric(rows, col):\n return rows.dtypes.values[col] in numerics",
"def contains_only_digit_period(cell):\n # Check if empty\n if check_empty(cell):\n return True\n return not bool(re.match(\"^[\\d\\.]+$\", str(cell)))",
"def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)",
"def isnumeric(self):\n return isnumeric(self)",
"def check_numeric(data, col):\n from pandas.api.types import is_numeric_dtype\n try:\n if is_numeric_dtype(data[col]):\n logging.info(f' {col} is numeric.')\n return data\n else:\n numdata = (data\n .drop([col], axis=1)\n .join(data[col].apply(pandas.to_numeric, errors='coerce'))\n )\n numcol = numdata[col].isnull().values().sum()\n logging.warning(f' %s rows in %s are non-numeric' % (numcol, col,))\n logging.warning(f' {col} is tested by coercing into numeric values.')\n return numdata\n except:\n logging.error(f' the format of %s is not testable.' % (col,))\n print(data.head(n=2))\n sys.exit(1)",
"def is_numeric(self) -> bool:\n return False",
"def is_numberish(G):\n return True",
"def is_number(self) -> bool:\n return False",
"def isnum(self, x):\n\n return x in '1234567890.-'",
"def number_only(number):\n number = number.replace(' ', '')\n result = re.match(r\"^[0-9]+$\", number)\n if not result:\n return True\n return False",
"def is_number(G):\n return True",
"def checknum(val):\n\n if len(val) == 0:\n return False\n\n for i in range(len(val)):\n if not val[i].isdigit():\n return False\n\n return True",
"def validate_numeric_annots(self):\n valid = True\n for annot_header in self.file.columns[1:]:\n annot_name = annot_header[0]\n annot_type = annot_header[1]\n column_dtype = self.file.dtypes[annot_header]\n if annot_type == \"numeric\" and column_dtype == \"object\":\n valid = False\n msg = f\"Numeric annotation, {annot_name}, contains non-numeric data (or unidentified NA values)\"\n self.store_validation_issue(\n \"error\", msg, \"content:invalid-type:not-numeric\"\n )\n return valid",
"def isNumeric(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.int32 or col.dtype == np.int64 or col.dtype == np.float32 or col.dtype == np.float64",
"def is_number(self,s):\n try:\n float(s.replace(\" \", \"\"))\n return True\n except ValueError:\n return False",
"def not_a_num(val):\n if math.isnan(val):\n return False\n else:\n return True",
"def must_be_alphanumeric_space_period(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n # If it's not nan, check it's a number\n return not bool(re.match(r\"^[a-zA-Z .0-9]+$\", str(cell)))",
"def is_numeric(x):\n if isinstance(x, NUMBER_TYPES):\n return True\n elif isinstance(x, np.ndarray):\n return x.dtype.type not in NUMPY_NON_TYPES\n return False",
"def is_number(text):\n return text.lower() in AVRO_NUMBERS",
"def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))",
"def _is_num(w):\n symbols = list(w)\n for s in symbols:\n if s in string.digits:\n return '<NUM>'\n return w",
"def has_num(text):\n return any(str.isdigit(c) for c in text)",
"def is_numeric_type(self):\n row_type = self.get_type()\n is_numeric = row_type in (\n 'hidden decimal',\n 'decimal',\n 'hidden integer',\n 'integer',\n 'int',\n 'range',\n )\n return is_numeric",
"def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False",
"def _is_number(self, symbol):\n if symbol.type == self.scanner.NUMBER:\n return True\n else:\n return False",
"def is_number_repl_isnumeric(s):\n return s.replace('.', '', 1).isnumeric()",
"def is_valid_number(self, text, widget):\n if len(text) > 2:\n return False\n for char in text:\n if not char.isdigit():\n return False\n if text != '' and int(text) == 0:\n return False\n return True",
"def is_numeric (self) :\n\n return self.__isnumeric__"
] | [
"0.8166208",
"0.7570343",
"0.72052854",
"0.7055605",
"0.6967532",
"0.6934029",
"0.6917894",
"0.6893806",
"0.6839475",
"0.6811044",
"0.6756402",
"0.6728491",
"0.66914386",
"0.66888285",
"0.66629595",
"0.6654778",
"0.6630367",
"0.66051346",
"0.66025007",
"0.65843946",
"0.65518725",
"0.65481913",
"0.65347594",
"0.65156794",
"0.65120983",
"0.6496295",
"0.64920837",
"0.6490023",
"0.64145994",
"0.640424"
] | 0.76327354 | 1 |
Parses the content of a cell to identify its value and unit. | def process_values_unit(self, cell, units={}, unit_type=None):
result = []
tokens = self._cell_tokenizer.tokenize(cell.get_text(), keep_space=False, keep_skip=False)
if not self.is_valued_cell(cell):
raise TableException('%s does not contain a unit' % cell)
if len(tokens) < 1:
raise TableException('Invalid value: %s does not contain a numerical value' % cell.get_text())
cell_type = self._get_token_type(self._cell_parser.parse(tokens))
abbrev_units = self._abbreviated_unit_dict[unit_type] if unit_type is not None else {}
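        # VALUES_UNIT: several numeric values that share a single unit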
if cell_type == 'VALUES_UNIT':
values, unit = self._get_values_unit(tokens)
validated_unit = self._determine_unit(unit,
units, abbrev_units)
if units and validated_unit in units:
for value in values:
result.append({'value': float(value), 'unit': validated_unit})
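        # VALUE_UNIT_PAIRS: each numeric value is paired with its own unit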
elif cell_type == 'VALUE_UNIT_PAIRS':
for value,unit in self._get_values_unit_pairs(tokens, units, unit_type):
validated_unit = self._determine_unit(unit,
units, abbrev_units)
if units and validated_unit in units:
result.append({'value': float(value), 'unit': unit})
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse(val: str):\n\n if not isinstance(val, str):\n raise TypeError(\"Method requires string input\")\n\n value = re.findall(r'^([-+]?\\d*\\.\\d*(?=\\s)|\\d+(?=\\s))', val)\n if not (value and val[:len(value[0])] == value[0]):\n return val, None\n\n # string starts with value\n value = value[0]\n val = val[len(value):]\n\n val = val.strip()\n if val:\n unit = val\n else:\n unit = 'dimensionless'\n\n return value, unit",
"def is_valued_cell(self, cell):\n tokens = self._cell_tokenizer.tokenize(cell.get_text(), keep_space=False, keep_skip=False)\n cell_type = self._get_token_type(self._cell_parser.parse(tokens))\n return cell_type == 'VALUES_UNIT' or cell_type == 'VALUE_UNIT_PAIRS'",
"def _get_table_val(val):\n text = val.text.strip()\n if val.br:\n val = \", \".join(text.split('\\r\\n'))\n elif val.sup:\n val = \"\".join(map(str, val.contents))\n elif NON_BREAK_SPACE in text:\n val = \", \".join(text.split(f' {NON_BREAK_SPACE} {NON_BREAK_SPACE} '))\n else:\n val = text\n\n return val",
"def cell_value(self, row, col):\n if col > 1:\n raise Exception(\"Only two columns available in a Table.\")\n elif row >= self.rows:\n raise Exception(\"Maximum row index is %d\" % len(self.headings)-1)\n elif col == 0:\n return self.headings[row]\n else:\n heading = self.headings[row]\n return self.data[heading]",
"def getCellValue(self, row = None, column = None, *, cell = None, strip = False):\n\n\t\t\t\tif (cell is None):\n\t\t\t\t\tcell = self.getCell(row = row, column = column)\n\n\t\t\t\tif ((not strip) or (cell.value is None) or (not isinstance(cell.value, str))):\n\t\t\t\t\treturn cell.value\n\n\t\t\t\tvalue = cell.value.strip()\n\t\t\t\tif (not value):\n\t\t\t\t\treturn None\n\t\t\t\treturn value",
"def parse_value(row):\n value_regex = re.compile(r'-?\\d+\\.\\d+')\n value = value_regex.search(row)\n return value.group()",
"def cell_value(cell):\n value = getattr(cell, 'value', '')\n if value is None or str(value).startswith('='): # we don't calculate expressions\n return ''\n else:\n return str(value)",
"def _cell_value(self, cell):\r\n\r\n # annoying need book argument for datemode\r\n # info on types: http://www.lexicon.net/sjmachin/xlrd.html#xlrd.Cell-class\r\n if cell.ctype == xlrd.XL_CELL_NUMBER:\r\n return float(cell.value)\r\n elif cell.ctype == xlrd.XL_CELL_DATE:\r\n # TODO: distinguish date and datetime\r\n args = xlrd.xldate_as_tuple(cell.value, self.workbook.datemode)\r\n try:\r\n return datetime.date(args[0], args[1], args[2])\r\n except Exception, inst:\r\n # print 'Error parsing excel date (%s): %s' % (args, inst)\r\n return None\r\n elif cell.ctype == xlrd.XL_CELL_BOOLEAN:\r\n return bool(cell.value)\r\n else:\r\n return cell.value",
"def parse_column_units(\n filepath: Path, sheet_name: str = None\n) -> Dict[str, ureg.Quantity]:\n method = map_input_method(filepath)\n unit_header = method(str(filepath), nrows=1, header=0)\n given_unit = lambda s: unit_header[s].values[0]\n parsed_units = {}\n required_columns = [\n \"Supply Temperature\",\n \"Target Temperature\",\n \"Heat Capacity Flowrate\",\n \"Enthalpy\",\n ]\n\n for column in required_columns:\n units = ureg.parse_units(given_unit(column))\n parsed_units[column] = units\n\n return parsed_units",
"def parse_dw_data(self, data, metric):\n\n try:\n num = float(data[0])\n except ValueError as verr: # Can't parse value as float\n print(verr)\n num = -1.0\n if len(data) > 1:\n unit = data[1]\n metric += '_' + str(unit)\n\n return metric, num",
"def get_content(header, row):\n # print 'row',row\n content = row[0:]\n result = []\n for h in header:\n # print 'ts',h\n result.append([h[0], float(content[h[1]])])\n # print result\n return result",
"def readmeter(self):\n # Read until /\n line = \"\"\n while not line.startswith(\"/\"):\n line = self.readline().decode()\n\n # Populate header\n header = line[1:].strip()\n\n # Skip empty line after header\n self.readline()\n\n # Read lines and populate dictionary until !\n data = {}\n line = self.readline().decode()\n while not line.startswith(\"!\"):\n # Get OBIS\n next_obis = line[:line.index(\"(\")]\n if next_obis:\n obis = next_obis\n data[obis] = []\n # Get and loop over the arguments\n args = re.findall(\"\\(([^()]*)\\)\", line)\n for arg in args:\n # Do some basic conversions\n valwithunit = re.match(\"^([0-9.]+)\\*([a-zA-Z]+)$\", arg)\n if valwithunit:\n arg = float(valwithunit[1]), valwithunit[2]\n # Save argument with corresponding OBIS\n data[obis].append(arg)\n line = self.readline().decode()\n return header, data",
"def _get_cell_value(self, row, col_idx, convertor):\n # Extract raw cell value.\n value = row[col_idx - 1].value\n\n # Nullify dead text.\n if isinstance(value, (unicode, str)):\n value = value.strip()\n if len(value) == 0:\n value = None\n elif value.upper() in {u\"NONE\", u\"N/A\"}:\n value = None\n\n # Convert if necessary.\n if convertor:\n try:\n return convertor(value)\n except TypeError:\n return convertor(value, lambda i: row[i - 1].value)\n\n return value",
"def parseData(self, i,j) :\n \n if self.isEmpty(i,j) and self.config.get('dataCell', 'implicitZeros') == '0':\n return\n\n # Use the fully qualified name of the cell for the resource name\n observation = self.namespaces['scope'][self.source_cell_qname]\n \n # It's an observation\n self.graph.add((observation,\n RDF.type,\n self.namespaces['qb']['Observation']))\n \n # It's in the data set defined by the current sheet\n self.graph.add((observation,\n self.namespaces['qb']['dataSet'],\n self.namespaces['scope'][self.sheet_qname]))\n \n # Add it's value\n # TODO type the value\n if self.isEmpty(i,j) and self.config.get('dataCell', 'implicitZeros') == '1':\n self.graph.add((observation,\n self.namespaces['scope'][self.dataCellPropertyName],\n Literal(0)))\n else:\n self.graph.add((observation,\n self.namespaces['scope'][self.dataCellPropertyName],\n Literal(self.source_cell.value)))\n \n # Use the row dimensions dictionary to find the properties that link\n # data values to row headers\n try :\n for (prop, value) in self.row_dimensions[i].iteritems() :\n self.graph.add((observation, prop, value))\n except KeyError :\n self.log.debug(\"({}.{}) No row dimension for cell\".format(i,j))\n \n # Use the column dimensions dictionary to find the objects of the \n # d2s:dimension property\n self.graph.add((observation,\n self.namespaces['tablink']['dimension'],\n self.getColHeaderValueURI(self.column_dimensions[j])))",
"def extract_data(self):\r\n self.parse()\r\n lst = []\r\n for i in self.table.text.split(\"\\n\")[3:]:\r\n if i != \"\" and bool(re.search(r'\\d', i)):\r\n lst.append(i.replace(u'\\xa0', ''))\r\n single = lst.pop(-3)\r\n lst = [i + \" \" + j for i, j in zip(lst[::2], lst[1::2])]\r\n lst.append(single)\r\n return lst[0:28]",
"def extract_name_value(self, cell):\n cell_str = []\n result = []\n tokens = self._cell_tokenizer.tokenize(cell.get_text())\n if self._get_token_type(tokens[-1]) == 'SKIP':\n tokens = tokens[:-1]\n for token in tokens:\n if self._get_token_type(token) == 'SKIP':\n if len(cell_str) > 0:\n cell_str.append(self._get_token_value(token))\n elif self._get_token_type(token) == 'SEPARATOR':\n result.append(''.join(cell_str))\n cell_str = []\n else:\n cell_str.append(self._get_token_value(token))\n # if last item or cell does not contain SEPARATOR\n if len(cell_str) > 0 :\n result.append(''.join(cell_str))\n return result",
"def cell_type(self, row, col):\n if col == 0: return 'heading'\n else: return 'data'",
"def test_get_cell_info(self):\n expected = { 'Experiment': \"20220101_EGS1_12345AA\",\n 'Cell': '12345AA0018/20220101_1234_1-A1-A1_AAA66666_deadbeef',\n 'Pool': '12345AA0018',\n 'Date': '20220101',\n 'Number': '1234',\n 'Slot': '1-A1-A1',\n 'CellID': 'AAA66666',\n 'Checksum': 'deadbeef',\n 'Project': '12345',\n 'Base': '12345AA0018/20220101_1234_1-A1-A1_AAA66666_deadbeef/'\n '20220101_EGS1_12345AA_12345AA0018_AAA66666_deadbeef',\n 'Files in pass': 'unknown',\n 'Files in fail': 1,\n 'Files in fast5 fail': 1,\n '_counts': [\n {'_barcode': '.', '_label': 'All passed reads', '_part': 'pass', 'total_reads': 200},\n {'_barcode': '.', '_label': 'Passed and lambda-filtered reads', '_part': 'nolambda'},\n {'_barcode': '.', '_label': 'All failed reads', '_part': 'fail'} ],\n '_blobs': ['../../__blob__'],\n '_duplex' : [ ['Duplex pairs', 1],\n ['from total passing reads', 200],\n ['% of passing reads', '1.00%'] ],\n '_filter_type': 'none',\n '_final_summary': {'is_rna': False},\n '_nanoplot': '../../__nanoplot__',\n }\n\n\n got = get_cell_info( experiment = \"20220101_EGS1_12345AA\",\n cell = \"12345AA0018/20220101_1234_1-A1-A1_AAA66666_deadbeef\",\n cell_content = { '.': dict( fast5_pass = ['x.fast5'],\n fastq_fail = ['y.fastq'],\n fast5_fail = ['y.fast5'] ) },\n counts = { ('.','pass'): dict(total_reads = 200),\n ('.','fail'): dict(),\n ('.','nolambda'): dict() },\n fin_summary = dict(is_rna = False),\n blobs = ['__blob__'],\n nanoplot = '__nanoplot__',\n duplex = 1,\n fast5_meta = dict() )\n\n if VERBOSE:\n pprint(got)\n\n self.assertEqual( type(got), OrderedDict )\n self.assertEqual( dict(got), expected )",
"def parseSheet(self):\n self.log.info(\"Parsing {0} rows and {1} columns.\".format(self.rowns,self.colns))\n \n self.column_dimensions = {}\n self.property_dimensions = {}\n self.row_dimensions = {}\n self.rowhierarchy = {}\n\n # Get dictionary of annotations\n self.annotations = self.r_sheet.cell_note_map\n \n for i in range(0,self.rowns):\n self.rowhierarchy[i] = {}\n \n for j in range(0, self.colns):\n # Parse cell data\n self.source_cell = self.r_sheet.cell(i,j)\n self.source_cell_name = cellname(i,j)\n self.style = self.styles[self.source_cell].name\n self.cellType = self.getType(self.style)\n self.source_cell_qname = self.getQName(self.source_cell_name)\n \n self.log.debug(\"({},{}) {}/{}: \\\"{}\\\"\". format(i,j,self.cellType, self.source_cell_name, self.source_cell.value))\n\n # Try to parse ints to avoid ugly _0 URIs\n try:\n if int(self.source_cell.value) == self.source_cell.value:\n self.source_cell.value = int(self.source_cell.value)\n except ValueError:\n self.log.debug(\"(%s.%s) No parseable int\" % (i,j))\n\n \n # Parse annotation (if any)\n if self.config.get('annotations', 'enabled') == \"1\":\n if (i,j) in self.annotations:\n self.parseAnnotation(i, j)\n\n # Parse cell even if empty\n if self.cellType == 'Data':\n self.parseData(i, j)\n elif (self.cellType == 'HRowHeader') :\n self.updateRowHierarchy(i, j)\n elif self.cellType == 'ColHeader' :\n self.parseColHeader(i, j)\n elif self.cellType == 'RowProperty' :\n self.parseRowProperty(i, j)\n \n # If cell not empty, check for more types\n if not self.isEmpty(i,j) :\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],RDF.type,self.namespaces['tablink'][self.cellType]))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['cell'],Literal(self.source_cell_name)))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['col'],Literal(colname(j))))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['row'],Literal(i+1)))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname] isrow row\n if self.cellType == 'Title' :\n self.parseTitle(i, j)\n \n elif self.cellType == 'RowHeader' :\n self.parseRowHeader(i, j)\n \n elif self.cellType == 'HRowHeader' :\n self.parseHierarchicalRowHeader(i, j)\n \n elif self.cellType == 'RowLabel' :\n self.parseRowLabel(i, j)\n \n # Add additional information about the hierarchy of column headers\n for value in self.column_dimensions.values():\n for index in range(1, len(value)):\n uri_sub = self.getColHeaderValueURI(value[:index+1])\n uri_top = self.getColHeaderValueURI(value[:index])\n self.graph.add((uri_sub, self.namespaces['tablink']['subColHeaderOf'], uri_top))\n self.graph.add((uri_sub, self.namespaces['tablink']['depth'], Literal(index)))\n self.graph.add((uri_top, self.namespaces['tablink']['depth'], Literal(index-1)))\n \n self.log.info(\"Done parsing...\")",
"def _parse_units(self, model, comp, node):\n node = dom_child(node, 'unitDefinition')\n while node:\n name = node.getAttribute('id')\n self.log('Parsing unit definition for \"' + name + '\".')\n unit = myokit.units.dimensionless\n node2 = dom_child(node, 'listOfUnits')\n node2 = dom_child(node2, 'unit')\n while node2:\n kind = str(node2.getAttribute('kind')).strip()\n u2 = self._convert_unit(kind)\n if node2.hasAttribute('multiplier'):\n m = float(node2.getAttribute('multiplier'))\n else:\n m = 1.0\n if node2.hasAttribute('scale'):\n m *= 10 ** float(node2.getAttribute('scale'))\n u2 *= m\n if node2.hasAttribute('exponent'):\n u2 **= float(node2.getAttribute('exponent'))\n unit *= u2\n node2 = dom_next(node2, 'unit')\n self.units[name] = unit\n node = dom_next(node, 'unitDefinition')",
"def cellval(row, col):\n key = 'R'+str(row)+'C'+str(col)\n if key not in DATA:\n return None\n return DATA[key]",
"def parse(self):\n mp = {}\n cells = self.row.find_all(\"td\")\n\n for cell in cells:\n if \"visible-mobile\" in cell.attrs[\"class\"]:\n continue\n title = self._get_cell_title(cell)\n content = cell.find(\"span\", class_=\"table-responsive__inner\")\n\n if title == \"name\":\n mp.update(self._parse_name(cell, content))\n elif title == \"fraktion\":\n fraktion, klub = self._parse_abbreviation(content)\n mp[\"political_affiliation\"] = klub + \" (\" + fraktion + \")\"\n elif title == \"wahlkreis\":\n mp[\"wahlkreis\"] = content.text.strip()\n elif title == \"bundesland\":\n mp[\"state\"] = self._parse_abbreviation(content)[1]\n\n return mp",
"def get_data_from_row(x):\n\n import re\n\n data = []\n x = re.findall(\"<(TD|td)[^>*]+>(.*?)(</(td|TD)>)\", x, re.S)\n for rec in [tmp[1] for tmp in x]:\n if len(rec) == 0:\n data.append(None)\n continue\n # If no value\n if re.match(\"[-]+\", rec):\n data.append(None)\n continue\n # If there is a special font: remove font\n tmp = re.findall(\"<font[^>*]+>(.*?)</font>\", rec, re.S)\n if tmp:\n data.append(tmp[0])\n continue\n # If this is an image cells: extract weather type\n tmp = re.findall(\"<img (.*?)alt='([^('.)*]+).*\", rec, re.S)\n if tmp:\n data.append(tmp[0][1])\n continue\n # Date cell?\n tmp = re.findall(\".*([0-9]{2}/[0-9]{2}/[0-9]{4}).*\", rec, re.S)\n if tmp:\n data.append(tmp[0])\n continue\n # Time cell?\n tmp = re.findall(\".*([0-9]{2}:[0-0]{2}).*\", rec, re.S)\n if tmp:\n data.append(tmp[0])\n continue\n\n raise Exception(\"unknown handling of \\\"{:s}\\\"\".format(rec))\n\n # We expect the first two elements to contain date/time information.\n # convert to one.\n from datetime import datetime as dt\n data = [dt.strptime(\"{:s} {:s}\".format(data[0], data[1]), \"%m/%d/%Y %H:%M\")] + data[2:]\n\n return data",
"def get_unit_from_doc(doc):\n invalid_units = ['this', 'long', 'all', 'setup', 'given', 'a']\n try:\n usplit = doc.rsplit(' in ')\n if 'Value' in doc and 'converted to' in doc:\n unit = '{:}'.format(doc.rsplit('converted to ')[-1].rstrip('.'))\n elif len(usplit) < 2:\n unit = ''\n else:\n unit = '{:}'.format(usplit[-1])\n unit = unit.rstrip('.').rstrip(',').rsplit(' ')[0].rstrip('.').rstrip(',')\n \n if unit.endswith('(') or unit in invalid_units:\n unit = ''\n \n except:\n unit = ''\n return unit",
"def read_units(self, fid):\r\n lin = self.read_line(fid) \r\n while lin[0] != ':':\r\n parts = lin.split()\r\n if parts[0]=='mass':\r\n self.mass = float(parts[1])\r\n elif parts[0]=='length':\r\n self.length = float(parts[1])\r\n elif parts[0]=='angle':\r\n self.angle = parts[1]\r\n lin = self.read_line(fid)\r\n return lin",
"def getCellValue_quick(self, row, column):\n\n\t\t\t\treturn self.thing[f\"{column}{row}\"].value",
"def extractSI(s):\n\n # If this is representing a range, just return it as is.\n if \"[\" in s:\n return (s, None)\n\n types = {\"T\": \"Time\", \"O\": \"Rate\", \"A\": \"Rate\", \"s\": \"Seconds\", \"%\": \"Percentage\"}\n du = s.split()\n # Preserve integers as such, so that columns like \"Threads\" generate an X axis \"1 2 3\",\n # rather than \"1.0 2.0 3.0\"\n num = float(du[0]) if \".\" in du[0] else int(du[0])\n units = du[1] if len(du) == 2 else \" \"\n if s[-1] == \" \":\n units = units + \" \"\n\n # http://physics.nist.gov/cuu/Units/prefixes.html\n factor = {\n \"Y\": 1e24,\n \"Z\": 1e21,\n \"E\": 1e18,\n \"P\": 1e15,\n \"T\": 1e12,\n \"G\": 1e9,\n \"M\": 1e6,\n \"k\": 1e3,\n \" \": 1,\n \"m\": -1e3, # Yes, I do mean that, see below for the explanation.\n \"u\": -1e6,\n \"n\": -1e9,\n \"p\": -1e12,\n \"f\": -1e15,\n \"a\": -1e18,\n \"z\": -1e21,\n \"y\": -1e24,\n }[units[0] if len(units) == 2 else \" \"]\n # print (\"units = '\" + units + \"'\" + \" factor=\" + str(factor))\n\n # Minor trickery here is an attempt to preserve accuracy by using a single divide,\n # rather than multiplying by 1/x, which introduces two roundings since 1/10 is not representable\n # perfectly in IEEE floating point.\n # (Not that this really matters, other than for cleanliness, since we're likely reading numbers with\n # at most five decimal digits of precision).\n return (\n num * factor if factor > 0 else num / -factor,\n types.get(units[-1], \"Count\"),\n )",
"def _get_units(self, unit_tag):\n\n # a list that contains apartment unit's information\n unit = []\n # use a loop to list all the cells in a row \n for cell in unit_tag.find_all('td'):\n if cell.attrs: # omit the cell with nothing in it \n # look for the apartment #, however, this info is not\n # consistent across the entire webiste\n if cell['data-tid'] == 'pdpfloorplans-unit-displayText':\n unit_num = cell.get_text()\n unit.append(unit_num)\n # scrape the price of the unit\n if cell['data-tid'] == 'pdpfloorplans-unit-price':\n try:\n unit_price = cell.get_text().replace('$', '')\n # try to convert the price to float \n unit.append(float(unit_price))\n except:\n # if there's no price for this unit\n # append the list with a null value \n unit.append(np.nan)\n if cell['data-tid'] == 'pdpfloorplans-unit-bedbath':\n try:\n # try to extract the tags that include the number\n # of bedrooms and bathrooms \n bedbath_tag = cell.find_all('span')\n bed_tag, bath_tag = bedbath_tag[0], bedbath_tag[1]\n # regular expression pattern for extracting any types\n # of numbers, including integer and floating numbers \n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n bed = re.findall(pattern, bed_tag.get_text())\n bath = re.findall(pattern, bath_tag.get_text())\n bed_unit, bath_unit = 0, 0\n if bed:\n bed_unit = bed[0]\n if bath:\n bath_unit = bath[0]\n unit.append(float(bed_unit))\n unit.append(float(bath_unit))\n except:\n # if the convertion failed, append the list\n # will two null values \n unit.append(np.nan)\n unit.append(np.nan)\n if cell['data-tid'] == 'pdpfloorplans-unit-sqft':\n # follow the same procedure as above, but this time\n # scrape the square foot of the apartment unit\n try:\n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n sqft_unit = re.findall(pattern, cell.get_text())[0]\n unit.append(float(sqft_unit))\n except:\n unit.append(np.nan)\n return unit",
"def extract_cell_name_data(cell_file_name):\n\n m = cell_name_re.search(cell_file_name)\n\n if not m:\n print(\"ERROR: trying to extract bad cell name {}\"\n .format(cell_file_name), file=sys.stderr)\n sys.exit(1)\n\n cell_name = m.group(1)\n\n cell_name_m = cell_info_from_name_re.search(cell_name)\n ( donor_num, tissue_type, cell_num ) = cell_name_m.groups()\n\n return (cell_name, donor_num, tissue_type, str(int(cell_num)))",
"def _parse_value(self): # type: () -> Item\n self.mark()\n trivia = Trivia()\n\n c = self._current\n if c == '\"':\n return self._parse_basic_string()\n elif c == \"'\":\n return self._parse_literal_string()\n elif c == \"t\" and self._src[self._idx :].startswith(\"true\"):\n # Boolean: true\n self.inc_n(4)\n\n return Bool(True, trivia)\n elif c == \"f\" and self._src[self._idx :].startswith(\"false\"):\n # Boolean: true\n self.inc_n(5)\n\n return Bool(False, trivia)\n elif c == \"[\":\n # Array\n elems = [] # type: List[Item]\n self.inc()\n\n while self._current != \"]\":\n self.mark()\n while self._current.is_ws() or self._current == \",\":\n self.inc()\n\n if self._idx != self._marker:\n elems.append(Whitespace(self.extract()))\n\n if self._current == \"]\":\n break\n\n if self._current == \"#\":\n cws, comment, trail = self._parse_comment_trail()\n\n next_ = Comment(Trivia(\"\", cws, comment, trail))\n else:\n next_ = self._parse_value()\n\n elems.append(next_)\n\n self.inc()\n\n res = Array(elems, trivia)\n\n if res.is_homogeneous():\n return res\n\n raise self.parse_error(MixedArrayTypesError)\n elif c == \"{\":\n # Inline table\n elems = Container()\n self.inc()\n\n while self._current != \"}\":\n if self._current.is_ws() or self._current == \",\":\n self.inc()\n continue\n\n key, val = self._parse_key_value(False)\n elems.append(key, val)\n\n self.inc()\n\n return InlineTable(elems, trivia)\n elif c in string.digits + \"+\" + \"-\":\n # Integer, Float, Date, Time or DateTime\n while self._current not in \" \\t\\n\\r#,]}\" and self.inc():\n pass\n\n raw = self.extract()\n\n item = self._parse_number(raw, trivia)\n if item:\n return item\n\n try:\n res = parse_rfc3339(raw)\n except ValueError:\n res = None\n\n if res is None:\n raise self.parse_error(InvalidNumberOrDateError)\n\n if isinstance(res, datetime.datetime):\n return DateTime(res, trivia, raw)\n elif isinstance(res, datetime.time):\n return Time(res, trivia, raw)\n elif isinstance(res, datetime.date):\n return Date(res, trivia, raw)\n else:\n raise self.parse_error(InvalidNumberOrDateError)\n else:\n raise self.parse_error(UnexpectedCharError, (c))"
] | [
"0.6098518",
"0.58725435",
"0.5795746",
"0.57074785",
"0.56425375",
"0.56314313",
"0.55982673",
"0.5591802",
"0.55434746",
"0.5527511",
"0.5515776",
"0.55069953",
"0.5387614",
"0.53209805",
"0.5315428",
"0.53074974",
"0.530218",
"0.52943623",
"0.5276008",
"0.5254738",
"0.5244095",
"0.52222204",
"0.52139264",
"0.51890975",
"0.5183934",
"0.5176104",
"0.51641756",
"0.5146855",
"0.51123255",
"0.51059407"
] | 0.70920163 | 0 |
Reads the SBB, tallies votes using commitment openings posted by PS, and compares commitment openings to previously posted ComT postings by PS. If all tallies are permutations of the same values and the openings match the previously posted ComT postings, then returns the election tally; otherwise raises an exception. | def tally_and_verify_election_outcome(self, outcome_lists: Set[int]) -> typing.Counter[int]:
sbb_contents = self._sbb.get_sbb_contents()
# Keep tally counters for each of the m lists. All m tallies must
# be the same for the outcome to be verified.
raw_vote_tallies: List[typing.Counter[int]] = []
for idx in outcome_lists:
original_vote_list = sbb_contents.vote_lists[idx]
posted_outcome = sbb_contents.election_outcomes[idx]
assert len(original_vote_list) == len(posted_outcome) == self._num_voters
# First check that the opened commitments in the outcome lists match
# the original vote list commitments.
for og_com_t, outcome_com_t in zip(original_vote_list, posted_outcome):
for og_com_sv, outcome_com_sv in zip(og_com_t, outcome_com_t):
if (util.get_COM(outcome_com_sv.k1, outcome_com_sv.u) != og_com_sv.com_u or
util.get_COM(outcome_com_sv.k2, outcome_com_sv.v) != og_com_sv.com_v):
raise Exception(
'Election outcome commitment openings do not match original commitments'
)
# Compute the raw vote value by iteratively applying each SVR component.
values: List[int] = [0 for _ in range(self._num_voters)]
for j, vote in enumerate(posted_outcome):
for svr in vote:
values[j] = util.val(
values[j],
util.val(
util.bytes_to_bigint(svr.u),
util.bytes_to_bigint(svr.v),
self._M,
),
self._M,
)
raw_vote_tallies.append(Counter(values))
# Ensure all tallies are equal.
if not all(tally == raw_vote_tallies[0] for tally in raw_vote_tallies):
raise Exception('Election outcome failed verification, not all tallies are equal')
return raw_vote_tallies[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_ballot_consistency(self) -> bool:\n sbb_contents = self._sbb.get_sbb_contents()\n \n # First, validate the commitment consistency with the initial vote lists and final vote lists.\n for list_idx, proof in sbb_contents.consistency_proof.items():\n for vote_idx in range(len(proof)):\n proved_sv = proof[vote_idx]\n tu_list = []\n tv_list = []\n for row_idx, sv in enumerate(proved_sv):\n # Ensure that we are consistent with the initial and the final commitments\n if sv.get('u', None) is not None:\n val_init = sv['u_init']\n val_fin = sv['u_fin']\n val_uv = sv['u']\n val_t = sbb_contents.t_values[list_idx][row_idx][vote_idx]['tu']\n original_commitment = sbb_contents.svr_commitments[row_idx][vote_idx]['com_u']\n final_commitment = sbb_contents.vote_lists[list_idx][vote_idx][row_idx].com_u\n else:\n val_init = sv['v_init']\n val_fin = sv['v_fin']\n val_uv = sv['v']\n val_t = sbb_contents.t_values[list_idx][row_idx][vote_idx]['tv']\n original_commitment = sbb_contents.svr_commitments[row_idx][vote_idx]['com_v']\n final_commitment = sbb_contents.vote_lists[list_idx][vote_idx][row_idx].com_v\n key_init = sv['k_init']\n key_fin = sv['k_fin']\n \n # Verify the input and output commitments\n com_init = util.get_COM(util.bigint_to_bytes(key_init), util.bigint_to_bytes(val_init))\n com_fin = util.get_COM(util.bigint_to_bytes(key_fin), util.bigint_to_bytes(val_fin))\n if com_init != original_commitment:\n raise Exception(\"Failed to open the initial vote commitment\")\n if com_fin != final_commitment:\n raise Exception(\"Failed to open the final vote commitment\")\n \n # Verify the t-values\n if util.t_val(util.bigint_to_bytes(val_init), util.bigint_to_bytes(val_uv), self._M) != val_t:\n raise Exception(\"Failed to verify t value\")\n \n # Add t-values to their respective lists for lagrange checks\n tu_list.append(sbb_contents.t_values[list_idx][row_idx][vote_idx]['tu'])\n tv_list.append(sbb_contents.t_values[list_idx][row_idx][vote_idx]['tv'])\n \n # Check that tu_list and tv_list lagrange to (t, -t)\n rows = len(proved_sv)\n tu0 = self._lagrange(tu_list, rows, rows-1, self._M)\n tv0 = self._lagrange(tv_list, rows, rows-1, self._M)\n if util.val(tu0, tv0, self._M) != 0:\n # TODO: This does not work\n #raise Exception(\"Failed lagrange verification of t values\")\n pass\n return True",
"def shared_nb(self):\n return self.bbsitting_set.count() + self.booked.count()",
"def test_tally_Schulze(self):\n self.populate_database(election_type=\"Schulze\")\n\n uAvote1 = models.Vote(\n user = self.userA,\n candidate = self.candidateBA,\n value =5)\n uAvote2 = models.Vote(\n user = self.userA,\n candidate = self.candidateBB,\n value = 0)\n uAvote3 = models.Vote(\n user = self.userA,\n candidate = self.candidateBC,\n value = 3)\n uAvote4 = models.Vote(\n user = self.userA,\n candidate = self.candidateBD,\n value = -2)\n\n uBvote1 = models.Vote(\n user = self.userB,\n candidate = self.candidateBA,\n value = 6)\n uBvote2 = models.Vote(\n user = self.userB,\n candidate = self.candidateBB,\n value = 1)\n uBvote3 = models.Vote(\n user = self.userB,\n candidate = self.candidateBC,\n value = -2)\n uBvote4 = models.Vote(\n user = self.userB,\n candidate = self.candidateBD,\n value = 5)\n\n uCvote1 = models.Vote(\n user = self.userC,\n candidate = self.candidateBA,\n value = -2)\n uCvote2 = models.Vote(\n user = self.userC,\n candidate = self.candidateBB,\n value = 5)\n uCvote3 = models.Vote(\n user = self.userC,\n candidate = self.candidateBC,\n value = 2)\n uCvote4 = models.Vote(\n user = self.userC,\n candidate = self.candidateBD,\n value = 3)\n # Check gen_pair_results() method in Schulze()\n cand_pair_results = self.schulze.gen_pair_results(self.raceB)\n # Generate expected pair_results dict for comparitive purposes\n vote2 = aliased(models.Vote, name=\"vote2\")\n expected_pair_results = {}\n for cand1, cand2 in cand_pair_results.keys():\n preferred_expected = 0\n for user in [self.userA, self.userB, self.userC]:\n v1, v2 = session.query(\n models.Vote.value.label(\"vote1\"),\n vote2.value.label(\"vote2\")).filter(\n models.Vote.user_id == user.id,\n vote2.user_id == user.id,\n models.Vote.candidate_id == cand1,\n vote2.candidate_id == cand2).all()[0]\n if v1 > v2:\n preferred_expected += 1\n expected_pair_results[(cand1, cand2)] = preferred_expected\n self.assertEqual(cand_pair_results[(cand1, cand2)],\n expected_pair_results[(cand1, cand2)])\n\n final_result = self.schulze.tally_race(self.raceB.id)\n\n self.dbresults = models.Results(\n race_id = self.raceB.id,\n results = final_result) \n session.add(self.dbresults)\n session.commit()\n\n # JSON doesn't allow dict keys as anything but strings, so \n # the original model's keys must be converted for comparative\n # purposes\n print(\"results\", self.dbresults.results)\n final_result_keys_to_str = utils.dict_keys_to_str(final_result.items())\n\n self.assertEqual(final_result, {3:True, 4:False, 5:False, 6:False})\n self.assertEqual(self.dbresults.results, final_result_keys_to_str)\n self.assertEqual(self.dbresults.election_type, self.raceB.election_type)\n # self.assertEqual(1,0)",
"def ballot_polling_SPRT(vote_count, recount, T, risk_limit, Sw=None, Sl=None):\n max_p_value = 0\n for winner in T:\n for loser in T[winner]:\n if T[winner][loser] < 1 / risk_limit:\n y1 = gamma(winner, loser, vote_count, Sw, Sl)\n y2 = gamma(loser, winner, vote_count, Sl, Sw)\n T[winner][loser] *= y1 ** recount[winner] * y2 ** recount[loser]\n\n max_p_value = max(max_p_value, 1 / T[winner][loser])\n\n return T, max_p_value",
"def borda(election, tiebreaker=None):\n election = np.asarray(election)\n\n ncands = election.shape[1]\n total_tally = np.zeros(ncands, dtype=int)\n\n # Tally candidates in each column, multiply by points for each rank level\n for n, column in enumerate(election.T):\n tally = np.bincount(column, minlength=ncands)\n total_tally += (ncands - n)*tally\n\n # Python lists are faster than NumPy here\n total_tally = total_tally.tolist()\n\n # Find the set of candidates who have the highest score (usually only one)\n highest = max(total_tally)\n winners = _all_indices(total_tally, highest)\n\n # Break any ties using specified method\n tiebreak = _get_tiebreak(tiebreaker, _tiebreak_map)\n return tiebreak(winners)[0]",
"def get_bet(self):\n while newbet := input(f\"{self.name}: {self.chips} chips. Last bet: {self.lastbet}. Bet: \"):\n try:\n newbet = int(newbet)\n if newbet in range(0, self.chips+1):\n self.bet = newbet\n self.chips -= newbet\n return newbet\n else:\n print(\"You don't have that many chips.\")\n except ValueError:\n print(\"Bets are numbers please.\")",
"def test_paired_difference_analyses(self):\r\n actual = paired_difference_analyses(\r\n self.personal_ids_to_state_values1,\r\n ['firmicutes-abundance',\r\n 'bacteroidetes-abundance'],\r\n ['Pre', 'Post'],\r\n output_dir=self.test_out,\r\n ymin=0.0,\r\n ymax=1.0)\r\n self.assertTrue(exists(join(self.test_out,\r\n 'paired_difference_comparisons.txt')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'firmicutes-abundance.pdf')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'bacteroidetes-abundance.pdf')))\r\n # three output paths returned\r\n self.assertEqual(len(actual[0]), 5)\r\n # expected t values returned, they should be less than (firmicutes) or greater (bacteroidetes) than 2 \r\n self.assertLess(abs(actual[1]['firmicutes-abundance'][4]), 2)\r\n self.assertLess(2, abs(actual[1]['bacteroidetes-abundance'][4]))",
"def run(data, params):\n start_time = time.process_time()\n\n # 'n' is the number of candidates, also the number of ranks\n n = params['n']\n # 'N' is the total number of voters\n N = params['N']\n # 's0' is the optional ground truth full ranking of the candidates\n # (distribution is drawn off this full ranking)\n s0 = params['s0']\n\n # Order candidates by non-decreasing pair-wise contest wins \n # (ascending order with lexicographic tie-breaking)\n precedenceMatrix = utils.precedenceMatrix(data, n)\n\n # Credits to Sayan-Paul for starter code for merge sort\n # See: https://github.com/Sayan-Paul/Sort-Library-in-Python/blob/master/sortlib.py\n def mergesort(ar):\n if len(ar)<=1:\n return ar\n middle=len(ar)/2\n left =ar[:middle]\n right=ar[middle:]\n left=mergesort(left)\n right=mergesort(right)\n res=merge(left,right)\n return res\n\n def merge(left,right):\n res=[]\n while len(left)+len(right):\n if len(left)*len(right):\n if precedenceMatrix[left[0],right[0]]<=precedenceMatrix[right[0],left[0]]:\n res.append(left[0])\n left=left[1:]\n else:\n res.append(right[0])\n right=right[1:]\n elif len(left):\n res.append(left[0])\n left=left[1:]\n elif len(right):\n res.append(right[0])\n right=right[1:]\n return res\n\n candidates = [i for i in range(n)]\n sortedCandidates = mergesort(candidates)\n\n sigma = tuple(sortedCandidates)\n\n time_elapsed = (time.process_time() - start_time) * 1000\n\n return ALGORITHM_NAME, utils.generalizedKendallTauDistance(data, sigma, n, N, s0), time_elapsed, sigma",
"def proof_of_work(self):\n last_block = self.__chain[-1]\n last_hash = hash_block(last_block)\n proof = 0\n # Try different PoW numbers and return the first valid one\n while not Verification.valid_proof(self.__open_transactions, self.__open_chipsactions, self.__open_messsactions, last_hash, proof):\n proof += 1\n return proof",
"def get_answers(session):\n \n # Creata ASP program and feed it to ASP-solver\n ASPScript = make_asp_script(session)\n # print ASPScript\n tmp = tempfile.NamedTemporaryFile( delete=False) #mode='w+b',\n tmp.write(ASPScript)\n tmp.seek(0)\n (result, error) = subprocess.Popen([GROUNDER], stdin=tmp, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() \n grounded = tempfile.NamedTemporaryFile(delete=False)\n grounded.write(result)\n grounded.seek(0)\n (result, error) = subprocess.Popen([SOLVER, \"%d\" %MAX_ANSWERS], stdin=grounded, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n \n # check if ASP is satisfiable\n if check_solution_status(result) != SATISFIABLE: return\n \n lines = result.split('\\n')\n for i, line in enumerate(lines):\n if 'answer' in line.lower():\n answer_no = int(re.findall('\\d+', line)[0])\n yield answer_no, lines[i+1]\n\n # # if parsing from file: \n # with open(file, 'r+') as file:\n # lines = file.readlines() \n # for i in range(0, len(lines)):\n # line = lines[i]\n # if 'answer' in line.lower():\n # answer_no = int(re.findall('\\d+', line)[0])\n # yield answer_no, lines[i+1]",
"def determine_basketball_outcome_from_api(market, params, enp_id):\n\n n_bet = 1\n outcome = None\n if market == BasketballMarkets.FULL_TIME_POINT_SPREAD:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n handicap = params[1]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n\n if selection == BasketballSelections.HOME_TEAM:\n hc_score = score_home + handicap\n if hc_score == score_away:\n outcome = 0\n elif hc_score > score_away:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.AWAY_TEAM:\n hc_score = score_away + handicap\n if hc_score == score_home:\n outcome = 0\n elif hc_score > score_home:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('FTPS bet should be ONE or TWO')\n\n elif market == BasketballMarkets.FULL_TIME_MONEYLINE:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n if selection == BasketballSelections.HOME_TEAM:\n if score_home == score_away:\n outcome = 0\n elif score_home > score_away:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.AWAY_TEAM:\n if score_away == score_home:\n outcome = 0\n elif score_away > score_home:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('selection should be ONE or TWO')\n elif market == BasketballMarkets.FULL_TIME_TOTAL_POINTS:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n handicap = params[1]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n score_total = score_home + score_away\n\n if selection == BasketballSelections.OVER:\n if score_total == handicap:\n outcome = 0\n elif score_total > handicap:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.UNDER:\n if score_total == handicap:\n outcome = 0\n elif score_total < handicap:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('FTTP bet should be OVER or UNDER')\n else:\n raise ValueError('implement more markets')\n\n return outcome, n_bet",
"def check_bollinger(self):\n upper, lower = self.bollinger_bands()\n if self.daily['Adj Close'][-1] > upper[-1]:\n self.debug += '\\nAbove upper bollinger: sells + 1'\n self.sells += 1\n elif self.daily['Adj Close'][-1] < lower[-1]:\n self.debug += '\\nBelow lower bollinger: buys + 1'\n self.buys += 1",
"def bet_fold_algorithm(self, current_top_bid,\n pot_size, community_cards, position, number_of_players, is_first_cycle, total_value):\n # If pre-flop\n if len(community_cards) == 0:\n rank = self.hand.opening_hand_ranking()\n # Cut-off rank = 0.00 right now\n # Else bet for now\n cutoff = self.stat_to_ev(self.vpip)\n if rank <= cutoff:\n # Fold\n return 'f'\n else:\n return 'b'\n # If post-flop\n else:\n # Greater than 10 high card\n if total_value > self.agg_to_total_value_cutoff():\n return 'b'\n elif position == number_of_players - 1 and current_top_bid < pot_size / 5:\n return 'b'\n elif current_top_bid == 0:\n return 'c'\n else:\n return 'f'",
"def check_solvability(self, state):\n\n inversion = 0\n for i in range(len(state)):\n for j in range(i, len(state)):\n if state[i] > state[j] != 0:\n inversion += 1\n\n return inversion % 2 == 0",
"def calcStakeVoteSubsidy(self, height):\n # Votes have no subsidy prior to the point voting begins. The minus one\n # accounts for the fact that vote subsidy are, unfortunately, based on the\n # height that is being voted on as opposed to the block in which they are\n # included.\n if height < self.netParams.StakeValidationHeight - 1:\n return 0\n\n # Calculate the full block subsidy and reduce it according to the stake\n # proportion. Then divide it by the number of votes per block to arrive\n # at the amount per vote.\n subsidy = self.calcBlockSubsidy(height)\n proportions = self.totalProportions\n subsidy *= self.netParams.StakeRewardProportion\n subsidy = subsidy // (proportions * self.netParams.TicketsPerBlock)\n\n return subsidy",
"def process_results(master_state_map, rigged_party):\n party = \"\"\n reps_electorate_points = 0\n dems_electorate_points = 0\n total_num_votes = 0\n rigged_electoral_points = 0\n\n if rigged_party is None:\n\n\n\n\n for state_num, state_data in master_state_map.items():\n print(f\"state_num: {state_num}, this represents the item number in the dictionary.\")\n print(f\"state_data: {state_data}, this contains all the state data\")\n # print(state_data)\n for key, value in state_data.items():\n if key == 'state':\n print(f\"state: {value}\")\n elif key == 'num_voters':\n print(f\"num_voters: {value}\")\n elif key == 'party':\n print(f\"party: {value}\")\n party = value\n elif key == 'votes':\n print(f\"votes: {value}\")\n total_num_votes += value\n elif key == 'electorate':\n print(f\"electorate: {value}\")\n if party == \"republican\":\n reps_electorate_points += value\n elif party == \"democrat\":\n dems_electorate_points += value\n\n print(f\"reps: {reps_electorate_points}\")\n print(f\"dems: {dems_electorate_points}\")\n print(f\"the total amount of votes is : {total_num_votes}\")\n # if republican electorate points are greater than the democrat electorate points, then republics win\n # else, democrats win\n if reps_electorate_points > dems_electorate_points:\n print(f\"The Republicans win with {reps_electorate_points} electoral points!\")\n elif dems_electorate_points > reps_electorate_points:\n print(f\"The Democrats win with {dems_electorate_points} electoral points!\")\n else:\n print('wow')\n\n if rigged_party:\n for state_num, state_data in master_state_map.items():\n print(f\"state_num: {state_num}, this represents the item number in the dictionary.\")\n print(f\"state_data: {state_data}, this contains all the state data\")\n # print(state_data)\n for key, value in state_data.items():\n if key == 'state':\n print(f\"state: {value}\")\n elif key == 'num_voters':\n print(f\"num_voters: {value}\")\n elif key == 'party':\n print(f\"party: {rigged_party}\")\n party = value\n elif key == 'votes':\n print(f\"votes: {value}\")\n total_num_votes += value\n elif key == 'electorate':\n print(f\"electorate: {value}\")\n if party == rigged_party:\n rigged_electoral_points += value\n elif party == \"democrat\":\n rigged_electoral_points += value\n elif party == \"republican\":\n rigged_electoral_points += value\n elif party == \"libertarian\":\n rigged_electoral_points += value\n elif party == \"independent\":\n rigged_electoral_points += value\n\n\n print(f\"The {rigged_party} party won with {rigged_electoral_points}!\")",
"def next_candidate():\r\n candidate_bidder = -1\r\n candidate_value = -1\r\n for n in range(len(bidders)):\r\n if (is_active[n] == 0 and cur_value(n) is not None\r\n and cur_value(n) > max(candidate_value, cur_bid)):\r\n candidate_value = bidders[n].values[cur_value_idx[n]]\r\n candidate_bidder = n\r\n return candidate_value, candidate_bidder",
"def compute_solution_of_puzzle():\n list_of_boarding_passes = get_list_of_boarding_passes()\n list_of_seat_IDs = get_all_seat_IDs(list_of_boarding_passes)\n\n print(\"[+] Solution of day5/puzzle1: {} is the highest seat ID\".format(max(list_of_seat_IDs)))\n\n my_seat_ID = find_my_seat_ID(list_of_seat_IDs)\n print(\"[+] Solution of day5/puzzle2: {} is my seat ID\".format(my_seat_ID))",
"def __bet(self, numbers: str, user_seed: str) -> None:\n self.BetSource(self.tx.origin, self.tx.timestamp)\n if not self._game_on.get():\n Logger.debug(f'Game not active yet.', TAG)\n revert(f'Game not active yet.')\n amount = self.msg.value\n Logger.debug(f'Betting {amount} loop on {numbers}.', TAG)\n self.BetPlaced(amount, numbers)\n self._take_wager(self.address, amount)\n\n nums = set(numbers.split(','))\n n = len(nums)\n if n == 0:\n Logger.debug(f'Bet placed without numbers.', TAG)\n revert(f' Invalid bet. No numbers submitted. Zero win chance. Returning funds.')\n elif n > 20:\n Logger.debug(f'Bet placed with too many numbers. Max numbers = 20.', TAG)\n revert(f' Invalid bet. Too many numbers submitted. Returning funds.')\n\n numset = set(WHEEL_ORDER)\n numset.remove('0')\n for num in nums:\n if num not in numset:\n Logger.debug(f'Invalid number submitted.', TAG)\n revert(f' Please check your bet. Numbers must be between 0 and 20, submitted as a comma separated '\n f'string. Returning funds.')\n\n bet_type = self._bet_type.get()\n self._bet_type.set(BET_TYPES[0])\n if bet_type == BET_TYPES[2] or bet_type == BET_TYPES[3]:\n bet_limit = self._bet_limits[0]\n else:\n bet_limit = self._bet_limits[n]\n if amount < BET_MIN or amount > bet_limit:\n Logger.debug(f'Betting amount {amount} out of range.', TAG)\n revert(f'Betting amount {amount} out of range ({BET_MIN} -> {bet_limit} loop).')\n\n if n == 1:\n bet_type = BET_TYPES[4]\n if bet_type == BET_TYPES[1]:\n payout = int(MULTIPLIERS[BET_TYPES[5]] * 1000) * amount // (1000 * n)\n else:\n payout = MULTIPLIERS[bet_type] * amount\n if self.icx.get_balance(self.address) < payout:\n Logger.debug(f'Not enough in treasury to make the play.', TAG)\n revert('Not enough in treasury to make the play.')\n\n spin = self.get_random(user_seed)\n winningNumber = WHEEL_ORDER[int(spin * 21)]\n Logger.debug(f'winningNumber was {winningNumber}.', TAG)\n win = winningNumber in nums\n payout = payout * win\n self.BetResult(str(spin), winningNumber, payout)\n\n if win == 1:\n self._wager_payout(self.address, payout)\n else:\n Logger.debug(f'Player lost. ICX retained in treasury.', TAG)",
"def proof_of_work(self):\n last_block = self.__chain[-1]\n last_hash = hash_block(last_block)\n proof = 0\n # Try different PoW numbers and return the first valid one\n while not Verification.valid_proof(self.__open_transfers, last_hash, proof):\n proof += 1\n print(proof)\n return proof",
"def get_sports_job_postings():\n # Gather HTML code from TeamworkOnline.com\n url = 'https://www.teamworkonline.com/jobs-in-sports'\n response = requests.get(url)\n html = response.content\n soup = BeautifulSoup(html, features='lxml')\n matches = soup.findAll(name='h3')\n orgs = soup.findAll(name='span', attrs={'class': 'icon-bullet__content icon-bullet__content--recent-job-card'})\n links = soup.findAll(name='a', attrs={'class': 'result-cta button button--wire'})\n \n # Create lists for job title, organization, and link to posting\n joblist = []\n orglist = []\n linklist = []\n for match in matches:\n joblist.append(match.text)\n \n for org in orgs:\n orglist.append(org.text)\n \n for link in links:\n linklist.append('https://www.teamworkonline.com/' + link.get('href'))\n\n # Eliminate certain responses since they're just ads for the site\n joblist = [x for x in joblist if x != 'Know when your application is reviewed.']\n joblist = [x for x in joblist if x != 'Teamwork Online Individual Resume and Profile Review'] \n orglist = [x for x in orglist if 'Sports Jobs in' not in x]\n\n # Create zipped list of all three data points\n all_list = [i + ' | ' + j + ' | ' + k for i, j, k in zip(joblist, orglist, linklist)]\n\n # Write master list to text file to compare each running with it to only print new jobs\n with open ('/Users/User/Desktop/falck_workspace/one_offs/other_files/two_jobs.txt', 'rb') as f:\n old_all_list = pickle.load(f)\n\n just_new = set(all_list) - set(old_all_list)\n too_many = 'Too many job postings...'\n\n # Check if the set of differences between the old set and new set is empty, then print only the differences\n isEmpty = (len(just_new) == 0)\n total_entry = []\n if isEmpty:\n total_entry += ['No new postings.']\n else:\n for entry in just_new:\n entrysplit = entry.split('|')\n post = '\\n- ' + entrysplit[0] + ' at ' + entrysplit[1] + '\\n'\n total_entry.append(post)\n \n # Turn list of all new jobs into one string\n return_string = ' '.join(total_entry)\n \n # Overwrite the old text file with new data so when it's run again, it only compares postings to the last time function was run\n with open('/Users/User/Desktop/falck_workspace/one_offs/other_files/two_jobs.txt', 'wb') as f:\n pickle.dump(all_list, f)\n\n if len(return_string)>1600:\n return too_many\n else:\n return return_string",
"def test_solvation_box(self):\n waters = np.random.randint(1000, 10000)\n log.debug('Trying {} waters in an isometric box...'.format(waters))\n solvate(tleapfile='./cb6-but/tleap.in', pdbfile='cb6-but.pdb',\n bufferwater=waters, pbctype=0)\n grepped_waters = sp.check_output([\"grep -oh 'WAT' ./cb6-but/solvated.prmtop | wc -w\"],\n shell=True)\n self.assertEqual(int(grepped_waters), waters)",
"def betting_round(self, method, params):\n self.bet_history += [[]]\n current_bets = [self.starting_player] * len(self.agents)\n \n max_bet = 0\n if method == self.deal_cards:\n max_bet = big_blind\n current_bets[self.starting_player] = small_blind\n current_bets[(self.starting_player + 1) % len(self.agents)] = big_blind\n\n (self.all_in[self.starting_player], bet) = self.normalize_bet(self.chips[self.starting_player], method(self.agents[self.starting_player], params[self.starting_player]), max_bet)\n self.in_game[self.starting_player] = (not self.all_in[self.starting_player])\n current_bets[self.starting_player] = bet\n self.chips[self.starting_player] -= bet\n check = True if bet == 0 else False\n max_bet = max(max_bet, bet)\n self.pot += bet\n self.bet_history[-1] += [bet]\n\n raised_player = self.starting_player\n i = (raised_player + 1) % len(self.agents)\n\n if method == self.deal_cards:\n # raised_player = (self.starting_player + 1) % len(agents)\n check = False\n if bet > max_bet:\n raised_player = i\n max_bet = bet\n\n if bet == 0:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n while (i != raised_player) and (not self.all_in[i]) and (current_bets[i] <= max_bet):\n if self.in_game[i]:\n (self.all_in[i], bet) = self.normalize_bet(self.chips[i], method(self.agents[i], params[i]), max_bet)\n self.in_game[i] = (not self.all_in[i])\n delta_bet = max(0, bet - current_bets[i])\n current_bets[i] = bet\n self.chips[i] -= delta_bet\n self.pot += delta_bet\n self.bet_history[-1] += [bet]\n\n if bet > max_bet:\n check = False\n raised_player = i\n max_bet = bet\n\n if bet == 0 and not check:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n i = (i + 1) % len(self.agents)",
"def find_best(self):\n best_st = 0\n best_bt = 0\n best_perf = -1.1\n for bt in self.btl:\n for st in self.stl:\n if self.total[bt, st, \"perf\"] > best_perf:\n best_perf = self.total[bt, st, \"perf\"]\n best_st = st\n best_bt = bt\n return (best_perf, self.total[best_bt, best_st, \"count\"], best_bt, best_st)",
"def part2(problem_input: Iterable[str]) -> int:\n\n blueprints: dict[int, tuple[RobotCost, RobotCost, RobotCost, RobotCost]] = {}\n for s in problem_input:\n num = re.search(r\"Blueprint (\\d+):\", s)[1] # type: ignore\n r_r = re.search(r\"Each ore robot costs (\\d+) ore.\", s)[1] # type: ignore\n c_r = re.search(r\"Each clay robot costs (\\d+) ore.\", s)[1] # type: ignore\n _, b_r, b_c = re.search(r\"Each obsidian robot costs (\\d+) ore and (\\d+) clay.\", s) # type: ignore\n _, g_r, g_b = re.search(r\"Each geode robot costs (\\d+) ore and (\\d+) obsidian.\", s) # type: ignore\n blueprints[int(num)] = (\n RobotCost(int(r_r), 0, 0),\n RobotCost(int(c_r), 0, 0),\n RobotCost(int(b_r), int(b_c), 0),\n RobotCost(int(g_r), 0, int(g_b)),\n )\n\n def max_geodes_harvested(blueprint: int, time: int) -> int:\n orebot_cost, claybot_cost, obsidianbot_cost, geodebot_cost = blueprints[\n blueprint\n ]\n initial_state = EconomicState(0, 0, 0, 0, 1, 0, 0, 0)\n end_states = set([initial_state])\n for t in range(time):\n print(f\"{blueprint} {t}..\")\n future_states: set[EconomicState] = set()\n for s in end_states:\n future_states.add(\n EconomicState(\n ore=s.ore + s.ore_robots,\n clay=s.clay + s.clay_robots,\n obsdian=s.obsdian + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots,\n )\n )\n if (\n geodebot_cost.ore <= s.ore\n and geodebot_cost.clay <= s.clay\n and geodebot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - geodebot_cost.ore + s.ore_robots,\n clay=s.clay - geodebot_cost.clay + s.clay_robots,\n obsdian=s.obsdian\n - geodebot_cost.obsdian\n + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots + 1,\n )\n )\n if (\n obsidianbot_cost.ore <= s.ore\n and obsidianbot_cost.clay <= s.clay\n and obsidianbot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - obsidianbot_cost.ore + s.ore_robots,\n clay=s.clay - obsidianbot_cost.clay + s.clay_robots,\n obsdian=s.obsdian\n - obsidianbot_cost.obsdian\n + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots + 1,\n geode_robots=s.geode_robots,\n )\n )\n if (\n claybot_cost.ore <= s.ore\n and claybot_cost.clay <= s.clay\n and claybot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - claybot_cost.ore + s.ore_robots,\n clay=s.clay - claybot_cost.clay + s.clay_robots,\n obsdian=s.obsdian - claybot_cost.obsdian + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots + 1,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots,\n )\n )\n if (\n orebot_cost.ore <= s.ore\n and orebot_cost.clay <= s.clay\n and orebot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - orebot_cost.ore + s.ore_robots,\n clay=s.clay - orebot_cost.clay + s.clay_robots,\n obsdian=s.obsdian - orebot_cost.obsdian + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots + 1,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots,\n )\n )\n end_states = future_states\n if t >= 25:\n high_score = max(s.geodes for s in end_states)\n if high_score > 0:\n end_states = set(s for s in end_states if s.geodes > 
high_score * 0.67)\n g = max(s.geodes for s in end_states)\n print(f\"{blueprint} >>>{g}<<<\")\n return g\n\n return prod(max_geodes_harvested(n, 32) for n in [1,2,3])",
"def test_parallel_pick_otus_blast(self):\r\n\r\n params = {'refseqs_fp': self.refseqs1_fp,\r\n 'similarity': 0.97,\r\n 'blast_db': None,\r\n 'max_e_value': 1e-10,\r\n 'min_aligned_percent': 0.50\r\n }\r\n\r\n app = ParallelPickOtusBlast()\r\n r = app(self.inseqs1_fp,\r\n self.test_out,\r\n params,\r\n job_prefix='BTEST',\r\n poll_directly=True,\r\n suppress_submit_jobs=False)\r\n otu_map_fp = glob(join(self.test_out, '*otus.txt'))[0]\r\n otu_map = parse_otu_map(open(otu_map_fp, 'U'))\r\n # some basic sanity checks: at least one OTU per reference sequence\r\n self.assertTrue(len(otu_map[0]) > 5)\r\n self.assertEqual(set(otu_map[2]), set(['r1', 'r2', 'r3', 'r4', 'r5']))",
"def branchNBound2(nationtxt, bound, scheme):\n\n\n nation = nationLoader(nationtxt)\n transmitterCosts = scheme\n\n neighborCount = {}\n for province in nation:\n neighborCount.update({province:len(nation.get(province)[0])})\n\n\n neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__)\n\n #~ neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__, reverse=True)\n\n for key in neighborCountSorted:\n provinces.append(key)\n #~ print provinces\n\n upperbound = bound\n #~ print bound\n\n\n\n solution = []\n\n\n counter = 0\n\n\n\n\n while index >= 0:\n\n counter += 1\n if counter % 100000000 == 0:\n print counter\n print \"Now at:\", nation\n\n\n if index == -1:\n break\n\n # Assign transmitter\n if nation[provinces[index]][1] == numTransmitters:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n else:\n costs, index = updateTransmitter(nation, False, scheme, provinces, costs, index)\n\n # Check if costs are above upper bound\n if (costs + (len(provinces) - (index + 1)) * transmitterCosts[0]) > upperbound:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n # Check if a neighbor has the same transmitter\n conflict = False\n for neighbor in nation[provinces[index]][0]:\n if nation[neighbor][1] == nation[provinces[index]][1]:\n conflict = True\n break\n\n if conflict:\n continue\n\n # Check if a solution is found\n if index == len(provinces) - 1:\n #~ print \"\\nSOLUTION:\"\n if costs < upperbound:\n solution = []\n solution.append(json_deep_copy(nation))\n upperbound = costs\n #~ print \"Score:\", upperbound\n #~ print nation\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n index += 1\n\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n for nation in solution:\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in nation:\n\n if nation[province][1] == 1:\n one += 1\n if nation[province][1] == 2:\n two += 1\n if nation[province][1] == 3:\n three += 1\n if nation[province][1] == 4:\n four += 1\n if nation[province][1] == 5:\n five += 1\n if nation[province][1] == 6:\n six += 1\n if nation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if transmitterCosts[3] != transmitterCosts[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n return counter",
"def check_correctness(self):\n\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = gt_file.readlines()\n\n # Check for inequality\n if len(out_lines) != len(gt_lines):\n return 0\n\n # Check for inequality\n for i in range(len(out_lines)):\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n\n return 1",
"def _collate_rollcalls(self, congress_num, cats, silent=True):\n if not silent:\n print \"Gathering valid votes (this is the longest part)\"\n house_votes = [[],[],[],[],[]]\n senate_votes = [[],[],[],[],[]]\n\n vote_files = self._get_specific_votes(congress_num, cats)\n num_votes = len(vote_files)\n if not silent:\n print \"Processing %s votes:\" % (num_votes)\n for f in vote_files:\n vote = self._format_vote(f)\n if vote[4]['where'] == 'senate':\n loc = senate_votes\n else:\n loc = house_votes\n for i in range(len(vote)):\n loc[i].append(vote[i])\n num_votes = num_votes - 1\n if not silent:\n print \": %s votes to go\" % (num_votes)\n return (house_votes, senate_votes)",
"def test_get_state_comparison_stats_former_slaveholder(self):\n\n key = '% Former slaveholder employees'\n\n # Kentucky: 0 former slaveholder employees, and 2 who weren't\n for _ in range(2):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.kentucky)\n\n # Mississippi: 1 former slaveholder employee who died during an assignment, and 1 who wasn't\n for _ in range(1):\n employee = EmployeeFactory(slaveholder=True)\n employee.bureau_states.add(self.mississippi)\n for _ in range(1):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.mississippi)\n\n # Texas: 1 former slaveholder employee, and 9 who weren't\n for _ in range(1):\n employee = EmployeeFactory(slaveholder=True)\n employee.bureau_states.add(self.texas)\n for _ in range(9):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.texas)\n\n expected_output = [('Mississippi', 50), ('Texas', 10)]\n\n stats = get_state_comparison_stats(number=2)\n top_states = self.get_state_stats_for_key(stats, key)\n\n self.assertListEqual(top_states, expected_output,\n f\"'{key}' should contain states with the top x % of former slaveholder employees\")"
] | [
"0.5839099",
"0.4828558",
"0.47592798",
"0.47367328",
"0.4653196",
"0.4641992",
"0.4624824",
"0.4595079",
"0.45939693",
"0.4589177",
"0.45874888",
"0.45654458",
"0.45465288",
"0.45387793",
"0.4514298",
"0.45136043",
"0.45117742",
"0.45021626",
"0.4501531",
"0.44877023",
"0.44857976",
"0.4470353",
"0.44635305",
"0.44595915",
"0.4449689",
"0.44302008",
"0.4417568",
"0.44131193",
"0.44105613",
"0.44087476"
] | 0.60203 | 0 |
Test the file import method. Must return a task object | def test_process_file_import(self):
new_file_proc = FileImportForm()
res = new_file_proc.process_file('x')
self.assertTrue(isinstance(res, AsyncResult)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_task_files(self, session, task):\n self._fetch_info(task.imported_items(), False, True)",
"def importer():\n pass",
"def file_import(self):\r\n\r\n try:\r\n self.process_file_import()\r\n except InputError as ex:\r\n print(ex)\r\n self.file_import()",
"def test_taskfile_import(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n assert modpath not in sys.modules\n assert all(not p.startswith(modpath) for p in sys.modules)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n taskfile = import_module(randpath)\n\n expected = set(pypath) | set([modpath])\n result = set(p for p in sys.modules if p.startswith(modpath))\n\n assert modpath in sys.modules\n assert result == expected\n assert taskfile.TEST == randpath",
"def start_import_task(clientRequestToken=None, name=None, importUrl=None):\n pass",
"def test_import_process(self):\r\n good_file = self._get_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._chrome_data_test()",
"def run_import_tasks(import_tasks):\n # import the given import tasks\n asset_tools.import_asset_tasks(import_tasks)\n\n # return the imported assets as paths\n return import_task.get_editor_property(\"imported_object_paths\")",
"def test_import_process(self):\r\n good_file = self._get_google_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._google_data_test()",
"def test_import_process(self):\r\n good_file = self._get_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._firefox_data_test()",
"def test_54_import_tasks(self, mock):\r\n Fixtures.create()\r\n self.register()\r\n self.new_application()\r\n # Without tasks, there should be a template\r\n res = self.app.get('/app/sampleapp/tasks/import', follow_redirects=True)\r\n err_msg = \"There should be a CSV template\"\r\n assert \"template=csv\" in res.data, err_msg\r\n err_msg = \"There should be an Image template\"\r\n assert \"mode=image\" in res.data, err_msg\r\n err_msg = \"There should be a Map template\"\r\n assert \"mode=map\" in res.data, err_msg\r\n err_msg = \"There should be a PDF template\"\r\n assert \"mode=pdf\" in res.data, err_msg\r\n # With tasks\r\n self.new_task(1)\r\n res = self.app.get('/app/sampleapp/tasks/import', follow_redirects=True)\r\n err_msg = \"There should load directly the basic template\"\r\n err_msg = \"There should not be a CSV template\"\r\n assert \"template=basic\" not in res.data, err_msg\r\n err_msg = \"There should not be an Image template\"\r\n assert \"template=image\" not in res.data, err_msg\r\n err_msg = \"There should not be a Map template\"\r\n assert \"template=map\" not in res.data, err_msg\r\n err_msg = \"There should not be a PDF template\"\r\n assert \"template=pdf\" not in res.data, err_msg\r\n self.signout()\r\n\r\n self.signin(email=Fixtures.email_addr2, password=Fixtures.password)\r\n res = self.app.get('/app/sampleapp/tasks/import', follow_redirects=True)\r\n assert res.status_code == 403, res.status_code",
"def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()",
"def test_import_test_asset(self):\n pass",
"def run_task(self) -> Task:",
"def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_xml_data_test()",
"def prepare_taskfile(taskfile):\n path = os.path.dirname(taskfile)\n taskmodulename = os.path.splitext(os.path.basename(taskfile))[0]\n logging.info(\"Loading task file %s from %s\", taskmodulename, path)\n fp, pathname, description = imp.find_module(taskmodulename, [path])\n try:\n return imp.load_module(taskmodulename, fp, pathname, description)\n finally:\n if fp: \n fp.close()",
"def test_taskfile_taskmod_loaded(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n import_module(modpath)\n\n # Forcibly remove the generated taskfile\n sys.modules.pop(randpath)\n\n import_module(randpath)",
"def test_import():\n assert tfio is not None",
"def test_upload_file(self):\n pass",
"def task(self):\n return import_path_to_callable(self.func)",
"def test_get_file_object(self):\n pass",
"def test_taskmod_taskfiles_only(monkeypatch, modpath):\n\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n task = import_module(modpath)\n\n assert modpath in sys.modules\n assert sys.modules[modpath] is task\n assert task.__taskmodules__ == pypath\n for n in names:\n assert hasattr(task, n)\n assert getattr(task, n).TEST == '{}.{}'.format(modpath, n)",
"def import_file(self, filename, **kwargs):\n raise NotImplementedError",
"def test_get_infile(self):\r\n pass # not practically testable, but obvious file I/O\r",
"def test_load(api):\n # upload file to file.io servers\n uploaded_file = api.upload(\n tag='test_file',\n expiry='1d',\n path='tests/test_file.txt'\n )\n\n # check that instance of FileIO has these fields\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list\n\n # export the file in json format\n api.export('tests/exported.json')\n\n # check that exported file exists\n assert path.isfile('tests/exported.json')\n\n # set it to empty list\n api.file_obj_list = []\n\n # load the file in json format\n api.load('tests/exported.json')\n\n # remove the file\n remove('tests/exported.json')\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list\n\n # export the file in pkl format\n api.export('tests/exported.pkl')\n\n # set it to empty list\n api.file_obj_list = []\n\n # load the file in pkl format\n api.load('tests/exported.pkl')\n\n # remove exported.pkl file\n remove('tests/exported.pkl')\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list",
"def test_get_imports(self):\n pass",
"async def test_flow_import(hass):\n mocked_device = _create_mocked_device()\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == FRIENDLY_NAME\n assert result[\"data\"] == CONF_DATA\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()",
"def get_file_import(file_import_id: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n workspace_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFileImportResult:\n __args__ = dict()\n __args__['fileImportId'] = file_import_id\n __args__['resourceGroupName'] = resource_group_name\n __args__['workspaceName'] = workspace_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:securityinsights:getFileImport', __args__, opts=opts, typ=GetFileImportResult).value\n\n return AwaitableGetFileImportResult(\n content_type=pulumi.get(__ret__, 'content_type'),\n created_time_utc=pulumi.get(__ret__, 'created_time_utc'),\n error_file=pulumi.get(__ret__, 'error_file'),\n errors_preview=pulumi.get(__ret__, 'errors_preview'),\n files_valid_until_time_utc=pulumi.get(__ret__, 'files_valid_until_time_utc'),\n id=pulumi.get(__ret__, 'id'),\n import_file=pulumi.get(__ret__, 'import_file'),\n import_valid_until_time_utc=pulumi.get(__ret__, 'import_valid_until_time_utc'),\n ingested_record_count=pulumi.get(__ret__, 'ingested_record_count'),\n ingestion_mode=pulumi.get(__ret__, 'ingestion_mode'),\n name=pulumi.get(__ret__, 'name'),\n source=pulumi.get(__ret__, 'source'),\n state=pulumi.get(__ret__, 'state'),\n system_data=pulumi.get(__ret__, 'system_data'),\n total_record_count=pulumi.get(__ret__, 'total_record_count'),\n type=pulumi.get(__ret__, 'type'),\n valid_record_count=pulumi.get(__ret__, 'valid_record_count'))",
"def upload_tasks(self, batch_id = None, input_file='', file_type = 'Excel'):\n url = self._base_url + urlConfig.URLS['Project'] + '/v2/' + self._project_id + \"/batch/\" + batch_id + \"/tasks/import?fileType=\" + file_type\n \n \n try: \n if input_file.endswith('.gz'):\n fileContent = open(input_file, 'rb').read()\n encoded = base64.b64encode(fileContent)\n else:\n fileContent = open(input_file, 'rb').read()\n with gzip.open('file.txt.gz', 'wb') as f:\n f.write(fileContent)\n fileContent = open('file.txt.gz', 'rb').read()\n encoded = base64.b64encode(fileContent)\n os.remove('file.txt.gz')\n response = requests.post(url, encoded, headers = {\n 'Content-Type': 'text/plain',\n 'Authorization': 'Bearer ' + self._get_token()})\n logging.debug(response.json())\n parsed = response.json()\n job_id = parsed['job_id']\n logging.debug('job id = %s', job_id)\n return job_id\n \n except Exception as e: print(e)",
"def test_import_upload(self):\r\n self._login_admin()\r\n\r\n # verify we get the form\r\n res = self.app.get('/admin/import')\r\n self.assertTrue(\r\n '<form' in res.body,\r\n 'Should have a form in the body for submitting the upload')\r\n\r\n res = self._upload()\r\n\r\n self.assertEqual(\r\n res.status,\r\n \"302 Found\",\r\n msg='Import status is 302 redirect by home, ' + res.status)\r\n\r\n # now verify that we've got our record\r\n imp = ImportQueueMgr.get_ready()\r\n imp = imp[0]\r\n self.assertTrue(imp, 'We should have a record')\r\n self.assertTrue(imp.file_path.endswith('admin.delicious.html'))\r\n self.assertEqual(imp.status, 0, 'start out as default status of 0')",
"def __init__(self, file_format, location):\n self.file_format = file_format\n self.location = location\n Task.__init__(self)"
] | [
"0.67191964",
"0.6706263",
"0.65843284",
"0.65285414",
"0.64675957",
"0.6450101",
"0.63975704",
"0.63933843",
"0.63705486",
"0.63664347",
"0.6353522",
"0.6304793",
"0.6303843",
"0.6255984",
"0.6252447",
"0.62489635",
"0.6212668",
"0.6196374",
"0.619023",
"0.61739105",
"0.6154853",
"0.6126853",
"0.61174643",
"0.61074936",
"0.60998714",
"0.60858",
"0.608212",
"0.60446256",
"0.6036579",
"0.6026089"
] | 0.7435163 | 0 |
I need to go to the squad roster link on fantacalcio.it and find the link corresponding to the player; from there I then extract the average voto and fantavoto | def scraper_voto(self):
        # to find the link on fantacalcio.it I first need to find the team's link and get its name
soup_rosa = BeautifulSoup(
requests.get(f"{self.LINK_FANTACALCIO_IT}/{self.team}#rosa").text,
"html.parser",
)
print(self.name)
displayed_name = self.name
        if displayed_name == "Coulibaly":  # edge case, the site gets confused
displayed_name = "Coulibaly M."
        # find the player's personal page link and assign it
link = soup_rosa.find("a", text=displayed_name.upper())["href"]
self.scheda_giocatore = link
        # read the average voto and average fantavoto
soup = BeautifulSoup(requests.get(link).text, "html.parser")
self.media_voto = float(soup.find_all(class_="nbig2")[0].text.replace(",", "."))
self.media_fantavoto = float(
soup.find_all(class_="nbig2")[1].text.replace(",", ".")
)
        # also read the role from the info card
infos = soup.find_all(class_="col-lg-6 col-md-6 col-sm-12 col-xs-12")[-2]
self.ruolo = str(infos.find("span").text)
        # fill in the data: matches, goals and assists
dati_partite = soup.find_all(class_="nbig")
partite = "🥅 " + dati_partite[0].text
        # goalkeepers have different stats!
if self.ruolo == "P":
goal = "❌ " + dati_partite[1].text
self.dati = "<br>".join([partite, goal])
else:
goal = "⚽ " + dati_partite[1].text
assist = "👟 " + dati_partite[2].text
self.dati = "<br>".join([partite, goal, assist])
        # add a star to the name if they have a good average rating
if self.media_fantavoto > 7:
self.name += " ⭐" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLink(self):",
"def create_link(self):\n self.filename = App.get_running_app().root.ids.camera_screen.capture()\n self.url = FileSharer(self.filename).share()\n self.ids.label.text = self.url",
"def getVotacion(self, url):",
"def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/media/{}'.format(obj.id)\n )",
"async def aifursona(self, ctx: commands.Context):\n async with ctx.typing():\n rint = random.randint(0, 99999)\n link = f\"http://thisfursonadoesnotexist.com/v2/jpgs/seed{rint}.jpg\"\n async with self.session.get(link) as resp:\n if resp.status == 200:\n data = BytesIO(await resp.read())\n data.name = \"ai-fursona.jpg\"\n data.seek(0)\n file = discord.File(data)\n return await ctx.send(file=file)\n else:\n await ctx.send(f\"API returned status code: {resp.status}\")",
"def full_text_doc_url(self):\n base_url = 'https://pic.datamade.us/chicago/document/'\n # base_url = 'http://127.0.0.1:5000/chicago/document/'\n \n if self.documents.filter(document_type='V').all():\n legistar_doc_url = self.documents.filter(document_type='V').first().document.url\n doc_url = '{0}?filename={2}&document_url={1}'.format(base_url, \n legistar_doc_url, \n self.identifier)\n return doc_url\n else:\n return None",
"async def olá(self):\r\n\t\tawait self.client.say('© Maddie 2017')\r\n\t\te = Embed()\r\n\t\te.set_image(url='https://cdn.discovery.pgsitecore.com/en-us/-/media/Olay_PathFinder/Images/a/OLAY%20TE%207IN1%20DEEP%20PENETRATING%20MOISTURE%20BODY%20WASH_Front.png?w=460&v=1-201705260605')\r\n\t\tawait self.client.say(embed=e)",
"def link_callback(uri, rel):\n #print(\"uri : \"+uri) \n sUrl = settings.STATIC_URL # Typically /static/\n sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/\n mUrl = settings.MEDIA_URL # Typically /media/\n mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/\n\n #print(\"sUrl : \"+sUrl)\n #print(\"sRoot : \"+sRoot)\n #print(\"mUrl : \"+mUrl)\n #print(\"mRoot : \"+mRoot) \n\n if uri.startswith(mUrl):\n path = os.path.join(mRoot, uri.replace(mUrl, \"\"))\n elif uri.startswith(sUrl):\n path = os.path.join(sRoot, uri.replace(sUrl, \"\"))\n else:\n return uri\n\n #print(\"path : \"+path) \n # make sure that file exists\n if not os.path.isfile(path):\n raise Exception(\n 'media URI must start with %s or %s' % (sUrl, mUrl)\n )\n return path",
"def get_url(self, source):\n if source == 'nomads':\n if self.model == 'rap':\n base = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/rap/prod/'\n path = f'rap.{self.date:%Y%m%d}/rap.t{self.date:%H}z.awip32f{self.fxx:02d}.grib2'\n else:\n base = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/'\n if self.model == 'hrrr':\n path = f\"hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n elif self.model == 'hrrrak':\n path = f\"hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.ak.grib2\"\n elif source == 'aws':\n if self.model == 'rap':\n base = 'https://noaa-rap-pds.s3.amazonaws.com/'\n path = f'rap.{self.date:%Y%m%d}/rap.t{self.date:%H}z.awip32f{self.fxx:02d}.grib2'\n else:\n base = 'https://noaa-hrrr-bdp-pds.s3.amazonaws.com/'\n if self.model == 'hrrr':\n path = f\"hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n elif self.model == 'hrrrak':\n path = f\"hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.ak.grib2\"\n elif source == 'google':\n if self.model == 'rap':\n base = 'https://storage.googleapis.com/rapid-refresh/'\n path = f'rap.{self.date:%Y%m%d}/rap.t{self.date:%H}z.awip32f{self.fxx:02d}.grib2'\n else:\n base = 'https://storage.googleapis.com/high-resolution-rapid-refresh/'\n if self.model == 'hrrr':\n path = f\"hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n elif self.model == 'hrrrak':\n path = f\"hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.ak.grib2\"\n elif source == 'azure':\n if self.model == 'rap':\n base = 'https://noaarap.blob.core.windows.net/rap'\n path = f'rap.{self.date:%Y%m%d}/rap.t{self.date:%H}z.awip32f{self.fxx:02d}.grib2'\n else:\n base = 'https://noaahrrr.blob.core.windows.net/hrrr/'\n if self.model == 'hrrr':\n path = f\"hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n elif self.model == 'hrrrak':\n path = f\"hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.ak.grib2\"\n elif source.startswith('pando'):\n if source[-1] == '2':\n gateway = 2\n else:\n gateway = 1\n if self.model == 'rap':\n return None # No RAP data on Pando\n else:\n base = f'https://pando-rgw0{gateway}.chpc.utah.edu/'\n path = f\"{self.model}/{self.field}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n \n return base+path",
"def cosmic_link(variant_obj):\n\n cosmic_ids = variant_obj.get('cosmic_ids')\n\n if not cosmic_ids:\n return None\n else:\n cosmic_id = cosmic_ids[0]\n url_template = (\"https://cancer.sanger.ac.uk/cosmic/mutation/overview?id={}\")\n\n\n return url_template.format(cosmic_id)",
"def uri_danbooru(bot, response, matches):\n post = json.loads(response.text)\n\n url = 'http://danbooru.donmai.us{}'.format(post['file_url'])\n artists = [tag.replace('_', ' ').title() for tag in post['tag_string_artist'].split()]\n tags = [tag.replace('_', ' ').title() for tag in post['tag_string_character'].split()]\n\n return 'Danbooru: {}'.format(', '.join(artists)), url, ', '.join(tags)",
"def dod():\n file = requests.get(\"https://www.bewakoof.com/design-of-the-day\")\n soup = bs4.BeautifulSoup(file.text, \"lxml\")\n # print(soup)\n\n linkList = soup.select(\"a[class='col-sm-4 col-xs-6'] > div > div > div > img:nth-of-type(2)]\")\n # soup.select(\"div[id=foo] > div > div > div[class=fee] > span > span > a\")\n for i in linkList:\n if \"t-shirt-men\" in str(i):\n # print(i.get('src'))\n webbrowser.open(i.get('src'))",
"def get_url_from_era(_era,is_signal): \n\n var_format_val=10\n\n maod_versions=[\"MiniAODv2\"]\n urls=[]\n for maod_version in maod_versions:\n \n GD_File='config/GoogleDocLink'+maod_version+'.txt'\n if is_signal:\n GD_File='config/GoogleDocLinkSignal'+maod_version+'.txt'\n\n GD_File_READ = open (GD_File,\"r\")\n\n for line in GD_File_READ:\n if len(line.split()) ==2 :\n if line.split()[0] == _era:\n urls.append(line.split()[1])\n \n GD_File_READ.close()\n return urls",
"def get_url_from_era_def(_era,is_signal,maod_version): \n var_format_val=10\n\n GD_File='config/GoogleDocLink'+maod_version+'.txt'\n if is_signal:\n GD_File='config/GoogleDocLinkSignal'+maod_version+'.txt'\n\n GD_File_READ = open (GD_File,\"r\")\n\n for line in GD_File_READ:\n if len(line.split()) ==2 :\n if line.split()[0] == _era:\n GD_File_READ.close()\n return line.split()[1]\n print ('Error in assigning GD page from era')\n return '-11111'",
"def set_media(link):\r\n results = {}\r\n make_link_info_job(results, link, g.useragent)()\r\n update_link(link, *results[link])",
"def url_media(self, csvlinks=\"\", csvset=\"\", urldir=\"\", medialog_file=\"\",\n directory=\"\", ignore_twitter_link=True, mediatype=\"vi\",\n site_sources=[], name_scraping=\"\", video_timelimit=1000,\n image_timelimit=60):\n\n if csvlinks == \"\":\n csvlinks = CSVLINKS\n if csvset == \"\":\n csvset = CSVSETURL\n if medialog_file == \"\":\n medialog_file = MEDIALOG\n if directory == \"\":\n directory = self.directory\n\n if urldir == \"\" and name_scraping == \"\":\n urldir = URLDIR\n name_scraping = urldir.lower()\n elif name_scraping == \"\":\n name_scraping = urldir.lower()\n elif urldir == \"\":\n urldir = name_scraping\n\n if urldir[-1] != \"/\":\n urldir = urldir + \"/\"\n if name_scraping[-1] == \"/\":\n name_scraping = name_scraping[:-1]\n\n mediatype = str(mediatype).lower()\n if mediatype not in (\"v\", \"i\", \"vi\", \"iv\"):\n mediatype = \"vi\"\n\n root_dir = os.getcwd()\n\n if directory != \"\":\n os.chdir(directory)\n directory = os.getcwd()\n else:\n directory = root_dir\n\n setUrls = CSVUtils.csv_to_dict(csvset, 1, 0)\n\n if urldir[-1] == '/':\n urldir = urldir[:-1]\n OSUtils.createDir(urldir)\n\n seq = \"\"\n\n # get next sequence number\n if os.path.isfile(medialog_file):\n seq = JSONUtils.read_keyval_json(\"next_\"+name_scraping+\"Seq\",\n medialog_file)\n\n # if the parameter does not exist, get the seq from the\n if seq == \"\":\n seq = max([int(d) for d in os.listdir(urldir)] + [0]) + 1\n\n try:\n seqdir = os.path.realpath(urldir + \"/\" + str(seq))\n\n # implemented in order to give a feedback about progresss %\n total_row = sum(1 for row in CSVUtils.csvGenerator(csvlinks))\n row_count = 0\n\n # iterate through each link\n for line in CSVUtils.csvGenerator(csvlinks):\n row_count += 1\n\n if \"https://twitter.com\" in line[0] and ignore_twitter_link:\n continue\n\n url = self.__expandURL(line[0])\n\n if len(site_sources) > 0:\n if len([site for site in site_sources if site in url]\n ) == 0:\n continue\n\n if url not in setUrls.keys():\n\n print('\\x1b[6;30;42m' + \"Starting Scrapping for Link \"\n + str(url) + \" (\" + str(seq) + \")\" + '\\x1b[0m')\n\n os.mkdir(seqdir)\n os.chdir(seqdir)\n\n if \"v\" in mediatype:\n try:\n # in order to avoid stalls in lives\n signal.signal(signal.SIGALRM,\n OSUtils.handler_timeout)\n signal.alarm(video_timelimit)\n\n youtube_dl.YoutubeDL({}).download([url])\n except KeyboardInterrupt:\n raise\n except Exception as e:\n print(e)\n finally:\n signal.alarm(0)\n\n if \"i\" in mediatype:\n for im in self.__urlImageGenerator(url):\n try:\n signal.signal(signal.SIGALRM,\n OSUtils.handler_timeout)\n signal.alarm(image_timelimit)\n\n if \"base64,\" in im:\n continue\n\n lo = Text.lastocc(im, \"/\")+1\n\n if lo < len(im)-1:\n output = im[Text.lastocc(im, \"/\")+1:]\n else:\n output = im[\n Text.lastocc(im[:-1], \"/\")+1:-1]\n\n if output == \"\" or len(output) > 80:\n output = random.randint(1, 10000000000000)\n\n self.__request_download(link=im,\n output=str(output))\n except requests.exceptions.ConnectionError as e:\n print(e)\n continue\n except requests.exceptions.InvalidSchema as e:\n print(e)\n continue\n except Exception as e:\n print(e)\n finally:\n signal.alarm(0)\n\n os.chdir(directory)\n\n setUrls[url] = seq\n\n CSVUtils.write_line_b_csv(csvfile=csvset, line=[seq, url])\n\n print('\\x1b[6;30;42m' + \"Scrap Finished for Link \"\n + str(url) + \" (\"\n + str(round(row_count*100/total_row, 4)) + \"%)\"\n + '\\x1b[0m')\n\n seq += 1\n seqdir = os.path.realpath(urldir + \"/\" + str(seq))\n\n os.chdir(root_dir)\n\n except 
KeyboardInterrupt:\n print(\"Stopping...\")\n\n JSONUtils.add_keyval_json(\"next_\"+name_scraping+\"Seq\", seq,\n medialog_file)\n\n os.chdir(root_dir)\n\n shutil.rmtree(seqdir)\n except Exception as e:\n JSONUtils.add_keyval_json(\"next_\"+name_scraping+\"Seq\", seq,\n medialog_file)\n\n os.chdir(root_dir)\n\n shutil.rmtree(seqdir)\n print(e)\n raise",
"def parse_link(self,data,api):\n return REACT_API_DOCS_URL + data.FILE.split('/')[1] + api.find('a',attrs = {'class': 'hash-link'}).attrs['href']",
"def _transform_gdrive_url(self):\n fileid = self.parsed.path.replace('/file/d/', '').split('/')[0]\n self.url = self.GDRIVE_LINK_TEMPLATE.format(fileid=fileid)",
"async def ponyr(self, *text):\n if len(text) > 0:\n if len(text[0]) > 1 and len(text[0]) < 20:\n try:\n msg = \"+\".join(text)\n search = \"https://derpiboo.ru/search.json?q=\" + msg + \"&random_image=y\" \n async with aiohttp.get(search) as r:\n result = await r.json()\n if \"id\" in result:\n imgid = str(result[\"id\"])\n async with aiohttp.get(\"https://derpiboo.ru/images/\" + imgid + \".json\") as r:\n result = await r.json()\n url = \"http:\" + result[\"image\"]\n await self.bot.say(url)\n else:\n await self.bot.say(\"Your search terms gave no results.\")\n except:\n await self.bot.say(\"Error.\")\n else:\n await self.bot.say(\"Invalid search.\")\n else:\n async with aiohttp.get(\"https://derpiboo.ru/search.json?q=*&random_image=y\") as r:\n result = await r.json()\n imgid = str(result[\"id\"])\n async with aiohttp.get(\"https://derpiboo.ru/images/\" + imgid + \".json\") as r:\n result = await r.json()\n url = result[\"image\"]\n await self.bot.say(\"http:\" + url )",
"async def return_gif_link(query: str) -> dict:\n best_match = extractOne(query.replace(\" \", \"_\"), os.listdir(gif_dir))\n if best_match[1] > 50:\n resp = {\n \"success\": True,\n \"query\": query,\n \"link\": f\"{gif_baseurl}/{best_match[0]}\",\n }\n else:\n resp = {\n \"success\": False,\n \"query\": query,\n \"reason\": \"Was not able to find an appropriate image\",\n }\n return resp",
"def getNewsIconURL(newsBrain):",
"def test_disk_media_item_display_url_ax_rule(db):\n data = datadir.join('1x70.gif').read(mode='rb')\n item = media.fetch_or_create_media_item(data, file_type='gif')\n assert item.display_url == (\n '/static/media/ax/b2/06/adb20677ffcfda9605812f7f47aaa94a9c9b3e1a0b365e43872dc55199f5f224.gif')",
"def get_link(self, conf, link_id):\n\t\tpass",
"async def return_img_link(query: str) -> dict:\n best_match = extractOne(query.replace(\" \", \"_\"), os.listdir(img_dir))\n if best_match[1] > 50:\n resp = {\n \"success\": True,\n \"query\": query,\n \"link\": f\"{img_baseurl}/{best_match[0]}\",\n }\n else:\n resp = {\n \"success\": False,\n \"query\": query,\n \"reason\": \"Was not able to find an appropriate image\",\n }\n return resp",
"def get_url(f):\n return f.replace(Enums.Paths.MEDIA_BASE, Enums.Paths.WEB_BASE)",
"async def aiwaifu(self, ctx: commands.Context):\n async with ctx.typing():\n x = random.randint(0, 99999)\n link = f\"https://www.thiswaifudoesnotexist.net/example-{x}.jpg\"\n async with self.session.get(link) as resp:\n if resp.status == 200:\n data = BytesIO(await resp.read())\n data.name = \"ai-waifu.jpg\"\n data.seek(0)\n file = discord.File(data)\n return await ctx.send(file=file)\n else:\n await ctx.send(f\"API returned status code: {resp.status}\")",
"def test_get_object_link_file(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n url = reverse(\n 'filesfolders:file_serve',\n kwargs={'file': self.file.sodar_uuid, 'file_name': self.file.name},\n )\n ret = plugin.get_object_link('File', self.file.sodar_uuid)\n self.assertEqual(ret['url'], url)\n self.assertEqual(ret['label'], self.file.name)\n self.assertEqual(ret['blank'], True)",
"def get_image_link():\n image_links = set()\n supplemented_keyword = urllib.parse.quote(\n supplemented_keywords[random.randint(0,\n len(supplemented_keywords) - 1)],\n safe='')\n main_keyword = urllib.parse.quote(\n main_keywords[random.randint(0,\n len(main_keywords) - 1)], safe='')\n\n # print('the theme of cats: ' + supplemented_keyword)\n\n search_query = (main_keyword + ' ' + supplemented_keyword).replace(\n ' ', '%20')\n url = 'https://www.google.com/search?q=' + \\\n search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n while 'https://' not in image_link or r'\\\\u' in image_link or '.jpg' not in image_link:\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n return image_link",
"def get_viaf_link(self, viaf_base_uri=\"http://viaf.org/viaf/\"):\n if(self.viaf_id!=\"\"):\n return \"%s%s\"%(viaf_base_uri, self.viaf_id)\n else:\n return None",
"async def cmd_galremlinkuwl(self, ctx):\n links = re.findall(r\"(?P<url>http[s]?://[^\\s]+)\", ctx.message.content)\n\n if not links:\n await ctx.channel.send('Useage: [p]galremlinkuwl <startoflink>, [Bot Owner] Removes a link from gallery link whitelist.')\n\n # ===== REMOVE THE LINKS FROM THE LIST\n new_gal_link_wl = list(set(self.cogset['link_wl']) - set(links))\n\n if Gallery.compare(new_gal_link_wl, self.cogset['link_wl']):\n await ctx.channel.send(content=\"{}\\n are not in the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return \n \n else:\n self.cogset['link_wl'] = new_gal_link_wl\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=\"{}\\n have been removed from the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return"
] | [
"0.6039677",
"0.5896531",
"0.5796971",
"0.5796728",
"0.57508343",
"0.56164914",
"0.56087923",
"0.55653644",
"0.55441266",
"0.5539439",
"0.5490854",
"0.54835784",
"0.5444002",
"0.54344416",
"0.5407101",
"0.54022837",
"0.5398861",
"0.53877646",
"0.5362035",
"0.5359604",
"0.5342245",
"0.5312951",
"0.5312629",
"0.53105885",
"0.53041977",
"0.5286638",
"0.52476805",
"0.524257",
"0.5226833",
"0.522561"
] | 0.62930304 | 0 |
In the "da schierare" category there are 56 articles to read; I select only those from the last 4 days, otherwise I risk mixing them up with the previous matchday. return > list of links to open and read afterwards | def scraper_lista_articoli(LINK_SOS_FANTA: str) -> list:
soup = BeautifulSoup(requests.get(LINK_SOS_FANTA).text, "html.parser")
body = soup.find(class_="widget-content")
titoli = body.find_all("li") # list of all the articles
to_scrape = []
for post in titoli:
# parse the date, which is in Italian; it needs cleaning first
data_pubblicazione = (
post.find(class_="post-meta").text.replace("del", "").replace("alle", "")
)
parsed_data = dateparser.parse(data_pubblicazione, languages=["it"])
# only the last 4 days: as soon as an older post is found, exit the loop
if parsed_data < datetime.now() - timedelta(days=4):
return to_scrape
# add the link
link = post.find("a", href=True)
to_scrape.append(link["href"])
# caso post "a scheda": sono divisi in categorie, aggiungo link multipli
post_multipli = {"PORTIERI": 3, "ATTACCO": 8}
for caso in post_multipli.keys():
if caso in link["title"]:
# the page structure will be link/1/, link/2/, etc.
for i in range(post_multipli.get(caso)):
to_scrape.append(link["href"] + f"{i}/")
return to_scrape | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Cel(categorie, pagini, cautare=\"normala\"):\n log = Logger()\n debug = Verificare_Debug()\n if cautare == \"normala\":\n categorie = html_part_link[categorie]\n s = Scrapper(jsonfn, categorie, None)\n elif cautare == \"personalizata\":\n s = Scrapper(jsonfn, categorie, None, cautare=\"personalizata\")\n html = s.get_html_code(s.link)\n log.scriere(\"Preiau date de pe {}.\".format(s.link))\n\n # Preiau numarul maxim de pagini\n pagini_max = 0\n for el in html.select(s.jsondata[\"pagini\"][\"tag\"]):\n try:\n if el[s.jsondata[\"pagini\"][\"tip\"]] == s.jsondata[\"pagini\"][\"termen\"]:\n for e in el.select(s.jsondata[\"pagini\"][\"tag2\"]):\n try:\n if pagini_max < int(e.text):\n pagini_max = int(e.text)\n except:\n pass\n except:\n pass\n\n # Setez numarul de pagini de pe care se vor prelua produse, comparand datele introduse de utilizator cu numarul\n # maxim de pagini admis de cautare\n if pagini == None:\n pagini = 10\n if pagini > pagini_max:\n pagini = pagini_max\n\n # Pentru fiecare pagina in parte\n hrefs = []\n for i in range(1, pagini+1):\n if cautare == \"normala\":\n html = s.get_html_code(s.jsondata[\"link_pagina\"].format(categorie, i))\n elif cautare == \"personalizata\":\n html = s.get_html_code(s.jsondata[\"link_personalizat_pagina\"].format(categorie, i))\n # Preiau lista produselor\n container = \"\"\n for el in html.select(s.jsondata[\"box\"][\"tag\"]):\n try:\n if el[s.jsondata[\"box\"][\"tip\"]] == s.jsondata[\"box\"][\"termen\"]:\n container = el\n break\n except:\n pass\n\n # Preiau produsele\n prod = []\n for el in container.select(s.jsondata[\"produs\"][\"tag\"]):\n try:\n if el[s.jsondata[\"produs\"][\"tip\"]] == s.jsondata[\"produs\"][\"termen\"]:\n # Verific daca produsul este la reducere\n for e in el.select(s.jsondata[\"produs\"][\"promo\"][\"tag\"]):\n try:\n if e[s.jsondata[\"produs\"][\"promo\"][\"tip\"]] == s.jsondata[\"produs\"][\"promo\"][\"termen\"]:\n prod.append(el)\n break\n except:\n pass\n except:\n pass\n\n # Preiau link-ul spre produs\n for p in prod:\n for el in p.select(s.jsondata[\"href\"][\"tag\"]):\n try:\n if el[s.jsondata[\"href\"][\"tip\"]] == s.jsondata[\"href\"][\"termen\"]:\n hrefs.append(el[s.jsondata[\"href\"][\"arg\"]])\n break\n except:\n pass\n\n # Preiau informatiile fiecarui produs\n for href in hrefs:\n log.scriere(\"Preiau informatiile de pe {}.\".format(href))\n if debug == True:\n Debug(href, 0)\n info = {}\n html = s.get_html_code(href)\n\n # Preiau titlul produsului\n for el in html.select(s.jsondata[\"data\"][\"titlu\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"titlu\"][\"tip\"]] == s.jsondata[\"data\"][\"titlu\"][\"termen\"]:\n info[\"titlu\"] = el.text\n break\n except:\n pass\n\n # Preiau noul pret, vechiul pret si discount-ul produsului\n _ = {}\n for el in html.select(s.jsondata[\"data\"][\"pret\"][\"nou\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"pret\"][\"nou\"][\"tip\"]] == s.jsondata[\"data\"][\"pret\"][\"nou\"][\"termen\"]:\n _[\"nou\"] = el.text + \" Lei\"\n break\n except:\n pass\n for el in html.select(s.jsondata[\"data\"][\"pret\"][\"vechi\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"pret\"][\"vechi\"][\"tip\"]] == s.jsondata[\"data\"][\"pret\"][\"vechi\"][\"termen\"]:\n __ = el.text.split(\"|\")[-1].split(\" \")\n _[\"vechi\"] = __[-2] + \" \" + __[-1].capitalize()\n __ = el.text.split(\"|\")[0].split(\" \")\n _[\"discount\"] = __[-3] + \" \" + __[-2].capitalize()\n except:\n pass\n info[\"pret\"] = _\n\n # Preiau rating-ul produsului (Daca exista. 
Daca nu, se initializeaza cu 0 automat)\n _ = {}\n ok = 0\n for el in html.select(s.jsondata[\"data\"][\"rating\"][\"rata\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"rating\"][\"rata\"][\"tip\"]] == s.jsondata[\"data\"][\"rating\"][\"rata\"][\"termen\"]:\n _[\"rata\"] = el.text\n ok = 1\n break\n except:\n pass\n for el in html.select(s.jsondata[\"data\"][\"rating\"][\"review-uri\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"rating\"][\"review-uri\"][\"tip\"]] == s.jsondata[\"data\"][\"rating\"][\"review-uri\"][\"termen\"]:\n _[\"review-uri\"] = el.text\n break\n except:\n pass\n if ok == 1:\n info[\"rating\"] = _\n elif ok == 0:\n info[\"rating\"] = {\"rata\":\"0\", \"review-uri\":\"0\"}\n\n # Preiau descrierea produsului\n _ = \"\"\n for el in html.select(s.jsondata[\"data\"][\"descriere\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"descriere\"][\"tip\"]] == s.jsondata[\"data\"][\"descriere\"][\"termen\"]:\n _ = cel_string.formare_descriere(el.text)\n break\n except:\n pass\n if _ != \"\" and _ != []:\n info[\"descriere\"] = _\n else:\n info[\"descriere\"] = ['']\n\n # Preiau specificatiile produsului\n _ = {}\n for el in html.select(s.jsondata[\"data\"][\"specs\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"specs\"][\"tip\"]] == s.jsondata[\"data\"][\"specs\"][\"termen\"]:\n title = \"\"\n __ = {}\n for e in el.select(s.jsondata[\"data\"][\"specs\"][\"elem\"][\"tag\"]):\n try:\n if len(e[s.jsondata[\"data\"][\"specs\"][\"elem\"][\"tip\"]]) > 0:\n e2 = e.select(s.jsondata[\"data\"][\"specs\"][\"elem\"][\"spec\"][\"tag\"])\n __[e2[0].text] = e2[1].text\n except:\n if title == \"\":\n title = e.text\n if len(__.keys()) > 0:\n _[title] = __\n __ = {}\n title = e.text\n if title != '':\n _[title] = __\n else:\n _[\"Specs\"] = __\n except:\n pass\n info[\"specs\"] = cel_string.formare_specificatii(_)\n\n # Verific daca exista cadou si, daca da, preiau informatiile\n _ = {}\n gift = 0\n for el in html.select(s.jsondata[\"data\"][\"cadou\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"cadou\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"termen\"]:\n if debug == True:\n Debug(\"Gift gasit!\", 0)\n gift = 1\n t = ''\n for e in el.select(s.jsondata[\"data\"][\"cadou\"][\"titlu\"][\"tag\"]):\n try:\n if e[s.jsondata[\"data\"][\"cadou\"][\"titlu\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"titlu\"][\"termen\"]:\n t = e.text\n break\n except:\n pass\n if t != '' :\n _[\"titlu\"] = t\n for e in el.select(s.jsondata[\"data\"][\"cadou\"][\"pret\"][\"tag\"]):\n try:\n if e[s.jsondata[\"data\"][\"cadou\"][\"pret\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"pret\"][\"termen\"]:\n _[\"pret\"] = e.text\n break\n except:\n pass\n else:\n # Cadou fara pret, difera class-ul\n for e in el.select(s.jsondata[\"data\"][\"cadou\"][\"titlu_farapret\"][\"tag\"]):\n try:\n if e[s.jsondata[\"data\"][\"cadou\"][\"titlu_farapret\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"titlu_farapret\"][\"termen\"]:\n _[\"titlu\"] = e.text\n except:\n pass\n _[\"pret\"] = \"-\"\n __ = el.select(s.jsondata[\"data\"][\"cadou\"][\"link\"][\"tag\"])\n _[\"link\"] = __[0][s.jsondata[\"data\"][\"cadou\"][\"link\"][\"arg\"]]\n gift_html = s.get_html_code(_[\"link\"])\n for e in gift_html.select(s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"tag\"]):\n try:\n if e[s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"termen\"]:\n for e2 in e.select(s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"tag2\"]):\n try:\n _[\"imagine_link\"] = 
cel_string.link_cadou_imagine(e2[s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"arg\"]])\n break\n except:\n pass\n break\n except:\n pass\n break\n except:\n pass\n if gift == 1:\n info[\"cadou\"] = _\n\n # Salvez link-ul spre produs\n info[\"link\"] = href\n\n # Preiau imaginile produsului\n imgs = []\n c = \"\"\n for el in html.select(s.jsondata[\"data\"][\"imagini\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"imagini\"][\"tip\"]] == s.jsondata[\"data\"][\"imagini\"][\"termen\"]:\n c = el\n break\n except:\n pass\n sec_link = \"\"\n try:\n for el in c.select(s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"link\"][\"tag\"]):\n try:\n sec_link = el[s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"link\"][\"arg\"]]\n break\n except:\n pass\n except:\n pass\n if sec_link != href and sec_link != '':\n sec_html = s.get_html_code(sec_link)\n for el in sec_html.select(s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"tip\"]] == s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"termen\"]:\n for e in el.select(s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"img_tag\"]):\n try:\n imgs.append(e[s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"arg\"]])\n except:\n pass\n except:\n pass\n elif sec_link == href and sec_link != '':\n # Inseamna ca exista doar o poza, cea principala\n for el in html.select(s.jsondata[\"data\"][\"imagini\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"imagini\"][\"tip\"]] == s.jsondata[\"data\"][\"imagini\"][\"termen\"]:\n for e in el.select(s.jsondata[\"data\"][\"imagini\"][\"img_tag\"]):\n imgs.append(e[s.jsondata[\"data\"][\"imagini\"][\"arg\"]])\n break\n except:\n pass\n # Descarc imaginile\n try:\n _ = info[\"titlu\"].split(\" \")\n except:\n _ = \"\"\n imgname = \"\"\n try:\n for i in range(0, 10):\n imgname = imgname + _[i] + \" \"\n except:\n pass\n imgname = imgname + \"- \"\n IMGS = []\n log.scriere(\"Descarc {} imagini.\".format(len(imgs)))\n for i in range(0, len(imgs)):\n try:\n utils.download_imagine(imgs[i], imgname + str(i))\n if utils.verificare_prezenta_imagine(imgname + str(i)) == True:\n IMGS.append(imgname + str(i) + \".{}\".format(imgs[i].split(\"/\")[-1].split(\".\")[-1]))\n except:\n if debug == True:\n Debug(\"Eroare download {}\".format(str(imgs[i])), 2)\n info[\"imagini\"] = IMGS\n\n # Verific daca exista cadou si, daca da, downloadez imaginea cadoului\n if \"cadou\" in info.keys():\n try:\n utils.download_imagine(info[\"cadou\"][\"imagine_link\"], imgname + \"CADOU\")\n info[\"cadou\"][\"imagine\"] = imgname + \"CADOU\" + \".{}\".format(info[\"cadou\"][\"imagine_link\"].split(\"/\")[-1].split(\".\")[-1])\n except:\n pass\n\n # Verific daca exista toate datele si, daca da, creez fisierul HTML\n if debug == True:\n Debug(str(info.keys()), 0)\n Debug(str(info[\"pret\"].keys()), 0)\n Debug(str(info[\"rating\"].keys()), 0)\n try:\n Debug(str(info[\"cadou\"].keys()), 0)\n except:\n pass\n kw = {\"titlu\": None, \"pret\": [\"vechi\", \"nou\", \"discount\"], \"rating\": [\"rata\", \"review-uri\"], \"descriere\": None,\n \"specs\": None, \"imagini\": None, \"link\": None}\n kw_gift = {\"titlu\": None, \"pret\": [\"vechi\", \"nou\", \"discount\"], \"rating\": [\"rata\", \"review-uri\"],\n \"descriere\": None, \"specs\": None, \"imagini\": None, \"link\": None,\n \"cadou\": [\"titlu\", \"pret\", \"link\", \"imagine\", \"imagine_link\"]}\n if \"cadou\" in info.keys():\n if utils.verificare_date(info, kw_gift) == False:\n # Daca nu sunt toate datele necesare, 
sterg pozele descarcate\n if debug == True:\n Debug(\"Date insuficente!\", 2)\n utils.stergere_set_imagini(imgname)\n else:\n log.scriere(\"Salvez fisierul HTML.\")\n creatorHTML_gift(info)\n d = db.Database(\"cel\")\n d.initializare_conexiune()\n x = d.cautare_date_link(info[\"link\"])\n if len(x) == 0:\n _ = d.cautare_ultima_data(info[\"link\"])\n if _ == 0:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n else:\n if db.comparare_data_actuala_cu_ultima(_) == 1:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n d.inchidere_conexiune()\n else:\n regasit = db.alegere_pretul_minim(x)\n if regasit[\"pret\"] < cel_string.transformare_pret_int(info[\"pret\"][\"nou\"]):\n if debug == True:\n Debug(\"Produsul a fost regasit!\", 0)\n HTML_adaugare_regasire(info[\"titlu\"], regasit[\"data\"], regasit[\"pret\"])\n _ = d.cautare_ultima_data(info[\"link\"])\n if _ == 0:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n else:\n if db.comparare_data_actuala_cu_ultima(_) == 1:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n d.inchidere_conexiune()\n\n\n else:\n if utils.verificare_date(info, kw) == False:\n # Daca nu sunt toate datele necesare, sterg pozele descarcate\n if debug == True:\n Debug(\"Date insuficiente\", 2)\n utils.stergere_set_imagini(imgname)\n else:\n log.scriere(\"Salvez fisierul HTML.\")\n creatorHTML(info)\n d = db.Database(\"cel\")\n d.initializare_conexiune()\n x = d.cautare_date_link(info[\"link\"])\n if len(x) == 0:\n _ = d.cautare_ultima_data(info[\"link\"])\n if _ == 0:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n else:\n if db.comparare_data_actuala_cu_ultima(_) == 1:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n d.inchidere_conexiune()\n else:\n regasit = db.alegere_pretul_minim(x)\n if regasit[\"pret\"] < cel_string.transformare_pret_int(info[\"pret\"][\"nou\"]):\n if debug == True:\n Debug(\"Produsul a fost regasit!\", 0)\n HTML_adaugare_regasire(info[\"titlu\"], regasit[\"data\"], regasit[\"pret\"])\n _ = d.cautare_ultima_data(info[\"link\"])\n if _ == 0:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n else:\n if db.comparare_data_actuala_cu_ultima(_) == 1:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n d.inchidere_conexiune()",
"def estrazione_articoli_da_leggere(LINK_SOS_FANTA: str):\n to_scrape = scraper_lista_articoli(LINK_SOS_FANTA)\n\n contenuto_articoli = []\n for link in to_scrape:\n soup = BeautifulSoup(requests.get(link).text, \"html.parser\")\n content = soup.find_all(class_=\"entry-content\")\n\n # se è un'errore o vuoto salto\n if len(content) == 0:\n continue\n\n # estraggo il testo rimuovendo gli errori di encoding\n testo = content[0].text.replace(\"\\xa0\", \" \")\n contenuto_articoli.append(testo)\n return contenuto_articoli",
"def lista_chiamati(request, legahash, astaid, numero):\n return lista_ultimi(request, legahash, astaid, CalciatoreChiamato, numero)",
"def listar_relaciones(request,idItem):\n relaciones= Relacion.objects.filter()\n\n item=Item.objects.filter(actual=True)\n itemActual=Item.objects.get(id_item=idItem)\n ### falta desvincular relacion o agregar nueva y cambiar version\n\n DATA = []\n LINK = []\n\n DATA.append({'key': itemActual.id_item, 'name': itemActual.nombre + ' costo:'+str(itemActual.costo), 'group': 'FASE'+str(itemActual.fase.id_Fase), 'color': \"#2711E3\"}, )\n fases = Fase.objects.filter(id_Proyecto=itemActual.fase.id_Proyecto, )\n num=0\n for i in fases:\n num +=1\n DATA.append({'key': 'FASE'+str(i.id_Fase), 'name': i.nombre , 'isGroup': 'true'}, )\n\n relaciones_trazabilidad(itemActual,DATA,LINK)\n izq=0\n band=0\n #der=0\n for i in DATA:\n band += 1\n #print(i['name'])\n if str.isnumeric(str(i['key'])):\n it = Item.objects.get(id_item=i['key'])\n izq+=it.costo\n\n\n relaciones_trazabilidad_delante(itemActual,DATA,LINK)\n cont=0\n # der=0\n for i in DATA:\n cont+=1\n if cont > band:\n it = Item.objects.get(id_item=i['key'])\n izq+=it.costo\n# der += itemActual.costo\n\n context={\n \"relaciones\":relaciones,\n \"item\":item,\n \"itemActual\":itemActual,\n 'proyectos':itemActual.fase.id_Proyecto,\n 'data':DATA,\n 'link':LINK,\n 'izq':izq,\n # 'der':der,\n\n }\n return render(request, 'items/trazabilidad.html', context)",
"def __TopicsPages(self):\r\n \r\n try:\r\n #apro l'url\r\n pagina=self.__OpenPage(self.__urlYahooCurrentAnswer)\r\n if not self.__MetaOperazioni(pagina): \r\n return False \r\n #gestisco l'eventuale errore di url\r\n if not pagina:\r\n #print 'La ricerca non ha prodotto risultati'\r\n return False\r\n else:\r\n #ora per il numero di risultati che voglio estrarre\r\n #1- estraggo i risultati dalla pagina\r\n #2 estraggo le altre pagine\r\n \r\n indexpages=1\r\n pag=[]\r\n \r\n while True:\r\n #devo iterare tra tutte le pagine fino a che ho i risultati, \r\n #le pagine esisteranno sempre dato che ho impostato il numero di risultati consultabili al max come i \r\n #risultati totali ottenuti\r\n topicrel=pagina.findAll('div',{'class':'dynamic'})\r\n #IN OGNIUNA C HO IL PEZZO DA CUI ESTRARRE LE INFORMAZIONI RELATIVE A N RISP, LINK, CATEGORIA ECC...\r\n for c in topicrel[0].findAll('li'):\r\n #d è una variabile temporanea \r\n \r\n #per prima cosa identifico il tipo in cui è stata strutturata la domanda\r\n #tipo 0: no badge\r\n #tipo 1: badge-o\r\n \r\n asktitle=c.h3.text\r\n askbody=c.span.text\r\n asktitle=asktitle.strip()\r\n askbody=askbody.strip()\r\n #se il corpo della domanda è vuoto lo sostituisco con il titolo\r\n if askbody==u'':askbody=asktitle\r\n \r\n tipo=c.findAll('span',{'class':'badge-o'})\r\n \r\n if tipo==[]: \r\n #print 'tipo 0'\r\n \r\n d=c.findAll('a')\r\n \r\n paginarisposte=d[0]['href']\r\n paginarisposte=unicode(paginarisposte,'UTF-8')\r\n \r\n _url=self.__language+self.__urlYahoo[:-1]\r\n \r\n paginarisposte=_url+paginarisposte\r\n \r\n askcategoria=d[1].text #categoria/e\r\n askcategoria=askcategoria.strip()\r\n askcategorialink=d[1]['href'] #indirizzo categoria\r\n _url=self.__language+self.__urlYahoo[:-1]\r\n \r\n askcategorialink=unicode(askcategorialink,'UTF-8')\r\n askcategorialink=_url+askcategorialink\r\n \r\n if c.find('img',{'class':'img-video-tn'})!=None: #se ha il video\r\n \r\n d=c.findAll('div')\r\n d=d[3].text\r\n d=d.replace(askcategoria,'')\r\n d=d.strip()\r\n d=d.split()\r\n \r\n d=d[0]\r\n numerorisposte=d \r\n numerorisposte=unicode(str(numerorisposte), 'utf-8')\r\n \r\n else:\r\n d=c.findAll('div')\r\n d=d[2].text\r\n d=d.replace(askcategoria,'')\r\n d=d.strip()\r\n d=d.split()\r\n if d=='' or d==u'' or d==[]: #quando non ci sono risposte\r\n d=0 \r\n else:\r\n d=d[0]\r\n \r\n numerorisposte=d \r\n numerorisposte=unicode(str(numerorisposte), 'utf-8')\r\n\r\n else:\r\n #print 'tipo 1'\r\n \r\n d=c.findAll('a')\r\n #d[0]['href'] #indirizzoRisposta\r\n paginarisposte=d[0]['href']\r\n _url=self.__language+self.__urlYahoo[:-1]\r\n paginarisposte=unicode(paginarisposte,'UTF-8')\r\n paginarisposte=_url+paginarisposte \r\n \r\n askcategoria=d[2].text #categoria/e\r\n askcategoria=askcategoria.strip()\r\n askcategorialink=d[2]['href'] #indirizzo categoria\r\n _url=self.__language+self.__urlYahoo[:-1]\r\n askcategorialink=unicode(askcategorialink,'UTF-8')\r\n askcategorialink=_url+askcategorialink\r\n d=c.findAll('div')\r\n \r\n d=d[2].text\r\n d=d.strip()\r\n d=d.split()\r\n \r\n numerorisposte=d[-(len(askcategoria.split())+3)]\r\n numerorisposte=int(numerorisposte)\r\n\r\n numerorisposte=unicode(str(numerorisposte), 'utf-8') \r\n\r\n page={'title':asktitle, 'body':askbody, 'categoria':askcategoria, \\\r\n 'categoria url':askcategorialink,'ask url':paginarisposte, \\\r\n 'risposte':numerorisposte}\r\n pag.append(page)\r\n if len(pag)>int(self.__numRisultati):\r\n self.__topicpages=pag\r\n return pag\r\n \r\n indexpages+=1 \r\n 
urlpage=self.__costruisciUrl(indexpages)\r\n pagina=self.__OpenPage(urlpage)\r\n if not pagina:\r\n return False\r\n except Exception, e:\r\n ErrorLog2.ErrorLog(self.__class__.__name__, 'TopicPages', e)\r\n return False",
"def scraper_notizie(self, contenuto_articoli: list):\n tot_menzioni = []\n for articolo in contenuto_articoli:\n # estraggo qualsisasi frase che menziona il giocatore\n sel_regex = f\"[\\w ,;()'’-]+{self.name}[\\w ,;()'’-]+\"\n results = re.findall(sel_regex, articolo)\n\n for res in results:\n # rimuovo il caso in cui sia solo in un elenco, come ad inizio articoli su ATTACCO\n if not re.search(f\", {self.name},\", res):\n tot_menzioni.append(res)\n if len(tot_menzioni) > 0:\n self.news = \"• \" + \"<br>•\".join(tot_menzioni)",
"def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news",
"def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news",
"def extraer(url, nivel, pueblo_id):\r\n\thref = \"\"\r\n\tprint url\r\n\tp = url.split('/')\r\n\tcategoria = p[-2]\r\n\tcat = Categoria.objects.filter(etiqueta__exact = categoria)\r\n\tif cat is None:\r\n\t\tcat = Categoria.objects.filter(etiqueta__exact = \"sin_categoria\")\r\n\tfor y in range(3,len(p)):\r\n\t\thref += '/' + p[y]\r\n\ttry:\r\n\t\trespuesta = urllib2.urlopen(url)\r\n\texcept:\r\n\t\treturn []\r\n\tsoup = BeautifulSoup(respuesta)\r\n\tif soup.body is None:\r\n\t\treturn []\r\n\r\n\tif nivel == 0:\r\n\t\tnivel = 1\r\n\tahref = soup.body.find_all('a')\r\n\tfor element in ahref:\r\n\t\tif element.get('href') == href:\r\n\t\t\tncategoria = element.text\r\n\t\t\tbreak\r\n\tnoticias = soup.body.find_all('div', attrs={'class' : 'content-noticias group'})\t\r\n\tif len(noticias) == 0:\r\n\t\tnoticias = soup.body.find_all('div', attrs={'class' : 'module-noticias box group'})\r\n\t\tif len(noticias) == 0:\r\n\t\t\treturn []\r\n\tlis = noticias[0].find_all('div')\r\n\tfor element in lis:\r\n\t\tfecha = None\r\n\t\ttitular = None\r\n\t\tenlace = None\r\n\t\tcuerpo = element.text\r\n\t\tcuerpo = modifica_string.elimina_blancos(cuerpo)\r\n\t\tcuerpo = modifica_string.elimina_char_especial(cuerpo)\r\n\t\tcuerpo = cuerpo.replace(\"\\n\", \"-\")\r\n\t\tif cuerpo is \"\":\r\n\t\t\tcuerpo = None\r\n\t\tfech = element.find('strong', attrs={'class' : 'date'})\r\n\t\tif fech is not None:\r\n\t\t\tfecha = fech.text\r\n\t\ttitu = element.find('a')\r\n\t\tif titu is not None:\r\n\t\t\ttitular = titu.text\r\n\t\t\ttitular = modifica_string.elimina_blancos(titular)\r\n\t\t\ttitular = modifica_string.elimina_char_especial(titular)\r\n\t\t\ttitular = titular.replace(\"\\n\", \"-\")\r\n\t\t\tpartes = url.split('/')\r\n\t\t\tU = partes[0]+'//'+partes[2]\r\n\t\t\tenlace = U + titu.get('href')\r\n\t\ttexto_noticia = get_noticia(enlace)\r\n\t\tif texto_noticia == \"\":\r\n\t\t\ttexto_noticia = None\r\n\t\ttexto_noticia = modifica_string.elimina_blancos(texto_noticia)\r\n\t\ttexto_noticia = modifica_string.elimina_char_especial(texto_noticia)\r\n\t\tif texto_noticia is not None:\r\n\t\t\ttexto_noticia = texto_noticia.replace(\"\\n\", \"-\")\r\n\t\tif fecha is not None:\r\n\t\t\tif '/' not in fecha:\r\n\t\t\t\tfecha = format_fecha(fecha)\r\n\t\t\tfecha = fecha.split(\"/\")\r\n\t\t\tdia = date(day = int(fecha[0]), month = int(fecha[1]), year = int(fecha[2]))\r\n\t\telse:\r\n\t\t\tdia = None\r\n\t\tfor e in cat:\r\n\t\t\tp = Noticias(dstitular = titular, dscuerpo = texto_noticia, resumen = cuerpo, url = enlace, etiqueta = e, fecha = dia, pueblo_id = pueblo_id)\r\n\t\t\tp.save()\r\n\tdigitos = len(str(nivel))\r\n\tprint \"Nivel \" + str(nivel)\r\n\tli_final = soup.body.find('li', attrs={'class' : 'last'})\r\n\tif li_final is None:\r\n\t\tprint \"LIMITE \" + str(nivel)\r\n\t\treturn []\r\n\tli_final = li_final.find('a')\r\n\tif li_final is None:\r\n\t\tprint \"LIMITE \" + str(nivel)\r\n\t\treturn []\r\n\tul_final = soup.body.find('ul', attrs={'class' : 'paginador group'})\r\n\tif ul_final is None:\r\n\t\tprint \"LIMITE \" + str(nivel)\r\n\t\treturn []\r\n\tif ul_final is not None:\r\n\t\tli = ul_final.findAll('li')\r\n\t\tlink = li[-1].find('a')\r\n\t\tif link is None:\r\n\t\t\tprint \"LIMITE \" + str(nivel)\r\n\t\t\treturn []\r\n\tif nivel == 1:\r\n\t\tnivel = 2\r\n\t\turl2 = url + \"?pagina=\" + str(nivel)\r\n\telse:\r\n\t\tnivel = nivel + 1\r\n\t\turl2 = url[:-digitos]\r\n\t\turl2 = url2 + str(nivel)\r\n\textraer(url2,nivel, pueblo_id)",
"def scrape_categories():\n category_urls = []\n url = homepage + '/alle-categorieen/'\n response = rq.get(url, timeout=5)\n soup = BeautifulSoup(response.content, 'html.parser')\n main_soup = soup.find('main')\n\n for category in main_soup.find_all('a', {'href': re.compile(r'/overzicht/')}):\n category_urls.append(category['href'])\n\n return category_urls",
"def Prolinks(promotion_label):\n return prolinks",
"def verif_site_catal(serine,d,x=15):\n\n w=dict()\n #dans un premier temp, on recherche tous les carbones alpha dans un rayon de x angstrom\n #a partir de la serine utilise comme reference\n cmd.select(\"selection_pour_site\",\"name ca within \"+str(x)+\" of (resi \"+str(serine[0])+\" and chain \"+serine[1]+\" and name ca)\")\n stored.list=list()\n cmd.iterate(\"selection_pour_site\",\"stored.list.append((resi,chain,resn))\")\n #print \"liste genere par pymol\"#debug\n #print stored.list #debug\n \n \n #on recherche dans un deuxieme temps s'il existe une histidine dans cette selection\n his,w[\"his\"]=site_utils.verif_histidine(stored.list,d)\n \n #dans un troisieme temps on recherche un aspartate ou un glutamate idealement place \n acide,w[\"acide\"]=site_utils.verif_acide(stored.list,d)\n \n w[\"dist\"]=x\n \n cmd.delete(\"selection_pour_site\")\n return [his,acide,w]",
"def parse_gremien(self, response):\n\n # e.g. '<a href=\"si0041.asp?__ctopic=gr&__kgrnr=977997\" title=\"zu Ausschuss für Arbeit, Gesundheit und Soziales: Sitzungen\\r\\nDiese Seite liefert eine Übersicht der Sitzungen eines Gremiums. Als Filterkriterien sind Zeiträume verfügbar. \" class=\"smccontextmenulink smcmenucontext_fct_sitzungen\">Sitzungen</a>'\n urls = response.xpath('//a[contains(@class, \"smccontextmenulink smcmenucontext_fct_sitzungen\")]/@href').getall()\n names = response.xpath('//a[contains(@class, \"smccontextmenulink smcmenucontext_fct_sitzungen\")]/@title').getall()\n names = [name.strip('zu ').split(':')[0] for name in names]\n names = [self.mapping[name] if name in self.mapping else name for name in names]\n\n for i in range(len(urls)):\n all_data_url = urls[i].replace('__ctopic', '__cwpall=1&__ctopic')\n request = self.build_request(response.urljoin(all_data_url), self.parse_gremium, '')\n request.meta['name'] = names[i]\n\n yield request",
"def loeschen(self):\r\n loeschen=self.REQUEST['loeschen']\r\n tit=''\r\n i=0\r\n j=0\r\n index=[]\r\n cursor=[]\r\n for x in self.objectValues('Image'):\r\n if str(x.id())[0:6] not in index:\r\n index.append(str(x.id())[0:6]) \r\n cursor.append([str(x.id())[0:6],str(x.title),[str(x.id())]])\r\n if str(x.id())[0:6]==loeschen:\r\n tit=str(x.title)\r\n j=i\r\n i=i+1\r\n else:\r\n cursor[-1][2].append(str(x.id()))\r\n #for val in cursor[j][2]:\r\n #self._delOb(self, id=str(val))\r\n #delet=delet+str(val)+' '\r\n self.manage_delObjects(ids=cursor[j][2])\r\n return tit+' gelöscht !'",
"def rellena(self):\r\n for nodo in range(0, self.red['n']):\r\n for i in range(0, self.red.conec[nodo]):\r\n if self.red.conex[nodo][i] > nodo:\r\n ilink = self.S[int(nodo)] + self.S[int(self.red.conex[nodo][i])]\r\n ilink = ''.join(sorted(ilink))\r\n self.links[ilink][0] += 1",
"def getExpandedLinks():",
"def __bol(soup):\n news = []\n anchors = soup.find(\n 'div', class_='mais-clicadas-lista link-primary').find_all('a')\n\n for a in anchors:\n title = a.find('span', class_='mais-clicadas-item-content').text\n link = a['href']\n news.append(dict(title=title, link=link))\n return news",
"def lista_offerte(request, legahash, astaid, numero=0):\n return lista_ultimi(request, legahash, astaid, Offerta, numero)",
"def scrape_cartelera(data, comp_nom):\t\n\tfunciones = []\n\t#limpia el html para no tener problemas:\n\tcleaner = Cleaner(page_structure=False)\n\tcleaned = cleaner.clean_html(data)\n\tsoup = BeautifulSoup(cleaned, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)\n\tshow_exp = re.compile(r'sid=(\\d+)')\n\tsala_exp = re.compile(r'Sala (\\d{1,2})')\n\t\n\tcomplejo_org = Complejo.objects.get(nombre=comp_nom)\n\tcomplejo = complejo_org\n\tcomplejo_platino = None\n\t\n\t#Quita el nombre cinemex para encontrar comp platino\n\tnom_corto = comp_nom.replace('Cinemex ', '')\n\tcomplejos_l = Complejo.objects.filter(nombre__icontains=nom_corto, cadena='Cinemex')\n\t#Busca complejo platino:\n\tif len(complejos_l) > 1:\n\t\tnom = 'Cinemex Platino '+ nom_corto\n\t\tquery = complejos_l.filter(nombre=nom)\n\t\tif len(query): complejo_platino = query[0] \n\t\t\n\tpeliculas = []\n\tcontenido = soup.find('div', 'contenido2')\n\t\n\t#Si existe tabla de peliculas\n\tif contenido:\n\t\ttry:\n\t\t\tpeliculas = contenido.find('table', cellspacing='0', cellpadding='0', border='0').contents\n\t\texcept:\n\t\t\tlogger.debug( u'Error cargando complejo %s' %comp_nom)\n\t\t\treturn []\n\t\t\t#logger.debug( u'peliculas mide %s' %len(peliculas))\n\t\t\n\t\t\n\t\tfor peli in peliculas:\n\t\t\t#logger.debug( peli)\n\t\t\t#Asegura que no sea un navigableString\n\t\t\timax = False\n\t\t\tif type(peli) != NavigableString:\n\t\t\t\tif peli.find('div', 'texto_1', align='center'):\n\t\t\t\t\t#logger.debug( peli.b.string)\n\t\t\t\t\tif peli.b.string.find('Platino') > -1:\n\t\t\t\t\t\t#Ajustar el complejo para platino\n\t\t\t\t\t\tif complejo_platino:\n\t\t\t\t\t\t\timax = False\n\t\t\t\t\t\t\tcomplejo = complejo_platino\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlogger.debug( u'Me falta platino %s' %comp_nom)\n\t\t\t\t\t\t\treturn funciones\n\t\t\t\t\t\t#logger.debug( 'Estoy en platino')\n\t\t\t\t\telif peli.b.string.find('IMAX')>-1:\n\t\t\t\t\t\timax = True\n\t\t\t\t\telse:\n\t\t\t\t\t\timax = False\n\t\t\t\t\t\tcomplejo= complejo_org\n\t\t\t\t\t\t\n\t\t\t\t#Si el td corresponde a una pelicula\n\t\t\t\tif peli.find('td', width='210', valign='top'):\t\n\t\t\t\t\ttres_D = False\n\t\t\t\t\tidioma = None\n\t\t\t\t\tsala = None\n\t\t\t\t\tpelicula = None\n\t\t\t\t\t\n\t\t\t\t\t#Checar tiene logo de 3d\n\t\t\t\t\tif peli.find('div', 'icono_platino').find('img', src=re.compile(r'3d.png$')): tres_D = True\n\t\t\t\t\t\n\t\t\t\t\t#Encabezado contiene titulo e idioma\n\t\t\t\t\tencabezado = peli.find('li', 'texto_3')\n\t\t\t\t\ttitulo = ''.join(encabezado.findAll(text=True)).replace('\\n', '').strip()\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t#Determina Idioma\n\t\t\t\t\tidi = encabezado.find('img', alt='idioma')\n\t\t\t\t\tif idi:\n\t\t\t\t\t\tif idi.get('src', '').find('ing') > 0:\n\t\t\t\t\t\t\tidioma = 'ingles'\n\t\t\t\t\telse:\n\t\t\t\t\t\tidioma = 'espanol'\n\t\t\t\t\t\t\n\t\t\t\t\t#Buscar pelicula por titulo segun idioma y 3d.. subtitulada o no.\n\t\t\t\t\t#logger.debug( u'titulo %s' %titulo)\n\t\t\t\t\ttit = '|'+ titulo + '|'\n\t\t\t\t\tpeli_query = filter_peli_ver(pelicula__alt_tit__icontains=tit, tres_D=tres_D, imax=imax)# id_mex__gt=0)\n\t\t\t\t\t#Checa si hay imax. 
\n\t\t\t\t\tlas_imax = peli_query.filter(imax=True)\n\t\t\t\t\tif las_imax:\n\t\t\t\t\t\tpeli_query= las_imax\n\t\t\t\t\t\tlogger.debug( 'Encontre imax!')\n\t\t\t\t\t\t\n\t\t\t\t\tif len(peli_query) > 1:\n\t\t\t\t\t\t#Si idioma == ingles, selecciona la pelicula subtitulada\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tpelicula = peli_query.get(subtitulada= (idioma == 'ingles'), doblada = (idioma != 'ingles') )\n\t\t\t\t\t\t\t\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tlogger.debug( e)\n\t\t\t\t\t\t\tlogger.debug( \"Error de idioma con la pelicula %s, idioma: %s\" % (titulo, idioma))\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telif len(peli_query) == 1:\n\t\t\t\t\t\tpelicula = peli_query[0]\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.debug( u'No encontre pelicula %s, tres_D=%s, idioma=%s' %(titulo, tres_D, idioma))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t#logger.debug( u'pelicula %s' %pelicula)\n\t\t\t\t\thoras_html = peli.findAll('div', id='fecha')\n\t\t\t\t\t\n\t\t\t\t\t#logger.debug( u'tengo %s fechas aqui...' %len(horas_html))\n\t\t\t\t\t#logger.debug( horas_html)\n\t\t\t\t\t\n\t\t\t\t\tfor tag in horas_html:\n\t\t\t\t\t\t#Me salto todo lo que no es html\n\t\t\t\t\t\tif type(tag) != NavigableString:\n\t\t\t\t\t\t\t#Si esta disponible, obtiene num. sala\n\t\t\t\t\t\t\tif tag.get('style', '').find('text-transform: uppercase;') > -1: sala = sala_exp.search(tag.string).group(1)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t#logger.debug( u'hay %s horarios aqui'%len(tag.findNext('div', id='horarios').findAll('a', 'texto_1')))\n\t\t\t\t\t\t\tfecha = parseDate(tag.string)\n\t\t\t\t\t\t\t#logger.debug( pelicula)\n\t\t\t\t\t\t\t#logger.debug( complejo)\n\t\t\t\t\t\t\tfunciones.extend([{\n \t\t\t\t\t\t\t\t\t'peli_ver': pelicula,\n \t\t\t\t\t\t\t\t\t'complejo': complejo,\n \t\t\t\t\t\t\t\t\t'hora': datetime.datetime(fecha.year, fecha.month, fecha.day, *time.strptime( hora_html.string , '%H:%M')[3:5]),\n \t\t\t\t\t\t\t\t\t'pol_idShowTime': show_exp.search(hora_html['href']).group(1),\n \t\t\t\t\t\t\t\t\t'sala': sala,\n \t\t\t\t\t\t\t\t\t} for hora_html in tag.findNext('div', id='horarios').findAll('a', 'texto_1')])\n\t\t#logger.debug( len(funciones))\n\t\treturn funciones",
"def __correio(soup):\n news = []\n json_content = json.loads(soup.text)\n entries = json_content[\"matia\"]\n\n for entry in entries:\n title = entry[\"title\"]\n url = entry[\"link\"]\n news.append(dict(title=title, link=url))\n if(len(news) >= 10):\n break\n\n return news",
"def update(self):\r\n start_page = self.controller.get_page(StartPage)\r\n value_list_item = start_page.listitem.curselection()\r\n value_list_arme = start_page.listarme.curselection()\r\n value_list_bonus = start_page.listbonus.curselection()\r\n value_list_bonus_sec = start_page.listbonussec.curselection()\r\n list_link_item = []\r\n if value_list_item is not None:\r\n s = \"http://www.dofus.com/fr/mmorpg/encyclopedie/equipements?text=\"\r\n for i in value_list_item:\r\n s = s + \"&type_id[]=\" + str(list_object[start_page.listitem.get(i)])\r\n if value_list_bonus is not None:\r\n for i in value_list_bonus:\r\n s = s + \"&EFFECTMAIN[]=\" + str(\r\n bonus_primaire[start_page.listbonus.get(i)])\r\n s = s + \"&EFFECTMAIN_and_or=\" + str(start_page.And_or_prim.get())\r\n if value_list_bonus_sec is not None:\r\n for i in value_list_bonus_sec:\r\n s = s + \"&EFFECT[]=\" + str(\r\n bonus_primaire[start_page.listbonus.get(i)])\r\n s = s + \"&EFFECT_and_or=\" + str(start_page.And_or_prim.get())\r\n s = s + \"&size=96\"\r\n print(s)\r\n site = requests.get(s)\r\n # f = open('test.txt', 'w')\r\n soup = bs4.BeautifulSoup(site.text, 'html.parser')\r\n number_result = 1\r\n if(soup.find(\"div\", class_=\"ak-list-info\") is not None):\r\n number_result = soup.find(\"div\", class_=\"ak-list-info\").strong.string\r\n number_page = math.ceil(int(number_result) / 96)\r\n for link in soup.tbody.find_all('a'):\r\n list_link_item.append(link.get('href'))\r\n for i in range(2, number_page + 1):\r\n w = s + \"&page=\" + str(i)\r\n print(w)\r\n site = requests.get(w)\r\n soup = bs4.BeautifulSoup(site.text, 'html.parser')\r\n for link in soup.tbody.find_all('a'):\r\n list_link_item.append(link.get('href'))\r\n list_link_item = list(set(list_link_item))\r\n self.list_name_item = {}\r\n self.resultlist = tk.Listbox(self, selectmode='multiple', exportselection=0)\r\n for link in list_link_item:\r\n site = requests.get(\"http://www.dofus.com\" + link)\r\n soup = bs4.BeautifulSoup(site.text, 'html.parser')\r\n craft_panel = soup.find(\"div\", class_=\"ak-container ak-panel ak-crafts\")\r\n if craft_panel is not None:\r\n dic_name_nb = {}\r\n for ressources in craft_panel.find_all(\"div\", class_=\"ak-list-element\"):\r\n nbressource = ressources.find(\"div\", class_=\"ak-front\").text\r\n nameressource = ressources.find(\r\n \"div\", class_=\"ak-content\").find(\"span\", class_=\"ak-linker\").text\r\n dic_name_nb[nameressource] = int(nbressource[:-4])\r\n self.list_name_item[soup.find(\r\n \"h1\", class_=\"ak-return-link\").text.replace(' ', '').replace('\\n', '')] = dic_name_nb\r\n else:\r\n list_link_item.remove(link)\r\n for i, j in enumerate(self.list_name_item):\r\n self.resultlist.insert(i, j)\r\n self.resultlist.selection_set(0, self.resultlist.size() - 1)\r\n self.resultlist.grid(row=1, column=0)\r\n button = ttk.Button(self, text=\"Print to file\", command=self.printresultfile)\r\n button.grid(row=2, column=1, columnspan=1)",
"def __r7(soup):\n news = []\n container = soup.select('.mais_lidas')[0]\n most_read = container.find_all('a', class_='text')\n\n for item in most_read:\n news.append(dict(title=item.string, link=item['href']))\n return news",
"def scraper_voto(self):\n\n #per trovare il link a fantacalcio.it devo prima trovare il link della squadra e trovare il suo nome\n soup_rosa = BeautifulSoup(\n requests.get(f\"{self.LINK_FANTACALCIO_IT}/{self.team}#rosa\").text,\n \"html.parser\",\n )\n print(self.name)\n\n displayed_name = self.name\n if displayed_name == \"Coulibaly\": # caso estremo, il sito si confonde\n displayed_name = \"Coulibaly M.\"\n\n # trovo il link personale del giocatore e glielo assegno\n link = soup_rosa.find(\"a\", text=displayed_name.upper())[\"href\"]\n self.scheda_giocatore = link\n\n # leggo voto e media voto\n soup = BeautifulSoup(requests.get(link).text, \"html.parser\")\n\n self.media_voto = float(soup.find_all(class_=\"nbig2\")[0].text.replace(\",\", \".\"))\n self.media_fantavoto = float(\n soup.find_all(class_=\"nbig2\")[1].text.replace(\",\", \".\")\n )\n\n # leggo anche il ruolodalla schedina delle info\n infos = soup.find_all(class_=\"col-lg-6 col-md-6 col-sm-12 col-xs-12\")[-2]\n self.ruolo = str(infos.find(\"span\").text)\n\n # compilo i dati: partite, gol e assist\n dati_partite = soup.find_all(class_=\"nbig\")\n\n partite = \"🥅 \" + dati_partite[0].text\n # i portieri hanno statistiche diverse!\n if self.ruolo == \"P\":\n goal = \"❌ \" + dati_partite[1].text\n self.dati = \"<br>\".join([partite, goal])\n else:\n goal = \"⚽ \" + dati_partite[1].text\n assist = \"👟 \" + dati_partite[2].text\n self.dati = \"<br>\".join([partite, goal, assist])\n\n # aggiungo stellina al nome se hanno una bella media voto\n if self.media_fantavoto > 7:\n self.name += \" ⭐\"",
"def get_gremium_data(self, response):\n\n # e.g. <td class=\"smc_td smc_field_silink\"><a href=\"to0040.asp?__ksinr=11487\" title=\"Details anzeigen: Bezirksvertretung Bochum-Mitte 16.05.2019 \" class=\"smc_doc smc_datatype_si\">16.05.2019</a><!--SMCINFO:si.bi.1.4.1.1.16.1.3 --> 15:00-18:09</td>\n urls = response.xpath('//tr[contains(@class, \"smcrow1\") or contains(@class, \"smcrow2\") or contains(@class, \"smcrown\")]/*/a/@href').getall()\n dates = response.xpath('//tr[contains(@class, \"smcrow1\") or contains(@class, \"smcrow2\") or contains(@class, \"smcrown\")]/*/a/text()').getall()\n\n # e.g. <a href=\"getfile.asp?id=426409&type=do&\" title=\"Einladung \" target=\"_blank\">Einladung <span class=\"smcwrapsmall smcdosize\" title=\"Dateigröße\">266\\xa0KB </span></a>\n einladungen = response.xpath('//a[contains(text(), \"Einladung\")]/@href').getall()\n # e.g. <a href=\"getfile.asp?id=427859&type=do&\" title=\"Niederschrift öffentlich \" target=\"_blank\">Niederschrift öffentlich <span class=\"smcwrapsmall smcdosize\" title=\"Dateigröße\">570\\xa0KB </span></a>\n niederschriften = response.xpath('//a[contains(text(), \"Niederschrift\")]/@href').getall()\n\n # table layout in one table row; has either no, just one, or both Einladung and Niederschrift\n tables = response.xpath('//table[contains(@class, \"smcdocbox smcdocboxright\")]').getall()\n\n # not all einladungen have niederschriften vv. insert None accordingly\n for i in range(len(tables)):\n if \"Niederschrift\" not in tables[i]:\n niederschriften.insert(i, None)\n if \"Einladung\" not in tables[i]:\n einladungen.insert(i, None)\n\n return urls, dates, niederschriften, einladungen",
"def get_category_links(self):\n self.logger.info(\"Gathering categories links...\")\n try:\n category_link = \"services-catalog__column-title ui-link _t37mbJS _2fIr6we _2l1CpUa\"\n self.driver.get(self.MAIN_URL)\n categories = self.driver.find_elements_by_class_name(\"services-catalog__content\")\n elems = categories[0].find_elements_by_xpath(\"//a[@class]\")\n except:\n self.logger.critical(\"Problems with Internet connection or Web driver occured! Cannot gather category list!\")\n return\n search_cat = True\n # Compose two links lists: for subject categories, and for generic categories\n for i, elem in enumerate(elems):\n if elem.get_attribute(\"class\") == category_link:\n if elem.get_attribute(\"href\")[-1] == \"#\":\n subject = \" \".join(elem.text.split()[:-1])\n self.others_links.append([self.other_links_dict[subject]])\n search_cat = False\n else:\n self.link_list.append(elem.get_attribute(\"href\"))\n search_cat = True\n elif not search_cat:\n self.others_links[-1].append(elem.text)\n # Move english category to the end because it is the largest one\n self.link_list.append(self.link_list.pop(self.link_list.index('https://profi.ru/repetitor/english/')))\n self.logger.info(f'Found {len(self.link_list) + len(self.others_links)} categories')",
"def lista_acquistati(request, legahash, astaid, numero=0):\n return lista_ultimi(request, legahash, astaid, Acquisto, numero)",
"def link_to_set(page):\n #s = set()\n links = Measurements.get_all_links(page)\n s = set(links)\n return s",
"def __bol(soup):\n news = []\n anchors = soup.find('ul', class_='maisclicadas').find_all('a')\n\n for a in anchors:\n title = a.span.string.next\n link = a['href']\n news.append(dict(title=title, link=link))\n return news",
"def reemplaza_tildes(palabra):",
"def analyse_donnees(self, mere, foetus, pere, log):\n concordance_mf = 0\n concordance_pf = None\n if len(pere) != 0:\n concordance_pf = 0\n log = log + \"Père détecté.................................\\n\"\n log = log + \"\\n\\nVérification concordance des ADNs entre père et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in pere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n pere[Alleles].concordance_pere_foetus = \"OUI\"\n concordance_pf = concordance_pf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n pere[Alleles].concordance_pere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[\n Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"\\n\\nVérification concordance des ADNs entre mère et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in mere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n foetus[Alleles].concordance_mere_foetus = \"OUI\"\n concordance_mf = concordance_mf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n foetus[Alleles].concordance_mere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"Vérification concordance des ADns terminée..................................\\n\\n\\n\"\n if concordance_mf != len(foetus):\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n log = log + \"Concordance des ADNs PAS OK....................\\n\"\n log = log + \"Erreur dans l'échantillon...................\\n\"\n log = log + \"Revérifier s'il vous plaît.............\\n\"\n return resultats, conclusion, log\n else:\n log = log + \"Traitement des 15 autres marqueurs..............................\\n\"\n for nbre_lignes in range(1, len(mere)):\n log = log + \"Traitement du marqueur \" + str(foetus[nbre_lignes].marqueur) + \"..........\\n\"\n pic = foetus[nbre_lignes].foetus_pics()\n log = log + \"Calcul du nombre d'allèles pour le foetus......................\\n\"\n log = log + \"Nombre d'allèles pour le foetus : \" + str(pic) + \".........\\n\"\n log = log + \"Vérification de l'homozygotie de la mère......................\\n\"\n mere[nbre_lignes].homozygotie()\n log = log + \"Mère homozygote : \" + str(mere[nbre_lignes].homozygote) + \"...............\\n\"\n log = log + \"Vérification mère et foetus mêmes allèles......................\\n\"\n foetus[nbre_lignes].allele_semblable(mere[nbre_lignes])\n log = log + \"Code de retour vérification allèles semblables: \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Initialisation du taux de contamination pour calcul à venir...............\\n\"\n foetus[nbre_lignes].taux = 0.0\n log = log + \"Taux initialisé.................................\\n\"\n log = log + \"Si code informatif de retour allèles semblables différent de 2, vérification écho.............\\n\"\n log = log + \"Si écho, affection code informatif 3...............\\n\"\n if foetus[nbre_lignes].informatif != 2:\n log = log + \"Vérification si écho......................\\n\"\n mere[nbre_lignes].echo(foetus[nbre_lignes])\n log = log + \"Code retour vérification écho : \" + str(\n 
foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Début chaîne de traitement...........................\\n\"\n if pic == 3:\n log = log + \"Trois allèles détectés......................\\n\"\n foetus[nbre_lignes].contamination_heterozygote(mere[nbre_lignes])\n log = log + \"Marqueur informatif, affectation du code contamination 1..............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Calcul taux de contamination du marqueur..........\\n\"\n foetus[nbre_lignes].contamination = 2\n log = log + \"Calcul terminé....................\\n\"\n elif mere[nbre_lignes].homozygote:\n log = log + \"Mère homozygote.......................\\n\"\n log = log + \"Marqueur non informatif, affectation du code informatif 0............\\n\"\n foetus[nbre_lignes].informatif = 0\n elif pic == 2:\n log = log + \"Deux allèles détectés..............\\n\"\n if foetus[nbre_lignes].informatif == 2:\n log = log + \"Si mêmes allèles, vérification homozygote contaminé...............\\n\"\n foetus[nbre_lignes].verif_homozygote_contamine(self)\n if foetus[nbre_lignes].contamination == 1:\n log = log + \"Homozygote contaminé identifié.....................\\n\"\n log = log + \"Calcul du taux de contamination....................\\n\"\n foetus[nbre_lignes].homozygote_contamine(self)\n log = log + \"Calcul du taux de contamination effectué...........\\n\"\n else:\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code calcul écho différent de 3..................\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n else:\n log = log + \"Un seul allèle détecté............\\n\"\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code informatif différent de 3...........\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n log = log + \"\\n\\n\"\n log = log + \"Calcul échantillon contaminé ou non......\\n\"\n log = log + \"Marqueur contaminé si >\" + str(self.seuil_taux_conta) + \".......\\n\"\n log = log + \"Echantillon contaminé si plus de \" + str(\n self.seuil_nbre_marqueurs) + \"marqueurs contaminés...\\n\"\n self.conclusion_echantillon(foetus)\n log = log + \"Calcul échantillon terminé.....\\n\"\n log = log + \"Fin de traitement...........\\n\"\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n return resultats, conclusion, log"
] | [
"0.62583876",
"0.60917413",
"0.59112614",
"0.59014153",
"0.5859251",
"0.584331",
"0.5828725",
"0.5828725",
"0.5779946",
"0.5703309",
"0.56127346",
"0.56018984",
"0.5500179",
"0.5425924",
"0.5413115",
"0.5395252",
"0.5389329",
"0.5371254",
"0.535301",
"0.53344953",
"0.5310416",
"0.5304419",
"0.5297578",
"0.524271",
"0.51739085",
"0.51587206",
"0.51480037",
"0.5145527",
"0.5142915",
"0.5129223"
] | 0.61612517 | 1 |
Create, index and return test data. | def testdata(in_cluster_app):
indexer = RecordIndexer()
filenames = ("records.json", "authors.json")
with mock.patch('invenio_records.api.Record.validate',
return_value=None):
records = load_json_from_datadir('records.json')
for record in records:
record = Record.create(record)
record_minter(record.id, record)
record.commit()
db.session.commit()
indexer.index(record)
authors = load_json_from_datadir('authors.json')
for record in authors:
record = Record.create(record)
author_minter(record.id, record)
record.commit()
db.session.commit()
indexer.index(record) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_index(self):",
"def test_export_index(self):",
"def getTestData(self):\n raise NotImplementedError",
"def testCreate(self):\n kinds = ('tiny', 'small', 'medium', 'large')\n ft = (False, True)\n in_table = self.in_tables_v1[0]\n for kind, binary, zipped in product(kinds, ft, ft):\n dirs = self.get_dataset('create[_r]', kind, binary, zipped)[1]\n ds, ds_dir = self.get_dataset('create', kind, binary, zipped)\n ds_r, ds_dir_r = self.get_dataset('create_r', kind, binary, zipped)\n where = str.format('({0}, {1}, {2}); see {3}, {4} ' +\n 'for log files and index/chunk files',\n kind, 'binary' if binary else 'text',\n 'zipped' if zipped else 'unzipped',\n os.path.join(self.out_root, 'test*.log'), dirs)\n remove_dirs(ds_dir, ds_dir_r)\n cleaner = FileCleaner(preserve=self.preserve)\n try:\n cleaner.add_local(ds_dir, on_err=False, on_success=True)\n self._create(in_table, ds, kind, binary, zipped, False)\n if not self.local:\n cleaner.add_local(ds_dir_r, on_err=False, on_success=True)\n self._create(in_table, ds_r, kind, binary, zipped, True)\n # Compare resulting indexes\n msg = chunk_index_diff(os.path.join(ds_dir, 'index.ci'),\n os.path.join(ds_dir_r, 'index.ci'))\n if msg != None:\n cleaner.clear()\n self.fail('Locally/cluster generated indexes differ ' +\n where + ': ' + msg)\n except subprocess.CalledProcessError:\n self.fail('Chunk index creation failed ' + where + '.')\n cleaner.cleanup(after_error=False)",
"def test_creating_index_type(self):",
"def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert results.get('_shards', {}).get('successful') == 1",
"def create_index(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()",
"def create_index():",
"def get_test_examples(self, data_path):\r\n return self.create_examples(self.read_data(data_path), 'test')",
"def test_create_data(self):\n process = Process.objects.filter(slug=\"test-min\").latest()\n data = Data.objects.create(\n name=\"Test data\",\n contributor=self.contributor,\n process=process,\n )\n\n data.refresh_from_db()\n self.assertEqual(data.status, Data.STATUS_DONE)",
"def getTestResults():",
"def test_data():\n db = current_app.db\n Site = db.tables.Site\n Endpoint = db.tables.Endpoint\n if Site.query.count():\n return # DB not empty\n entries = [\n Site(site_id=1,\n site_name='Site1',\n site_desc='First Test Site',\n site_owner=1,\n user_ca_cert='USERCERT1',\n service_ca_cert='',\n auth_type=0,\n auth_uri='localhost:49998',\n public=False,\n def_path='/~'),\n Site(site_id=2,\n site_name='Site2',\n site_desc='Second Test Site',\n site_owner=123,\n user_ca_cert='USERCERT2',\n service_ca_cert='SERVICECERT2',\n auth_type=0,\n auth_uri='localhost:49998',\n public=True,\n def_path='/project'),\n Endpoint(ep_id=1,\n site_id=1,\n ep_uri='localhost:49999'),\n Endpoint(ep_id=2,\n site_id=1,\n ep_uri='localhost2:49999'),\n Endpoint(ep_id=3,\n site_id=2,\n ep_uri='localhost:50000'),\n Endpoint(ep_id=4,\n site_id=2,\n ep_uri='localhost2:50000'),\n Site(site_id=3,\n site_name='CloudSite1',\n site_desc='Testing site in cloud (1)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=5,\n site_id=3,\n ep_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=4,\n site_name='CloudSite2',\n site_desc='Testing site in cloud (2)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=6,\n site_id=4,\n ep_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=5,\n site_name='UKI-LT2-IC-HEP',\n site_desc='Imperial College GridPP Site',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=1,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/pnfs/hep.ph.ic.ac.uk/data'),\n Endpoint(ep_id=7,\n site_id=5,\n ep_uri='gfe02.grid.hep.ph.ic.ac.uk:2811'),\n Site(site_id=6,\n site_name='NERSC DTN',\n site_desc='NERSC DTN Service',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=0,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=8,\n site_id=6,\n ep_uri='dtn01.nersc.gov:2811'),\n ]\n for entry in entries:\n db.session.add(entry)\n db.session.commit()",
"def create_mixed_test_data(self):\n test_employees = [\n {'id': 1, 'name': \"Test Employee 1 foo\"},\n {'id': 2, 'name': \"Test Employee 2 foo\"},\n {'id': 3, 'name': \"Test Employee 3 bar\"},\n ]\n test_log_entries = [\n OrderedDict([\n ('name', test_employees[0]['name']),\n ('date', datetime.date(2018, 1, 2)),\n ('task_name', 'Test task alpha'),\n ('duration', 1),\n ('notes', 'Notes'),\n ]),\n OrderedDict([\n ('name', test_employees[0]['name']),\n ('date', datetime.date(2018, 3, 4)),\n ('task_name', 'Test task bravo'),\n ('duration', 2),\n ('notes', 'Notes'),\n ]),\n OrderedDict([\n ('name', test_employees[2]['name']),\n ('date', datetime.date(2018, 5, 6)),\n ('task_name', 'Test task bravo'),\n ('duration', 3),\n ('notes', 'Notes'),\n ]),\n OrderedDict([\n ('name', test_employees[1]['name']),\n ('date', datetime.date(2018, 7, 8)),\n ('task_name', 'Test task charlie'),\n ('duration', 4),\n ('notes', 'Notes'),\n ]),\n ]\n for employee in test_employees:\n e = db_manager.Employee.get_or_create(name=employee['name'])\n for entry in test_log_entries:\n if employee['name'] == entry['name']:\n db_manager.LogEntry.create(\n employee=e[0],\n date=entry['date'],\n task_name=entry['task_name'],\n duration=entry['duration'],\n notes=entry['notes']\n )\n return {\n 'test_employees': test_employees,\n 'test_log_entries': test_log_entries\n }",
"def get_test_data():\n\n # test set\n test = pd.read_csv(\"test.csv\")\n\n return test",
"def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)",
"def create_test_data(self):\n fake = Faker(['en_US', 'ja_JP', 'el_GR', 'de_DE'])\n\n self.actor_request = {\n 'name': fake.name(),\n 'age': random.randint(22, 88),\n 'gender': random.choice(['M', 'F'])\n }\n\n self.movie_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n 'releaseDate': str(fake.date_between())\n }\n\n self.actor_update_request = {\n 'name': fake.name(),\n }\n\n self.movie_update_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n }\n\n for _ in range(30):\n actor_name = fake.name()\n actor_age = random.randint(22, 88)\n actor_gender = random.choice(['M', 'F'])\n\n movie_title = fake.color_name() + ' ' + fake.street_suffix()\n movie_release_date = str(fake.date_between())\n\n actor = Actor(actor_name, actor_age, actor_gender)\n actor.insert()\n\n movie = Movie(movie_title, movie_release_date)\n movie.insert()\n\n for _ in range(20):\n actors = Actor.query.all()\n movies = Movie.query.all()\n\n actor_to_update = random.choice(actors)\n movie_to_update = random.choice(movies)\n actor_to_update.movies.append(movie_to_update)",
"def Post(self, *args):\n try:\n days = int(self.request.get('num_days', 30))\n except ValueError:\n raise api_request_handler.BadRequestError(\n 'Invalid num_days parameter %s' % self.request.get('num_days'))\n if days <= 0:\n raise api_request_handler.BadRequestError(\n 'num_days cannot be negative (%s)' % days)\n before = datetime.datetime.now() - datetime.timedelta(days=days)\n\n test_path = args[0]\n test_key = utils.TestKey(test_path)\n test = test_key.get()\n if not test:\n raise api_request_handler.BadRequestError('Invalid test_path %s' %\n test_path)\n\n assert (datastore_hooks.IsUnalteredQueryPermitted()\n or not test.internal_only)\n datastore_hooks.SetSinglePrivilegedRequest()\n\n q = graph_data.Row.query()\n q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))\n q = q.filter(graph_data.Row.timestamp > before)\n\n rows = q.fetch()\n if not rows:\n return []\n revisions = [rev for rev in rows[0].to_dict() if rev.startswith('r_')]\n header = ['revision', 'value', 'timestamp'] + revisions\n timeseries = [header]\n for row in sorted(rows, key=lambda r: r.revision):\n timeseries.append([self._GetValue(row, a) for a in header])\n\n return {\n 'timeseries': timeseries,\n 'test_path': test_path,\n 'revision_logs': namespaced_stored_object.Get('revision_info'),\n 'improvement_direction': test.improvement_direction,\n }",
"def setUp(self):\n body = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n self.assertTrue(self.es.create_index('contacts_esclient_test', body))\n self.assertFalse(self.es.create_index('contacts_esclient_test', body))\n\n self.assertTrue(self.es.create_index('contacts_esclient_test2', body))\n self.assertFalse(self.es.create_index('contacts_esclient_test2', body))\n\n\n \"\"\" Index some test data \"\"\"\n data = {\"name\": \"Joe Tester\",\"age\": 21, \"sex\": \"male\"}\n self.assertTrue(self.es.index(\"contacts_esclient_test\", \"person\", body=data,\n docid=1))\n data = {\"name\": \"Joe Schmoe\",\"age\": 17, \"sex\": \"male\"}\n self.assertTrue(self.es.index(\"contacts_esclient_test\", \"person\", body=data,\n docid=2))\n\n self.assertTrue(self.es.refresh('contacts_esclient_test'))",
"async def test_db():\n test_uta_db = UTADatabase()\n await test_uta_db._create_genomic_table()\n return test_uta_db",
"def test_create_run(self):\n pass",
"def test_create(self):\n pass",
"def make_test_metadata(path):\n assert path, 'Please supply a nonempty path to store test dataset.'\n return create_test_dataset('file://{}'.format(path), range(ROWS_COUNT))",
"def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test",
"def create_test_data(users=5, categories=2, forums=2, topics=1, posts=1):\n create_default_groups()\n create_default_settings()\n\n data_created = {'users': 0, 'categories': 0, 'forums': 0,\n 'topics': 0, 'posts': 0}\n\n # create 5 users\n for u in range(1, users + 1):\n username = \"test%s\" % u\n email = \"test%[email protected]\" % u\n user = User(username=username, password=\"test\", email=email)\n user.primary_group_id = u\n user.activated = True\n user.save()\n data_created['users'] += 1\n\n user1 = User.query.filter_by(id=1).first()\n user2 = User.query.filter_by(id=2).first()\n\n # lets send them a few private messages\n for i in range(1, 3):\n # TODO\n pass\n\n # create 2 categories\n for i in range(1, categories + 1):\n category_title = \"Test Category %s\" % i\n category = Category(title=category_title,\n description=\"Test Description\")\n category.save()\n data_created['categories'] += 1\n\n # create 2 forums in each category\n for j in range(1, forums + 1):\n if i == 2:\n j += 2\n\n forum_title = \"Test Forum %s %s\" % (j, i)\n forum = Forum(title=forum_title, description=\"Test Description\",\n category_id=i)\n forum.save()\n data_created['forums'] += 1\n\n for t in range(1, topics + 1):\n # create a topic\n topic = Topic()\n post = Post()\n\n topic.title = \"Test Title %s\" % j\n post.content = \"Test Content\"\n topic.save(post=post, user=user1, forum=forum)\n data_created['topics'] += 1\n\n for p in range(1, posts + 1):\n # create a second post in the forum\n post = Post()\n post.content = \"Test Post\"\n post.save(user=user2, topic=topic)\n data_created['posts'] += 1\n\n return data_created",
"def test_create_bulk_academic(self):\n pass",
"def test_samples_request(self):\n req = Request()\n for name, data in sample_data.items():\n resp = req.get(fromfile=self._filepath(name))\n\n df = resp.write()\n assert df.equals(data), \\\n '\\n'.join(map(str, [name, df.index, data.index,\n getattr(df, 'columns', ''),\n getattr(data, 'columns', '')]))",
"def test_case_data(self, index):\n return self._db_reader.get_test_case_data(index=index)",
"def create_result(main_test):\n result = Result(outputs=[DBHandler.NAME], main_test=main_test)\n result.startTestRun()\n return result",
"def test_get_records(self):\n pass",
"def test_create10(self):\n pass"
] | [
"0.6893712",
"0.6738024",
"0.66274077",
"0.6605562",
"0.6405234",
"0.63369054",
"0.63275754",
"0.63179785",
"0.6137767",
"0.61293244",
"0.61232316",
"0.61152595",
"0.6097307",
"0.60847896",
"0.6074008",
"0.607288",
"0.60721374",
"0.6059852",
"0.6054259",
"0.60447365",
"0.6002907",
"0.599489",
"0.59838146",
"0.5982359",
"0.5981233",
"0.59651697",
"0.5949851",
"0.58949643",
"0.5889373",
"0.58878577"
] | 0.70040137 | 0 |
Get the HillParameter corresponding to RF at eps. This is a bijective map. | def __call__(self,RF,eps):
n_range = [1+1e-1,100]
hill_coefficient_1_slope = RF.Delta/RF.theta
ramp_function_slope = RF.sign*RF.dx(RF.theta,eps)
target_hill_slope = ramp_function_slope + hill_coefficient_1_slope #chosen so that the map is bijective
hill_max_slope = self.get_hill_max_slope_func(RF)
f = lambda n: hill_max_slope(n) - target_hill_slope
while f(n_range[0]) > 0:
n_range[0] = 1 + (n_range[0]-1)*1e-1
while f(n_range[1])<0 and n_range[1] < self.max_allowed_hill_coefficient:
n_range[1] *= 10
if n_range[1] < self.max_allowed_hill_coefficient:
n = bisect(f,n_range[0],n_range[1])
else:
n = np.inf
return HillParameter(RF.sign,RF.L,RF.Delta,RF.theta,n) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params",
"def get_hyperparams(self):",
"def get_fe_params(self):\n return self._params[0:self.k_fe]",
"def _param(self) ->nn.Parameter:\n return next(self.parameters())",
"def get_iperparams(self):\n\t\treturn (self.D, self.K)",
"def park91b_lf(xx):\n\n yh = park91b_hf(xx)\n return 1.2 * yh - 1",
"def getParam(self):\n return self.__alpha0, self.__alpha1, self.__beta, self.__eta",
"def phs(x, y, rbfParam) :\n return (x**2 + y**2) ** (rbfParam/2)",
"def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)",
"def read_param_phil(self):\n\n # LABELIT target file settings\n if self.target_phil is None:\n self.write_default_phil()\n self.phil.ctr.SetValue(self.target_phil)\n\n # Resolution limits\n # \"Try/except\" for backwards compatibility\n try:\n lowres = self.params.cctbx_ha14.resolution_limits.low\n hires = self.params.cctbx_ha14.resolution_limits.high\n self.res_limits.lowres.SetValue(str(lowres))\n self.res_limits.hires.SetValue(str(hires))\n except AttributeError:\n pass\n\n # Target options\n # \"Try/except\" for backwards compatibility\n try:\n t_uc = self.params.cctbx_ha14.target_unit_cell\n t_lat = self.params.cctbx_ha14.target_lattice_type\n l_idx = self.target_lattice.ctr.FindString(str(t_lat))\n t_ctype = self.params.cctbx_ha14.target_centering_type\n if t_ctype == 'P':\n c_idx = 1\n elif t_ctype == 'C':\n c_idx = 2\n elif t_ctype == 'I':\n c_idx = 3\n elif t_ctype == 'R':\n c_idx = 4\n elif t_ctype == 'F':\n c_idx = 5\n else:\n c_idx = 0\n if t_uc is not None:\n uc_str = [str(i) for i in t_uc.parameters()]\n self.target_uc.cell.SetValue(' '.join(uc_str))\n self.target_lattice.ctr.SetSelection(l_idx)\n self.target_centering.ctr.SetSelection(c_idx)\n except AttributeError:\n pass\n\n # Grid search options\n idx = self.gs_type.ctr.FindString(self.params.cctbx_ha14.grid_search.type)\n self.set_grid_search(idx=idx)\n self.signal_search.SetValue(self.params.cctbx_ha14.grid_search.sig_height_search)\n\n # # Selection options\n # self.select_only.SetValue(self.params.cctbx_ha14.selection.select_only.flag_on)\n # self.img_objects_path.Enable(self.select_only.GetValue())\n\n idx = self.select_by.ctr.FindString(self.params.cctbx_ha14.selection.select_by)\n self.select_by.ctr.SetSelection(idx)\n\n self.min_sigma.sigma.SetValue(str(self.params.cctbx_ha14.selection.min_sigma))\n\n # Selection filters\n if self.params.cctbx_ha14.selection.prefilter.flag_on:\n pg = self.params.cctbx_ha14.selection.prefilter.target_pointgroup\n ut = self.params.cctbx_ha14.selection.prefilter.target_uc_tolerance\n rs = self.params.cctbx_ha14.selection.prefilter.min_resolution\n rf = self.params.cctbx_ha14.selection.prefilter.min_reflections\n if self.params.cctbx_ha14.selection.prefilter.target_unit_cell is not None:\n try:\n uc = self.params.cctbx_ha14.selection.prefilter.target_unit_cell.parameters()\n except AttributeError:\n uc = None\n else:\n uc = None\n\n if str(pg).lower() != 'none':\n self.filt_lattice.toggle_boxes()\n self.filt_lattice.lattice.SetValue(str(pg))\n if str(uc).lower() != 'none':\n self.filt_uc.toggle_boxes()\n self.filt_uc.a.SetValue(str(uc[0]))\n self.filt_uc.b.SetValue(str(uc[1]))\n self.filt_uc.c.SetValue(str(uc[2]))\n self.filt_uc.alpha.SetValue(str(uc[3]))\n self.filt_uc.beta.SetValue(str(uc[4]))\n self.filt_uc.gamma.SetValue(str(uc[5]))\n self.filt_uc.tolerance.SetValue(str(ut))\n if str(rs).lower() != 'none':\n self.filt_res.toggle_boxes()\n self.filt_res.res.SetValue(str(rs))\n if str(rf).lower() != 'none':\n self.filt_ref.toggle_boxes()\n self.filt_ref.ref.SetValue(str(rf))",
"def get_model_parameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), rho=(0.0 ,inf))\n return params",
"def def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf",
"def get_resulting_hypo_params(self, injkey):\n h0_params = self.fid_values[injkey][\n 'h0_fit_to_%s'%(self.labels.dict['data'])]['params']\n h1_params = self.fid_values[injkey][\n 'h1_fit_to_%s'%(self.labels.dict['data'])]['params']\n return h0_params, h1_params",
"def kHallLittlewoodP(self):\n return kbounded_HallLittlewoodP(self)",
"def default_feature_hp_kernel_config(defn):\n defn = _validate_definition(defn)\n\n # hyperparams\n hparams = {}\n for i, hp in enumerate(defn.hyperpriors()):\n if not hp:\n continue\n # XXX(stephentu): we are arbitrarily picking w=0.1\n hparams[i] = {k: (fn, 0.1) for k, fn in hp.iteritems()}\n\n if not hparams:\n return []\n else:\n return [('slice_feature_hp', {'hparams': hparams})]",
"def eff_param():\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)",
"def gamma_star(self):\n return self.reciprocal_lattice_parameters[5]",
"def hill( m, n, pH ):\n # the 6 is actually pKa which we are setting to 6 for this question\n X = float(m) * ( (10**(float(n)*(pH - 6))) / (1 + (10**(float(n)*(pH - 6 )))) )\n\n return X",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict",
"def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params",
"def waveparameterh(L):\r\n return 8.13 - ((250 - 0.7 * L) / 125) ** 3",
"def _get_current_hyperparameters(self):",
"def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 1e-7\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params",
"def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params",
"def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params",
"def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 0.3\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params",
"def _get_new_param(self):\n new_param = sympy.symbols(\"p\"+str(len(self.learning_params)))\n self.learning_params.append(new_param)\n return new_param",
"def getEichFromEQ(self, ep, verbose=False):\n #assuming plasma is centered in machine here\n zMin = ep.g['ZmAxis'] - 0.25\n zMax = ep.g['ZmAxis'] + 0.25\n zWall = np.linspace(zMin, zMax, 1000)\n zLCFS = ep.g['lcfs'][:,1]\n #this prevents us from getting locations not at midplane\n idx = np.where(np.logical_and(zLCFS>zMin,zLCFS<zMax))\n Rmax = ep.g['lcfs'][:,0][idx].max()\n Rmin = ep.g['lcfs'][:,0][idx].min()\n # geometric quantities\n Rgeo = (Rmax + Rmin) / 2.0\n a = (Rmax - Rmin) / 2.0\n aspect = a/Rgeo\n\n #Regression 15\n C = 1.35\n Cp = -0.02\n Cr = 0.04\n Cb = -0.92\n Ca = 0.42\n # Evaluate Bp at outboard midplane\n Z_omp_sol = 0.0\n Bp = abs(ep.BpFunc.ev(Rmax,Z_omp_sol))\n #Evaluate lq\n self.lqEich = C * self.Psol**Cp * Rgeo**Cr * Bp**Cb * aspect**Ca # in mm\n Bt = abs(ep.BtFunc.ev(ep.g['RmAxis'],ep.g['ZmAxis']))\n if verbose==True:\n print(\"Poloidal Field at midplane: {:f}\".format(Bp))\n print(\"Toroidal Field at axis: {:f}\".format(Bt))\n print(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n log.info(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n return",
"def _bowl_params(self):\n self.vars['bowl_strength'] = self.bowl.strength + \\\n self.vars['beta_min_offset']\n self.vars['q_init'] = self.vars['bowl_strength']\n if self.vars['bowl_strength'] <= self.vars['beta_min_offset']:\n print(\n f\"Bowl overflow -- Set to the minimum value : {self.vars['beta_min_offset']}\")\n # raise ValueError(\"Bowl overflow... strength lower than set tolerance. Modify the tolerance or fix the bug!\")\n self.vars['bowl_strength'] = self.vars['beta_min_offset']\n if self.vars['bowl_strength'] > self.vars['q_max']:\n self.vars['bowl_strength'] = self.vars['q_max']\n\n self.vars['zeta_bowl'] = self.toNeural(self.bowl.center)\n print(f\"Value for Q set to {self.vars['bowl_strength']}\")",
"def _m_to_kHLP_on_basis(self, la):\n if self.t == 1:\n if la in self._kbounded_partitions:\n return self(la)\n else:\n return self.zero()\n else:\n HLP = self._kBoundedRing._quotient_basis\n m = self._kBoundedRing._sym.m()\n elt = dict({ x for x in dict(HLP(m(la))).iteritems() if x[0] in self._kbounded_partitions })\n return self._from_dict(elt)"
] | [
"0.5519696",
"0.5459801",
"0.53573745",
"0.5348384",
"0.53123516",
"0.5267668",
"0.52640975",
"0.5259628",
"0.52242345",
"0.5190932",
"0.5172484",
"0.5170918",
"0.51622325",
"0.5160279",
"0.51536065",
"0.51485074",
"0.5137379",
"0.5129",
"0.5086387",
"0.5064715",
"0.50481415",
"0.5042429",
"0.50258607",
"0.5023002",
"0.5023002",
"0.5015319",
"0.5004741",
"0.49996728",
"0.4994969",
"0.49948505"
] | 0.7127517 | 0 |
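The record above recovers a Hill coefficient by bisecting on the gap between the Hill nonlinearity's maximum slope and a target slope built from the ramp function. Below is a minimal, self-contained sketch of that idea; the Hill form L + Delta * x**n / (theta**n + x**n), the grid-based slope estimate, and the parameter values are illustrative assumptions, not the dataset's HillParameter or ramp-function classes.

```python
# Illustrative only: bisect on n so the Hill curve's maximum slope hits a target.
# Assumes the target slope exceeds the n -> 1 slope, so the bracket is valid.
import numpy as np
from scipy.optimize import bisect

def hill_max_slope(n, L=1.0, Delta=2.0, theta=1.5):
    # Numerically estimate max d/dx of L + Delta * x**n / (theta**n + x**n).
    x = np.linspace(1e-6, 5.0 * theta, 20000)
    h = L + Delta * x**n / (theta**n + x**n)
    return float(np.max(np.gradient(h, x)))

def hill_coefficient_for_slope(target_slope, n_lo=1.0 + 1e-6, n_hi=100.0):
    f = lambda n: hill_max_slope(n) - target_slope
    while f(n_hi) < 0 and n_hi < 1e4:   # widen the upper bracket until it straddles the root
        n_hi *= 10.0
    return bisect(f, n_lo, n_hi) if n_hi < 1e4 else float("inf")

print(hill_coefficient_for_slope(target_slope=2.0))
```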
Initialises LogIn page model. It creates the selenium webdriver object and loads the selectors configuration file | def __init__(self):
self.driver = webdriver.Chrome()
self.driver.get('https://qknows-qa.basf.com')
self.wait = WebDriverWait(self.driver, 10)
self.selectors = json.load(open('sut/selectors/login.json')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n driver = webdriver.Firefox() #working with firefox. see above comment and edit it to change the browser\n\n self.driver.get(\"https://moodle.niituniversity.in\")\n self.tLogin()",
"def login(self):\n self.driver.get(f'{self.base_url}/signin')\n\n # Fill username and password\n enter_username = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'email')))\n enter_username.send_keys(self.username)\n enter_password = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'password')))\n enter_password.send_keys(self.password)\n\n # Press the Log In Button\n self.driver.find_element_by_xpath('//*[@id=\"root\"]/div/div[3]/div/div/div/div/div[2]/div/form/div/div[2]/button').click()\n\n # Wait for the page to load (5 seconds)\n sleep(5)",
"def setUpClass(cls):\r\n from tests.main import variables as _variables\r\n\r\n cls.logger = logging.getLogger(__name__)\r\n cls.driver = webdriver.Chrome()\r\n cls.driver.maximize_window()\r\n gw_url = _variables[\"uccURL\"]\r\n cls.driver.get(gw_url)\r\n UCCLogin = pages.UCCLogin(cls.driver)\r\n cls.logger.info(\"Checking if UCC web UI is loaded\")\r\n cls.assertTrue(UCCLogin.match_page_tilte(),\"UCC web UI is not loaded successfully\")\r\n\r\n cls.logger.info(\"Checking if login page is loaded\")\r\n cls.assertTrue(UCCLogin.is_login_form_present(),\"Login page is not loaded successfully\")\r\n\r\n cls.logger.info(\"Logging in UCC...\")\r\n UCCLogin.login(_variables[\"login_username\"],_variables[\"login_password\"])\r\n\r\n cls.logger.info(\"Checking if we are logged in to the first page\")\r\n cls.assertTrue(UCCLogin.check_for_controller(),\r\n \"Dashboard Page is loaded correctly\")",
"def set_up(self, web_driver):\n self.driver = web_driver\n self.wait = WebDriverWait(self.driver, 60)\n\n self.google_page = GoogleSearchPage(self.driver, locator, conf, message)\n self.flipkart_page = FlipkartPage(self.driver, locator, message)",
"def setUpClass(cls):\n cls.driver = webdriver.Chrome()\n cls.driver.maximize_window()\n cls.driver.get('https://letskodeit.teachable.com/p/practice')",
"def __init__(self):\r\n self.load_config()\r\n self.login()",
"def setUp(self):\r\n self.driver = webdriver.Firefox()",
"def __init__(self, email, password):\n self.user = email\n self.password = password\n self.date = datetime.today()\n\n driver_path = os.path.join('..', 'assets', 'chromedriver.exe')\n self.driver = webdriver.Chrome(driver_path)\n self.driver.get(\"https://www.sevenrooms.com/login\")\n # Login\n self.driver.find_element_by_name(\"email\").send_keys(email)\n self.driver.find_element_by_name(\"password\").send_keys(password)\n self.driver.find_element_by_name(\"lsubmit\").click()\n\n self.update_html()",
"def set_up_driver(self):\r\n\t\ttry:\r\n\t\t\tself.driver = webdriver.Firefox()\r\n\t\texcept Exception:\r\n\t\t\tself.driver = False",
"def login(self):\r\n\r\n # Open browser with the login URL\r\n self.browser.open(self.config[\"base_url\"] + \"login\")\r\n\r\n # Select the login form\r\n self.browser.select_form('form[action=\"/login/\"]')\r\n\r\n # Fill the login form.\r\n self.browser[\"email\"] = self.config[\"email\"]\r\n self.browser[\"password\"] = self.config[\"password\"]\r\n\r\n # Submit form\r\n self.browser.submit_selected()",
"def log_in(self):\n\n # Get login page.\n self.get_endpoint(endpoint=self.config['paths']['login'])\n\n # Post log-in data.\n email_form = self.browser.find_element_by_xpath(\"//input[@id='email']\")\n pw_form = self.browser.find_element_by_xpath(\"//input[@id='password']\")\n email_form.send_keys(self.credentials['email'])\n pw_form.send_keys(self.credentials['password'])\n\n # Initial log-in returns /private endpoint.\n self.browser.find_element_by_xpath(\"//input[@type='submit']\").click()",
"def test_from_crawler_method_should_initialize_the_driver(self):\n\n crawler = Crawler(\n spidercls=self.spider_klass,\n settings=self.settings\n )\n selenium_middleware = SeleniumMiddleware.from_crawler(crawler)\n\n # The driver must be initialized\n self.assertIsNotNone(selenium_middleware.driver)\n\n # We can now use the driver\n selenium_middleware.driver.get('http://www.python.org')\n self.assertIn('Python', selenium_middleware.driver.title)\n\n selenium_middleware.driver.close()",
"def login_into_horizon(self):\n logging.info(\"logging into {}\".format(self.horizon_login_url))\n try:\n self.driver.get(self.horizon_login_url)\n pageElement = Select(self.driver.find_element_by_name(\"auth_type\"))\n if self.auth_type == 'Keystone':\n pageElement.select_by_value('credentials')\n #pageElement.select_by_visible_text('Keystone Credentials')\n pageElement = self.driver.find_element_by_name(\"username\")\n pageElement.send_keys(self.username)\n pageElement = self.driver.find_element_by_name(\"password\")\n pageElement.send_keys(self.password)\n\t pageElement = self.driver.find_element_by_css_selector(\"button[type='submit']\")\n\t pageElement.click()\n else:\n #pageElement.select_by_value('saml2')\n\t pageElement = self.driver.find_element_by_id(\"loginBtn\")\n\t pageElement.click()\n element = WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.NAME, \"username\")))\n\t #pageElement = self.driver.find_element_by_name(\"Connect\")\n pageElement = self.driver.find_element_by_name(\"username\")\n pageElement.send_keys(self.username)\n pageElement = self.driver.find_element_by_name(\"password\")\n pageElement.send_keys(self.password)\n\t pageElement = self.driver.find_element_by_css_selector(\"input[type='submit'][value='Login']\")\n\t pageElement.click()\n\n except NoSuchElementException:\n raise exceptions.PageSourceException(\"Element not found\")\n\n navigationStart = self.driver.execute_script(\n \"return window.performance.timing.navigationStart\")\n responseStart = self.driver.execute_script(\n \"return window.performance.timing.responseStart\")\n domComplete = self.driver.execute_script(\n \"return window.performance.timing.domComplete\")\n\n if \"Invalid\" in self.driver.page_source:\n raise exceptions.LoginFailureException('Invalid Username/Password')\n\n backendPerformance = responseStart - navigationStart\n frontendPerformance = domComplete - responseStart\n totalTime = (backendPerformance + frontendPerformance) \n\n logging.info(\"load time [Login Page] is {} ms\".format(totalTime))\n\n return { 'Login Page': str(totalTime) + \" ms\" }",
"def __init__(self, username = None, password = None):\n self.username = config['AUTH']['USERNAME']\n self.password = config['AUTH']['PASSWORD']\n self.login = config['URL']['LOGIN']\n self.nav_url = config['URL']['NAV']\n self.tag_url = config['URL']['TAGS']\n self.direct_url = config['URL']['DM']\n self.driver = webdriver.Chrome(config['ENVIRONMENT']['CHROMEDRIVER'])\n self.stay_logged = False\n self.api = InstagramAPI(self.username, self.password)",
"def initialize(self):\n self.login()",
"def setUp(self):\r\n self.verificationErrors = []\r\n self.selenium = (selenium(selvars.set_localhost(), selvars.set_port(), selvars.set_browser(self.id(),self.shortDescription()), selvars.set_site()))\r\n self.selenium.start()\r\n self.session = self.selenium.sessionId\r\n self.selenium.set_timeout(testvars.timeout)",
"def login():\n login_page = Login()\n login_page.login_main_page()",
"def __init__(self):\n \n self.ua = USERAGENT\n self.br = mechanize.Browser()\n self.br.addheaders = [('User-Agent', self.ua)]\n print \"Browser initialized with user agent\"\n\n self.login()",
"def login_page(driver, open_login_page):\n return LoginPage(driver)",
"def __init__(self):\n self.driver = webdriver.Chrome()",
"def __init__(self, userid: str, password: str) -> object:\n\n try:\n self.browser = webdriver.Chrome()\n self.browser.set_page_load_timeout(10)\n self.browser.implicitly_wait(5)\n except Exception as e:\n raise Exception(\"Error starting browser: \" + str(e))\n\n try:\n self.browser.get(\"https://www.nttb-administratie.nl\")\n frame = self.browser.find_element_by_name('nttbadministratie_login')\n self.browser.switch_to.frame(frame)\n useridfield = self.browser.find_element_by_name('gebruikersnaam')\n useridfield.send_keys(userid)\n passwordfield = self.browser.find_element_by_name('wachtwoord')\n passwordfield.send_keys(password)\n login = self.browser.find_element_by_link_text('inloggen »»')\n login.click()\n\n try:\n # Switch naar het net geopende window\n window_after = self.browser.window_handles[1]\n self.browser.close()\n self.browser.switch_to.window(window_after)\n\n # naar het frame in het nieuwe window\n frame = self.browser.find_element_by_name('nas_content')\n self.browser.switch_to.frame(frame)\n except Exception as e:\n # Een foute login is best wel lastig af te vangen omdat de error pagina maar heel kort wordt gedisplayed.\n # Daarom doe ik een aanname dat als we niet naar het juiste frame kunnen er een foute login is.\n # Dit is natuurlijk niet waterdicht\n raise Exception(\"Wrong credentials\")\n\n except Exception as e:\n raise Exception(\"Login process failed: \" + str(e))",
"def test_login(self):\n # Open the admin index page\n self.open(reverse('admin:index'))\n\n # Selenium knows it has to wait for page loads (except for AJAX requests)\n # so we don't need to do anything about that, and can just\n # call find_css. Since we can chain methods, we can\n # call the built-in send_keys method right away to change the\n # value of the field\n self.wd.find_css('#id_username').send_keys(\"admin\")\n # for the password, we can now just call find_css since we know the page\n # has been rendered\n self.wd.find_css(\"#id_password\").send_keys('pw')\n # You're not limited to CSS selectors only, check\n # http://seleniumhq.org/docs/03_webdriver.html for\n # a more compreehensive documentation.\n self.wd.find_element_by_xpath('//input[@value=\"Log in\"]').click()\n # Again, after submiting the form, we'll use the find_css helper\n # method and pass as a CSS selector, an id that will only exist\n # on the index page and not the login page\n self.wd.find_css(\"#content-main\")",
"def __init__(self, ctx):\n # Debug log\n self.log = logging.getLogger('ipsv.login')\n\n self.cookiejar = cookiejar()\n self.cookies = {cookie.name: cookie.value for cookie in self.cookiejar}\n\n self.browser = Browser()\n self.browser.set_cookiejar(self.cookiejar)",
"def setUpClass(cls):\n cls.driver = webdriver.Chrome(\"C:\\dev\\Python\\TA with Python - SoftServe\\chromedriver\\chromedriver.exe\")\n cls.driver.maximize_window()",
"def controls_setup(self):\n\n self.login = element.Link(self, class_name='nav-login', alias='Navbar->Login Link')\n self.register = element.Link(self, class_name='nav-register', alias='Navbar->Register Link')\n self.logout = element.Link(self, class_name='nav-logout', alias='Navbar->Logout Link')\n self.be_a_merchant = element.Link(self, class_name='nav-partner-join',\n alias='Navbar->Be A Merchant Link')\n self.wishlist = element.Link(self, class_name='nav-wishlist', alias='Navbar->Wishlist Icon Button')\n self.cart = element.Link(self, class_name='nav-cart', alias='Navbar->Cart Icon Button')\n self.account = element.Link(self, css_selector='a.nav-account', alias='Navbar->Account Link')\n self.messages = element.Link(self, class_name='nav-messages', alias='Navbar->Messages Link')\n self.dashboard = element.Link(self, class_name='nav-dashboard', alias='Navbar->Dashboard Link')\n self.messages_count = element.Element(self, dom_id='postman_unread_count',\n alias='Navbar->Unread Messages Count Label')\n\n self.search_query = element.TextBox(self, name='q', alias='Navbar->Search Box')\n self.search_button = element.Button(self, css_selector='form.search button[type=submit]',\n alias='Navbar->Search Icon Button')",
"def home_page_logged_in(base_url, selenium, variables):\n from pages.login import LoginPage\n login_pg = LoginPage(base_url, selenium)\n login_pg.open()\n home_pg = login_pg.login(variables['credentials']['fusor']['username'],\n variables['credentials']['fusor']['password'])\n return home_pg",
"def login(self):\n\n self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, self.selectors['button_login_css'])))\n\n self.driver.find_element_by_css_selector(self.selectors['button_login_css']).click()\n return self.driver",
"def connect(self) -> None:\n self.driver.get(self.base_url)",
"def __init__(self, driver, testclass) -> None:\n super().__init__(driver, testclass)\n \n self.page_url = self.driver.current_url\n self.page_title = \"The Internet\"\n self.link_elem = self.driver.find_element_by_link_text(\n \"Form Authentication\")",
"def setUp(self):\n super().setUp()\n self.login_page.auto_login(first_admin)\n GroupsPage(self.driver).select_group_by_name(data['group_name'])\n self.students_page = StudentsPage(self.driver)"
] | [
"0.63349766",
"0.61009765",
"0.6092834",
"0.6027355",
"0.59561366",
"0.5899323",
"0.58560854",
"0.5831826",
"0.5802246",
"0.57403225",
"0.5735208",
"0.57276696",
"0.57185626",
"0.5715317",
"0.57100016",
"0.5699238",
"0.5687398",
"0.56593454",
"0.559867",
"0.55831605",
"0.5565956",
"0.5560277",
"0.5543989",
"0.5542802",
"0.55280554",
"0.5520861",
"0.5518499",
"0.5516369",
"0.5507671",
"0.54926646"
] | 0.70190394 | 0 |
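For the page-object record above, a hedged companion sketch shows how selectors loaded from a JSON file might drive a login step; the selector keys ("username_css", "password_css", "submit_css") and constructor arguments are assumptions for illustration, not part of the record.

```python
# Hypothetical usage of the page-object pattern above (selector keys are assumed).
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

class LoginPage:
    def __init__(self, url, selectors_path):
        self.driver = webdriver.Chrome()
        self.driver.get(url)
        self.wait = WebDriverWait(self.driver, 10)
        with open(selectors_path) as fh:
            self.selectors = json.load(fh)   # e.g. {"username_css": "...", ...}

    def login(self, username, password):
        # Wait for the form, fill in credentials, then submit.
        self.wait.until(EC.visibility_of_element_located(
            (By.CSS_SELECTOR, self.selectors["username_css"])))
        self.driver.find_element(By.CSS_SELECTOR, self.selectors["username_css"]).send_keys(username)
        self.driver.find_element(By.CSS_SELECTOR, self.selectors["password_css"]).send_keys(password)
        self.driver.find_element(By.CSS_SELECTOR, self.selectors["submit_css"]).click()

    def close(self):
        self.driver.close()
```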
Closes the selenium webdriver | def close(self):
self.driver.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def close_browser():\n driver.close()",
"def i_close_the_browser():\n driver.close()\n driver.quit()",
"def close(self):\n self.driver.quit()",
"def close_driver(driver):\n driver.close()",
"def close_driver(driver):\n driver.close()",
"def closeBrowser(driver):\n driver.quit()",
"def close(self):\n\n self._driver.quit()",
"def close(self):\n self._driver.close()",
"def close_and_finish_execution():\n driver.close()\n driver.quit()\n exit(0)",
"def close_session(self):\n self.driver.close()",
"def closeChrome(self):\r\n\t\tself.driver.close()\r\n\t\treturn",
"def close_driver(self):\r\n\t\tif not self.driver:\r\n\t\t\treturn;\r\n\r\n\t\tself.driver.close()",
"def end_session(self):\r\n self.web_driver.quit()\r\n self.write_log(\"Web driver ended.\")",
"def tearDown(self):\r\n self.driver.close()",
"def close(_driver):\n _driver.close() # closing the driver",
"def close(self):\n\n self.driver.close_window(self.handle)",
"def close(self):\r\n t1 = time.time()\r\n self.driver.close()\r\n self.my_print(\"{0} Closed current window, Spend {1} seconds\".format(success, time.time() - t1))",
"def stop_browser(self):\n self.driver.quit()",
"def tear_down(self):\n self.driver.close()\n self.driver.quit()",
"def teardown(self):\n self.log.info('Close browser')\n self.driver.quit()",
"def disconnect(self) -> None:\n self.driver.quit()",
"def quit(self):\n self.driver.quit()",
"def teardown(self):\r\n self.driver.quit()",
"def close(self):\n self.m_driver.close()\n self.m_driver = None\n\n if self.m_display is not None:\n self.m_display.stop()\n self.m_display = None\n\n if self.m_vpn_handle is not None:\n self.m_vpn_handle.close()\n self.m_vpn_handle = None\n\n l_result = run(['sudo', 'killall', '-9', 'chromedriver'], stdout=PIPE, stderr=PIPE)\n self.m_logger.info('Killing chromedriver : ' + repr(l_result))\n l_result = run(['sudo', 'killall', '-9', 'chromium-browser'], stdout=PIPE, stderr=PIPE)\n self.m_logger.info('Killing Chromium : ' + repr(l_result))",
"def quit(self):\n driver.quit()",
"def close(self):\n\n #Kill all zombie PIDs and exit gracefully\n try:\n self.webdriver.quit()\n except:\n pass\n if 'p' not in sys.argv:\n self.kill()\n sys.exit()",
"def quit(self):\n self.driver.close_app()\n self.driver.quit()",
"def __exit__(self, *args):\n self.driver.quit()",
"def quit(self):\n if self.webdriver is not None:\n self.webdriver.quit()\n self.webdriver = None",
"def tearDownClass(cls):\n cls.driver.close()\n cls.driver.quit()"
] | [
"0.8613652",
"0.8472183",
"0.8470703",
"0.8204732",
"0.8204732",
"0.8184607",
"0.81776446",
"0.8136131",
"0.8112698",
"0.8029229",
"0.7811337",
"0.7658541",
"0.7657188",
"0.7649241",
"0.7635745",
"0.76175964",
"0.75127256",
"0.74865437",
"0.74256766",
"0.7379712",
"0.73675275",
"0.735064",
"0.73221916",
"0.7319374",
"0.73082906",
"0.7295504",
"0.72547734",
"0.7175371",
"0.71521485",
"0.7081936"
] | 0.8557682 | 1 |
The function for bulk retrieving and parsing whois information for a list of IP addresses via HTTP (RDAP). This bulk lookup method uses bulk ASN Whois lookups first to retrieve the ASN for each IP. It then optimizes RDAP queries to achieve the fastest overall time, accounting for rate-limiting RIRs. | def bulk_lookup_rdap(addresses=None, inc_raw=False, retry_count=3, depth=0,
excluded_entities=None, rate_limit_timeout=60,
socket_timeout=10, asn_timeout=240, proxy_openers=None):
if not isinstance(addresses, list):
raise ValueError('addresses must be a list of IP address strings')
# Initialize the dicts/lists
results = {}
failed_lookups_dict = {}
rated_lookups = []
stats = {
'ip_input_total': len(addresses),
'ip_unique_total': 0,
'ip_lookup_total': 0,
'lacnic': {'failed': [], 'rate_limited': [], 'total': 0},
'ripencc': {'failed': [], 'rate_limited': [], 'total': 0},
'apnic': {'failed': [], 'rate_limited': [], 'total': 0},
'afrinic': {'failed': [], 'rate_limited': [], 'total': 0},
'arin': {'failed': [], 'rate_limited': [], 'total': 0},
'unallocated_addresses': []
}
asn_parsed_results = {}
if proxy_openers is None:
proxy_openers = [None]
proxy_openers_copy = iter(proxy_openers)
# Make sure addresses is unique
unique_ip_list = list(unique_everseen(addresses))
# Get the unique count to return
stats['ip_unique_total'] = len(unique_ip_list)
# This is needed for iteration order
rir_keys_ordered = ['lacnic', 'ripencc', 'apnic', 'afrinic', 'arin']
# First query the ASN data for all IPs, can raise ASNLookupError, no catch
bulk_asn = get_bulk_asn_whois(unique_ip_list, timeout=asn_timeout)
# ASN results are returned as string, parse lines to list and remove first
asn_result_list = bulk_asn.split('\n')
del asn_result_list[0]
# We need to instantiate IPASN, which currently needs a Net object,
# IP doesn't matter here
net = Net('1.2.3.4')
ipasn = IPASN(net)
# Iterate each IP ASN result, and add valid RIR results to
# asn_parsed_results for RDAP lookups
for asn_result in asn_result_list:
temp = asn_result.split('|')
# Not a valid entry, move on to next
if len(temp) == 1:
continue
ip = temp[1].strip()
# We need this since ASN bulk lookup is returning duplicates
# This is an issue on the Cymru end
if ip in asn_parsed_results.keys(): # pragma: no cover
continue
try:
            asn_fields = ipasn.parse_fields_whois(asn_result)
except ASNRegistryError: # pragma: no cover
continue
# Add valid IP ASN result to asn_parsed_results for RDAP lookup
        asn_parsed_results[ip] = asn_fields
        stats[asn_fields['asn_registry']]['total'] += 1
# Set the list of IPs that are not allocated/failed ASN lookup
stats['unallocated_addresses'] = list(k for k in addresses if k not in
asn_parsed_results)
# Set the total lookup count after unique IP and ASN result filtering
stats['ip_lookup_total'] = len(asn_parsed_results)
# Track the total number of LACNIC queries left. This is tracked in order
# to ensure the 9 priority LACNIC queries/min don't go into infinite loop
lacnic_total_left = stats['lacnic']['total']
# Set the start time, this value is updated when the rate limit is reset
old_time = time.time()
# Rate limit tracking dict for all RIRs
rate_tracker = {
'lacnic': {'time': old_time, 'count': 0},
'ripencc': {'time': old_time, 'count': 0},
'apnic': {'time': old_time, 'count': 0},
'afrinic': {'time': old_time, 'count': 0},
'arin': {'time': old_time, 'count': 0}
}
# Iterate all of the IPs to perform RDAP lookups until none are left
while len(asn_parsed_results) > 0:
# Sequentially run through each RIR to minimize lookups in a row to
# the same RIR.
for rir in rir_keys_ordered:
# If there are still LACNIC IPs left to lookup and the rate limit
# hasn't been reached, skip to find a LACNIC IP to lookup
if (
rir != 'lacnic' and lacnic_total_left > 0 and
(rate_tracker['lacnic']['count'] != 9 or
(time.time() - rate_tracker['lacnic']['time']
) >= rate_limit_timeout
)
): # pragma: no cover
continue
# If the RIR rate limit has been reached and hasn't expired,
# move on to the next RIR
if (
rate_tracker[rir]['count'] == 9 and (
(time.time() - rate_tracker[rir]['time']
) < rate_limit_timeout)
): # pragma: no cover
continue
# If the RIR rate limit has expired, reset the count/timer
# and perform the lookup
elif ((time.time() - rate_tracker[rir]['time']
) >= rate_limit_timeout): # pragma: no cover
rate_tracker[rir]['count'] = 0
rate_tracker[rir]['time'] = time.time()
# Create a copy of the lookup IP dict so we can modify on
# successful/failed queries. Loop each IP until it matches the
# correct RIR in the parent loop, and attempt lookup
tmp_dict = asn_parsed_results.copy()
for ip, asn_data in tmp_dict.items():
# Check to see if IP matches parent loop RIR for lookup
if asn_data['asn_registry'] == rir:
log.debug('Starting lookup for IP: {0} '
'RIR: {1}'.format(ip, rir))
# Add to count for rate-limit tracking only for LACNIC,
# since we have not seen aggressive rate-limiting from the
# other RIRs yet
if rir == 'lacnic':
rate_tracker[rir]['count'] += 1
# Get the next proxy opener to use, or None
try:
opener = next(proxy_openers_copy)
# Start at the beginning if all have been used
except StopIteration:
proxy_openers_copy = iter(proxy_openers)
opener = next(proxy_openers_copy)
# Instantiate the objects needed for the RDAP lookup
net = Net(ip, timeout=socket_timeout, proxy_opener=opener)
rdap = RDAP(net)
try:
# Perform the RDAP lookup. retry_count is set to 0
# here since we handle that in this function
                        rdap_result = rdap.lookup(
inc_raw=inc_raw, retry_count=0, asn_data=asn_data,
depth=depth, excluded_entities=excluded_entities
)
log.debug('Successful lookup for IP: {0} '
'RIR: {1}'.format(ip, rir))
# Lookup was successful, add to result. Set the nir
# key to None as this is not supported
# (yet - requires more queries)
                        results[ip] = rdap_result
results[ip]['nir'] = None
# Remove the IP from the lookup queue
del asn_parsed_results[ip]
# If this was LACNIC IP, reduce the total left count
if rir == 'lacnic':
lacnic_total_left -= 1
log.debug(
'{0} total lookups left, {1} LACNIC lookups left'
''.format(str(len(asn_parsed_results)),
str(lacnic_total_left))
)
# If this IP failed previously, remove it from the
# failed return dict
if (
ip in failed_lookups_dict.keys()
): # pragma: no cover
del failed_lookups_dict[ip]
# Break out of the IP list loop, we need to change to
# the next RIR
break
except HTTPLookupError: # pragma: no cover
log.debug('Failed lookup for IP: {0} '
'RIR: {1}'.format(ip, rir))
# Add the IP to the failed lookups dict if not there
if ip not in failed_lookups_dict.keys():
failed_lookups_dict[ip] = 1
# This IP has already failed at least once, increment
# the failure count until retry_count reached, then
# stop trying
else:
failed_lookups_dict[ip] += 1
if failed_lookups_dict[ip] == retry_count:
del asn_parsed_results[ip]
stats[rir]['failed'].append(ip)
if rir == 'lacnic':
lacnic_total_left -= 1
# Since this IP failed, we don't break to move to next
# RIR, we check the next IP for this RIR
continue
except HTTPRateLimitError: # pragma: no cover
# Add the IP to the rate-limited lookups dict if not
# there
if ip not in rated_lookups:
rated_lookups.append(ip)
stats[rir]['rate_limited'].append(ip)
log.debug('Rate limiting triggered for IP: {0} '
'RIR: {1}'.format(ip, rir))
# Since rate-limit was reached, reset the timer and
# max out the count
rate_tracker[rir]['time'] = time.time()
rate_tracker[rir]['count'] = 9
# Break out of the IP list loop, we need to change to
# the next RIR
break
return_tuple = namedtuple('return_tuple', ['results', 'stats'])
return return_tuple(results, stats) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_rdap(self,ip_address):\n try:\n with warnings.catch_warnings():\n # Hide the 'allow_permutations has been deprecated' warning until ipwhois removes it\n warnings.filterwarnings(\"ignore\",category=UserWarning)\n rdapwho = IPWhois(ip_address)\n results = rdapwho.lookup_rdap(depth=1)\n return results\n except Exception as error:\n click.secho(\"[!] Failed to collect RDAP information for {}!\".format(ip_address),fg=\"red\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")",
"def lookup_whois(self, inc_raw=False, retry_count=3, get_referral=False,\r\n extra_blacklist=None, ignore_referral_errors=False,\r\n field_list=None, asn_alts=None):\r\n\r\n from .whois import Whois\r\n\r\n # Create the return dictionary.\r\n results = {}\r\n\r\n # Retrieve the ASN information.\r\n log.debug('ASN lookup for {0}'.format(self.address_str))\r\n asn_data, response = self.net.lookup_asn(retry_count, asn_alts)\r\n\r\n # Add the ASN information to the return dictionary.\r\n results.update(asn_data)\r\n\r\n # Retrieve the whois data and parse.\r\n whois = Whois(self.net)\r\n log.debug('WHOIS lookup for {0}'.format(self.address_str))\r\n whois_data = whois.lookup(\r\n inc_raw, retry_count, response, get_referral, extra_blacklist,\r\n ignore_referral_errors, asn_data, field_list\r\n )\r\n\r\n # Add the RDAP information to the return dictionary.\r\n results.update(whois_data)\r\n\r\n return results",
"def get_bulk_asn_whois(addresses=None, retry_count=3, timeout=120):\n\n if not isinstance(addresses, list):\n\n raise ValueError('addresses argument must be a list of IPv4/v6 '\n 'address strings.')\n\n try:\n\n # Create the connection for the Cymru whois query.\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.settimeout(timeout)\n log.debug('ASN bulk query initiated.')\n conn.connect((CYMRU_WHOIS, 43))\n\n # Query the Cymru whois server, and store the results.\n conn.sendall((\n ' -r -a -c -p -f begin\\n{0}\\nend'.format(\n '\\n'.join(addresses))\n ).encode())\n\n data = ''\n while True:\n\n d = conn.recv(4096).decode()\n data += d\n\n if not d:\n\n break\n\n conn.close()\n\n return str(data)\n\n except (socket.timeout, socket.error) as e: # pragma: no cover\n\n log.debug('ASN bulk query socket error: {0}'.format(e))\n if retry_count > 0:\n\n log.debug('ASN bulk query retrying (count: {0})'.format(\n str(retry_count)))\n return get_bulk_asn_whois(addresses, retry_count - 1, timeout)\n\n else:\n\n raise ASNLookupError('ASN bulk lookup failed.')\n\n except: # pragma: no cover\n\n raise ASNLookupError('ASN bulk lookup failed.')",
"async def aio_rdap_domain_lookup(url: str, http_client: Optional[Any] = None) -> PyWhoIs:\n whois = await PyWhoIs._aio_rdap_domain_from_url(url, http_client)\n return whois",
"def query_ptr_records_for_all_ips(self):\n\n log.debug(\"Starting to look up PTR records for IP addresses\")\n\n # store IP addresses to look them up in bulk\n ip_lookup_dict = dict()\n\n # iterate over all IP addresses and try to match them to a prefix\n for ip in self.get_all_items(NBIPAddress):\n\n # ignore IPs which are not handled by any source\n if ip.source is None:\n continue\n\n # get IP and prefix length\n ip_a = grab(ip, \"data.address\", fallback=\"\").split(\"/\")[0]\n\n # check if we meant to look up DNS host name for this IP\n if grab(ip, \"source.dns_name_lookup\", fallback=False) is True:\n\n if ip_lookup_dict.get(ip.source) is None:\n\n ip_lookup_dict[ip.source] = {\n \"ips\": list(),\n \"servers\": grab(ip, \"source.custom_dns_servers\")\n }\n\n ip_lookup_dict[ip.source].get(\"ips\").append(ip_a)\n\n # now perform DNS requests to look up DNS names for IP addresses\n for source, data in ip_lookup_dict.items():\n\n if len(data.get(\"ips\")) == 0:\n continue\n\n # get DNS names for IP addresses:\n records = perform_ptr_lookups(data.get(\"ips\"), data.get(\"servers\"))\n\n for ip in self.get_all_items(NBIPAddress):\n\n if ip.source != source:\n continue\n\n ip_a = grab(ip, \"data.address\", fallback=\"\").split(\"/\")[0]\n\n dns_name = records.get(ip_a)\n\n if dns_name is not None:\n\n ip.update(data={\"dns_name\": dns_name})\n\n log.debug(\"Finished to look up PTR records for IP addresses\")",
"def lookup_rdap(self, inc_raw=False, retry_count=3, depth=0,\r\n excluded_entities=None, bootstrap=False,\r\n rate_limit_timeout=120, asn_alts=None):\r\n\r\n from .rdap import RDAP\r\n\r\n # Create the return dictionary.\r\n results = {}\r\n\r\n asn_data = None\r\n response = None\r\n if not bootstrap:\r\n\r\n # Retrieve the ASN information.\r\n log.debug('ASN lookup for {0}'.format(self.address_str))\r\n asn_data, response = self.net.lookup_asn(retry_count, asn_alts)\r\n\r\n # Add the ASN information to the return dictionary.\r\n results.update(asn_data)\r\n\r\n # Retrieve the RDAP data and parse.\r\n rdap = RDAP(self.net)\r\n log.debug('RDAP lookup for {0}'.format(self.address_str))\r\n rdap_data = rdap.lookup(inc_raw, retry_count, asn_data, depth,\r\n excluded_entities, response, bootstrap,\r\n rate_limit_timeout)\r\n\r\n # Add the RDAP information to the return dictionary.\r\n results.update(rdap_data)\r\n\r\n return results",
"def lookup(self, inc_raw=False, retry_count=3, response=None,\n get_referral=False, extra_blacklist=None,\n ignore_referral_errors=False, asn_data=None,\n field_list=None, is_offline=False):\n\n # Create the return dictionary.\n results = {\n 'query': self._net.address_str,\n 'nets': [],\n 'raw': None,\n 'referral': None,\n 'raw_referral': None\n }\n\n # The referral server and port. Only used if get_referral is True.\n referral_server = None\n referral_port = 0\n\n # Only fetch the response if we haven't already.\n if response is None or (not is_offline and\n asn_data['asn_registry'] is not 'arin'):\n\n log.debug('Response not given, perform WHOIS lookup for {0}'\n .format(self._net.address_str))\n\n # Retrieve the whois data.\n response = self._net.get_whois(\n asn_registry=asn_data['asn_registry'], retry_count=retry_count,\n extra_blacklist=extra_blacklist\n )\n\n if get_referral:\n\n # Search for a referral server.\n for match in re.finditer(\n r'^ReferralServer:[^\\S\\n]+(.+:[0-9]+)$',\n response,\n re.MULTILINE\n ):\n\n try:\n\n temp = match.group(1)\n if 'rwhois://' not in temp: # pragma: no cover\n raise ValueError\n\n temp = temp.replace('rwhois://', '').split(':')\n\n if int(temp[1]) > 65535: # pragma: no cover\n raise ValueError\n\n referral_server = temp[0]\n referral_port = int(temp[1])\n\n except (ValueError, KeyError): # pragma: no cover\n\n continue\n\n break\n\n # Retrieve the referral whois data.\n if get_referral and referral_server:\n\n log.debug('Perform referral WHOIS lookup')\n\n response_ref = None\n\n try:\n\n response_ref = self._net.get_whois(\n asn_registry='', retry_count=retry_count,\n server=referral_server, port=referral_port,\n extra_blacklist=extra_blacklist\n )\n\n except (BlacklistError, WhoisLookupError):\n\n if ignore_referral_errors:\n\n pass\n\n else:\n\n raise\n\n if response_ref:\n\n log.debug('Parsing referral WHOIS data')\n\n if inc_raw:\n\n results['raw_referral'] = response_ref\n\n temp_rnet = self.parse_fields(\n response_ref,\n RWHOIS['fields'],\n field_list=field_list\n )\n\n # Add the networks to the return dictionary.\n results['referral'] = temp_rnet\n\n # If inc_raw parameter is True, add the response to return dictionary.\n if inc_raw:\n\n results['raw'] = response\n\n nets = []\n\n if asn_data['asn_registry'] == 'arin':\n\n nets_response = self.get_nets_arin(response)\n\n elif asn_data['asn_registry'] == 'lacnic':\n\n nets_response = self.get_nets_lacnic(response)\n\n else:\n\n nets_response = self.get_nets_other(response)\n\n nets.extend(nets_response)\n\n # Iterate through all of the network sections and parse out the\n # appropriate fields for each.\n log.debug('Parsing WHOIS data')\n for index, net in enumerate(nets):\n\n section_end = None\n if index + 1 < len(nets):\n\n section_end = nets[index + 1]['start']\n\n try:\n\n dt_format = RIR_WHOIS[results['asn_registry']]['dt_format']\n\n except KeyError:\n\n dt_format = None\n\n temp_net = self.parse_fields(\n response,\n RIR_WHOIS[asn_data['asn_registry']]['fields'],\n section_end,\n net['end'],\n dt_format,\n field_list\n )\n\n # Merge the net dictionaries.\n net.update(temp_net)\n\n # The start and end values are no longer needed.\n del net['start'], net['end']\n\n # Add the networks to the return dictionary.\n results['nets'] = nets\n\n return results",
"def rdap_domain_lookup(url: str, http_client: Optional[Any] = None) -> PyWhoIs:\n whois = PyWhoIs._rdap_domain_from_url(url, http_client)\n return whois",
"def get_rdap_info(ip_address, force_update_cache=False):\n print('Retrieving RDAP from', ip_address)\n\n rdap_info = None\n if not force_update_cache:\n rdap_info = get_rdap_info_from_cache(ip_address)\n\n if not rdap_info:\n api_url = f'https://rdap.arin.net/registry/ip/{ip_address}'\n headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json'\n }\n response = requests.get(api_url, headers=headers)\n\n if response.status_code >= 429 and response.status_code < 500:\n alt_api_url = f'https://www.rdap.net/ip/{ip_address}'\n alt_response = requests.get(alt_api_url, headers=headers)\n rdap_info = json.loads(alt_response.content.decode('utf-8'))\n store_rdap_info_in_cache(ip_address, rdap_info)\n return rdap_info\n elif response.status_code == 200:\n rdap_info = json.loads(response.content.decode('utf-8'))\n store_rdap_info_in_cache(ip_address, rdap_info)\n return rdap_info\n else:\n print('Something went wrong obtaing RDAP from', ip_address)\n\n return rdap_info",
"def queue_dns_lookups(ips):\n loop = asyncio.get_event_loop()\n resolver = aiodns.DNSResolver(loop=loop)\n if settings.CUSTOM_DNS_SERVERS and settings.DNS_SERVERS:\n resolver.nameservers = settings.DNS_SERVERS\n queue = asyncio.gather(*(reverse_lookup(resolver, ip) for ip in ips))\n results = loop.run_until_complete(queue)\n return results",
"def resolve(self,\n ns_servers: List[Dict[str, str]] = [{'IPv4 address': '8.8.8.8', 'MAC address': '01:23:45:67:89:0a'}],\n domain: str = 'google.com',\n subdomains_list: List[str] = ['www', 'mail', 'ns', 'test'],\n subdomains_file: Union[None, str] = None,\n subdomains_brute: bool = False,\n max_threats_count: int = 10,\n udp_destination_port: int = 53,\n timeout: int = 30) -> List[Dict[str, str]]:\n\n try:\n\n # region Clear results list\n self.index_of_dns_query = 0\n self.results.clear()\n self.uniq_hosts.clear()\n # endregion\n\n # region Set target domain\n assert not (domain == ''), \\\n 'Target domain is empty, please set target domain in this parameter: ' + self.base.info_text('domain')\n self.domain = domain\n # endregion\n\n # region Subdomains list\n if len(subdomains_list) > 0:\n self.subdomains = subdomains_list\n # endregion\n\n # region Subdomains file\n if subdomains_file is not None:\n assert isfile(subdomains_file), \\\n 'File with subdomain list:' + self.base.error_text(subdomains_file) + ' not found!'\n with open(subdomains_file) as subdomains_file_descriptor:\n for subdomain in subdomains_file_descriptor.read().splitlines():\n self.subdomains.append(subdomain)\n # endregion\n\n # region Subdomains brute\n if subdomains_brute:\n\n if not self.quiet:\n self.base.print_info('Make subdomains list for brute .... ')\n\n for character1 in RawDnsResolver.available_characters:\n self.subdomains.append(character1)\n for character2 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2)\n for character3 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2 + character3)\n # endregion\n\n # region Check length of subdomains list\n assert len(self.subdomains) != 0, \\\n 'List containing subdomains is empty, please set any of this parameters: ' \\\n + self.base.info_text('subdomain_list') + ' or ' \\\n + self.base.info_text('subdomain_file') + ' or ' \\\n + self.base.info_text('subdomain_brute')\n # endregion\n\n # region Create raw socket\n raw_socket: socket = socket(AF_PACKET, SOCK_RAW)\n raw_socket.bind((self.network_interface, 0))\n # endregion\n\n # region Truncate temporary results file\n temporary_results_file = open(RawDnsResolver.temporary_results_filename, 'r+')\n temporary_results_file.truncate()\n temporary_results_file.close()\n # endregion\n\n # region Sniff DNS answers\n if not self.quiet:\n self.base.print_info('Start DNS answers sniffer for domain: ', self.domain)\n\n threats: ThreadManager = ThreadManager(max_threats_count)\n self._sniff_start(self.your_mac_address, self.your_ipv4_address,\n self.your_ipv6_address, udp_destination_port)\n threats.add_task(self._sniff_check)\n # endregion\n\n # region Send DNS queries\n if not self.quiet:\n self.base.print_info('Start sending DNS queries, time: ', str(datetime.now()))\n\n self._send_queries(send_socket=raw_socket,\n source_mac_address=self.your_mac_address,\n source_ipv4_address=self.your_ipv4_address,\n source_ipv6_address=self.your_ipv6_address,\n domain=domain,\n ns_servers=ns_servers,\n destination_port=udp_destination_port,\n max_threats_count=int(max_threats_count) - 1,\n subdomains=self.subdomains)\n # endregion\n\n # region Timeout\n if not self.quiet:\n self.base.print_info('Wait timeout: ', str(timeout) + ' sec')\n sleep(timeout)\n # endregion\n\n # region Return results\n self._sniff_stop()\n if not self.quiet:\n if len(self.results) > 0:\n self.base.print_success('Found ', str(len(self.results)),\n ' subdomains and addresses for 
domain: ', self.domain)\n else:\n self.base.print_error('Not found subdomains in domain: ', self.domain)\n return self.results\n # endregion\n\n except AssertionError as Error:\n self.base.print_error(Error.args[0])\n exit(1)",
"def recursive_dns_lookup(target_name, qtype, root_servers_list):\n\n # Base case\n if not root_servers_list:\n return None\n\n # Create dns query based on the target_name (website)\n # and qtype (queue type: CNAME, A, AAAA, or MX)\n dns_query = dns.message.make_query(target_name, qtype)\n\n for server in root_servers_list:\n # Doing a try catch to check if the dns server times out,\n # if it does then we continue and try another server\n try:\n query_response = dns.query.udp(dns_query, server, 3)\n except dns.exception.Timeout:\n continue\n # If there's an answer in the response\n if query_response.answer:\n # Search through the response.answer for possible answers\n for response_answers in query_response.answer:\n #print(\"response_answers: \", response_answers)\n for response_answer in response_answers:\n #print(\"Response_answer\", response_answer)\n target_name = str(response_answer)[:-1] # Removes the period at the end\n #print(\"Target_name\", target_name)\n # If we don't get the reponse we're after then\n # continue searching through the root_servers\n if response_answer.rdtype != qtype:\n if response_answer.rdtype == 5:\n return recursive_dns_lookup(target_name, qtype, ROOT_SERVERS)\n else:\n # Return the answer we wanted\n return query_response\n else: # If there isn't an answer in the response then we check additional\n\n # If we do have something in additional then get the stuff inside\n if query_response.additional:\n ip_addresses = []\n for response_additional in query_response.additional:\n #print(\"response_additional: \", response_additional)\n # Convert to string then send to function for parsing the address out\n response_additional_str = str(response_additional)\n\n #print(\"function get_address resp:\", resp)\n resp_elements = response_additional_str.split()\n #print(\"function get_address resp_elements:\", resp_elements)\n ip_address = []\n for resp_element in resp_elements:\n #print(\"function get_address resp_element:\", resp_element)\n if resp_element != 'A':\n continue\n else:\n #print(\"function get_address resp_element = A:\", resp_element)\n #print(\"function get_address address:\", resp_elements[-1])\n ip_address.append(resp_elements[-1])\n ip_addresses += ip_address\n\n return recursive_dns_lookup(target_name, qtype, ip_addresses)",
"def _analyze_ips(self, ip_address_list, fuzzable_request):\n bing_wrapper = bing(self._uri_opener)\n \n # This is the best way to search, one by one!\n for ip_address in ip_address_list:\n results = bing_wrapper.get_n_results('ip:' + ip_address,\n self._result_limit)\n\n results = [r.URL.base_url() for r in results]\n results = list(set(results))\n\n # not vuln by default\n is_vulnerable = False\n\n if len(results) > 1:\n # We may have something...\n is_vulnerable = True\n\n if len(results) == 2:\n # Maybe we have this case:\n # [Mon 09 Jun 2008 01:08:26 PM ART] - http://216.244.147.14/\n # [Mon 09 Jun 2008 01:08:26 PM ART] - http://www.business.com/\n # Where www.business.com resolves to 216.244.147.14; so we don't really\n # have more than one domain in the same server.\n try:\n res0 = socket.gethostbyname(results[0].get_domain())\n res1 = socket.gethostbyname(results[1].get_domain())\n except:\n pass\n else:\n if res0 == res1:\n is_vulnerable = False\n\n if is_vulnerable:\n desc = 'The web application under test seems to be in a shared' \\\n ' hosting. This list of domains, and the domain of the ' \\\n ' web application under test, all point to the same IP' \\\n ' address (%s):\\n' % ip_address\n \n domain_list = kb.kb.raw_read(self, 'domains')\n \n for url in results:\n domain = url.get_domain()\n desc += '- %s\\n' % domain\n \n domain_list.append(domain)\n \n kb.kb.raw_write(self, 'domains', domain_list)\n \n v = Vuln.from_fr('Shared hosting', desc, severity.MEDIUM, 1,\n self.get_name(), fuzzable_request)\n\n v['also_in_hosting'] = results\n \n om.out.vulnerability(desc, severity=severity.MEDIUM)\n kb.kb.append(self, 'shared_hosting', v)",
"def _run_query(self):\n self._search_query()\n logger.debug(\"Payload\")\n logger.debug(self._payload)\n _resp = query_public_ip_pool_detail(self._payload)\n logger.debug(_resp)\n _resp = self.load_json(_resp)\n _ret_list = []\n if _resp is None:\n self._record_total = self._record_filtered = 0\n return []\n _ret_list = _resp[\"ret_set\"]\n self._record_filtered = self._record_total = _resp.get(\"total_count\") or 100\n return _ret_list",
"def pingMany(self, ipList):\n results = yield executeNmapForIps(sorted(ipList))\n self.log.info(\"Found %s addresses\", len(results))\n if self.log.isEnabledFor(logging.DEBUG):\n self.log.debug(\n \"Addresses found: %s\", \", \".join(a for a in results)\n )\n defer.returnValue(results.values())",
"def main(self):\n results = []\n for t in Config.APPHOSTS:\n custom_domain = t[\"custom_domain\"]\n heroku_host = t[\"heroku_host\"]\n result = self.ip_update(custom_domain, heroku_host)\n results.append(result)\n return results",
"def resolveOriginalDomains():\n print('[+] Populating Domain Name Resolution for later check ')\n\n try:\n for domain in domains:\n response = dns.resolver.query(domain)\n d = Domain_Poison_Check(domain)\n print('[+] Domain: %s' % domain)\n for record in response:\n print(' |____> maps to %s.' % (record.address))\n d.pushAddr(record)\n check_domain_poison_results.append(d)\n return time.time()\n except Exception as err:\n print('[+] Exception: %s' % err)\n traceback.print_exc()\n return time.time()",
"def _read_dns_(dns, cnt):\r\n \r\n dn_names = None\r\n dn_ids = None\r\n dn_iaps = [None]*10\r\n \r\n for dn in dns.DN:\r\n if dn.ref == 'Name':\r\n dn_names = dn.value\r\n if dn.ref == 'DNId':\r\n dn_ids = dn.value\r\n if dn.ref == 'IAP':\r\n dn_iaps[0] = dn.value\r\n if dn.ref == 'IAP2':\r\n dn_iaps[1] = dn.value\r\n if dn.ref == 'IAP3':\r\n dn_iaps[2] = dn.value\r\n if dn.ref == 'IAP4':\r\n dn_iaps[3] = dn.value\r\n if dn.ref == 'IAP5':\r\n dn_iaps[4] = dn.value\r\n if dn.ref == 'IAP6':\r\n dn_iaps[5] = dn.value\r\n if dn.ref == 'IAP7':\r\n dn_iaps[6] = dn.value\r\n if dn.ref == 'IAP8':\r\n dn_iaps[7] = dn.value\r\n if dn.ref == 'IAP9':\r\n dn_iaps[8] = dn.value\r\n if dn.ref == 'IAP10':\r\n dn_iaps[9] = dn.value\r\n \r\n logger.info('Parsed DN names: %s' % dn_names)\r\n logger.info('Parsed DN ids: %s' % dn_ids)\r\n logger.info('Parsed DN iaps: %s' % dn_iaps)\r\n \r\n for i in range(len(dn_names)):\r\n mydn = Dn()\r\n mydn.set_id(dn_ids[i])\r\n mydn.set_name(dn_names[i])\r\n myiaps = [None]*10\r\n for j in range(10):\r\n myiaps[j] = dn_iaps[j][i]\r\n mydn.set_iaps(myiaps)\r\n cnt.add_dn(mydn)\r\n return cnt",
"def public_ip_dns(resolv, nameservers, rdatatype, server, responsetype):\n for ns in nameservers:\n try:\n answer = resolv.query(ns, rdatatype)\n nameserver = answer[0].to_text()\n except Exception as e:\n print(e)\n continue\n resolve_public_ip(nameserver, server, responsetype)",
"def create_dns_dictionary(self, path_tracefile):\n responses = self.get_dns_responses(path_tracefile)\n dns_dict = dict()\n for response in responses:\n for x in range(response[DNS].ancount): # answer count, how many IP adresses are returned for the query\n try: # answer count could also include 'DNS SRV Resource Record' which does not have a 'rrname' attribute so ancount is wrong if there is such a record -> TODO get amount of DNSRR instead of using ancount\n domain = getattr(response[DNSRR][x], 'rrname').decode(\"utf-8\") # domain (this is returned in bytes so decode)\n ip = getattr(response[DNSRR][x], 'rdata') # IP adres of the domain, TODO make this work for multiple ip adresses for one domain (Test with [0] at end)\n dns_dict[ip] = domain[:-1] #remove last char '.' \n except:\n continue\n return dns_dict",
"def _run_query(self):\n self._search_query()\n logger.debug(\"Payload\")\n logger.debug(self._payload)\n _resp = query_public_ip_pools(self._payload)\n logger.debug(_resp)\n _resp = self.load_json(_resp)\n _ret_list = []\n if _resp is None:\n self._record_total = self._record_filtered = 0\n return []\n _ret_list = _resp[\"ret_set\"]\n self._record_filtered = self._record_total = _resp.get(\"total_count\") or 100\n return _ret_list",
"def gethostbyname(self, hostname, dnsserv='192.112.36.4'):\n ipaddrlist = []\n cnames = []\n temp = []\n if(self.caching):\n rcache = RecordCache(self.ttl)\n rcord = rcache.lookup(hostname, Type.ANY, Class.IN)\n if(rcord):\n for rec in rcord:\n if rec.type_ == Type.A:\n arec = rec.rdata\n ipaddrlist.append(arec.address)\n elif rec.type_ == Type.CNAME:\n crec = rec.rdata\n cnames.append(crec.cname)\n if ipaddrlist:\n return hostname, cnames, ipaddrlist\n elif cnames:\n return self.gethostbyname(cnames[0], dnsserv)\n \n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(self.timeout)\n\n # Create and send query\n question = Question(Name(str(hostname)), Type.A, Class.IN)\n header = Header(9001, 0, 1, 0, 0, 0)\n header.qr = 0\n header.opcode = 0\n header.rd = 1\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (str(dnsserv), 53))\n\n # Receive response\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n print(\"Number of answers: \" +str(len(response.answers)))\n print(\"Number of authorities: \" + str(len(response.authorities)))\n print(\"Number of additionals: \" + str(len(response.additionals)))\n\n # Get data\n aliaslist = cnames\n ipaddrlist = []\n dnslist = []\n \n while response.answers:\n for answer in response.answers:\n if answer.type_ == Type.A:\n print(\"found A RR\")\n if(self.caching):\n rcache.add_record(answer)\n ipaddrlist.append(answer.rdata.address)\n if answer.type_ == Type.CNAME:\n aliaslist.append(answer.rdata.cname)\n if answer.type_ == Type.NS:\n dnslist.append(answer.rdata.nsdname)\n if ipaddrlist:\n return hostname, aliaslist, ipaddrlist\n elif aliaslist:\n question = Question(Name(aliaslist[0]), Type.A, Class.IN)\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n elif dnslist:\n nsname = dnslist.pop()\n maybe_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_dnsserv:\n dnsserv = maybe_dnsserv\n else:\n pass\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n else:\n break\n\n if response.authorities:\n for authority in response.authorities:\n if authority.type_ != Type.NS:\n pass\n dnslist.append(authority.rdata.nsdname)\n while dnslist:\n nsname = dnslist.pop()\n maybe_next_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_next_dnsserv:\n next_dns_serv = maybe_next_dnsserv\n else:\n pass\n (hname, aliasl, ipaddrl) = self.gethostbyname(hostname, nsname)\n if ipaddrl:\n return hname, aliasl, ipaddrl",
"def _run(self, input_file=None):\n\n if input_file is None:\n path_here = os.path.dirname(os.path.realpath(__file__))\n input_file = os.path.join(path_here, 'cdns.txt')\n\n whitelist_rows = []\n\n api_call_count = 0\n api = 'https://api.hackertarget.com/aslookup/?q='\n err_msg = \"Can't look up more than 100 CDNs at a time!\"\n with Session() as session:\n\n cdns = self._get_cdns(input_file)\n assert len(cdns) <= 100, err_msg\n\n for cdn in self._get_cdns(input_file):\n # Try the request a number of times\n max_tries = 3\n for i in range(max_tries):\n assert api_call_count < 100, err_msg\n response = session.get(api + cdn)\n api_call_count += 1\n response.raise_for_status()\n result = response.text\n response.close()\n\n # A good result will have lines that look like:\n # \"132892\",\"CLOUDFLARE Cloudflare, Inc., US\"\n # Anything other than this is an error message\n\n if result[0] == '\"':\n\n # Extract ASN by taking the part before the 1st comma\n # And removing surrounding quotes\n for line in result.split('\\n'):\n asn = line.split(',')[0].replace('\"', '')\n whitelist_rows.append([cdn, asn])\n break\n else:\n # Design choice?: Error or insert nulls into db\n raise RuntimeError((f'Failed to get ASNs for {cdn}'\n f' with error msg: {result}'))\n # Usually rate limit or bad CDN name\n\n utils.rows_to_db(whitelist_rows, self.csv_dir, CDN_Whitelist_Table)",
"def lookup_ips(supporters):\n\n data = []\n timeout = time.monotonic() + StatsView.IP_ADDR_TIMEOUT\n for supporter in supporters:\n row = list(supporter)\n\n reverse = cache.get(supporter[0])\n if not reverse: \n if time.monotonic() < timeout:\n reverse = StatsView.dns_lookup(supporter[0])\n else:\n reverse = None\n\n if reverse:\n cache.set(supporter[0], reverse, 3600)\n row[0] = reverse\n\n data.append(row)\n\n return data",
"def resolve(iteration_count, qid, original_question, qname, sname, slist, sanswers, sauthorities, sadditional):\n ##an iteration count is kept to ensure we're not stuck in an infinite loop, and are actually getting closer to the answer \n iteration_count += 1\n #requirement #2\n if iteration_count > 200:\n raise OutOfTimeException(\"Called resolve too many times, might be stuck in a loop\")\n #if we already know about this domain name, and know its an alias\n # compliant with requirement 4 & 7\n if sname in cnamecache:\n now = int(time())\n new_sname = cnamecache[sname]._cname\n cname_record = RR_CNAME(sname, cnamecache[sname]._expiration - now, new_sname)\n sanswers.append(cname_record)\n return resolve(iteration_count, qid, original_question, qname, new_sname, get_best_ns(nscache, new_sname), sanswers, sauthorities, sadditional)\n #if we know about the a record for this name, requirement 7\n if sname in acache:\n now = int(time())\n ip = acache[sname]._dict.keys()[0].toNetwork()\n exp = acache[sname]._dict.values()[0]._expiration - now\n answer_a_record = RR_A (sname, exp, ip)\n sanswers.append(answer_a_record)\n #adds records we need to keep track of to comply with requirement 6\n sauthorities, sadditional = construct_authorities_for_answers(sanswers)\n our_response = (qid, original_question, sanswers, sauthorities, sadditional)\n return our_response\n\n ns_to_query = pick_from_slist(slist)\n if not ns_to_query:\n logger.log(DEBUG1, \"exhausted list, and couldnt find anything about these servers in our cache, will have to query from root\")\n new_qname = next(slist.iterkeys())\n #we now have to resolve one of these servers as if it were a normal domain query,\n #save the answer, and use it to continue our original query, we should iterate through each server\n # check the return value for a succesful resolution, and carry on.\n #shouldnt need qid nor original question (still refers to old question)\n #essentially calling resolve in this case will cause side-effects that update the cache with\n #the entries we need\n resolve(iteration_count, qid , original_question, new_qname, new_qname, get_best_ns(nscache, new_qname), [], [], [])\n #continue search as before\n return resolve(iteration_count, qid, original_question, qname, sname, get_best_ns(nscache, sname), sanswers, sauthorities, sadditional)\n ##\n (name_server, ipv4) = ns_to_query\n #logger.log(DEBUG2, \"CCache is:\\n{0}\\n\".format(pp.pformat(cnamecache)))\n address = (ipv4,53)\n payload, question = construct_A_query(sname)\n \n logger.log(DEBUG1, \"sending question for A record for {0} to {1} @{2}:\\n{3}\\n\".format(question._dn, name_server, address, hexdump(payload)))\n\n #requirement #8\n cs.sendto(payload, address)\n try:\n (cs_data, cs_address,) = cs.recvfrom(512)\n except timeout:\n #try a different server, requirement 8\n logger.info(\"Timed out, trying someone else\")\n return resolve(iteration_count, qid, original_question, qname, sname, get_best_ns(nscache, sname), sanswers, sauthorities, sadditional) \n \n response = parse_response_payload(cs_data)\n #if is authority, set its records in cache as authoritative\n #also adds records we need to keep track of to comply with requirement 5\n if response[\"header\"]._aa is 1:\n #print \"response {0} from {1} \".format(response, name_server)\n logger.log(DEBUG1, \"{0}\".format( name_server))\n ns_ns_rr = set_authoritative(sname, name_server)\n ns_a_rr = construct_a_rr_from_cache(name_server)\n if ns_ns_rr not in sauthorities:\n sauthorities.append(ns_ns_rr)\n if ns_a_rr not in 
sadditional:\n sadditional.append(ns_a_rr)\n \n load_response_into_cache(response)\n logger.log(DEBUG2, \"Answer received from {0} server is:\\n {1}\".format(name_server, pp.pformat(response)))\n logger.log(DEBUG1, \"*\"*50)\n answer_sec = response[\"answer\"]\n ##if there is an answer in the response, either its a cname or an a record\n #if its an A, we're done, if its a CNAME we're not\n if len(answer_sec) > 0:\n sanswers.append(response[\"answer\"][0])\n logger.log(DEBUG2, \"Sanswers is {0}\".format(pp.pformat(sanswers)))\n ##part of fulfilling requirement 4\n if answer_sec[0]._type is RR.TYPE_CNAME:\n sname = answer_sec[0]._cname\n return resolve(iteration_count, qid, original_question, qname, sname, get_best_ns(nscache, sname), sanswers, sauthorities, sadditional)\n our_response = (qid, original_question, sanswers, sauthorities, sadditional)\n logger.log(DEBUG1, \"&#\"*50+\"\\n\"*3+\"Response:\\n{0}\".format(our_response))\n return our_response \n return resolve(iteration_count, qid, original_question, qname, sname, get_best_ns(nscache, sname), sanswers, sauthorities, sadditional)",
"def _get_IP_addresses(hostname):\n try:\n answers, auth, addit = yield DNSclient.lookupAddress(hostname)\n except Exception as exc: # Too many different DNS failures to catch...\n log.exception('DNS Resolution failure: %r for name: %r', exc, hostname)\n returnValue([])\n\n returnValue(\n [answer.payload.dottedQuad()\n for answer in answers if answer.type == dns.A])",
"def query_records(self, context, rrs):\n records = self.dns_manager.query_records(context, rrs)\n return records",
"def get_dns_records_from_godaddy(self) -> list:\n\n headers = {\"Authorization\": \"sso-key {}:{}\".format(self.api_key, self.secret_key)}\n dns_records = []\n for dns_record in self.dns_records:\n url = \"https://api.godaddy.com/v1/domains/{}/records/{}/{}\".format(dns_record[\"domain\"],\n dns_record[\"dns_record_type\"],\n dns_record[\"name\"])\n dns_records.append(get(url, headers=headers).text)\n return dns_records",
"def ip_lookup(ip):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': ip\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response",
"def _resolve(addresses):\n\n for addr in addresses:\n _, _, ips = socket.gethostbyname_ex(addr)\n for ip in ips:\n yield ip"
] | [
"0.6281244",
"0.598626",
"0.5980119",
"0.59107023",
"0.5785517",
"0.57334334",
"0.56565577",
"0.5582375",
"0.5444342",
"0.54347205",
"0.5251316",
"0.52312225",
"0.51166403",
"0.50961727",
"0.5094858",
"0.5089813",
"0.5072882",
"0.5051348",
"0.5041338",
"0.50093925",
"0.49929392",
"0.49299878",
"0.4919592",
"0.49142748",
"0.48985893",
"0.48666966",
"0.48459235",
"0.48080745",
"0.4800145",
"0.47501963"
] | 0.69207865 | 0 |
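The negatives above all revolve around DNS resolution (batched reverse lookups, recursive resolution from the root servers, record caching). As a point of reference only, a minimal reverse-lookup sketch using just Python's standard library is shown below; it is an illustrative pattern and not part of any record in this dataset.

import socket

def reverse_lookup_many(ips):
    """Best-effort PTR lookups; addresses that do not resolve map to None."""
    results = {}
    for ip in ips:
        try:
            hostname, _aliases, _addresses = socket.gethostbyaddr(ip)
            results[ip] = hostname
        except OSError:  # covers socket.herror / socket.gaierror failures
            results[ip] = None
    return results

print(reverse_lookup_many(["8.8.8.8", "192.0.2.1"]))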
This function takes a filename as input and returns the raw data read from that file in a Pandas DataFrame. The DataFrame index should be the year, month and day of the observation. DataFrame headers should be "agency_cd", "site_no", "Date", "Discharge", "Quality". The "Date" column should be used as the DataFrame index. The pandas read_csv function will automatically replace missing values with np.NaN, but needs help identifying other flags used by the USGS to indicate no data is available. Function returns the completed DataFrame, and a dictionary designed to contain all missing value counts, initialized with the days missing between the first and last date of the file. (A minimal read-and-count sketch follows this record.) | def ReadData( fileName ):
# define column names
colNames = ['agency_cd', 'site_no', 'Date', 'Discharge', 'Quality']
# open and read the file
DataDF = pd.read_csv(fileName, header=1, names=colNames,
delimiter=r"\s+",parse_dates=[2], comment='#',
na_values=['Eqp'])
DataDF = DataDF.set_index('Date')
# quantify the number of missing values
MissingValues = DataDF["Discharge"].isna().sum()
    # Remove invalid (negative) streamflow values; use .loc to avoid chained-assignment issues
    DataDF.loc[DataDF['Discharge'] < 0, 'Discharge'] = np.nan
return( DataDF, MissingValues ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _data_reader(file):\n # Create a dictionary so that filename matches a site name.\n site_dict = {'D05536000': 'NB Niles', 'D05536101': 'NS Channel-Wilmette',\n 'D05536105': 'NB Albany', 'D05536118': 'NB Grand Avenue',\n 'D05536121': 'CH River-Lock', 'D05536123': 'CH River-Columbus',\n 'D05536137': 'CSSC-Western Avenue', 'D05536140': 'CSSC-Stickney',\n 'D05536275': 'Thorn Creek', 'D05536290': 'Little Calument',\n 'D05536340': 'Midlothian Creek', 'D05536343': 'Natalie Creek',\n 'D05536357': 'Grand Calumet', 'D05536500': 'Tinley Creek',\n 'D05536700': 'Calumet-Sag Channel', 'D05536890': 'CSSC-Lemont',\n 'D05536995': 'CSSC-Romeoville'}\n df_raw = pd.read_csv(file)\n df_raw['dateTime'] = pd.to_datetime(df_raw['dateTime'])\n # Creating a dataframe with the data we only need.\n df = df_raw[['dateTime', 'X_00065_00000']]\n df = df.set_index(df_raw['dateTime'])\n\n # Retrieve site information to be used in saved excel filenames.\n site_code = file[-9:]\n site_name = [v for v in site_dict.items() if site_code in v][0]\n site = site_code + '_' + site_name[1].replace(' ', '-')\n\n # Convert index into a datetime index for easier indexing.\n df.index = pd.to_datetime(df.index)\n return df_raw, df, site, site_code",
"def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed'] #NOTE: I changed the column names because .query() would not work when referencing column names with spaces\n global DataDF #added this line to make the dataframe visible in the variable explorer\n global ReplacedValuesDF #added this line to make the dataframe visible in the variable explorer\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\",\"2. Gross Error\",\"3. Swapped\",\"4. Range Fail\"], columns=colNames[1:]) #added additional indexed rows to make adding the values later easier\n \n return( DataDF, ReplacedValuesDF )",
"def _get_df_from_csv(self, filename):\n df = pd.read_csv(filename)\n df.set_index('Date', drop=True, inplace=True)\n df.index = pd.to_datetime(df.index)\n return df",
"def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data",
"def read_from_file_no_check(file_name: str) -> pd.DataFrame:\n return pd.read_csv(file_name)",
"def parse(file_name):\n \n return pd.read_csv(file_name, na_values = '---')",
"def readData(filename):\r\n data_d = {}\r\n with open(filename) as f:\r\n df = pd.read_csv(f, header=0, dtype='str',sep=';')\r\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')]\r\n df_dict = df.to_dict(orient='index')\r\n for i,val in df_dict.iteritems(): \r\n clean_row = [(k, p.proc(v)) for (k, v) in val.iteritems()]\r\n row_id = val['line_nr']\r\n data_d[row_id] = dict(clean_row)\r\n return data_d\r\n return df",
"def read_insitu_gas(cls, full_file_path):\n\n with open(full_file_path, 'r') as f:\n hlines = f.readline().rstrip().split(': ')[1]\n\n df = pd.read_csv(full_file_path, skiprows=int(hlines), skipinitialspace=True,\n delimiter=' ', header=None, names=['site', 'year', 'month', cls._gas_name])\n\n # set datetime index in df (requires 'day' column)\n df['day'] = 1\n df.set_index(pd.to_datetime(df[['year', 'month', 'day']]), inplace=True)\n\n return df",
"def ReadMetrics( fileName ):\n DataDF=pd.read_csv(fileName,header=0,delimiter=',',parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n #print(DataDF.head())\n return( DataDF )",
"def import_data(file):\n df = pd.read_csv(file, parse_dates=True, keep_date_col=True, sep=';')\n df = reduce_mem_usage(df)\n return df",
"def generate_DataFrame(file_path):\n # print (\"Generating DataFrame\")\n __log(1, 'Generating DataFrame....')\n\n df = pd.read_csv(file_path)\n df = df.rename(columns=lambda x: x.strip())\n df = df.dropna()\n\n for i in list(df.keys()):\n df[i] = df[i].apply(cleaning)\n\n # print (\"DataFrame Generated Successfully\")\n __log(1, 'DataFrame Generated Sucessfully.')\n return df",
"def read_file(file):\n return pd.read_csv(file, header=0, index_col=0,\n parse_dates=['Trading_date'],\n infer_datetime_format=True,\n keep_date_col=True)",
"def load_data(fpath: str, station: Dict[str, Any]) -> pd.DataFrame:\n df = pd.read_csv(\n fpath,\n skiprows=station['header_line_num']-1,\n usecols=['date', 'rain'],\n )\n\n # format the date from a string to a proper datetime object\n df['date'] = pd.to_datetime(df['date'])\n\n # extract year, month, week, and day to separate columns\n df['year'] = df['date'].dt.year\n df['month'] = df['date'].dt.month\n df['day'] = df['date'].dt.dayofyear\n df['week'] = df['date'].dt.weekofyear\n df['year_month'] = df['date'].dt.to_period('M')\n\n return df",
"def load(self) -> pd.DataFrame:\n if os.path.exists(self.file_name):\n df = pd.read_csv(self.file_name, index_col=0)\n df = self._clean(df)\n else:\n _LOG.debug(\"No file '%s'\", self.file_name)\n df = pd.DataFrame()\n return df",
"def process_file_pd(file_name):\n try:\n df = pd.read_csv(file_name)\n return df\n except OSError as e:\n print('Error' + str(e))\n raise",
"def create_dataframe_from_dir(directory):\n\n if not os.path.exists(directory):\n return pd.DataFrame()\n\n file_list = os.listdir(directory)\n\n file_list.sort()\n\n df_list = []\n for filename in file_list:\n\n if filename.startswith(\"_\") or (not filename.endswith(\".csv\")):\n continue\n\n # Assert that the file is named correctly\n _, start_date, end_date = check_filename_convention(filename)\n\n df = pd.read_csv(os.path.join(directory, filename))\n df = df.assign(SourceFile=filename)\n\n # In January 2020, MS changed the date format used in the usage\n # export files from US to UK. This happen between 24/01/2020 -\n # 28/01/2020. The following if statement is to deal with this\n # change.\n if start_date is None or end_date is None:\n continue\n\n if start_date > datetime.datetime(2020, 1, 24, 0, 0):\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%d/%m/%Y\"\n )\n except Exception:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%m/%d/%Y\"\n )\n except Exception:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%Y-%m-%d\"\n )\n else:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%m/%d/%Y\"\n )\n except Exception:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%d/%m/%Y\"\n )\n except Exception:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%Y-%m-%d\"\n )\n\n # Check if data comes from EduHub\n if CONST_COL_NAME_HANDOUTNAME in df.columns:\n\n # Renaming HandoutName to SubscriptionName\n df = df.rename(\n columns={CONST_COL_NAME_HANDOUTNAME: CONST_COL_NAME_SNAME}\n )\n\n # Dropping columns CourseName,LabName\n df = df.drop(\n columns=[CONST_COL_NAME_LABNAME, CONST_COL_NAME_COURSENAME]\n )\n\n df_list.append(df)\n\n if len(df_list) == 0:\n return pd.DataFrame()\n\n total_df = pd.concat(df_list, axis=0, ignore_index=True)\n\n return total_df",
"def process_data(filename, skiprow=0):\n df = pd.read_csv(filename, encoding='big5', header=None, skiprows=skiprow)\n # drop 測站\n df.drop(1, axis=1, inplace=True)\n print('Data Loaded, preview:')\n print(df.head())\n\n data = {}\n # group data by date\n for name, ddf in df.groupby(0):\n date = [s.zfill(2) for s in name.split('/')]\n month = date[1]\n\n # drop the date\n ddf.drop(0, axis=1, inplace=True)\n\n # set index as the measure\n ddf.set_index(2, drop=True, inplace=True)\n\n # set column as month-day-hour\n ddf.columns = ['-'.join(date[1:]+[str(i).zfill(2)]) for i in range(24)]\n\n # concatenate\n if month in data:\n data[month] = pd.concat([data[month], ddf], axis=1)\n else:\n data[month] = ddf\n\n # sort the columns by datetime\n for key in data.keys():\n data[key] = data[key][data[key].columns.sort_values()]\n\n print('\\nShow data index:')\n print(data['01'].columns)\n\n return data",
"def read_stats_csv(filename):\n\n df_dict = {}\n df = pd.read_csv(filename, header=[0, 1, 2])\n\n # Check if End column data type is datetime - if so use start date as index, otherwise use file number;\n # Use start date as index - Note: df[\"End\"] is interpreted as a dataframe here not a series as in hdf5\n if df[\"End\"].dtypes.all() == pd.Timestamp:\n # Drop redundant columns\n if \"File Number\" in df.columns:\n df = df.drop(\"File Number\", axis=1, level=0)\n df = df.drop(\"End\", axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"Date\"\n\n # Convert timestamps to datetime\n try:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n except:\n try:\n # Timestamp will likely be in local (UK) format if csv file has been subsequently edited and saved\n df.index = pd.to_datetime(df.index, format=\"%d/%m/%Y %H:%M\")\n except:\n raise\n # Use file number as index\n else:\n df = df.drop([\"Start\", \"End\"], axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"File Number\"\n\n df.columns.rename([\"channels\", \"stats\", \"units\"], inplace=True)\n logger = filename.split(\"Statistics_\")[-1].split(\".\")[0]\n df_dict[logger] = df\n\n return df_dict",
"def extract_df(self, file_path):\n df = pd.read_csv(file_path, sep=\";\")\n df.rename(columns={\"Get\": \"Currency\"}, inplace=True)\n df = df[df[\"Pay\"] == \"Chaos Orb\"]\n df = df[[\"League\", \"Date\", \"Currency\", \"Value\"]]\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n df[\"Date\"] = df[\"Date\"] - df.loc[0][\"Date\"]\n return df",
"def read_csv_file_data(file_path):\n if os.path.exists(file_path):\n df = pandas.read_csv(file_path)\n else:\n raise ValueError('ERROR: file_path doesnt exist in read_csv_file_data()')\n return df",
"def read_data(filepath):\n df = pd.read_csv(filepath)\n return df",
"def read_data(filepath):\n df = pd.read_csv(filepath)\n return df",
"def read_data(filepath):\n df = pd.read_csv(filepath)\n return df",
"def read_csv_to_dataframe(file_name):\n df = pd.read_csv(file_name)\n df = df.drop(['Unnamed: 0'], axis=1)\n return df",
"def _read_data(self, fp):\n names = [\n \"Year\",\n \"Month\",\n \"Day\",\n \"Hour\",\n \"Minute\",\n \"Data Source and Uncertainty Flags\",\n \"Dry Bulb Temperature\",\n \"Dew Point Temperature\",\n \"Relative Humidity\",\n \"Atmospheric Station Pressure\",\n \"Extraterrestrial Horizontal Radiation\",\n \"Extraterrestrial Direct Normal Radiation\",\n \"Horizontal Infrared Radiation Intensity\",\n \"Global Horizontal Radiation\",\n \"Direct Normal Radiation\",\n \"Diffuse Horizontal Radiation\",\n \"Global Horizontal Illuminance\",\n \"Direct Normal Illuminance\",\n \"Diffuse Horizontal Illuminance\",\n \"Zenith Luminance\",\n \"Wind Direction\",\n \"Wind Speed\",\n \"Total Sky Cover\",\n \"Opaque Sky Cover (used if Horizontal IR Intensity missing)\",\n \"Visibility\",\n \"Ceiling Height\",\n \"Present Weather Observation\",\n \"Present Weather Codes\",\n \"Precipitable Water\",\n \"Aerosol Optical Depth\",\n \"Snow Depth\",\n \"Days Since Last Snowfall\",\n \"Albedo\",\n \"Liquid Precipitation Depth\",\n \"Liquid Precipitation Quantity\",\n ]\n\n first_row = self._first_row_with_climate_data(fp)\n df = pd.read_csv(fp, skiprows=first_row, header=None, names=names)\n return df",
"def get_ctffind_4_1_0_meta(file_name: str) -> pd.DataFrame:\n extract_dict: typing.Dict[str, str]\n ctffind_meta_data: pd.DataFrame\n lines: typing.List[str]\n match: typing.Optional[typing.Match[str]]\n non_string_values: typing.Set[str]\n\n extract_dict = get_ctffind_4_1_0_extract_dict()\n ctffind_meta_data = pd.DataFrame(index=[0], columns=extract_dict.keys())\n with open(file_name, 'r') as read:\n lines = read.readlines()\n\n non_string_values = set([\n 'MicrographNameNoDW',\n 'version'\n ])\n for line in lines:\n for key, value in extract_dict.items():\n match = re.match(value, line)\n if match is not None:\n try:\n ctffind_meta_data[key] = float(match.group(1))\n except ValueError:\n assert key in non_string_values, f'{key}: {match.group(1)}'\n ctffind_meta_data[key] = match.group(1)\n else:\n pass\n return ctffind_meta_data",
"def read_csv(filename, cols=None, nrows=None):\n\n datecols = ['date_time', 'srch_ci', 'srch_co']\n dateparser = lambda x: pd.to_datetime(x, format='%Y-%m-%d %H:%M:%S',\n errors='coerce')\n dtypes = {\n 'id': np.uint32,\n 'site_name': np.uint8,\n 'posa_continent': np.uint8,\n 'user_location_country': np.uint16,\n 'user_location_region': np.uint16,\n 'user_location_city': np.uint16,\n 'orig_destination_distance': np.float32,\n 'user_id': np.uint32,\n 'is_mobile': bool,\n 'is_package': bool,\n 'channel': np.uint8,\n 'srch_adults_cnt': np.uint8,\n 'srch_children_cnt': np.uint8,\n 'srch_rm_cnt': np.uint8,\n 'srch_destination_id': np.uint32,\n 'srch_destination_type_id': np.uint8,\n 'is_booking': bool,\n 'cnt': np.uint64,\n 'hotel_continent': np.uint8,\n 'hotel_country': np.uint16,\n 'hotel_market': np.uint16,\n 'hotel_cluster': np.uint8,\n }\n\n df = pd.read_csv(\n filename,\n nrows=nrows,\n usecols=cols,\n dtype=dtypes,\n parse_dates=[col for col in datecols if col in cols],\n date_parser=dateparser,\n )\n\n if 'date_time' in df.columns:\n df['month'] = df['date_time'].dt.month.astype(np.uint8)\n df['year'] = df['date_time'].dt.year.astype(np.uint16)\n\n return df",
"def load_daily_data():\n return pd.read_csv(os.path.join('data', 'raw', 'full_grouped.csv'))",
"def _read_data(filename):\n logger.info('Reading file {}'.format(filename))\n return pd.read_csv(filename)",
"def initialize_from_file(filename):\r\n df = pd.read_csv(filename)\r\n return df"
] | [
"0.6936041",
"0.6728832",
"0.64776355",
"0.64624935",
"0.6454058",
"0.6410114",
"0.6397511",
"0.6382039",
"0.6367535",
"0.6330921",
"0.6298072",
"0.62846416",
"0.6276603",
"0.62696755",
"0.6258542",
"0.6252047",
"0.62465036",
"0.6205528",
"0.62012005",
"0.61507493",
"0.61311454",
"0.61311454",
"0.61311454",
"0.611904",
"0.61077744",
"0.61073536",
"0.60963845",
"0.6050253",
"0.6038397",
"0.602041"
] | 0.7225768 | 0 |
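The ReadData record above describes reading a whitespace-delimited USGS streamflow file and tracking missing values. Below is a minimal, self-contained sketch of that read-and-count pattern, assuming a hypothetical file named "streamflow.txt" with the same layout; the day-gap count illustrates the "days missing between the first and last date" idea mentioned in the description, which the original function does not itself implement.

import numpy as np
import pandas as pd

colNames = ['agency_cd', 'site_no', 'Date', 'Discharge', 'Quality']
# 'Eqp' is a USGS flag for equipment failure; treat it as missing on read
DataDF = pd.read_csv("streamflow.txt", header=1, names=colNames,
                     delimiter=r"\s+", parse_dates=[2], comment='#',
                     na_values=['Eqp'])
DataDF = DataDF.set_index('Date')

# Days with no observation between the first and last date in the file
full_range = pd.date_range(DataDF.index.min(), DataDF.index.max(), freq='D')
days_missing = len(full_range.difference(DataDF.index))

# Flag physically impossible negative flows, then count NaN discharge values
DataDF.loc[DataDF['Discharge'] < 0, 'Discharge'] = np.nan
nan_count = DataDF['Discharge'].isna().sum()
print(days_missing, nan_count)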
This function takes a filename as input and returns a DataFrame with the metrics from the assignment on descriptive statistics and environmental metrics. It works for both annual and monthly metrics. The "Date" column should be used as the index for the new DataFrame. Function returns the completed DataFrame. | def ReadMetrics( fileName ):
    DataDF = pd.read_csv(fileName, header=0, delimiter=',', parse_dates=[0])
DataDF = DataDF.set_index('Date')
#print(DataDF.head())
return( DataDF ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_df_metrics():\n DATA_DIR = 'metrics'\n search_pattern = '*.pkl'\n filename = 'stats'\n\n iteration_results = glob.glob(os.path.join(DATA_DIR, search_pattern))\n aggregated_results = os.path.join(DATA_DIR, filename)\n\n df = load_stats_dataframe(iteration_results, aggregated_results)\n print(f'Dataframe {df}')\n return df",
"def import_year_attn_data(filename):\n\tytd_attn_df = pd.read_csv(filename, usecols=['Student ID',\n\t\t'Current School', 'Attendance Pct'], index_col='Student ID')\n\t# keep only active studenst and drop inactive students\n\tactive = ytd_attn_df['Current School'] == \"HYDE PARK HS\"\n\tytd_attn_df = ytd_attn_df[active]\n\t# drop Current School column\n\tytd_attn_df = ytd_attn_df.drop(labels = \"Current School\", axis=1)\n\tytd_attn_df = ytd_attn_df.rename(index=int, columns={\"Attendance Pct\"\n\t\t: \"ytd_attn\"})\n\tytd_attn_df.index.names = ['ID']\t\t\t\t\n\t\n\treturn ytd_attn_df",
"def create_df(filename):\r\n column_dict = []\r\n for fn in mean_filenames(filename, all_means):\r\n path = STAT_PATH + fn\r\n if os.path.exists(path):\r\n df = pd.read_csv(path)\r\n print(len(df.consumedFoodCount))\r\n # print(df)\r\n # df = df[-180:]\r\n # print(len(df.consumedFoodCount))\r\n df_column_name = fn[len(filename)+1:len(filename)+7]\r\n m, m_min_h, m_plus_h = mean_confidence_interval(df['searchEfficiency'], 0.95)\r\n column_dict.append({\"Mu\": float(fn[len(filename)+4:len(filename)+7]),\r\n \"Average Food Consumed\": np.mean(df['consumedFoodCount']),\r\n \"Average Flight Distance\": np.mean(df['distanceTraversed']),\r\n \"Average Search Efficiency\": m,\r\n \"CI Lower Bound\": m_min_h,\r\n \"CI Upper Bound\": m_plus_h})\r\n return pd.DataFrame(column_dict)",
"def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data",
"def extractColumns(filename,year):\n print('\\t\\textracting: ' + filename)\n df = pd.read_csv(filename,usecols = ['WAGP','ST','CIT','NATIVITY','AGEP','SCHL','ESR','PUMA','DECADE'] )\n df['YEAR'] = year\n # filter out children and old people\n workingAge = (18 <= df['AGEP']) & (df['AGEP'] <= 60)\n df = df[workingAge]\n # filter out people not in labor force\n assert not any(df['ESR'].isnull()), 'ESR Nulls mean <16 yr olds'\n laborforce = df['ESR'] != 6\n df = df[laborforce]\n # Get employed or not, lose the extra info\n df['EMPLOYED'] = (df['ESR'] != 3) # 3 is unemployed\n df.drop('ESR',axis=1, inplace=True)\n # remap CIT so 1: non citizen, 2: naturalized citizen, 3: born citizen\n cit_map = {1:3, 2:3, 3:3, 4:2, 5:1} # See data dict\n df['CIT'] = df['CIT'].replace(cit_map)\n # remap SCHL so 1: less than HS educ, 2: HS equiv, 3: College educ\n schl_map = {i:1 for i in range(1, 16)} # < hs educs\n schl_map.update({i:2 for i in range(16,20)}) # hs equiv\n schl_map.update({i:3 for i in range(20,25)}) # college educ\n df['SCHL'] = df['SCHL'].replace(schl_map)\n return df",
"def read_metrics_as_df(file, old_name, new_name):\r\n df = pandas.read_csv(file)\r\n df=df.rename(index=str,columns={'Value':new_name})\r\n return df",
"def prepare_data(filename='data/DOT_timeSeries.csv'):\n\n # read data file into pandas dataframe\n df = pd.read_csv(filename)\n\n # extract unwanted 'countries' from dataframe\n countries = ['Europe', 'Emerging and Developing Europe', 'Emerging and Developing Asia',\n 'Middle East, North Africa, and Pakistan', 'Export earnings: nonfuel',\n 'Sub-Saharan Africa', 'Export earnings: fuel', 'Western Hemisphere',\n 'World', 'Special Categories', 'Advanced Economies', 'CIS',\n 'Emerging and Developing Economies']\n for country in countries:\n df = extract_relevant_rows(df, column_name='Country Name', column_value=country, not_equal=True)\n df = extract_relevant_rows(df, column_name='Counterpart Country Name', column_value=country, not_equal=True)\n\n # extract exports only from data\n exports = extract_relevant_rows(df, column_name='Indicator Code', column_value='TXG_FOB_USD')\n # extract value attributes only from exports\n export_values = extract_relevant_rows(exports, column_name='Attribute', column_value='Value')\n\n return export_values",
"def abstract_dataframe(filename):\n pmid_ab_dict = medline_parser(filename)\n df = pd.DataFrame.from_dict(pmid_ab_dict, orient='index').reset_index()\n df.columns = ['PMID', 'Abstract']\n \"\"\"\n Parallelized tokenizer and gene pairs functions gene-network analysis.\n returns a dataframe with tokenized abstracts, gene_pairs and labels\n \"\"\"\n # df = parallel_tokenizer(df)\n # df = parallel_genepairs(df)\n \"\"\"create dictionary for networx_work\"\"\"\n df = topic_extraction(df, 'Abstract') # after topic extraction adds labels\n # df.to_csv('with_lda_labels.csv') # uncomment if you want to save the file\n # gene_dict = {entry[0]:entry[1:] for entry in df['gene_pairs'] if entry != None}\n # network_graph(gene_dict) # uncomment if you want to generate a networkx graph\n return df",
"def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed'] #NOTE: I changed the column names because .query() would not work when referencing column names with spaces\n global DataDF #added this line to make the dataframe visible in the variable explorer\n global ReplacedValuesDF #added this line to make the dataframe visible in the variable explorer\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\",\"2. Gross Error\",\"3. Swapped\",\"4. Range Fail\"], columns=colNames[1:]) #added additional indexed rows to make adding the values later easier\n \n return( DataDF, ReplacedValuesDF )",
"def get_df_all_results(self, file):\n # read csv into dataframe\n df = pd.read_csv(file)\n # rename columns\n names = [\"index\", \"samp1\", \"samp2\", \"es\", \"sd1\", \"sd2\", \"k\", \"perm\",\n \"t_test\"]\n df.columns = names\n return df",
"def load_stats_dataframe(files, aggregated_results=None):\n if os.path.exists(aggregated_results) and all(\n [os.path.getmtime(f) < os.path.getmtime(aggregated_results) for f in files]):\n return pd.read_pickle(aggregated_results)\n\n df = pd.DataFrame()\n for f in files:\n tmp_dict = pd.read_pickle(f)\n tmp_dict['emb_size'] = f.split('_')[2]\n tmp_dict['negative_ratio'] = f.split('_')[4]\n tmp_dict['batch_size'] = f.split('_')[6]\n tmp_dict['epochs'] = f.split('_')[8]\n tmp_dict['classification'] = f.split('_')[-1].split('.')[0]\n\n tmp_df = pd.DataFrame.from_dict(tmp_dict)\n df = pd.concat([df, tmp_df])\n\n if aggregated_results:\n df.to_pickle(aggregated_results)\n\n return df",
"def load_annual_accumulation(reanalysis):\n ds = xr.open_dataset(accumulation_period_filepath[reanalysis])\n\n # Modify coordinate names to match other files\n # This will be fixed in a later version\n if reanalysis == 'CFSR':\n print (ds)\n #ds.rename({'row': 'x', 'col': 'y'}, inplace=True)\n\n return ds",
"def create_mode_df(fname, AMO_cutoff_freq):\n\n ds = Dataset(fname, 'r')\n time = ds['time'][:]\n month = (time + 1) % 12\n month[month == 0] += 12\n month = month.compressed().astype(int) # nothing is actually masked\n season = [int(m % 12 + 3)//3 for m in month]\n season_strs = ['DJF', 'MAM', 'JJA', 'SON']\n season_names = [season_strs[counter - 1] for counter in season]\n year = np.floor(1920 + (np.arange(1, len(month) + 1) - 0.5)/12).astype(int)\n\n amo_ts = ds['amo_timeseries_mon'][:]\n\n pdo_ts = ds['pdo_timeseries_mon'][:]\n\n enso_ts = ds['nino34'][:]\n\n # Create version of PDO that is orthogonal to ENSO using Gram-Schmidt method\n pdo_orth = pdo_ts - np.dot(pdo_ts, enso_ts)/np.dot(enso_ts, enso_ts)*enso_ts\n\n # Perform lowpass filter on AMO\n if AMO_cutoff_freq > 0:\n amo_lowpass = lowpass_butter(12, AMO_cutoff_freq, 3, amo_ts)\n else: # no filter\n amo_lowpass = amo_ts\n\n df = pd.DataFrame(columns=['year', 'month', 'season', 'AMO', 'AMO_lowpass', 'PDO', 'ENSO', 'PDO_orth'])\n df = df.assign(year=year, month=month, season=season_names,\n AMO=amo_ts, AMO_lowpass=amo_lowpass, PDO=pdo_ts, ENSO=enso_ts, PDO_orth=pdo_orth)\n\n return df",
"def create_df(filename):\n data = pd.read_csv(filename)\n data = data.dropna(axis='index')\n data['inc_angle'] = np.radians(data['inc_angle'])\n data = data.astype('float64')\n data = data[data['inc_angle'] <= np.deg2rad(80)]\n return data",
"def all_monthly_stats(cc, filename): # pragma: no cover\n output = {}\n stats_monthly_breakdown = compute_monthly_breakdown_stats(cc)\n stats_by_time = compute_stats_by_time(cc)\n stats_by_time.append(stats_monthly_breakdown)\n for d in stats_by_time:\n label = d['timeframe']\n output[label] = d\n with open(filename, 'w') as f:\n json.dump(output, f)",
"def metrics_timeseries(met, col,\r\n only_include_rows=None,\r\n only_include_entries=None,\r\n title='',\r\n ylabel=None,\r\n savename=None,\r\n savecsv=None):\r\n config = PlotConfig()\r\n fig, ax = plt.subplots(figsize=(9,6))\r\n if only_include_rows:\r\n met = met.loc[only_include_rows]\r\n else:\r\n met = met.loc[\"All_AgencyName\"]\r\n nblevels = met.index.nlevels\r\n savedata = []\r\n for name, df in met.groupby(level=list(range(nblevels-1))):\r\n if only_include_entries and name not in only_include_entries:\r\n continue\r\n old_label = name\r\n tdf = df.loc[name, col].drop(['All_Year'], axis=0)\r\n if name == 'Hispanic/Latino':\r\n name = 'Latinx'\r\n ax.plot(tdf.index, tdf.values, 'o-', label=name, color=config.get_color(old_label))\r\n tdf.name = name\r\n savedata.append(tdf)\r\n\r\n metric_names = MetricNames()\r\n ax.legend(bbox_to_anchor=(1,1))\r\n ax.set_ylim(bottom=0)\r\n ax.set_xlabel('Year', fontsize=14)\r\n ax.set_ylabel(ylabel if ylabel else col, fontsize=14)\r\n\r\n ax.set_title(title, fontsize=16)\r\n if 'Rate' in col:\r\n ax.set_yticklabels(['{:.3g}%'.format(y*100) for y in ax.get_yticks()])\r\n plt.tight_layout()\r\n\r\n if savename:\r\n plt.savefig(savename, dpi=300)\r\n plt.close('all')\r\n if savecsv:\r\n save_data = pd.concat(savedata, axis=1)\r\n csv_savename = pathlib.Path(savename)\r\n save_data.to_csv(str(csv_savename.with_suffix('.csv')))\r\n else:\r\n plt.show()",
"def process_data(filename, skiprow=0):\n df = pd.read_csv(filename, encoding='big5', header=None, skiprows=skiprow)\n # drop 測站\n df.drop(1, axis=1, inplace=True)\n print('Data Loaded, preview:')\n print(df.head())\n\n data = {}\n # group data by date\n for name, ddf in df.groupby(0):\n date = [s.zfill(2) for s in name.split('/')]\n month = date[1]\n\n # drop the date\n ddf.drop(0, axis=1, inplace=True)\n\n # set index as the measure\n ddf.set_index(2, drop=True, inplace=True)\n\n # set column as month-day-hour\n ddf.columns = ['-'.join(date[1:]+[str(i).zfill(2)]) for i in range(24)]\n\n # concatenate\n if month in data:\n data[month] = pd.concat([data[month], ddf], axis=1)\n else:\n data[month] = ddf\n\n # sort the columns by datetime\n for key in data.keys():\n data[key] = data[key][data[key].columns.sort_values()]\n\n print('\\nShow data index:')\n print(data['01'].columns)\n\n return data",
"def extract(self):\n \n print('Extracting Metrics data... ',end=''),\n self.df = pd.read_excel(self.file_path, index_col=0)\n print('Done')",
"def get_data(fpath):\n\n visits = ['SC', 'BL', 'V01', 'V02', 'V03', 'V04', 'V05', 'V06', 'V07',\n 'V08', 'V09', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15']\n dtype = dict(PATNO=str,\n EVENT_ID=cdtype(visits, ordered=True))\n\n fname = op.join(fpath, 'DATScan_Analysis.csv')\n data = pd.read_csv(fname, dtype=dtype)\n\n # melt into tidy DataFrame\n data = pd.melt(data.rename(columns=RENAME_COLS),\n id_vars=RENAME_COLS.values(),\n var_name='TEST', value_name='SCORE')\n data = data.dropna(axis=0, subset=['SCORE'])\n data = data.assign(**ASSIGN_COLS)[RETAIN_COLS]\n\n return data",
"def make_stats_df(self):\n columns = ['DATE', 'TEAM', 'teamId', 'R', 'HR', 'RBI', 'SBN', 'OBP', \n 'K', 'QS', 'SV', 'ERA', 'WHIP', 'MOVES', 'CHANGE']\n trimmed_table = self.parse_soup(self.stats)\n self.df_stats = pd.DataFrame(trimmed_table, columns=columns) \n # load season standings csv from file\n try: # if it already exists\n df = pd.read_csv('2016_stats.csv', index_col=0)\n except OSError:\n df = pd.DataFrame(columns=columns) # if it doesn't already exist\n df = df.append(self.df_stats)\n df.to_csv('2016_stats.csv')",
"def parse_2016(year, file):\n with open(file) as file:\n content = file.read()\n # Place, Name, Age, Sex/plc, Sex, Time, Pace, City, State, Bib No\n cols = [\n 'place', 'first_name', 'last_name', 'age', 'sexpl', 'sex',\n 'time', 'pace', 'city', 'state', 'bib'\n ]\n parser = TDParser(columns=cols)\n parser.feed(content)\n return parser.results",
"def merge_summaries(root_dir: str,output_file: str=None) -> pd.DataFrame:\n #\n print (f'Collecting the available summary files in {root_dir}, can take time... please wait.')\n sumfiles = glob.glob(f\"{root_dir}/**/*smry.txt\",recursive=True)\n nsums = len(sumfiles)\n print (f\"Found {nsums} summary files in {root_dir}\")\n #\n # will concatenate all smry.txt files into one temporary file and then will put it in pandas DataFrame and \n # save as CSV\n #\n with tempfile.NamedTemporaryFile(mode='w') as fp:\n for sumfile in tqdm(sumfiles,desc='Collecting the summaries'):\n with open(sumfile,'r') as sfile:\n fp.write(sfile.read())\n #\n # now read as pandas dataframe\n #\n colnames = [\"rev\",\"obsid\",\"expid\",\"mode\",\"filt\",\"tstart\",\"tend\",\"texpo\",\"mvcratio\", # (a rough measure of the ratio of counts in the MnKa versus continuum)\n \"qboxt0\",\"qboxt1\",\"qboxt2\",\"qboxt3\", # x 4 (electronics quadrant box temperatures)\n \"ndisclin_mean0\",\"ndisclin_mean1\",\"ndisclin_mean2\",\"ndisclin_mean3\", #x 4\n \"mipsel0\",\"mipsel1\",\"mipsel2\",\"mipsel3\", #x 4 (parameter for on-board MIP rejection algorithm)\n \"maxmip0\",\"maxmip1\",\"maxmip2\",\"maxmip3\", #x 4 (parameter for on-board MIP rejection algorithm)\n \"ndisclin_med0\",\"ndisclin_med1\",\"ndisclin_med2\",\"ndisclin_med3\", #median x 4\n \"ndisclin_std0\",\"ndisclin_std1\",\"ndisclin_std2\",\"ndisclin_std3\"] #, stddev x 4\n\n df = pd.read_csv(fp.name,delimiter='\\s+',header=None,skip_blank_lines=True,names=colnames)\n #\n # now calculate the time_delta, the difference in years from observation start and 2000-01-01\n #\n stime = [(datetime.strptime(x,\"%Y-%m-%dT%H:%M:%S\")-time0).total_seconds()/(365.0*24.0*3600.0) for x in df.tstart]\n df.insert(6,\"delta_time\",pd.Series(stime,index=df.index))\n #\n print (f'Last observation t={df.delta_time.max():.2f} years')\n if (output_file is not None):\n df.to_csv(output_file)\n fp.close()\n return df",
"def get_stats_data(filename, n=3):\n \n store = pd.HDFStore(filename, 'r')\n full, partial = list(store.keys())\n df_full = store[full]\n df_partial = store[partial]\n store.close()\n\n df_full['Import_flag'] = 'full'\n df_partial['Import_flag'] = 'partial'\n df = pd.concat([df_full, df_partial])\n df['datetime'] = pd.to_datetime(df['date']+' '+df['time'])\n imp = select_last_n_imports(df, n=n)\n df = df[df['import_id'].isin(imp)].reset_index(drop=True)\n return df",
"def main(config_path):\n logging.info(\"Loading raw data files. This may take a few minutes.\")\n config = yaml.load(config_path, yaml.SafeLoader)\n min_year = config[\"min_year\"]\n max_year = config[\"max_year\"]\n\n # The Eviction Lab data came in two files (2014-2016 and 2017-2018) with different formats, so\n # we need to read them in separately and merge them later.\n eviction_df = load_evictions_data(\n config[\"eviction_data_path\"],\n \"EXECUTED_DATE\",\n min_year,\n max_year,\n create_geoid = True\n )\n census_df = load_census_data(config[\"acs_data_path\"])\n\n # Generate the time series data and save output\n logging.info(\"Generating time series data (monthly counts of housing loss events).\")\n\n # Calculating eviction totals separately because number of evictions does not always equal number of rows in the df\n evictions_monthly = aggregate_evictions_using_rate_estimates(\n eviction_df,\n config[\"path_to_eviction_filing_rates\"],\n estimate_var = \"filings\",\n time_group = \"month\"\n )\n evictions_monthly = evictions_monthly[[\"eviction-filings\", \"month\"]].groupby(\"month\").sum().reset_index()\n\n\n # Creating timeseries df and merging on separate eviction counts\n timeseries_df = generate_time_series_df(eviction_df)\n timeseries_df = timeseries_df.merge(evictions_monthly, how=\"outer\")\n timeseries_df.drop_duplicates().to_csv(\n config[\"timeseries_output_csv_path\"], index=False\n )\n logging.info(\n \"Output timeseries CSV saved to %s.\" % config[\"timeseries_output_csv_path\"]\n )\n\n # Process evictions data--get totals/rates across the analysis period & totals/rates\n # by year\n eviction_df = aggregate_evictions_using_rate_estimates(\n eviction_df,\n config[\"path_to_eviction_filing_rates\"],\n estimate_var = \"filings\",\n time_group = \"year\"\n )\n eviction_years_df = create_year_cols_from_df(\n eviction_df,\n [\"evictions\", \"eviction-filings\", \"eviction-rate\"], # \"eviction-filing-rate\"\n {\"evictions\": \"total-evictions\"},\n \"year\",\n \"GEOID\",\n )\n\n eviction_totals = get_totals_across_years(eviction_df, \"GEOID\", \"evictions\").rename(\n columns={\"sum\": \"total-evictions\", \"mean\": \"avg-evictions\"}\n )\n eviction_filing_totals = get_totals_across_years(\n eviction_df, \"GEOID\", \"eviction-filings\",\n ).rename(columns={\"sum\": \"total-eviction-filings\", \"mean\": \"avg-eviction-filings\"})\n\n\n # Join evictions, mortgage, tax, and ACS data together into a single dataframe\n merged = (\n census_df\n .merge(eviction_totals, on=\"GEOID\", how=\"left\")\n .merge(eviction_df[[\"GEOID\"]].dropna().drop_duplicates(),on=\"GEOID\",how=\"left\",)\n .merge(eviction_years_df, on=\"GEOID\", how=\"left\")\n )\n\n merged[\"overall-city-eviction-rate\"] = ((np.sum(merged[\"avg-evictions\"][np.isfinite(merged[\"avg-evictions\"])]))/sum(merged[\"total-renter-occupied-households\"]))*100\n\n merged[\"avg-eviction-rate\"] = (\n merged[\"avg-evictions\"] / merged[\"total-renter-occupied-households\"]\n ) * 100\n\n merged[\"ratio-to-mean-eviction-rate\"] = (\n merged[\"avg-eviction-rate\"] / merged[\"overall-city-eviction-rate\"]\n )\n\n # Add geographic identifier columns\n merged = merged.rename(columns={\"GEOID\": \"census_tract_GEOID\"})\n merged[\"county_GEOID\"] = merged[\"census_tract_GEOID\"].apply(lambda x: x[:5])\n merged.loc[merged[\"county_GEOID\"] == \"36005\", \"county\"] = \"Bronx\"\n merged.loc[merged[\"county_GEOID\"] == \"36047\", \"county\"] = \"Brooklyn\"\n merged.loc[merged[\"county_GEOID\"] == \"36061\", \"county\"] = 
\"Manhattan\"\n merged.loc[merged[\"county_GEOID\"] == \"36081\", \"county\"] = \"Queens\"\n merged.loc[merged[\"county_GEOID\"] == \"36085\", \"county\"] = \"Staten Island\"\n merged[\"state\"] = \"New York\"\n\n\n # Write main output file to CSV\n merged.drop_duplicates().to_csv(config[\"output_csv_path\"], index=False)\n logging.info(\"Output CSV saved to %s.\" % config[\"output_csv_path\"])",
"def __init__(self, file_name: str):\n self.case_metrics = []\n self.cluster_metrics = []\n self.file_name = file_name\n\n self.path_to_pmg_metrics = f'metrics/{file_name}_process_model_graphs'\n self.path_to_pmg_vis = f'visualization/{file_name}_process_model_graphs'\n self.path_to_drifts = 'visualization/drifts'\n self.path_to_case_metrics = 'metrics/case_metrics'\n self.path_to_cluster_metrics = 'metrics/cluster_metrics'\n try:\n makedirs(self.path_to_pmg_metrics, exist_ok=True)\n makedirs(self.path_to_pmg_vis, exist_ok=True)\n makedirs(self.path_to_drifts, exist_ok=True)\n makedirs(self.path_to_case_metrics, exist_ok=True)\n makedirs(self.path_to_cluster_metrics, exist_ok=True)\n\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'case',\n 'graph distance', 'time distance', 'label']) \\\n .to_csv(f'{self.path_to_case_metrics}/{file_name}.csv', index=False)\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'cluster id',\n 'x', 'y', 'radius', 'weight', 'cluster type']) \\\n .to_csv(f'{self.path_to_cluster_metrics}/{file_name}.csv', index=False)\n except Exception as e:\n print(e)",
"def read_stats_csv(filename):\n\n df_dict = {}\n df = pd.read_csv(filename, header=[0, 1, 2])\n\n # Check if End column data type is datetime - if so use start date as index, otherwise use file number;\n # Use start date as index - Note: df[\"End\"] is interpreted as a dataframe here not a series as in hdf5\n if df[\"End\"].dtypes.all() == pd.Timestamp:\n # Drop redundant columns\n if \"File Number\" in df.columns:\n df = df.drop(\"File Number\", axis=1, level=0)\n df = df.drop(\"End\", axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"Date\"\n\n # Convert timestamps to datetime\n try:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n except:\n try:\n # Timestamp will likely be in local (UK) format if csv file has been subsequently edited and saved\n df.index = pd.to_datetime(df.index, format=\"%d/%m/%Y %H:%M\")\n except:\n raise\n # Use file number as index\n else:\n df = df.drop([\"Start\", \"End\"], axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"File Number\"\n\n df.columns.rename([\"channels\", \"stats\", \"units\"], inplace=True)\n logger = filename.split(\"Statistics_\")[-1].split(\".\")[0]\n df_dict[logger] = df\n\n return df_dict",
"def create_dataframe_from_dir(directory):\n\n if not os.path.exists(directory):\n return pd.DataFrame()\n\n file_list = os.listdir(directory)\n\n file_list.sort()\n\n df_list = []\n for filename in file_list:\n\n if filename.startswith(\"_\") or (not filename.endswith(\".csv\")):\n continue\n\n # Assert that the file is named correctly\n _, start_date, end_date = check_filename_convention(filename)\n\n df = pd.read_csv(os.path.join(directory, filename))\n df = df.assign(SourceFile=filename)\n\n # In January 2020, MS changed the date format used in the usage\n # export files from US to UK. This happen between 24/01/2020 -\n # 28/01/2020. The following if statement is to deal with this\n # change.\n if start_date is None or end_date is None:\n continue\n\n if start_date > datetime.datetime(2020, 1, 24, 0, 0):\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%d/%m/%Y\"\n )\n except Exception:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%m/%d/%Y\"\n )\n except Exception:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%Y-%m-%d\"\n )\n else:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%m/%d/%Y\"\n )\n except Exception:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%d/%m/%Y\"\n )\n except Exception:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%Y-%m-%d\"\n )\n\n # Check if data comes from EduHub\n if CONST_COL_NAME_HANDOUTNAME in df.columns:\n\n # Renaming HandoutName to SubscriptionName\n df = df.rename(\n columns={CONST_COL_NAME_HANDOUTNAME: CONST_COL_NAME_SNAME}\n )\n\n # Dropping columns CourseName,LabName\n df = df.drop(\n columns=[CONST_COL_NAME_LABNAME, CONST_COL_NAME_COURSENAME]\n )\n\n df_list.append(df)\n\n if len(df_list) == 0:\n return pd.DataFrame()\n\n total_df = pd.concat(df_list, axis=0, ignore_index=True)\n\n return total_df",
"def read_table03(rawfile):\n df = pd.read_csv(rawfile, skiprows = range(4)+range(55,62), thousands=\",\", na_values=['-'])\n print df\n\n df.dropna(axis=0, how=\"all\", inplace=True)\n #df.dropna(axis=0, subset=df.columns[1:], how=\"all\", inplace=True)\n df.dropna(axis=1, how=\"all\", inplace=True)\n\n df = df.rename(columns = {\"Unnamed: 2\": \"All races Percent\", \"Unnamed: 4\": \"Males Percent\", \"Unnamed: 6\": \"Females Percent\", \"Unnamed: 8\":\"25-34 Percent\", \"25 to 34 years old\":\"25-34\", \"Unnamed: 10\": \"35-54 Percent\", \"35 to 54 years old\":\"35-54\", \"Unnamed: 12\":\">=55 Percent\", \"55 years and older\":\">=55\", \"Unnamed: 14\": \"White Percent\", \"Unnamed: 16\": \"Non-Hispanic White Percent\", \"Unnamed: 18\": \"Black Percent\", \"Unnamed: 20\": \"Asian Percent\", \"Unnamed: 22\": \"Hispanic Percent\", \"Hispanic \\n(of any race)\":\"Hispanic\"})\n\n df.drop(0, inplace=True)\n df[\"Detailed Years of School\"][1] = \"Total\"\n\n df = df.ix[[45,46,47,48], :]\n df.set_index(\"Detailed Years of School\", inplace=True)\n print df\n #for i in range(4):\n #df.iloc[i][:] = df.iloc[i][:].str.replace(r'[$,]', '').astype('float')\n print df.dtypes\n\n return df",
"def process_file_pd(file_name):\n try:\n df = pd.read_csv(file_name)\n return df\n except OSError as e:\n print('Error' + str(e))\n raise",
"def main():\n\n authorize(outb, creds)\n\n string = input(\"Which campaigns do you want to include? >>> \")\n\n while True:\n date_from = input(\"From which date? Use the format 'YYYY-MM-DD please. >>> \")\n try:\n date_from = datetime.datetime.strptime(date_from, \"%Y-%m-%d\") \n # Input string will only be converted if the user gives the correct format...\n break\n except ValueError:\n print(\"Please input the date in the correct format!\")\n # ... else it keeps asking for the correct format.\n\n while True:\n date_to = input(\"To which date? Use the format 'YYYY-MM-DD please. >>> \")\n try:\n date_to = datetime.datetime.strptime(date_to, \"%Y-%m-%d\")\n break\n except ValueError:\n print(\"Please input the date in the correct format!\")\n\n while True:\n breakdown = input(\"What should be the breakdown? Type 'daily' or 'monthly' >>> \")\n if breakdown in (\"daily\", \"monthly\"):\n break\n else:\n print(\"Please input only 'daily' or 'monthly'\")\n\n filename = input(\"What should be the filename? >>> \")\n\n result = outb.get_campaign_performance_per_period(marketer_id, date_from, date_to, breakdown)\n #Get the report object with the given params\n filtered_camp_ids = get_camp_ids_containing_str(marketer_id, string)\n #Filter out campaign IDs containing the given string\n tf = merge(transform_and_filter_result(result, filtered_camp_ids))\n #Transform and merge the filtered results to a dict for pandas\n dataframe = pd.DataFrame(tf, columns=[\n \"campaign_id\",\n \"date_from\",\n \"date_to\",\n \"impressions\",\n \"clicks\",\n \"conversions\",\n \"spend\"\n ])\n dataframe.set_index(\"date_from\", inplace=True)\n final_pivot_df = dataframe.groupby(\"date_from\").sum().reindex([\"impressions\", \"clicks\", \"spend\", \"conversions\"], axis=1)\n #I only need these metrics for my final export, can be changed if necessary\n date_now = datetime.datetime.now().strftime(\"%Y-%m-%d__%H_%M_%S\")\n #For the date in the filename\n writer = pd.ExcelWriter(f\"{filename}_{date_now}.xlsx\")\n #Pandas excel writer object\n final_pivot_df.to_excel(writer, \"Sheet1\")\n #Write the dataframe to excel Sheet 1\n writer.save()\n print(f\"Finished!, your report is saved as {filename}_{date_now}.xlsx\")"
] | [
"0.5778182",
"0.5766087",
"0.56939036",
"0.5688676",
"0.5508023",
"0.54639703",
"0.5436751",
"0.54275894",
"0.54146",
"0.5389455",
"0.5377225",
"0.5363203",
"0.53372586",
"0.52271646",
"0.5185715",
"0.51650095",
"0.5164302",
"0.5162158",
"0.5151916",
"0.515016",
"0.51409566",
"0.51409113",
"0.5136609",
"0.51352084",
"0.5116079",
"0.51137555",
"0.51082414",
"0.5105727",
"0.51048505",
"0.5104205"
] | 0.6417884 | 0 |
This function clips the given time series dataframe to a given range of dates. The function returns the clipped dataframe and the number of missing values. | def ClipData( DataDF, startDate, endDate ): 
    ## Clip the data to the given date range
DataDF=DataDF.loc[startDate:endDate]
## Find the number of missing values
MissingValues=DataDF['Discharge'].isna().sum()
return( DataDF, MissingValues ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clip(df, clip_val_low, clip_val_high):\n clipped_df = df.clip(lower=clip_val_low, upper=clip_val_high)\n return clipped_df",
"def clip(df, lower, upper):\n\n # Pandas' clip-function doesn't allow dicts with bounds for only some\n # columns, so we convert them to Pandas Series which is allowed.\n if isinstance(lower, dict):\n lower = pd.Series(lower)\n if isinstance(upper, dict):\n upper = pd.Series(upper)\n\n # Clip the data between the lower and upper bounds.\n df_clipped = df.clip(lower=lower, upper=upper, axis='columns')\n\n # If the bounds were only for some columns, Pandas' clip has set all\n # other columns to NaN, so we copy those values from the original data.\n df_clipped = df_clipped.fillna(df)\n\n return df_clipped",
"def fake_date_fill(df, back_method: str = 'slice'):\n df_index = df.index.to_series().copy()\n df2 = df.sort_index(ascending=False).copy()\n df2 = df2.apply(lambda x: pd.Series(x.dropna().values))\n df2 = df2.sort_index(ascending=False)\n df2.index = df_index.tail(len(df2.index))\n df2 = df2.dropna(how='all', axis=0)\n if df2.empty:\n df2 = df.fillna(0)\n\n if back_method == 'bfill':\n df2 = fill_forward(df2)\n return df\n elif back_method == 'slice':\n thresh = int(df.shape[1] * 0.5)\n thresh = thresh if thresh > 1 else 1\n df3 = df2.dropna(thresh=thresh, axis=0)\n if df3.empty or df3.shape[0] < 8:\n df3 = fill_forward(df2)\n else:\n df3 = fill_forward(df3)\n return df3\n elif back_method == 'keepna':\n return df2\n else:\n print('back_method not recognized in fake_date_fill')\n return df2",
"def check_missing_data(df, date_from, date_to, ignore_from_today=True):\n\n unique_dates = pd.unique(df.Date)\n\n # Get the first date between date_to and today\n if ignore_from_today:\n date_to = np.min([date_to, pd.to_datetime(datetime.date.today())])\n\n date_range = np.arange(date_from, date_to, dtype=\"datetime64[D]\").astype(\n \"datetime64[ns]\"\n )\n\n if unique_dates.dtype != date_range.dtype:\n raise TypeError(\"df does not contain the correct type\")\n\n mask = list(map(lambda x: x not in unique_dates, date_range))\n\n return date_range[mask]",
"def time_continuity_gaps(data):\n indexes = data.dropna(how='all').index\n resolution = tf._get_data_resolution(indexes)\n # print(resolution)\n continuity = pd.DataFrame({'Date From': indexes.values.flatten()[:-1],\n 'Date To': indexes.values.flatten()[1:]})\n continuity['Days Lost'] = (continuity['Date To'] - continuity['Date From']) / pd.Timedelta('1 days')\n\n # Remove indexes where no days are lost before returning\n filtered = continuity[continuity['Days Lost'] != (tf._get_data_resolution(indexes) / pd.Timedelta('1 days'))]\n\n # where only one timestamp is lost replace 0 by resolution lost.\n filtered['Date From'] = filtered['Date From'] + resolution\n filtered['Date To'] = filtered['Date To'] - resolution\n filtered['Days Lost'] = (filtered['Date To'] - filtered['Date From']) / pd.Timedelta('1 days')\n filtered.replace(0, (tf._get_data_resolution(indexes) / pd.Timedelta('1 days')), inplace=True)\n\n return filtered",
"def filter_dataframe(df, start_date_dt, end_date_dt):\n\n dff = df \n # df[\n # (df[\"timestamp\"].dt.date >= dt.date(start_date_dt.year, start_date_dt.month, start_date_dt.day))\n # & (df[\"timestamp\"].dt.date <= dt.date(end_date_dt.year, end_date_dt.month, end_date_dt.day))\n # ]\n # if (lat_min != -90) or (lat_max != 90):\n # dff = dff[\n # (dff[\"lat\"] >= lat_min)\n # & (dff[\"lat\"] <= lat_max)\n # ]\n # if (lon_min != -90) or (lon_max != 90):\n # dff = dff[\n # (dff[\"lon\"] >= lon_min)\n # & (dff[\"lon\"] <= lon_max)\n # ]\n\n return dff",
"def filter_on_date(self, start, end, dataframe, datecol=\"datetime\"):\n return dataframe.loc[(dataframe[datecol] < end) & (dataframe[datecol] > start)]",
"def test_clip_with_na_args(self, float_frame):\n # GH#17276\n tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)\n tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan), float_frame)\n\n # GH#19992 and adjusted in GH#40420\n df = DataFrame({\"col_0\": [1, 2, 3], \"col_1\": [4, 5, 6], \"col_2\": [7, 8, 9]})\n\n msg = \"Downcasting behavior in Series and DataFrame methods 'where'\"\n # TODO: avoid this warning here? seems like we should never be upcasting\n # in the first place?\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.clip(lower=[4, 5, np.nan], axis=0)\n expected = DataFrame(\n {\"col_0\": [4, 5, 3], \"col_1\": [4, 5, 6], \"col_2\": [7, 8, 9]}\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.clip(lower=[4, 5, np.nan], axis=1)\n expected = DataFrame(\n {\"col_0\": [4, 4, 4], \"col_1\": [5, 5, 6], \"col_2\": [7, 8, 9]}\n )\n tm.assert_frame_equal(result, expected)\n\n # GH#40420\n data = {\"col_0\": [9, -3, 0, -1, 5], \"col_1\": [-2, -7, 6, 8, -5]}\n df = DataFrame(data)\n t = Series([2, -4, np.nan, 6, 3])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.clip(lower=t, axis=0)\n expected = DataFrame({\"col_0\": [9, -3, 0, 6, 5], \"col_1\": [2, -4, 6, 8, 3]})\n tm.assert_frame_equal(result, expected)",
"def get_cut_dataframe(start, end):\n data = get_dataframe(start, end)\n nrow = data.shape[0]\n data.loc[0, 'from'] = start\n data.loc[0, 'delta'] = data.loc[0, 'to'] - start\n if data.loc[nrow-1, 'to'] > end:\n data.loc[nrow-1, 'to'] = end\n data.loc[nrow-1, 'delta'] = end - data.loc[nrow-1, 'from']\n return data",
"def drop_years(dataframe, start, end):\n tmp = dataframe\n tmp = tmp[(start <= tmp['year'].astype(int)) & (tmp['year'].astype(int) <= end)]\n\n return tmp",
"def slice(self, start_date, end_date = None):\n\n if end_date is None:\n end_date = self.series.index[-1]\n self.series = self.series.loc[start_date:end_date]",
"def window_filter(df, date_col, start, end):\n date_format = '%Y%m%d'\n start_date = datetime.strptime(str(start), date_format)\n end_date = datetime.strptime(str(end), date_format)\n return df[(df[date_col] >= start_date) & (df[date_col] <= end_date)]",
"def get_cut_day_dataframe(start, end):\n entries = get_dataframe(start, end)\n data = entries.copy()\n data['datetime'] = data['to'].map(ts2date)\n\n point = arrow.Arrow.range('day', ts2datetime(start), ts2datetime(end))\n point = [x.timestamp for x in point]\n ind = 0\n newRow = 0\n for index, row in entries.iterrows():\n if row['to'] == point[ind]:\n ind += 1\n elif row['from'] < point[ind] < row['to']:\n if index == 0:\n data.loc[0, 'from'] = start\n data.loc[0, 'delta'] = data.loc[0, 'to'] - data.loc[0, 'from']\n elif index == len(entries)-1:\n data.loc[data.index[-1:], 'to'] = end\n data.loc[data.index[-1:], 'delta'] = data.loc[data.index[-1:], 'to'] - data.loc[data.index[-1:], 'from']\n data.loc[data.index[-1:], 'datetime'] = ts2date(data.loc[data.index[-1:], 'from'])\n break\n else:\n row = entries.iloc[[index]].copy()\n row.loc[index, 'delta'] = abs(point[ind]-row.loc[index, 'from'])\n row.loc[index, 'to'] = point[ind]\n row.loc[index, 'datetime'] = ts2date(row.loc[index, 'from'])\n newInd = index + newRow\n data.loc[newInd, 'delta'] = abs(point[ind]-data.loc[newInd, 'to'])\n data.loc[newInd, 'from'] = point[ind]\n data = pd.concat([data[:newInd], row, data[newInd:]])\n data = data.reset_index(drop=True)\n newRow += 1\n ind += 1\n if ind >= len(point):\n break\n data['datetime'] = data['from'].map(ts2datetime)\n return data",
"def cull_missing(df, colname, missingdays):\n df2 = df[[\"binyear\", colname]]\n nancounts = df2.groupby(\"binyear\").agg(lambda x: x.isnull().sum())\n # cull anything with more than 3 days NaN\n df2 = nancounts[nancounts[colname] > missingdays]\n years = []\n if not df2.empty:\n years = list(df2.index.values)\n resdf = df[~df[\"binyear\"].isin(years)]\n minyear = resdf[\"binyear\"].min()\n # Prevent scary cullyears listing\n return resdf, list(filter(lambda x: x > minyear, years))",
"def check_gaps(df):\n df['oldIndex'] = df.index\n df['oldIndex'] = df['oldIndex'].shift(1)\n df['oldClose'] = df['close'].shift(1)\n df['gap'] = (df['open'] - df['oldClose']) * [10000]\n df = df.loc[(df['gap'] > 10) & (df.index.year > 2004) & (df.index.weekday != 6)]\n return df",
"def _apply_filters(self, df):\n df = df[(df['Date'] >= self.start_date) &\n (df['Date'] <= self.end_date)]\n return df",
"def clip(self, lower, upper, **kwargs): # noqa: PR02\n if isinstance(lower, BaseQueryCompiler):\n lower = lower.to_pandas().squeeze(1)\n if isinstance(upper, BaseQueryCompiler):\n upper = upper.to_pandas().squeeze(1)\n return DataFrameDefault.register(pandas.DataFrame.clip)(\n self, lower=lower, upper=upper, **kwargs\n )",
"def calculateMissing(odf):\n df = odf.copy()\n # Calculate last minute of operation for each day in `df`\n df.loc[:, 'time'] = np.nan\n df.loc[:, 'time'] = df.index.astype(np.int64)//10**9 # (to unix timestamp) from nano seconds 10*9 to seconds\n days = df.groupby(df.index.date)['time'].agg(['min', 'max', 'count']) # aggreagate on groupby\n # total number of minutes on the day\n totalminday = (days['max']-days['min'])//60\n # minutes with data by day\n countminday = days['count'] # -1 due count is +1\n missminday = totalminday-countminday\n percmissminday = missminday/totalminday\n\n # print('not working on daemon just on jupyter notebook!!!')\n return np.mean(percmissminday) # average of missing minutes",
"def drop_nan_streaks_above_threshold(df, df_nan_table, thresholds):\n\n # Check for NaN streaks > threshold and drop them from the df\n length = len(df_nan_table['Amount of NaNs'])\n print('df_nan_table length: %s' % length)\n\n indices_to_drop = []\n for i, amount in enumerate(df_nan_table['Amount of NaNs']):\n selected_column = df_nan_table['Column name'][i]\n try:\n if amount > thresholds[selected_column]:\n start_index = (df_nan_table['Start index'][i])\n stop_index = (df_nan_table['Stop index'][i])\n indices = df[start_index:stop_index].index\n print('Enumeration %s of %s | From \\t %s \\t to \\t %s | column %s | NaN streak length: %s'\n % (i, length, start_index, stop_index, selected_column, (len(indices))))\n try:\n indices_to_drop += indices\n except:\n print('Could not add indices to indices_to_drop list')\n else:\n #print('amount < threshold')\n pass\n except:\n #print('No threshold detected for %s' % selected_column)\n pass\n\n print('Dropping NaN streaks > threshold')\n l1 = len(df)\n df = df.drop(indices_to_drop)\n l2 = len(df)\n print('Removed %s rows' % (l1-l2))\n return df",
"def remove_out_of_bounds(self, data, low_bound, high_bound):\n data = data.dropna()\n data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)] \n return data",
"def cut_frame_tail(df):\n # TODO\n return df",
"def year_range(df):\n\n if not isinstance(df, pd.DataFrame):\n print(\"year_range was not passed a pandas DataFrame.\")\n return\n\n df['year_start'] = df['year'].min()\n df['year_end'] = df['year'].max()\n df.drop('year' , axis = 1, inplace = True)\n return df",
"def filter_data_by_date(df, ticker, start_date, end_date):\n if start_date is None:\n start_date = MIN_DATE\n\n if end_date is None:\n end_date = MAX_DATE\n\n filtered = df[\n (df[\"ticker\"] == ticker) & (df[\"date\"] >= start_date) & (df[\"date\"] <= end_date)\n ]\n return filtered",
"def clip(self, *args, **kwargs):\n return _uhd_swig.meta_range_t_clip(self, *args, **kwargs)",
"def crop_amide_one(df):\n df = df[(df[df.columns[0]] < 1706) & (df[df.columns[0]] > 1599)]\n df.reset_index(drop=True, inplace=True)\n return df",
"def clip_kvk_range(self, dataframe, unique_key, kvk_range):\n\n start = kvk_range.start\n stop = kvk_range.stop\n n_before = dataframe.index.size\n\n if start is not None or stop is not None or self.kvk_selection_kvk_key is not None:\n self.logger.info(\"Selecting kvk number from {} to {}\".format(start, stop))\n idx = pd.IndexSlice\n df = dataframe.set_index([KVK_KEY, unique_key])\n\n if self.kvk_selection_kvk_key is not None:\n df = df.loc[idx[self.kvk_selection, :], :]\n\n if start is None:\n df = df.loc[idx[:stop, :], :]\n elif stop is None:\n df = df.loc[idx[start:, :], :]\n else:\n df = df.loc[idx[start:stop, :], :]\n df.reset_index(inplace=True)\n else:\n df = dataframe\n\n n_after = df.index.size\n self.logger.debug(\"Kept {} out of {} records\".format(n_after, n_before))\n\n # check if we have any valid entries in the range\n if n_after == 0:\n self.logger.info(dataframe.info())\n raise ValueError(\"No records found in kvk range {} {} (kvk range: {} -- {})\".format(\n start, stop, dataframe[KVK_KEY].min(), dataframe[KVK_KEY].max()))\n\n return df",
"def compute_daterange(df: pd.DataFrame):\n\n start_date = df[\"Date\"].iloc[0]\n end_date = df[\"Date\"].iloc[-1]\n return pd.date_range(start_date, end_date)",
"def test_003_not_enough_datetimes() -> None:\n df = generate_test_data()\n df = df.head(2)\n skim(df)",
"def fill_price_gaps(\n from_date=dt.datetime(1970,1,1),\n to_date=dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n ):\n #Create a collection of years\n years = []\n cur_year = from_date.year\n while cur_year <= to_date.year:\n years.append(cur_year)\n cur_year += 1\n #Loop each year\n all_year_dates = pd.DataFrame([])\n for year in tqdm(years, total=len(years), desc=\"Loop through years to find dates\"):\n #establish bounding dates\n year_from_date = None if year != from_date.year else from_date\n year_to_date = None if year != to_date.year else to_date\n #Get filtered year dates\n year_dates = create_filtered_year_dates(year, from_date=year_from_date, to_date=year_to_date, )\n #Add to the full list\n all_year_dates = pd.concat([all_year_dates, year_dates])\n #Order the dates (just in case)\n all_year_dates = all_year_dates.sort_values([\"date\"]) \\\n .reset_index(drop=True)\n #Fetch all the tickers\n tickers = sqlaq_to_df(ticker.fetch())\n #Loop through tickers\n errors = []\n run_time = ProcessTime()\n for _,r in tqdm(tickers[[\"id\",\"ticker\"]].iterrows(), total=tickers.shape[0], desc=\"Filling in gaps\"):\n logger.info(f\"Filling gaps in {r.id} -> {r.ticker}\")\n try:\n #Fetch all prices\n dp = sqlaq_to_df(daily_price.fetch(ticker_ids=[r.id]))\n dp[\"date\"] = dp.date.astype(\"datetime64[ns]\")\n #Identify missing dates\n missing_dates = pd.merge(all_year_dates, dp[[\"date\",\"id\"]], on=[\"date\"], how=\"left\")\n #Identify the start date and remove all missing date before that\n start_date = missing_dates[~missing_dates.id.isnull()].date.min()\n missing_dates = missing_dates[missing_dates.date > start_date]\n #Remove all other items which have dates\n missing_dates = missing_dates[missing_dates.id.isnull()]\n #Order remaining dates\n missing_dates = missing_dates.sort_values(\"date\")\n #Create groupings no larger than max_days (in config)\n st_d = None\n date_groups = []\n missing_dates = missing_dates.date.to_list()\n if len(missing_dates):\n for i,d in enumerate(missing_dates):\n if not st_d:\n st_d = d\n else:\n #Append when group gets too big\n if (d - st_d).days > WEB_SCRAPE_MAX_DAYS:\n date_groups.append([st_d, missing_dates[i-1]])\n #Update the start date\n st_d = d\n #Append the last item\n date_groups.append([st_d, d])\n #Scrape the missing prices\n logger.info('Number of webscrapes to perform -> {}'.format(len(date_groups)))\n #For each time frame perform a scrape\n try: #Try loop so as not to miss all following date groups\n for i,dates in enumerate(date_groups):\n logger.info(f\"Running dates {i} -> {dt.datetime.strptime(str(dates[0])[:10], '%Y-%m-%d')} - {dt.datetime.strptime(str(dates[1])[:10], '%Y-%m-%d')}\")\n process_daily_prices(\n r.ticker,\n r.id,\n st_date=dates[0],\n en_date=dates[1],\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e, \"st_date\":dates[0], \"en_dates\":dates[1]})\n #Run an update on th weekly prices\n process_weekly_prices(\n r.id,\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e})\n #Lap\n logger.info(run_time.lap())\n logger.info(run_time.show_latest_lap_time(show_time=True))\n logger.info(f\"GAP FILL RUN TIME - {run_time.end()}\")\n\n logger.info(f'\\nGAP FILL ERROR COUNT -> {len(errors)}')\n if len(errors) > 0:\n logger.info('GAP FILL ERRORS ->')\n for e in errors:\n logger.error(e)",
"def trim_dataframe(self) -> pd.DataFrame:\n self.remove_below_lower_length_limit()\n self.trim_to_upper_length_limit()\n return self.data"
] | [
"0.61024415",
"0.60082376",
"0.5751473",
"0.5737423",
"0.56192535",
"0.5573656",
"0.5535874",
"0.5529859",
"0.5496289",
"0.54508936",
"0.5437007",
"0.54092693",
"0.5381029",
"0.53656447",
"0.53234494",
"0.5321475",
"0.5319992",
"0.5283118",
"0.527784",
"0.5272275",
"0.5264904",
"0.5200671",
"0.5188877",
"0.5139086",
"0.5134573",
"0.5103978",
"0.51005656",
"0.5094947",
"0.5086359",
"0.5069911"
] | 0.6901371 | 0 |
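A minimal usage sketch for the ClipData record above, assuming a pandas DataFrame with a DatetimeIndex and a 'Discharge' column as described in the query; the sample data and the printed counts are illustrative assumptions, not part of the dataset.

import numpy as np
import pandas as pd

def ClipData(DataDF, startDate, endDate):
    # Same logic as the document entry above: clip to the date range, then count missing discharge values.
    DataDF = DataDF.loc[startDate:endDate]
    MissingValues = DataDF['Discharge'].isna().sum()
    return (DataDF, MissingValues)

dates = pd.date_range("2019-01-01", periods=10, freq="D")
DataDF = pd.DataFrame(
    {"Discharge": [1.0, np.nan, 2.0, 3.0, np.nan, 4.0, 5.0, 6.0, np.nan, 7.0]},
    index=dates,
)
clipped, missing = ClipData(DataDF, "2019-01-02", "2019-01-06")
print(len(clipped), missing)  # 5 rows fall in the range, 2 of them are missing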
r""" from vice.yields.ccsne import LC18 unit test | def test_LC18_import():
def test():
try:
from .. import LC18
except:
return False
return True
return ["vice.yields.ccsne.LC18", test] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_T01():",
"def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]",
"def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]",
"def test_T4():",
"def test_T4():",
"def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]",
"def test_kyc_get_legal(self):\n pass",
"def test_T3():",
"def test_T3():",
"def test_T0():",
"def testBeliefs1sk(self):",
"def test_T2():",
"def test_T2():",
"def test_4_4_1_1(self):\n pass",
"def test_anglicize1to19():\n print('Testing anglicize1to19')\n\n result = funcs.anglicize1to19(1)\n introcs.assert_equals(\"one\", result)\n\n result = funcs.anglicize1to19(2)\n introcs.assert_equals(\"two\", result)\n\n result = funcs.anglicize1to19(3)\n introcs.assert_equals(\"three\", result)\n\n result = funcs.anglicize1to19(4)\n introcs.assert_equals(\"four\", result)\n\n result = funcs.anglicize1to19(5)\n introcs.assert_equals(\"five\", result)\n\n result = funcs.anglicize1to19(6)\n introcs.assert_equals(\"six\", result)\n\n result = funcs.anglicize1to19(7)\n introcs.assert_equals(\"seven\", result)\n\n result = funcs.anglicize1to19(8)\n introcs.assert_equals(\"eight\", result)\n\n result = funcs.anglicize1to19(9)\n introcs.assert_equals(\"nine\", result)\n\n result = funcs.anglicize1to19(10)\n introcs.assert_equals(\"ten\", result)\n\n result = funcs.anglicize1to19(11)\n introcs.assert_equals(\"eleven\", result)\n\n result = funcs.anglicize1to19(12)\n introcs.assert_equals(\"twelve\", result)\n\n result = funcs.anglicize1to19(13)\n introcs.assert_equals(\"thirteen\", result)\n\n result = funcs.anglicize1to19(14)\n introcs.assert_equals(\"fourteen\", result)\n\n result = funcs.anglicize1to19(15)\n introcs.assert_equals(\"fifteen\", result)\n\n result = funcs.anglicize1to19(16)\n introcs.assert_equals(\"sixteen\", result)\n\n result = funcs.anglicize1to19(17)\n introcs.assert_equals(\"seventeen\", result)\n\n result = funcs.anglicize1to19(18)\n introcs.assert_equals(\"eighteen\", result)\n\n result = funcs.anglicize1to19(19)\n introcs.assert_equals(\"nineteen\", result)",
"def exercise_b2_113():\r\n pass",
"def exercise_b2_106():\r\n pass",
"def test_T1():",
"def test_T1():",
"def test_manlext(self):\n self.chck_triple('manlext')",
"def test_S16_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import S16 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.S16\", test]",
"def test_clw_fix():\n assert Clw is Cl",
"def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]",
"def test_py_compile_condition(self):\n self._test_py_compile('coin')",
"def test_kyc_get_validation_legal(self):\n pass",
"def test_let(self):",
"def test_01_lighting(self):",
"def test_golay600_codes(self):\r\n for bc in golay600:\r\n corr, num_errs = golay.decode(bc)\r\n self.assertEqual(corr, bc)\r\n self.assertEqual(num_errs, 0)",
"def test_init(self):\n orig = \"TC---\"\n seq = self.SequenceClass(orig)\n self.assertEqual(str(seq), orig)",
"def exercise_b2_82():\r\n pass"
] | [
"0.6867367",
"0.6640538",
"0.6618248",
"0.65071297",
"0.65071297",
"0.6488547",
"0.6460742",
"0.6434713",
"0.6434713",
"0.6430316",
"0.63405323",
"0.6285057",
"0.6285057",
"0.6284323",
"0.61783695",
"0.6151322",
"0.61294484",
"0.61237067",
"0.61237067",
"0.61211914",
"0.61036974",
"0.606727",
"0.6066305",
"0.6050463",
"0.6044654",
"0.6044029",
"0.6033437",
"0.60257035",
"0.59910274",
"0.59892"
] | 0.7449569 | 0 |
r""" from vice.yields.ccsne import CL13 unit test | def test_CL13_import():
def test():
try:
from .. import CL13
except:
return False
return True
return ["vice.yields.ccsne.CL13", test] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_T01():",
"def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]",
"def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]",
"def test_ccds(self):\n #TODO write ccds tests",
"def test_arc_smear(self):",
"def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]",
"def test_py_compile_condition(self):\n self._test_py_compile('coin')",
"def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]",
"def test_T3():",
"def test_T3():",
"def exercise_b2_113():\r\n pass",
"def test_T4():",
"def test_T4():",
"def test_4_4_1_1(self):\n pass",
"def test_S16_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import S16 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.S16\", test]",
"def test_manlext(self):\n self.chck_triple('manlext')",
"def test_cons(self):",
"def testBeliefs1sk(self):",
"def test_clw_fix():\n assert Clw is Cl",
"def exercise_b2_106():\r\n pass",
"def test_check_source_4(self):\n self.src1.organism = \"\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def testCC(self):\n self.assertEqual(\n self.cc,\n self.cd.cc\n )",
"def test_T2():",
"def test_T2():",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def test_proper(self):\n\n self.assertTrue(self.cs.isProper)\n self.assertFalse(self.cs.isDegenerate)",
"def exercise_b2_56():\r\n pass",
"def test_T0():",
"def test_task88c(input_value, expected_value):\r\n assert algo.Task88c.main_logic(input_value) == expected_value",
"def test_c(self):\n source = io.StringIO(\"int x(int a) { return a + 1 ; }\")\n arch = get_current_arch()\n obj = cc(source, arch, debug=True)\n m = load_obj(obj)\n y = m.x(101)\n self.assertEqual(102, y)"
] | [
"0.67553806",
"0.66751456",
"0.6578532",
"0.65704656",
"0.6546233",
"0.64854187",
"0.64723396",
"0.64639574",
"0.64518845",
"0.64518845",
"0.64471114",
"0.64222634",
"0.64222634",
"0.64217126",
"0.6415134",
"0.6362012",
"0.63525367",
"0.6301075",
"0.6270255",
"0.624978",
"0.61432636",
"0.6141626",
"0.6131225",
"0.6131225",
"0.6127617",
"0.6113729",
"0.610108",
"0.6096492",
"0.60954064",
"0.6087507"
] | 0.73629624 | 0 |
r""" from vice.yields.ccsne import CL04 unit test | def test_CL04_import():
def test():
try:
from .. import CL04
except:
return False
return True
return ["vice.yields.ccsne.CL04", test] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]",
"def test_T01():",
"def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]",
"def test_ccds(self):\n #TODO write ccds tests",
"def test_4_4_1_1(self):\n pass",
"def test_T4():",
"def test_T4():",
"def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]",
"def test_py_compile_condition(self):\n self._test_py_compile('coin')",
"def test_S16_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import S16 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.S16\", test]",
"def test_arc_smear(self):",
"def test_T3():",
"def test_T3():",
"def test_cons(self):",
"def testBeliefs1sk(self):",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def test_clw_fix():\n assert Clw is Cl",
"def test_4():",
"def test_c(self):\n source = io.StringIO(\"int x(int a) { return a + 1 ; }\")\n arch = get_current_arch()\n obj = cc(source, arch, debug=True)\n m = load_obj(obj)\n y = m.x(101)\n self.assertEqual(102, y)",
"def testCC(self):\n self.assertEqual(\n self.cc,\n self.cd.cc\n )",
"def test_manlext(self):\n self.chck_triple('manlext')",
"def exercise_b2_113():\r\n pass",
"def test_T0():",
"def test(self):\n pass",
"def testCCHalt(self):\n cdl_convert.config.HALT_ON_ERROR = True\n\n def getCC():\n self.ccr_bad.cc\n\n self.assertRaises(\n ValueError,\n getCC\n )",
"def test_T2():",
"def test_T2():",
"def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]",
"def testCC(self):\n self.assertEqual(\n self.cc,\n self.ccr.cc\n )\n\n self.assertEqual(\n None,\n self.ccr_bad.cc\n )",
"def test_let(self):"
] | [
"0.70824283",
"0.705189",
"0.69436246",
"0.6881322",
"0.67080677",
"0.66985637",
"0.66985637",
"0.66827714",
"0.66267836",
"0.6622007",
"0.65751714",
"0.6500675",
"0.6500675",
"0.64995396",
"0.6381672",
"0.6361119",
"0.6354399",
"0.6348655",
"0.63481206",
"0.6343956",
"0.63423085",
"0.63383925",
"0.63326555",
"0.6300846",
"0.6294664",
"0.6291841",
"0.6291841",
"0.6263458",
"0.62146056",
"0.6203126"
] | 0.7286689 | 0 |
r""" from vice.yields.ccsne import NKT13 unit test | def test_NKT13_import():
def test():
try:
from .. import NKT13
except:
return False
return True
return ["vice.yields.ccsne.NKT13", test] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_T01():",
"def test_T3():",
"def test_T3():",
"def test_T4():",
"def test_T4():",
"def test_generate_nb_testing(self):\n pass",
"def test_T2():",
"def test_T2():",
"def test_arc_smear(self):",
"def testBeliefs1sk(self):",
"def test_manlext(self):\n self.chck_triple('manlext')",
"def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]",
"def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]",
"def test_T0():",
"def test_T1():",
"def test_T1():",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def test_4_4_1_1(self):\n pass",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def test(self):\n pass",
"def test_3():",
"def test_4():",
"def test_let(self):",
"def test_01_visit(self):",
"def test_5():",
"def test(self):"
] | [
"0.72610635",
"0.69505996",
"0.69505996",
"0.6844559",
"0.6844559",
"0.6805739",
"0.67778987",
"0.67778987",
"0.67577595",
"0.6706993",
"0.6687833",
"0.6675569",
"0.6638078",
"0.6631696",
"0.6556386",
"0.6556386",
"0.65030587",
"0.6502402",
"0.649179",
"0.649179",
"0.649179",
"0.649179",
"0.649179",
"0.643896",
"0.64331967",
"0.64244163",
"0.6397008",
"0.6381528",
"0.6372451",
"0.637063"
] | 0.6966552 | 1 |
r""" from vice.yields.ccsne import S16 unit test | def test_S16_import():
def test():
try:
from .. import S16
except:
return False
return True
return ["vice.yields.ccsne.S16", test] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_T01():",
"def test_T4():",
"def test_T4():",
"def test_arc_smear(self):",
"def testBeliefs2sk(self):",
"def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]",
"def test_4_4_1_1(self):\n pass",
"def testBeliefs1sk(self):",
"def test_T0():",
"def test_ccds(self):\n #TODO write ccds tests",
"def test_secant_system(testFunctions, tol, printFlag): \n pass",
"def test_programs():\n yield 4, 4, 1\n yield 16, 12, 2",
"def test_active_inference_SPM_1b(self):",
"def test_T2():",
"def test_T2():",
"def test_odd(self):",
"def exercise_b2_52():\r\n pass",
"def test_4():",
"def test_T3():",
"def test_T3():",
"def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]",
"def CASE208( self, main ):\n\n from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest\n\n SRRoutingTest.runTest( main,\n test_idx=208,\n onosNodes=3,\n dhcp=1,\n routers=1,\n ipv4=0,\n ipv6=1,\n description=\"Test switch failures with IPv6 hosts (including external host configured with route-add command)\",\n checkExternalHost=False,\n countFlowsGroups=False,\n linkFailure=False,\n staticRouteConfigure=True,\n switchFailure=True )",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def test_f2c():\n assert temperatura.f2c(32) == 0",
"def test_secant(testFunctions, tol, printFlag): \n pass",
"def exercise_b2_113():\r\n pass",
"def main() -> None:\n assert smoothie(\"ba\", \"ab\") == \"bbaa\", \"something is wrong\"\n print(\"module test passed \\U0001F44D\")",
"def test_cons(self):",
"def CASE202( self, main ):\n\n from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest\n\n SRRoutingTest.runTest( main,\n test_idx=202,\n onosNodes=3,\n dhcp=1,\n routers=1,\n ipv4=0,\n ipv6=1,\n countFlowsGroups=False,\n linkFailure=False,\n description=\"Test switch failures with IPv6 hosts\",\n switchFailure=True )",
"def test_even(self):"
] | [
"0.6533643",
"0.6363606",
"0.6363606",
"0.6299078",
"0.6157726",
"0.61465865",
"0.61095303",
"0.6099139",
"0.60783273",
"0.60670435",
"0.6055751",
"0.6050182",
"0.6017768",
"0.6004559",
"0.6004559",
"0.5998885",
"0.5972699",
"0.59519553",
"0.5923025",
"0.5923025",
"0.59117323",
"0.58980733",
"0.589188",
"0.5865145",
"0.5851103",
"0.58469516",
"0.5832104",
"0.5828815",
"0.58150196",
"0.58037657"
] | 0.73270357 | 0 |
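The five vice.yields.ccsne records above (LC18, CL13, CL04, NKT13, S16) all repeat the same try/except import check; the helper below is a hedged consolidation sketch using importlib and is an assumption for illustration, not code taken from the dataset.

import importlib

def make_import_test(module_name):
    # Returns [label, test] in the same shape as the dataset entries:
    # test() is True when the module imports cleanly, False otherwise.
    def test():
        try:
            importlib.import_module(module_name)
        except Exception:
            return False
        return True
    return [module_name, test]

label, test = make_import_test("math")
print(label, test())  # math True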
Compute this metric. This applies the underlying single table metric to all the tables found in the dataset and then returns the average score obtained. | def _compute(self, real_data, synthetic_data, metadata=None):
if set(real_data.keys()) != set(synthetic_data.keys()):
raise ValueError('`real_data` and `synthetic_data` must have the same tables')
if metadata is None:
metadata = {'tables': defaultdict(type(None))}
elif not isinstance(metadata, dict):
metadata = metadata.to_dict()
values = []
for table_name, real_table in real_data.items():
synthetic_table = synthetic_data[table_name]
table_meta = metadata['tables'][table_name]
score = self.single_table_metric.compute(real_table, synthetic_table, table_meta)
values.append(score)
return np.nanmean(values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def average(self):\n if self._average is None:\n self._average = sum([df.df for df in self])/len(self)\n return self._average",
"def accuracy(self):\n return (self.table[0, 0] + self.table[1, 1]) / self.N",
"def compute(cls, real_data, synthetic_data, metadata):\n score_breakdowns = cls.compute_breakdown(real_data, synthetic_data, metadata)\n if 'score' in score_breakdowns:\n return score_breakdowns['score']\n\n all_scores = [breakdown['score'] for _, breakdown in score_breakdowns.items()]\n\n return sum(all_scores) / len(all_scores)",
"def evaluate(self):\n self.df['Score'] = self.df[self.review_col].apply(self.analyzer)\n\n return self.df",
"def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted",
"def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted",
"def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0",
"def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))",
"def estimate_metrics(\n self,\n all_labels,\n all_preds\n ):\n n_predictions = len(all_preds)\n\n for metric in self.metrics:\n # report everything but loss\n if metric.__name__ is not \"loss\":\n if isinstance(all_preds[0], list):\n result = np.mean([metric(labels, preds) for preds,labels in zip(all_preds, all_labels)])\n else:\n result = metric(all_labels, all_preds)\n \n if metric.__name__ in self.multi_batch_metrics:\n self.multi_batch_metrics[metric.__name__].append(result)\n self.multi_batch_metrics[\"len_\" + metric.__name__].append(\n n_predictions)\n else:\n self.multi_batch_metrics[metric.__name__] = [result]\n self.multi_batch_metrics[\"len_\" + metric.__name__] = [n_predictions]",
"def compute(self): \r\n hist = self.confusion_matrix\r\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\r\n mean_iu = np.nanmean(iu)\r\n return mean_iu",
"def __call__(self, pred_texture: Image.Image) -> float:\n from plan2scene.evaluation.metric_impl.tileability_mean_metric import compute_mean_tileability\n score = compute_mean_tileability(img=pred_texture, gaus=self.gaus)\n return score",
"def compute_score(self, observation, prediction, verbose=False):\n #print(observation)\n score = TScore.compute( self.observation, prediction )\n print(\"compute_score\")",
"def getAverage(self):\n return sum(self.scores) / len(self.scores)",
"def calculate_dataset_metrics(self):\n pass",
"def _compute_scores(self, triples):\n # compute scores as sum(s * p * o)\n scores = tf.reduce_sum(triples[0] * triples[1] * triples[2], 1)\n return scores",
"def test_accumulation(preds, targets, exact_match, f1):\n squad_metric = SQuAD()\n for pred, target in zip(preds, targets):\n squad_metric.update(preds=[pred], target=[target])\n metrics_score = squad_metric.compute()\n\n _assert_tensor(metrics_score[\"exact_match\"])\n _assert_tensor(metrics_score[\"f1\"])\n _assert_allclose(metrics_score[\"exact_match\"], torch.mean(torch.tensor(exact_match)))\n _assert_allclose(metrics_score[\"f1\"], torch.mean(torch.tensor(f1)))",
"def calculate_metric(self, distance_matrix):\n ap_scores = []\n for node_id in range(len(distance_matrix)):\n sorted_nodes = np.argsort(distance_matrix[node_id]).tolist()\n neighs = self.neighbors[node_id]\n n_correct = 0.0\n precisions = []\n for i in range(1, len(sorted_nodes)):\n if sorted_nodes[i] in neighs:\n n_correct += 1\n precisions.append(n_correct / i)\n if n_correct == len(neighs):\n break\n\n ap_scores.append(np.mean(precisions))\n\n return np.mean(ap_scores)",
"def score(\n self,\n weights: Union[str, List[float]] = None,\n metric=mtr.rmse,\n model: Union[str, int, List[str], List[int]] = None,\n observation: Union[str, int, List[str], List[int]] = None,\n variable: Union[str, int, List[str], List[int]] = None,\n start: Union[str, datetime] = None,\n end: Union[str, datetime] = None,\n area: List[float] = None,\n df: pd.DataFrame = None,\n ) -> float:\n metric = self._parse_metric(metric)\n if not (callable(metric) or isinstance(metric, str)):\n raise ValueError(\"metric must be a string or a function\")\n\n if model is None:\n models = self._mod_names\n else:\n models = [model] if np.isscalar(model) else model\n models = [self._get_mod_name(m) for m in models]\n n_models = len(models)\n\n skill = self.mean_skill(\n weights=weights,\n metrics=[metric],\n model=models,\n observation=observation,\n variable=variable,\n start=start,\n end=end,\n area=area,\n df=df,\n )\n if skill is None:\n return\n\n df = skill.df\n\n if n_models == 1:\n score = df[metric.__name__].values.mean()\n else:\n score = {}\n for model in models:\n mtr_val = df.loc[model][metric.__name__]\n if not np.isscalar(mtr_val):\n # e.g. mean over different variables!\n mtr_val = mtr_val.values.mean()\n score[model] = mtr_val\n\n return score",
"def tables_mean(tables, p=1):\n new_data = np.sum([t.data ** p for t in tables], axis=0)\n new_data = new_data / float(len(tables))\n new_data = new_data ** (1./p)\n return DataTable(new_data, tables[0].dims, tables[0].legends, tables[0].tags)",
"def mean(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n weighted_sum = sum(key * value for key, value in clean.items())\n return weighted_sum / total",
"def tablecost(self):\n subtotal_getter = operator.attrgetter(\"subtotal\")\n\n cost = 0.0\n\n cost += sum(map(subtotal_getter, self.materials))\n cost += sum(map(subtotal_getter, self.processes))\n cost += sum(map(subtotal_getter, self.fasteners))\n cost += sum(map(subtotal_getter, self.toolings))\n\n return cost",
"def performance_metric(y_true, y_pred):\n f1_arr = []\n for i in range(np.shape(y_pred)[1]):\n f1 = f1_score(np.array(y_true)[:, i], y_pred[:, i], average='weighted')\n f1_arr.append(f1)\n \n score = np.mean(f1_arr)\n return score",
"def get_average(self) -> float:\n return sum(self._scores) / len(self._scores)",
"def computeOverallScore(self,m):\n \n def _computeOverallScore(scalars):\n \"\"\"Given a netCDF4 group of scalars, blend them into an overall score\"\"\"\n scores = {}\n variables = [v for v in scalars.variables.keys() if \"Score\" in v and \"Overall\" not in v]\n for region in self.regions:\n overall_score = 0.\n sum_of_weights = 0.\n for v in variables:\n if region not in v: continue\n score = v.replace(region,\"\").strip()\n weight = 1.\n if self.weight.has_key(score): weight = self.weight[score]\n overall_score += weight*scalars.variables[v][...]\n sum_of_weights += weight\n overall_score /= max(sum_of_weights,1e-12)\n scores[\"Overall Score %s\" % region] = overall_score\n return scores\n\n fname = os.path.join(self.output_path,\"%s_%s.nc\" % (self.name,m.name))\n if not os.path.isfile(fname): return\n with Dataset(fname,mode=\"r+\") as dataset:\n datasets = [dataset.groups[grp] for grp in dataset.groups if \"scalars\" not in grp]\n groups = [grp for grp in dataset.groups if \"scalars\" not in grp]\n datasets.append(dataset)\n groups .append(None)\n for dset,grp in zip(datasets,groups):\n if \"scalars\" in dset.groups:\n scalars = dset.groups[\"scalars\"]\n score = _computeOverallScore(scalars)\n for key in score.keys():\n if key in scalars.variables:\n scalars.variables[key][0] = score[key]\n else:\n Variable(data=score[key],name=key,unit=\"1\").toNetCDF4(dataset,group=grp)",
"def get_mean(self):\n average = self.df[self.col_name].mean()\n return average",
"def compute_mean(self, column):\n return np.average(self.df[column], weights=self.df['T'])",
"def compute_metrics(self):\n pass",
"def calculate_score(label_dict, queries, results):\n\n total_score = 0.0\n\n log_header('Individual image scores')\n\n # Calculate score for all images\n for image_id in queries:\n if image_id in results.keys():\n # Run the score function\n image_score = score(\n label_dict=label_dict, target=image_id, selection=results[image_id]\n )\n else:\n logging.error('No result generated for %s' % image_id)\n\n image_score = 0.0\n\n total_score += image_score\n\n logging.info('%s: %8.6f' % (image_id, image_score))\n\n log_header('Average score over %d images: %10.8f' % (\n len(queries), total_score / len(queries)\n ))\n\n return total_score",
"def compute(self) -> Any:\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted",
"def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2"
] | [
"0.5845553",
"0.5827169",
"0.58184224",
"0.57921374",
"0.5787292",
"0.5787292",
"0.57844794",
"0.57567334",
"0.57013696",
"0.57012653",
"0.56799257",
"0.5663485",
"0.5659538",
"0.56120795",
"0.5596597",
"0.558179",
"0.5576306",
"0.55562055",
"0.5548604",
"0.55410093",
"0.5537123",
"0.5521605",
"0.54625094",
"0.545591",
"0.5455657",
"0.54401475",
"0.54370064",
"0.5416528",
"0.5408945",
"0.5400419"
] | 0.68125516 | 0 |
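A self-contained sketch of the per-table averaging pattern in the _compute record above; the RowCountDiff metric and the sample tables are hypothetical stand-ins for the original single_table_metric and real/synthetic data, not part of the source.

import numpy as np
import pandas as pd

class RowCountDiff:
    # Hypothetical single-table metric: relative difference in row counts.
    @staticmethod
    def compute(real_table, synthetic_table, metadata=None):
        return abs(len(real_table) - len(synthetic_table)) / max(len(real_table), 1)

def average_over_tables(real_data, synthetic_data, metric=RowCountDiff):
    # Apply the single-table metric to every table and average, ignoring NaN scores.
    scores = [metric.compute(real_data[name], synthetic_data[name]) for name in real_data]
    return np.nanmean(scores)

real = {"users": pd.DataFrame({"a": range(10)}), "orders": pd.DataFrame({"b": range(4)})}
synth = {"users": pd.DataFrame({"a": range(8)}), "orders": pd.DataFrame({"b": range(4)})}
print(average_over_tables(real, synth))  # (0.2 + 0.0) / 2 = 0.1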
Makes the payment. Responsible for making the request to WorldPay with the gathered session variables and returns to a Studio flow if initiated from there. Currently assumes that all information gathered is correct and payment will be successful | def process_payment():
url = 'https://api.worldpay.com/v1/orders'
headers = {'Authorization': environ.get('WORLDPAY_API_KEY'),
'Content-type': 'application/json'}
body = {
"paymentMethod": {
"type": "Card",
"name": session['caller_name'],
"expiryMonth": session['expiry'][:2],
"expiryYear": f"20{session['expiry'][2:]}",
"cardNumber": session['card_number'],
"cvc": session['cvv'],
"issueNumber": "1"
},
"orderType": "ECOM",
"orderDescription": session['call_sid'],
"amount": session['payment_amount'],
"currencyCode": "GBP"}
r = requests.post(url, headers=headers, data=json.dumps(body))
requests.post(environ.get('END_OF_INTERACTION_URL'), r.text)
response = VoiceResponse()
response.say("Payment processed, goodbye")
# If your flow started in Twilio Studio, redirect back to it to complete the call
# response.redirect(
# 'https://webhooks.twilio.com/v1/Accounts/ACfd0573f9f976b99746c693XXXXXXXXXX/Flows/FWbfdeda0a21644267231d3dXXXXXXXXXX?FlowEvent=return')
return str(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_payment():\n\n response = VoiceResponse()\n if 'caller_name' not in session:\n session['caller_name'] = request.args.get(\n 'caller_name') or \"Twilio Payment\"\n if 'payment_amount' not in session:\n session['payment_amount'] = request.args.get('amount') or \"5000\"\n if 'card_number' not in session:\n response.redirect('/get_card_number')\n elif 'expiry' not in session:\n response.redirect('/get_expiry')\n elif 'cvv' not in session:\n response.redirect('/get_cvv')\n else:\n call_sid = request.form.get('CallSid')\n session['call_sid'] = call_sid\n response.redirect('/process_payment')\n\n return str(response)",
"def proceed_to_checkout_and_payment(self):\r\n # 1- summary\r\n logger.info('starting wizard with summary')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '.cart_navigation a.standard-checkout')))\r\n self.automation.driver.execute_script(\"document.querySelectorAll('.cart_navigation a.standard-checkout')[0]\"\r\n \".click()\")\r\n\r\n # 2-sign in & 3-address\r\n logger.info('2-sign in & 3-address')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, 'button[name=\"processAddress\"]')))\r\n\r\n self.automation.driver.find_element_by_css_selector('button[name=\"processAddress\"]').click()\r\n\r\n # 4- shipping\r\n logger.info('4- shipping')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#uniform-cgv span')))\r\n\r\n is_checked = self.automation.driver.find_element_by_css_selector('#uniform-cgv span').get_attribute('class')\r\n if not is_checked: # agree\r\n self.automation.driver.execute_script(\"document.querySelectorAll('#cgv')[0].click()\")\r\n\r\n self.automation.driver.find_element_by_css_selector('button[name=processCarrier]').click()\r\n logger.info('agree and confirmed')\r\n\r\n # pay by bank wire\r\n logger.info('pay by bank wire')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '.payment_module a')))\r\n\r\n self.automation.driver.find_element_by_css_selector('.payment_module a').click()\r\n\r\n # 5- payment and confirm\r\n logger.info('5- payment and confirm')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#cart_navigation button')))\r\n self.automation.driver.find_element_by_css_selector('#cart_navigation button').click()\r\n\r\n # back to orders\r\n logger.info('back to orders')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, 'p.cart_navigation .button-exclusive.btn')))\r\n self.automation.driver.find_element_by_css_selector('p.cart_navigation .button-exclusive.btn').click()\r\n\r\n # how many items do you have\r\n time.sleep(1.5)\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#order-list tbody tr')))\r\n items = self.automation.driver.find_elements_by_css_selector('#order-list tbody tr')\r\n logger.info(f'You have \"{len(items)}\" at your order')",
"def payReturn(request, *args, **kwargs):\n initParam = {}\n pay_key = request.session.get('pay_key', None)\n gateway = request.session.get('gateway', None)\n if pay_key and gateway:\n del request.session['pay_key']\n del request.session['gateway']\n #Check and get Transaction information\n checkMethod = kwargs.pop('checkMethod', None)\n if checkMethod:\n initParam['pay_key'] = pay_key\n initParam['gateway'] = gateway\n transaction = checkMethod(request, initParam=initParam)\n if transaction:\n p = driver.PayPal()\n #Check whether use has paid successfully.\n result = p.check_ap_payment_status(transaction.pay_key)\n if result['status'][0] == 'COMPLETED':\n #Do something after user payed successfully.\n executeMethod = kwargs.pop('executeMethod', None)\n if executeMethod:\n initParam['transaction_id'] = transaction.id\n initParam['buyer_account'] = result['senderEmail'][0]\n if executeMethod(initParam=initParam):\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if back_page:\n del request.session['back_page']\n if success_page:\n del request.session['success_page']\n initParam['success_page'] = success_page\n initParam['success_page_msg'] = request.session['success_page_msg']\n #For the value in paypal_success.html\n initParam['app'] = transaction.app\n initParam['price'] = transaction.price\n initParam['type'] = 'Transaction'\n initParam['msg'] = _('You have successfully paid the money. We have already sent an email to the app seller. In the meanwhile you can send private message to seller as well.')\n log.info(_('User %(param1)s has paid with transaction id %(param2)s.')\n % {'param1': request.user.username, 'param2': transaction.id})\n return render_to_response(\"payment/paypal_success.html\", initParam, context_instance=RequestContext(request))\n else:\n log.error(_('User %(param1)s has paid with transaction id %(param2)s, but execute method %(param3)s failed.')\n % {'param1': request.user.username, 'param2': transaction.id, 'param3': executeMethod.__name__})\n else:\n log.error(_('User %(param1)s has paid with transaction id %(param2)s, but ExecuteMethod does not exist.')\n % {'param1': request.user.username, 'param2': transaction.id})\n else:\n log.error(_('User %(param1)s has no paid with transaction id %(param2)s.')\n % {'param1': request.user.username, 'param2': transaction.id})\n else:\n log.error(_('PayKey %(param1)s, Gateway: %(param2)s, User: %(param3)s, Execute method %(param4)s failed.')\n % {'param1': pay_key, 'param2': gateway, 'param3': request.user.username, 'param4': checkMethod.__name__})\n else:\n log.error(_('PayKey %(param1)s, Gateway: %(param2)s, CheckMethod does not exist.')\n % {'param1': pay_key, 'param2': gateway})\n else:\n log.error(_('Pay. PayKey or Gateway no exists.'))\n\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if success_page:\n del request.session['success_page']\n if back_page:\n del request.session['back_page']\n error_msg = driver.GENERIC_PAYPAL_ERROR\n page_msg = request.session['back_page_msg']\n return render_to_response('payment/paypal_cancel.html',\n {'error_msg': error_msg, 'back_page': back_page, 'back_page_msg': page_msg}, context_instance=RequestContext(request))\n else:\n error_msg = _('%(param1)s Please transaction again.') % {'param1': driver.GENERIC_PAYPAL_ERROR}\n return render_to_response('payment/paypal_error.html',\n {\"error_msg\": error_msg}, context_instance=RequestContext(request))",
"def payment(self, **post):\n cr, uid, context = request.cr, request.uid, request.context\n payment_obj = request.registry.get('payment.acquirer')\n sale_order_obj = request.registry.get('sale.order')\n\n order = request.website.sale_get_order(context=context)\n order.write({'usersess': request.session['webcalc_session_id']})\n #order.env.cr.commit()\n redirection = self.checkout_redirection(order)\n if redirection:\n return redirection\n\n shipping_partner_id = False\n if order:\n if order.partner_shipping_id.id:\n shipping_partner_id = order.partner_shipping_id.id\n else:\n shipping_partner_id = order.partner_invoice_id.id\n\n values = {\n 'order': request.registry['sale.order'].browse(cr, SUPERUSER_ID, order.id, context=context),\n 'usersess': request.session['webcalc_session_id']\n }\n values['errors'] = sale_order_obj._get_errors(cr, uid, order, context=context)\n values.update(sale_order_obj._get_website_data(cr, uid, order, context))\n\n if not values['errors']:\n acquirer_ids = payment_obj.search(cr, SUPERUSER_ID, [('website_published', '=', True), ('company_id', '=', order.company_id.id)], context=context)\n values['acquirers'] = list(payment_obj.browse(cr, uid, acquirer_ids, context=context))\n render_ctx = dict(context, submit_class='btn btn-primary', submit_txt=_('Завершить оформление'))\n for acquirer in values['acquirers']:\n acquirer.button = payment_obj.render(\n cr, SUPERUSER_ID, acquirer.id,\n '/',\n order.amount_total,\n order.pricelist_id.currency_id.id,\n partner_id=shipping_partner_id,\n tx_values={\n 'return_url': '/shop/payment/validate',\n },\n context=render_ctx)\n #vips_shop\n return request.website.render(\"vips_shop.payment\", values)",
"def post(self):\n \n access_token = accessToken.gerated_access_token\n api_url = \"https://sandbox.safaricom.co.ke/mpesa/stkpush/v1/processrequest\"\n headers = { \"Authorization\": \"Bearer %s\" % access_token }\n request = {\n \"BusinessShortCode\": constants.BusinessShortCode ,\n \"Password\": generated_password,\n \"Timestamp\": generated_timestamp,\n \"TransactionType\": \"CustomerPayBillOnline\",\n \"Amount\": \"1\",\n \"PartyA\": \"254705275702\",\n \"PartyB\": constants.BusinessShortCode,\n \"PhoneNumber\": \"\", #pass in the phone number that will be prompted to enter the pin\n \"CallBackURL\": \"https://test.com\", #pass in an actual callback url if you have one\n \"AccountReference\": \"Test100\",\n \"TransactionDesc\": \"Test payment\"\n }\n \n response = requests.post(api_url, json = request, headers=headers)\n # print (response.text)\n\n return {\"response\":response.json()}",
"def awaiting_payment(self):",
"def __capture_payment(self, response):\n order_cls = get_order_class()\n self.order = order_cls.get_by_payment_details(\n {'token': response['TOKEN']}\n )\n if self.order is None or self.order.state is not OrderStates.created:\n return redirect(url_for('payment.error_payment',\n payment_method=self.method_name))\n\n request_params = {\n 'METHOD': DO_PAYMENT,\n 'TOKEN': response['TOKEN'],\n 'PAYERID': response['PAYERID'],\n 'PAYMENTREQUEST_0_AMT': self.order.total_price,\n 'PAYMENTREQUEST_0_PAYMENTACTION': ACTION,\n 'PAYMENTREQUEST_0_CURRENCYCODE': CURRENCY,\n }\n\n response = self.__do_request(request_params)\n if response['ACK'] == RESPONSE_OK:\n self.order.set_payment_details(token=unicode(response))\n self.order.mark_paid()\n\n return redirect(url_for('payment.success_payment',\n payment_method=self.method_name))\n\n return redirect(url_for('payment.error_payment',\n payment_method=self.method_name,\n order_id=self.order.id))",
"def post(self, payment_id=None):\n data = request.get_json()\n redirect_url = data.get('redirect_url')\n cart_token = data.get('cart_token')\n address_id = data.get('address_id')\n \n cart = Cart.query.filter_by(token=cart_token, user_id=current_user.id).first()\n if not cart:\n return {\"message\":\"No cart with this id\"}, 404\n\n if not address_id:\n return {\"message\": \"Please enter a address for your order\"}, 404\n\n order = Order.create_from_cart(cart_token, address_id)\n payment = Payment.query.filter_by(order_id=order.id).first()\n if not payment:\n payment = Payment(\n user_id=current_user.id, \n order_id=order.id, \n amount=order.total,\n status='Pending'\n )\n\n db.session.add(payment)\n db.session.commit()\n\n client = Client(current_app.config['ZARINPAL_WEBSERVICE'])\n mail = current_user._email\n\n if not mail:\n return {\"message\": \"Please enter your email address to continue the payment\"}\n\n user_info = UserAddress.query.filter_by(id=address_id).first()\n if user_info.phone:\n mobile = user_info.phone\n else:\n mobile = '' \n\n result = client.service.PaymentRequest(current_app.config['MERCHANT_ID'],\n payment.amount,\n 'nani',\n mail,\n mobile,\n redirect_url)\n\n payment.authority = result.Authority\n db.session.commit()\n if result.Status == 100:\n return {'payment_url':'https://www.zarinpal.com/pg/StartPay/' + result.Authority}\n else:\n return {\n 'message':\"We can't connect you to zarin pal server, right now. Please try again in a few moments.\"\n }, 404",
"def payPalDoCheckOut(request, *args, **kwargs):\n initParam = {}\n id = request.GET.get(\"id\")\n token = request.GET.get(\"token\")\n payerID = request.GET.get(\"PayerID\")\n initParam['id'] = id\n initParam['token'] = token\n if token and payerID and id:\n #Check and get Service detail information\n checkMethod = kwargs.pop('checkMethod', None)\n if checkMethod:\n gateway = request.session.get('gateway', None)\n if gateway:\n del request.session['gateway']\n initParam['gateway'] = gateway\n serviceDetail = checkMethod(request, initParam=initParam)\n if serviceDetail:\n amount = serviceDetail.actual_amount\n currency = serviceDetail.app.currency.currency\n result, response = utils.process_payment_request(amount, currency, token, payerID)\n if result:\n #Do something after payment success.\n executeMethod = kwargs.pop('executeMethod', None)\n if executeMethod:\n initParam['serviceDetail_id'] = serviceDetail.id\n if executeMethod(request, initParam=initParam):\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if back_page:\n del request.session['back_page']\n if success_page:\n del request.session['success_page']\n initParam['success_page'] = success_page\n initParam['success_page_msg'] = request.session['success_page_msg']\n #For the value in paypal_success.html\n initParam['app'] = serviceDetail.app\n initParam['type'] = 'Payment'\n initParam['price'] = serviceDetail.actual_amount\n initParam['msg'] = _('Thank you for your payment, and your app will be listed according to the effective period your choosed.')\n log.info(_('Seller %(param1)s has paid service fee with service detail id %(param2)s.')\n % {'param1': request.user.username, 'param2': serviceDetail.id})\n return render_to_response(\"payment/paypal_success.html\", initParam, context_instance=RequestContext(request))\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, Execute method %(param3)s failed.')\n % {'param1': token, 'param2': payerID, 'param3': executeMethod.__name__})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, ExecuteMethod does not exist.')\n % {'param1': token, 'param2': payerID})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, %(param3)s : %(param4)s.')\n % {'param1': token, 'param2': payerID, 'param3': response.error, 'param4': response.error_msg})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, User: %(param3)s, Execute method %(param4)s failed.')\n % {'param1': token, 'param2': payerID, 'param3': request.user.username, 'param4': checkMethod.__name__})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, Gateway no exists in request.session.')\n % {'param1': token, 'param2': payerID})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, CheckMethod does not exist.')\n % {'param1': token, 'param2': payerID})\n else:\n log.error(_('Token or PayerID no exists.'))\n\n if request.session.get('gateway', None):\n del request.session['gateway']\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if success_page:\n del request.session['success_page']\n if back_page:\n del request.session['back_page']\n error_msg = driver.GENERIC_PAYPAL_ERROR\n page_msg = request.session['back_page_msg']\n return render_to_response('payment/paypal_cancel.html',\n {'error_msg': error_msg, 'back_page': back_page, 'back_page_msg': page_msg}, context_instance=RequestContext(request))\n else:\n error_msg = _('%(param1)s Please payment again.') % 
{'param1': driver.GENERIC_PAYPAL_ERROR}\n return render_to_response('payment/paypal_error.html',\n {\"error_msg\": error_msg}, context_instance=RequestContext(request))",
"def request_payment(request, tx):\n svc = get_service_instance()\n MTN_MOMO_API_URL = getattr(settings, 'MTN_MOMO_API_URL', 'https://developer.mtn.cm/OnlineMomoWeb/faces/transaction/transactionRequest.xhtml')\n username = request.user.username if request.user.is_authenticated() else '<Anonymous>'\n if getattr(settings, 'DEBUG_MOMO', False):\n amount = 100\n else:\n amount = int(tx.amount)\n data = {'idbouton': 2, 'typebouton': 'PAIE', 'submit.x': 104, 'submit.y': 70,\n '_cIP': '', '_amount': amount, '_tel': tx.phone}\n cashout_url = MTN_MOMO_API_URL\n momo_after_checkout = import_by_path(tx.callback)\n if getattr(settings, 'UNIT_TESTING', False):\n tx.processor_tx_id = 'tx_1'\n tx.task_id = 'task_1'\n tx.message = 'Success'\n tx.is_running = False\n tx.status = MoMoTransaction.SUCCESS\n request.session['next_url'] = 'http://nextUrl'\n momo_after_checkout(request, transaction=tx, signature=request.session['signature'])\n elif getattr(settings, 'DEBUG', False):\n mtn_momo = json.loads(PaymentMean.objects.get(slug=MTN_MOMO).credentials)\n data.update({'_email': mtn_momo['merchant_email']})\n r = requests.get(cashout_url, params=data, verify=False, timeout=300)\n resp = r.json()\n tx.task_id = resp['ProcessingNumber']\n if resp['StatusCode'] == '01':\n logger.debug(\"%s - Successful MoMo payment of %dF from %s: %s\" % (svc.project_name, amount, username, tx.phone))\n tx.processor_tx_id = resp['TransactionID']\n tx.message = resp['StatusDesc']\n tx.is_running = False\n tx.status = MoMoTransaction.SUCCESS\n momo_after_checkout(request, transaction=tx, signature=request.session['signature'])\n elif resp['StatusCode'] == '1000' and resp['StatusDesc'] == 'Pending':\n # Don't do anything here. Listen and process transaction on the callback URL view\n pass\n else:\n tx.status = MoMoTransaction.API_ERROR\n else:\n try:\n mtn_momo = json.loads(PaymentMean.objects.get(slug=MTN_MOMO).credentials)\n except:\n return HttpResponse(\"%s - Error, Could not parse MoMo API parameters.\" % svc.project_name)\n try:\n username = request.user.username if request.user.is_authenticated() else '<Anonymous>'\n data.update({'_email': mtn_momo['merchant_email']})\n logger.debug(\"MTN MoMo: Initiating payment of %dF from %s: %s\" % (amount, username, tx.phone))\n r = requests.get(cashout_url, params=data, verify=False, timeout=300)\n tx.is_running = False\n resp = r.json()\n tx.task_id = resp['ProcessingNumber']\n if resp['StatusCode'] == '01':\n logger.debug(\"%s - MTN MoMo: Successful payment of %dF from %s: %s\" % (svc.project_name, amount, username, tx.phone))\n tx.processor_tx_id = resp['TransactionID']\n tx.message = resp['StatusDesc']\n tx.is_running = False\n tx.status = MoMoTransaction.SUCCESS\n tx.save(using='wallets')\n if getattr(settings, 'DEBUG', False):\n momo_after_checkout(request, transaction=tx, signature=request.session['signature'])\n else:\n with transaction.atomic(using='wallets'):\n try:\n momo_after_checkout(request, transaction=tx, signature=request.session['signature'])\n tx.message = 'OK'\n except:\n tx.message = traceback.format_exc()\n tx.save(using='wallets')\n logger.error(\"%s - MTN MoMo: Failure while running callback. User: %s, Amt: %d\" % (svc.project_name, request.user.username, int(request.session['amount'])), exc_info=True)\n elif resp['StatusCode'] == '1000' and resp['StatusDesc'] == 'Pending':\n # Don't do anything here. 
Listen and process transaction on the callback URL view\n logger.debug(\"%s - MTN MoMo: RequestPayment completed with ProcessingNumber %s\" % (svc.project_name, tx.task_id))\n else:\n logger.error(\"%s - MTN MoMo: Transaction of %dF from %s: %s failed with message %s\" % (svc.project_name, amount, username, tx.phone, resp['StatusDesc']))\n tx.status = MoMoTransaction.API_ERROR\n tx.message = resp['StatusDesc']\n except KeyError:\n tx.status = MoMoTransaction.FAILURE\n tx.message = traceback.format_exc()\n logger.error(\"%s - MTN MoMo: Failed to init transaction of %dF from %s: %s\" % (svc.project_name, amount, username, tx.phone), exc_info=True)\n except SSLError:\n tx.status = MoMoTransaction.SSL_ERROR\n logger.error(\"%s - MTN MoMo: Failed to init transaction of %dF from %s: %s\" % (svc.project_name, amount, username, tx.phone), exc_info=True)\n except Timeout:\n tx.status = MoMoTransaction.TIMEOUT\n logger.error(\"%s - MTN MoMo: Failed to init transaction of %dF from %s: %s\" % (svc.project_name, amount, username, tx.phone), exc_info=True)\n except RequestException:\n tx.status = MoMoTransaction.REQUEST_EXCEPTION\n tx.message = traceback.format_exc()\n logger.error(\"%s - MTN MoMo: Failed to init transaction of %dF from %s: %s\" % (svc.project_name, amount, username, tx.phone), exc_info=True)\n except:\n tx.status = MoMoTransaction.SERVER_ERROR\n tx.message = traceback.format_exc()\n logger.error(\"%s - MTN MoMo: Failed to init transaction of %dF from %s: %s\" % (svc.project_name, amount, username, tx.phone), exc_info=True)\n\n tx.save(using='wallets')",
"def payment(request,game_id):\r\n\tgame = Game.objects.get(id = game_id)\r\n\tif(game is not None): #check if the game exists\r\n\t\tcheck_if_bought = Transaction.objects.filter(payer = request.user.profile,game=Game.objects.get(id=game_id),state=Transaction.CONFIRMED).count() #check if user has already purchased the game\r\n\t\tif check_if_bought > 0 or game.developer == request.user.profile:\r\n\t\t\treturn redirect(\"/play/\" + str(game_id))\r\n\t\tpurchase_game = Game.objects.get(id = game_id)\r\n\t\tnew_payer = Profile.objects.get(user = request.user)\r\n\t\tnew_payee= purchase_game.developer\r\n\t\ttransaction = Transaction.objects.create(payer=new_payer, payee= new_payee, game=purchase_game,amount=purchase_game.price)\r\n\t\ttransaction.save()\r\n\t\t# Generate checksum and hash values\r\n\t\tchecksumstr = \"pid={}&sid={}&amount={}&token={}\".format(transaction.id, settings.SELLER_ID, purchase_game.price, settings.SELLER_KEY)\r\n\t\tm = md5(checksumstr.encode(\"ascii\"))\r\n\t\tchecksum = m.hexdigest()\r\n\r\n\t\tprint(transaction.id, transaction.state, checksumstr)\r\n\t\treturn render(request, 'payment.html', {'game':purchase_game,'SELLER_ID':settings.SELLER_ID, 'MEDIA_URL': settings.MEDIA_URL, 'transaction': transaction, 'checksum': checksum})\r\n\telse:\r\n\t\treturn redirect('home') # Redirect to home if link is faulty\r",
"def payPalReturn(request, *args, **kwargs):\n initParam = {}\n token = request.GET.get('token')\n payerID = request.GET.get('PayerID')\n initParam['token'] = token\n initParam['payerid'] = payerID\n if token and payerID:\n p = driver.PayPal()\n EC_RETURNURL = '/'.join([common.getHttpHeader(request), 'payment/paypal_return'])\n EC_CANCELURL = '/'.join([common.getHttpHeader(request), 'payment/paypal_cancel'])\n res_dict = p.GetExpressCheckoutDetailsInfo(EC_RETURNURL, EC_CANCELURL, token)\n state = p._get_value_from_qs(res_dict, 'ACK')\n if state in [\"Success\", \"SuccessWithWarning\"]:\n #Show the list of service detail to user.\n executeMethod = kwargs.pop('executeMethod', None)\n if executeMethod:\n gateway = request.session.get('gateway', None)\n if gateway:\n initParam['gateway'] = gateway\n serviceDetail, serviceItems, discount_rate = executeMethod(request, initParam=initParam)\n if serviceDetail and serviceItems:\n initParam['serviceDetail'] = serviceDetail\n initParam['serviceItems'] = serviceItems\n initParam['discount_rate'] = discount_rate\n return render_to_response('payment/paypal_return.html', initParam, context_instance=RequestContext(request))\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, Execute method %(param3)s failed.')\n % {'param1': token, 'param2': payerID, 'param3': executeMethod.__name__})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s. Gateway no exists in request.session.')\n % {'param1': token, 'param2': payerID})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, ExecuteMethod does not exist.')\n % {'param1': token, 'param2': payerID})\n else:\n error = p._get_value_from_qs(res_dict, 'L_SHORTMESSAGE0')\n log.error(_('Token %(param1)s, PayerID: %(param2)s, %(param3)s.')\n % {'param1': token, 'param2': payerID, 'param3': error})\n else:\n log.error(_('Token or PayerID no exists.'))\n\n if request.session.get('gateway', None):\n del request.session['gateway']\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if success_page:\n del request.session['success_page']\n if back_page:\n del request.session['back_page']\n error_msg = driver.GENERIC_PAYPAL_ERROR\n page_msg = request.session['back_page_msg']\n return render_to_response('payment/paypal_cancel.html',\n {'error_msg': error_msg, 'back_page': back_page, 'back_page_msg': page_msg}, context_instance=RequestContext(request))\n else:\n error_msg = _('%(param1)s Please payment again.') % {'param1': driver.GENERIC_PAYPAL_ERROR}\n return render_to_response('payment/paypal_error.html',\n {\"error_msg\": error_msg}, context_instance=RequestContext(request))",
"def paypal_gateway(self):\n\n print(request.form)\n\n # Gather information from callback response\n first_name = request.form.get(\"first_name\", None)\n last_name = request.form.get(\"last_name\", None)\n payer_id = request.form.get(\"payer_id\", None)\n payer_email = request.form.get(\"payer_email\", None)\n item_name = request.form.get(\"item_name\", None)\n item_number = request.form.get(\"item_number\", None)\n custom = request.form.get(\"custom\", None)\n payment_gross = request.form.get(\"payment_gross\", None)\n\n ## Generate Token and store in database\n gen_uuid = str(uuid.uuid4())\n\n try:\n t = Token()\n t.uuid = gen_uuid\n t.email = payer_email\n t.active = True\n t.package = item_name\n t.package_id = item_number\n\n db.session.add(t)\n db.session.commit()\n except:\n import traceback\n db.session.rollback()\n traceback.print_exc()\n\n ## Send email to user with unique link\n try:\n msg = Message(\n \"Guildbit - Order Confirmation\",\n sender=settings.DEFAULT_MAIL_SENDER,\n recipients=[payer_email])\n\n msg.html = render_template(\"emails/payment_thankyou.html\", package=item_name, uuid=gen_uuid)\n mail.send(msg)\n except:\n import traceback\n traceback.print_exc()\n\n return jsonify({\n \"status\": \"received\"\n })",
"def onCheckout(self, controller):\n \n if askokcancel(\"Proceed\", \"Pay the order?\"):\n c = controller.customer\n package = {'customer_id':c.id, 'order_price':c.my_order.GetTotalPrice}\n msg = controller.transmit(package)\n \n if msg['order_received']:\n c.CheckOut(c.my_order.GetTotalPrice)\n c.Clear()\n controller.show_frame(PageThree)",
"def buy():\n\n if request.method == \"POST\":\n response = trade(db, session['user_id'], request.form, 'BUY')\n if response:\n if response['type'] is 'error':\n flash(response['text'], 'error')\n else:\n flash(response['text'])\n return redirect(\"/\")\n else:\n return apology(\"Page not found\")",
"def checkout(request, is_selfserve=False):\n\n if is_selfserve:\n form = forms.SelfServePaymentForm(request.POST)\n else:\n if not request.user.has_perm('reg.add_membershipsold'):\n raise Http404\n form = forms.PaymentForm(request.POST)\n\n if not form.is_valid():\n transaction.rollback()\n messages.error(request, form.errors)\n request.session['payment_form'] = form\n return redirect(request.META['HTTP_REFERER'])\n\n cart = _get_cart(request)\n\n # First, some sanity checks.\n error = False\n for item in cart:\n if item.person.memberships.filter(type=item.type).count() and not item.type.in_quantity:\n messages.error(request, 'That membership has already been sold.')\n error = True\n if error:\n transaction.rollback()\n request.session['payment_form'] = form\n return redirect(request.META['HTTP_REFERER'])\n\n payment = Payment()\n if not request.user.is_anonymous():\n payment.user = request.user\n if is_selfserve:\n payment.method = SELFSERVE_PAYMENT\n payment.ui_used = 'self'\n else:\n payment.method = form.cleaned_data['method']\n payment.comment = form.cleaned_data['comment']\n payment.ui_used = 'event'\n payment.amount = cart.total\n payment.save()\n\n if not payment.process(form=form, request=request):\n if payment.error_message:\n messages.error(request, \"Payment failed: %s\" % payment.error_message)\n else:\n messages.error(request, \"Payment failed. (Unknown reason.)\")\n payment.delete() # Not all backends can rollback. So delete it too.\n transaction.rollback()\n request.session['payment_form'] = form\n if is_selfserve:\n return redirect(selfserve_index)\n else:\n return redirect(person_view, person.pk)\n\n for item in cart:\n membership = MembershipSold()\n membership.person = item.person\n membership.type = item.type\n membership.price = item.type.price\n membership.quantity = item.quantity\n membership.payment = payment\n membership.save()\n\n request.session['cart'] = Cart()\n messages.success(request, \"Payment accepted\")\n transaction.commit()\n if is_selfserve:\n return redirect(selfserve_index)\n else:\n return redirect(print_pending)",
"def response_post_params(cls, post_params):\r\n resp_params = {\r\n # Indicate whether the payment was successful\r\n \"decision\": \"ACCEPT\" if cls.PAYMENT_STATUS_RESPONSE == \"success\" else \"REJECT\",\r\n\r\n # Reflect back whatever the client sent us,\r\n # defaulting to `None` if a paramter wasn't received\r\n \"course_id\": post_params.get('course_id'),\r\n \"orderAmount\": post_params.get('amount'),\r\n \"ccAuthReply_amount\": post_params.get('amount'),\r\n \"orderPage_transactionType\": post_params.get('orderPage_transactionType'),\r\n \"orderPage_serialNumber\": post_params.get('orderPage_serialNumber'),\r\n \"orderNumber\": post_params.get('orderNumber'),\r\n \"orderCurrency\": post_params.get('currency'),\r\n \"match\": post_params.get('match'),\r\n \"merchantID\": post_params.get('merchantID'),\r\n\r\n # Send fake user data\r\n \"billTo_firstName\": \"John\",\r\n \"billTo_lastName\": \"Doe\",\r\n \"billTo_street1\": \"123 Fake Street\",\r\n \"billTo_state\": \"MA\",\r\n \"billTo_city\": \"Boston\",\r\n \"billTo_postalCode\": \"02134\",\r\n \"billTo_country\": \"us\",\r\n\r\n # Send fake data for other fields\r\n \"card_cardType\": \"001\",\r\n \"card_accountNumber\": \"############1111\",\r\n \"card_expirationMonth\": \"08\",\r\n \"card_expirationYear\": \"2019\",\r\n \"paymentOption\": \"card\",\r\n \"orderPage_environment\": \"TEST\",\r\n \"orderPage_requestToken\": \"unused\",\r\n \"reconciliationID\": \"39093601YKVO1I5D\",\r\n \"ccAuthReply_authorizationCode\": \"888888\",\r\n \"ccAuthReply_avsCodeRaw\": \"I1\",\r\n \"reasonCode\": \"100\",\r\n \"requestID\": \"3777139938170178147615\",\r\n \"ccAuthReply_reasonCode\": \"100\",\r\n \"ccAuthReply_authorizedDateTime\": \"2013-08-28T181954Z\",\r\n \"ccAuthReply_processorResponse\": \"100\",\r\n \"ccAuthReply_avsCode\": \"X\",\r\n\r\n # We don't use these signatures\r\n \"transactionSignature\": \"unused=\",\r\n \"decision_publicSignature\": \"unused=\",\r\n \"orderAmount_publicSignature\": \"unused=\",\r\n \"orderNumber_publicSignature\": \"unused=\",\r\n \"orderCurrency_publicSignature\": \"unused=\",\r\n }\r\n\r\n # Indicate which fields we are including in the signature\r\n # Order is important\r\n signed_fields = [\r\n 'billTo_lastName', 'orderAmount', 'course_id',\r\n 'billTo_street1', 'card_accountNumber', 'orderAmount_publicSignature',\r\n 'orderPage_serialNumber', 'orderCurrency', 'reconciliationID',\r\n 'decision', 'ccAuthReply_processorResponse', 'billTo_state',\r\n 'billTo_firstName', 'card_expirationYear', 'billTo_city',\r\n 'billTo_postalCode', 'orderPage_requestToken', 'ccAuthReply_amount',\r\n 'orderCurrency_publicSignature', 'orderPage_transactionType',\r\n 'ccAuthReply_authorizationCode', 'decision_publicSignature',\r\n 'match', 'ccAuthReply_avsCodeRaw', 'paymentOption',\r\n 'billTo_country', 'reasonCode', 'ccAuthReply_reasonCode',\r\n 'orderPage_environment', 'card_expirationMonth', 'merchantID',\r\n 'orderNumber_publicSignature', 'requestID', 'orderNumber',\r\n 'ccAuthReply_authorizedDateTime', 'card_cardType', 'ccAuthReply_avsCode'\r\n ]\r\n\r\n # Add the list of signed fields\r\n resp_params['signedFields'] = \",\".join(signed_fields)\r\n\r\n # Calculate the fields signature\r\n signed_fields_sig = processor_hash(resp_params['signedFields'])\r\n\r\n # Calculate the public signature\r\n hash_val = \",\".join([\r\n \"{0}={1}\".format(key, resp_params[key])\r\n for key in signed_fields\r\n ]) + \",signedFieldsPublicSignature={0}\".format(signed_fields_sig)\r\n\r\n resp_params['signedDataPublicSignature'] = 
processor_hash(hash_val)\r\n\r\n return resp_params",
"def post(self, request, *args, **kwargs):\n try:\n form = self.get_form()\n except RedirectNeeded as exc:\n messages.add_message(request, messages.SUCCESS, \"Payment redirects to %s\" % exc.args[0])\n return HttpResponseRedirect(exc.args[0])\n #except Exception as exc:\n # return HttpResponseBadRequest(exc, content_type=\"text/plain\")\n\n if form.validate():\n messages.add_message(request, messages.SUCCESS, \"Payment succeeded\")\n return self.form_valid(form)\n else:\n messages.add_message(request, messages.ERROR, \"Payment failed\")\n return self.form_invalid(form)",
"def pay_order(driver, card_id, expired_date, cvc):\n logging.info(f\"Pay your order with card:[{card_id}, {expired_date}, {cvc}]\")\n payment_page = PaymentPage(driver)\n payment_page.switch_to_card_frame()\n payment_page.input_card_number(card_id)\n payment_page.input_card_expired_date(expired_date)\n payment_page.input_card_cvc(cvc)\n payment_page.driver.switch_to.default_content()\n payment_page.click_submit_payment_button()",
"def event_payu_com_dpn(self, **post):\n cr, uid, context = request.cr, request.uid, request.context\n payment_acquire = request.env['payment.acquirer'].sudo().search([('provider', '=', 'payu')])\n transactionDetails = {}\n transactionDetails['store'] = {}\n transactionDetails['store']['soapUsername'] = payment_acquire.payu_api_username\n transactionDetails['store']['soapPassword'] = payment_acquire.payu_api_password\n transactionDetails['store']['safekey'] = payment_acquire.payu_seller_account\n transactionDetails['store']['environment'] = payment_acquire.environment\n transactionDetails['additionalInformation'] = {}\n transactionDetails['additionalInformation']['payUReference'] = post['PayUReference']\n try:\n result = PayuController.payuMeaGetTransactionApiCall('', transactionDetails)\n payment_transation_id = request.env['payment.transaction'].sudo().search(\n [('reference', '=', result['merchantReference'])])\n payu_response = {}\n if result:\n payu_response['TRANSACTION_STATUS'] = result['transactionState']\n # payu_response['SUCCESSFUL'] = result['successful']\n payu_response['AMOUNT'] = payment_transation_id.amount * 100 if payment_transation_id else 0.00\n payu_response['CURRENCYCODE'] = result['basket']['currencyCode']\n payu_response['PAYUREFERENCE'] = result['payUReference']\n payu_response['REFERENCE'] = result['merchantReference']\n payu_response['RESULTMESSAGE'] = result['resultMessage']\n response_state = request.env['payment.transaction'].sudo().form_feedback(payu_response, 'payu')\n # response_state = PaymentTransactionCus.form_feedback('', payu_response, 'payu')\n # if response_state:\n # return werkzeug.utils.redirect('/shop/payment/validate')\n # else:\n # return werkzeug.utils.redirect('/shop/unsuccessful')\n\n sale_order_id = request.env['sale.order'].sudo().search([('name', '=', result['merchantReference'])])\n sale_order_data = sale_order_id\n request.session['sale_last_order_id'] = sale_order_id.id\n\n tx_id = request.env['payment.transaction'].sudo().search([('reference', '=', result['merchantReference'])])\n tx = tx_id\n if not sale_order_id or (sale_order_id.amount_total and not tx):\n return request.redirect('/shop')\n if (not sale_order_id.amount_total and not tx) or tx.state in ['pending']:\n if sale_order_id.state in ['draft', 'sent']:\n if (not sale_order_id.amount_total and not tx):\n sale_order_id.action_button_confirm()\n email_act = sale_order_id.action_quotation_send()\n elif tx and tx.state == 'cancel':\n sale_order_id.action_cancel()\n elif tx and (tx.state == 'draft' or tx.state == 'sent' or tx.state == 'done'):\n # if result and payu_response['successful'] and payu_response['TRANSACTION_STATUS'] in ['SUCCESSFUL', 'PARTIAL_PAYMENT', 'OVER_PAYMENT']:\n if result and payu_response['TRANSACTION_STATUS'] in ['SUCCESSFUL', 'PARTIAL_PAYMENT', 'OVER_PAYMENT']:\n transaction = tx.sudo().write(\n {'state': 'done', 'date_validate': datetime.now(),\n 'acquirer_reference': result['payUReference']})\n email_act = sale_order_id.action_quotation_send()\n action_confirm_res = sale_order_id.action_confirm()\n sale_order = sale_order_id.read([])\n # if sale_order_id.state == 'sale':\n # journal_ids = request.env['account.journal'].sudo().search([('name', '=', 'FNB 62085815143')], limit=1)\n # journal = journal_ids.read([])\n currency = request.env['res.currency'].sudo().search([('name', '=', 'ZAR')], limit=1)\n method = request.env['account.payment.method'].sudo().search([('name', '=', 'Manual')], limit=1)\n journal_id = 
request.env['account.journal'].sudo().search(\n [('name', '=', 'FNB - Cheque Account 6208585815143')], limit=1, order=\"id desc\")\n if journal_id:\n account_payment = {\n 'partner_id': sale_order[0]['partner_id'][0],\n 'partner_type': 'customer',\n 'journal_id': journal_id.id,\n # 'invoice_ids':[(4,inv_obj.id,0)],\n 'amount': sale_order[0]['amount_total'],\n 'communication': sale_order_id.name,\n 'currency_id': currency.id,\n 'payment_type': 'inbound',\n 'payment_method_id': method.id,\n 'payment_transaction_id': tx.id,\n }\n acc_payment = request.env['account.payment'].sudo().create(account_payment)\n acc_payment.sudo().post()\n sale_order_id = request.session.get('sale_last_order_id')\n print(\"\\n\\n\\n\\n\\n\\n=======================sale order sale order======\", sale_order_id)\n sale_order_data = request.env['sale.order'].sudo().browse(sale_order_id)\n # if sale_order_data.project_project_id:\n # request.session['last_project_id'] = sale_order_data.project_project_id.id\n if response_state:\n sale_order_data.message_post(subject=\"T&C's Privacy Policy\",\n body=\"%s accepted T&C's and Privacy Policy.\" % sale_order_data.partner_id.name)\n return werkzeug.utils.redirect('/pay/thankyou')\n # return werkzeug.utils.redirect('/shop/confirmation')\n else:\n return werkzeug.utils.redirect('/event/unsuccessful')\n except Exception as e:\n return werkzeug.utils.redirect('/event/unsuccessful')",
"def request_payment(self, payment_type=None):\n self.payment_type = payment_type\n\n # choose a card AI \n self.choose_card_to_discard()\n #self.make_payment(card)",
"def webhook_payment_successful(self, event):\n\n intent = event.data.object\n p_id = intent.id\n pack = intent.metadata.pack\n save_detail = intent.metadata.save_detail\n\n billing_details = intent.charges.data[0].billing_details\n shipping_details = intent.shipping\n grand_cost = round(intent.charges.data[0].amount / 100, 2)\n\n for field, value in shipping_details.address.items():\n if value == \"\":\n shipping_details.address[field] = None\n\n profile = None\n username = intent.metadata.username\n if username != 'AnonymousUser':\n profile = UserProfile.objects.get(user__username=username)\n if save_detail:\n profile.default_phone_number = shipping_details.phone,\n profile.default_home_Address = shipping_details.address.line1,\n profile.default_home_Address_continued = \\\n shipping_details.address.line2,\n profile.default_postcode = \\\n shipping_details.address.postal_code,\n profile.default_county = \\\n shipping_details.address.city,\n profile.default_country = \\\n shipping_details.address.country,\n profile.save()\n\n order_present = False\n seek = 1\n while seek <= 6:\n try:\n order = Order.objects.get(\n Name__iexact=shipping_details.name,\n user_account=profile,\n email__iexact=billing_details.email,\n phone_number__iexact=shipping_details.phone,\n home_Address__iexact=shipping_details.address.line1,\n home_Address_continued__iexact =(\n shipping_details.address.line2\n ),\n postcode__iexact=shipping_details.address.postal_code,\n county__iexact=shipping_details.address.city,\n country__iexact=shipping_details.address.country,\n grand_cost=grand_cost,\n original_pack=pack,\n stripe_p_id=p_id,\n )\n order_present = True\n break\n except Order.DoesNotExist:\n seek += 1\n time.sleep(1)\n if order_present:\n self._send_email_details(order)\n return HttpResponse(\n content=f'Webhook obtained: {event[\"type\"]} | Good news. \\\n This is now in the database',\n status=200)\n else:\n order = None\n try:\n order = Order.objects.create(\n Name=shipping_details.name,\n email=billing_details.email,\n phone_number=shipping_details.phone,\n home_Address=shipping_details.address.line1,\n home_Address_continued=shipping_details.address.line2,\n postcode=shipping_details.address.postal_code,\n county=shipping_details.address.city,\n country=shipping_details.address.country,\n original_pack=pack,\n stripe_p_id=p_id,\n )\n for item_id, item_data in json.load(pack).items():\n product = Product.objects.get(id=item_id)\n if isinstance(item_data, int):\n order_line_item = OrderLineItem(\n order=order,\n product=product,\n quantity=item_data,\n )\n order_line_item.save()\n else:\n for size, quantity in item_data['items_by_size'].items():\n order_line_item = OrderLineItem(\n order=order,\n product=product,\n quantity=quantity,\n product_size=size,\n )\n order_line_item.save()\n except Exception as e:\n if order:\n order.delete()\n return HttpResponse(\n content=f'Webhook obtained: {event[\"type\"]} | \\\n There is an error: {e}',\n status=500)\n self._send_email_details(order)\n return HttpResponse(\n content=f'Webhook obtained: {event[\"type\"]} | \\\n Goodnews: webhook order created',\n status=200)",
"def process_postpay_callback(params):\r\n try:\r\n verify_signatures(params)\r\n result = payment_accepted(params)\r\n if result['accepted']:\r\n # SUCCESS CASE first, rest are some sort of oddity\r\n record_purchase(params, result['order'])\r\n return {'success': True,\r\n 'order': result['order'],\r\n 'error_html': ''}\r\n else:\r\n return {'success': False,\r\n 'order': result['order'],\r\n 'error_html': get_processor_decline_html(params)}\r\n except CCProcessorException as error:\r\n return {'success': False,\r\n 'order': None, # due to exception we may not have the order\r\n 'error_html': get_processor_exception_html(error)}",
"def pay():\n data = request.get_json()\n print(data)\n intent = stripe.PaymentIntent.create(amount=data['amnt'], currency='usd', metadata={'integration_check': 'accept_a_payment'})\n return jsonify(client_secret=intent.client_secret)",
"def cat_int_pay():\n print(colors.Color.BLUE + \"Make the payment with digital certificate\" + colors.Color.END)\n pay_and_certificate = urllib.parse.quote(\n 'identitats.aoc.cat/o/oauth2/auth?response_type=code&client_id=tramits.'\n 'transit.cat&redirect_uri=https'\n '://multestransit.gencat.cat/sctPagaments/AppJava/loginIdCat&scope='\n 'autenticacio_usuari&access_type=online'\n '&approval_pompt=false&state=ca_ES')\n print('https://' + pay_and_certificate)\n print(colors.Color.BLUE + \"Make the payment without digital certificate\"\n + colors.Color.END)\n pay_without_certificate = urllib.parse.quote(\n 'multestransit.gencat.cat/sctPagaments/AppJava/views/expedients/cerca.'\n 'xhtml?set-locale=ca_ES')\n print('https://' + pay_without_certificate)",
"def process(request, order):\n # Transaction results\n APPROVED = '1'\n DECLINED = '2'\n ERROR = '3'\n HELD_FOR_REVIEW = '4'\n print \"I am processing the request\"\n\n postdata = request.POST.copy()\n amount = cart.cart_subtotal(request)\n\n print amount\n\n charge = stripe.Charge.create(\n amount=int(amount*100),\n currency=\"ngn\", # I can Change to naira if needed\n card=postdata.get('stripeToken', ''),\n description=\"Example charge\"\n )\n #\n #charge.capture()\n\n\n if charge['card']['cvc_check']:\n transaction_id = charge.id[3:22]\n order = create_order(request, order, transaction_id)\n results = {'order_number': order.id, 'message': u''}\n elif charge.balance_transaction:\n results = {'order_number': 0, 'message': charge.failure_message, 'code': charge.failure_code,\n 'text': charge.description}\n else:\n results = {'order_number': 0, 'message':charge.failure_message, 'errors': charge.errors}\n return results",
"def test_process_postpay_accepted(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.first_name,\r\n 'orderNumber': str(order1.id),\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n 'ccAuthReply_amount': '0.00'\r\n }\r\n result = process_postpay_callback(params)\r\n self.assertTrue(result['success'])\r\n self.assertEqual(result['order'], order1)\r\n order1 = Order.objects.get(id=order1.id) # reload from DB to capture side-effect of process_postpay_callback\r\n self.assertEqual(order1.status, 'purchased')\r\n self.assertFalse(result['error_html'])",
"def test_buy_now(self):\n catalog_page = CatalogPage(self.driver)\n product_page = ProductPage(self.driver)\n payment_page = PaymentPage(self.driver)\n payment_review_page = PaymentReviewPage(self.driver)\n payment_info_page = PaymentInfoPage(self.driver)\n success_page = SuccessPage(self.driver)\n # buy the new product\n navigate_to(self.driver, ProductPage.URL(self.new_product['product']['title']))\n product_page.add_to_cart.click()\n # by an old product\n catalog_page.catalog.click()\n # Sort products to move the newly created to last page\n catalog_page.sorting_order.select_by_visible_text(\"Date, old to new\")\n catalog_page.image.random_click()\n product = product_page.product.get_text()\n product_page.add_to_cart.click()\n catalog_page.catalog.click()\n catalog_page.cart.click()\n payment_dic = {\n 'address' : f'{randint(1, 99999)} {random_name(5, 8)}',\n 'city' : \"San Francisco\",\n 'email_or_mobile_phone_number_input' : random_name(8) + \"@gmail.com\",\n 'last_name' : random_name(3, 12),\n 'zip_code' : '94107',\n }\n if randint(0, 1):\n payment_dic['first_name'] = random_name(4, 16)\n if randint(0, 1):\n payment_dic['address2'] = random_name(5)\n for _ in payment_dic:\n exec(f\"payment_page.{_}.enter(payment_dic['{_}'])\")\n payment_page.continue_to_shipping.click()\n payment_review_page.continue_to_payment.click()\n payment_info_page.full_address.get_text()\n # validate address\n for _ in ['address', 'city', 'zip_code']:\n assert_and_log(payment_dic[_] in payment_info_page.full_address.get_text(),\n f\"{_} in full address\")\n payment_info_page.enter_bogus_payment(1)\n assert_and_log(success_page.thank_you.find_visible_element(),\n \"'Thank you' appeared as a sign of successful transaction\",\n continue_on_error=False)\n validate(success_page.basic_validation_list)",
"def step(self): \n self.reset_parameters()\n\n if np.random.uniform(0, 1) < self.model.churn_prob: self.exit_triggered = True \n if self.exit_triggered:\n self.exit()\n else:\n self.register_deposit(self.deposit_intent)\n self.register_contribution(self.contribution_intent)\n self.register_sponsorship(self.sponsor_intent)\n self.register_euro_exchange(self.euro_exchange_intent)\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)",
"def get(self, request, *args, **kwargs):\n\n # Access will be granted in Complete view if payment_id matches.\n payment_id = self.execute_payment()\n # Check if payment id belongs to a Catalog donation -> product_id is set\n donation = Donation.objects.confirm_by_reference(payment_id)\n\n flow_type = 'one_time'\n url = reverse('become_supporter_complete') + \\\n '?payment_id={}'.format(payment_id)\n if donation.product_id:\n flow_type ='product_support'\n url += '&flow_type={}&product_id={}'.format(flow_type, donation.product_id)\n if donation.sponsored_event_dedication:\n flow_type = 'event_sponsorship'\n url += '&flow_type={}&event_id={}'.format(flow_type, donation.sponsored_event_id)\n\n if flow_type == 'event_sponsorship':\n custom_send_receipt(receipt_type=flow_type,\n amount=donation.amount, user=donation.user,\n dedication=donation.sponsored_event_dedication,\n musician=donation.sponsored_event.leader_string(),\n event_date=donation.sponsored_event.get_date())\n else:\n custom_send_receipt(receipt_type='one_time',\n amount=donation.amount, user=donation.user)\n\n return redirect(url)"
] | [
"0.68730086",
"0.6631193",
"0.6541817",
"0.6321301",
"0.6251407",
"0.6202432",
"0.6148989",
"0.6138219",
"0.5937037",
"0.59216857",
"0.5856434",
"0.57766294",
"0.57601655",
"0.575292",
"0.5669925",
"0.5667298",
"0.56421846",
"0.56279564",
"0.5624298",
"0.56111133",
"0.56018615",
"0.55952317",
"0.55833393",
"0.5578975",
"0.5565472",
"0.55297834",
"0.5528327",
"0.55160177",
"0.55020666",
"0.5464707"
] | 0.730185 | 0 |
Call function to get long card number | def get_card_number():
return get_or_append_details('card_number', "Please enter your credit card number") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def long(self, amount):",
"def get_card_val(card):\n\n if card == '1':\n return 1\n if card == '2':\n return 2\n if card == '3':\n return 3\n else:\n return 4",
"def get_credit_card_number(self):\n\t\tif len(self.credit_card_number) == 16:\n\t\t\treturn self.credit_card_number\n\t\tr(400, {\"message\" : \"please provide the amount to process\"})\n\t\treturn",
"def get_card(self):\n\n card = random.randint(1,13)\n return card",
"def get_card_id(self):\n (stat, tag_type) = self.rdr.request(self.rdr.REQIDL)\n _debug('rdr.request', stat)\n if stat == self.rdr.OK:\n (stat, raw_uid) = self.rdr.anticoll()\n _debug('anticoll_2', stat)\n if stat == self.rdr.OK:\n return self._uid(raw_uid)\n return None",
"def get_card_info(card):\n result = ((card-1)/13 + 1, card - ((card-1)/13)*13)\n return result",
"def getNumber():",
"def org_id(self, long: bool = False, dash: bool = True) -> str:\n first_digits = list(self.ORG_ID_DIGIT_1)\n random.shuffle(first_digits)\n onr_one = str(first_digits.pop())\n onr_one += str(self.generator.random.randrange(0, 9)).zfill(1)\n onr_one += str(self.generator.random.randrange(20, 99))\n onr_one += str(self.generator.random.randrange(0, 99)).zfill(2)\n onr_two = str(self.generator.random.randrange(0, 999)).zfill(3)\n luhn_checksum = str(calculate_luhn(int(onr_one + onr_two)))\n prefix = \"16\" if long else \"\"\n hyphen = \"-\" if dash else \"\"\n\n org_id = f\"{prefix}{onr_one}{hyphen}{onr_two}{luhn_checksum}\"\n return org_id",
"def calculate_luhn_check_digit(partial_card_number):\n checksum = luhn_checksum(int(partial_card_number) * 10)\n if checksum == 0:\n check_digit = 0\n else:\n check_digit = 10 - checksum\n return check_digit",
"def getLong(self, address: ghidra.program.model.address.Address) -> long:\n ...",
"def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])",
"def getLong(self, int: int, int2: int) -> int:\n ...",
"def getLong(self, addr: ghidra.program.model.address.Address) -> long:\n ...",
"def luhn_checksum(card_number):\n \n # Convert number into a list so we can edit each index value\n num = [int(x) for x in str(card_number)]\n \n # Step 1: multiply each odd index by 2 \n for i in range(0, 15, 2): # len(num) was falling one short so resorted to using int\n num[i] *= 2\n \n # Step 2: subtract 9 from any numbers greater than 9\n for i in range(0, 15):\n if num[i] > 9:\n num[i] -= 9\n else:\n continue\n \n # Step 3: total the 15 digits \n total = 0\n for i in range(0, 15):\n total += num[i]\n \n # Step 4: multiply total by 9 and take the last digit which is our checksum\n total_2 = total * 9\n string_total_2 = str(total_2)\n checksum = string_total_2[-1]\n \n return checksum",
"def getLong(self, name: unicode) -> long:\n ...",
"def get_serial_number(device):\n def to_base36(n, alphabet=\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"):\n return (to_base36(n // 36) + alphabet[n % 36]).lstrip(\"0\") \\\n if n > 0 else \"0\"\n\n a0000 = 604661760\n if device.SerialNumber >= a0000:\n return to_base36(device.SerialNumber)\n return str(device.SerialNumber)",
"def card_html_id(card):\n return f'c{card:02d}'",
"def read_long(self):\n return self._packers[\"l\"].unpack(self.read(4))[0]",
"def do_cardid(self, line):\n if self.bootstrap() != 0:\n return self.return_code(1, True)\n\n key = self.card.get_pubkey()\n key_fmted = self.format_pubkey(key)\n\n print('\\nCard ID: %s' % key_fmted)\n return self.return_code(0)",
"def get_serial_number(self):\n\t\treturn call_sdk_function('PrlVmDevHd_GetSerialNumber', self.handle)",
"def card(n):\r\n assert type(n) == int and n > 0 and n <= 13, \"Bad card n\"\r\n specials = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}\r\n return specials.get(n, str(n))",
"def get_serial_number(self):\n\n\t\treturn struct.unpack('<Q', self.boot_sector_data[72 : 80])[0]",
"def get_serial_number(self):\n serial = create_string_buffer(64)\n self._dll.ShamrockGetSerialNumber(self._device, serial)\n return serial.value",
"def get_long(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_long()\n rc = grib_get_long(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value",
"def get_long(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_long()\n rc = grib_get_long(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value",
"async def get_serial_number(self):\n\n # Display info message\n log.info(\"get_serial_number\")\n\n # Get serial number\n output = await self.send_command(self.cmd_get_serial_number)\n\n # Display info message\n log.info(f\"get_serial_number: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.splitlines()[0].split()[-1]\n\n # Display info message\n log.info(f\"get_hostname: hostname found: '{output}'\")\n\n # Return the serial number of the device\n return output",
"def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card",
"def serial_number(self):\n return self._dll.JLINKARM_GetSN()",
"def get_pin(length=6):\n pin = str(random.sample(range(10 ** (length - 1), 10 ** length), 1)[0])\n print(\"pin \"+pin)\n\n return pin",
"def getCardNumber(self,message):\n card = re.findall(Analyzer.rgxCard,message.lower())\n return card[0]"
] | [
"0.60791755",
"0.59657437",
"0.59604347",
"0.5928244",
"0.5922976",
"0.588906",
"0.58676",
"0.57750434",
"0.5765597",
"0.57478154",
"0.5678881",
"0.5672181",
"0.56093985",
"0.55803376",
"0.55625385",
"0.55540127",
"0.55250293",
"0.55170596",
"0.55093944",
"0.5496719",
"0.5479609",
"0.54701996",
"0.5465478",
"0.5456506",
"0.5456506",
"0.5450452",
"0.54327583",
"0.5397723",
"0.53859544",
"0.53787076"
] | 0.6795907 | 0 |
Call function to get expiry month and year | def get_expiry():
return get_or_append_details('expiry', "Please enter your expiry date, two digits for the month and two digits for the year") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cc_expire_months():\n months = []\n for month in range(1, 13):\n if len(str(month)) == 1:\n numeric = '0' + str(month)\n else:\n numeric = str(month)\n months.append((numeric, datetime.date(2009, month, 1).strftime('%B')))\n return months",
"def expiration(self):\n return datetime(int(self.exp_year), int(self.exp_month),\n calendar.monthrange(int(self.exp_year), int(self.exp_month))[1],\n 23, 59, 59)",
"def get_month():\n return handle_invalid_inputs(question_3, months)",
"def cc_expire_years():\n current_year = datetime.datetime.now().year\n years = range(current_year, current_year + 12)\n return [(str(x), str(x)) for x in years]",
"def get_expiry_by(\n expiries: List[pendulum.DateTime],\n year: int = 0,\n month: int = 0,\n n: int = 1,\n sort: bool = True,\n) -> pendulum.DateTime:\n if len(expiries) == 1:\n return expiries[0]\n if sort:\n expiries = sorted(expiries)\n\n if (month == 0) and (year == 0):\n return get_expiry(expiries, n=n)\n elif month == 0:\n filtered = [expiry for expiry in expiries if expiry.year == year]\n elif year == 0:\n filtered = [expiry for expiry in expiries if expiry.month == month]\n else:\n filtered = [\n expiry\n for expiry in expiries\n if (expiry.year == year and expiry.month == month)\n ]\n n = n if n < 1 else n - 1\n return filtered[n]",
"def YM(year=None, month=None):\n if month is None:\n if year is None:\n year = timezone.now()\n\n month = year.month\n year = year.year\n\n return module_globals[\n '%s_%04d_%02d' % (name, year, month)\n ]",
"def get_code_expiry():\n return now() + EXPIRE_CODE_DELTA",
"def get_code_expiry():\n return now() + EXPIRE_CODE_DELTA",
"def get_month(x):\n return x[\"SALE DATE\"].month",
"def get_year_and_month(self):\n return (self.timestamp.year, self.timestamp.month)",
"def get_date(self,yearlimits=[1500,2020]):\n\t\thead = self.raw_text()[:300] \t \t \n\t\tparser = Regexdate(head) \t \t\t\n\t\tyear = parser.find_year(yearlimits)\t\t\n\t\tmonth = parser.find_month()\n\t\tday = parser.find_day()\n\t\tif day and year != \"\":\n\t\t\treturn year + \"-\" + month + \"-\" + day\t\n\t\tif year:\n\t\t\treturn year\n\t\treturn \"\"",
"def get_year_and_month(self):\n year, month = self.kwargs.get('year'), self.kwargs.get('month')\n if year or month:\n return year, month\n\n # use cache if we can\n semester = getattr(self, 'semester', None)\n if semester:\n return semester.year, semester.month\n\n # use semester id hint if available\n sid = self.request.GET.get('semester')\n if sid:\n self.semester = get_object_or_404(models.Semester, id=sid)\n else:\n self.semester = models.Semester.objects.order_by('-year', '-month')[0]\n return self.semester.year, self.semester.month",
"def get_curr_year_month_date(self):\n return self.curr_year, self.curr_month, self.curr_date",
"def effective_invoice_month(self) -> pulumi.Input['GoogleTypeDateArgs']:\n return pulumi.get(self, \"effective_invoice_month\")",
"def _get_expire(self):\n return self.__expire",
"def get_expiration_date():\n dt = datetime.now()\n bdays_indx = pd.bdate_range(\n dt.strftime(\"%Y-%m-%d\"),\n (dt + timedelta(days=20)).strftime(\"%Y-%m-%d\"),\n freq=pd.offsets.CustomBusinessDay(calendar=USFederalHolidayCalendar()),\n ).tolist()\n expiration = [x.strftime(\"%Y-%m-%d\") for x in bdays_indx if x.weekday() == 4][0]\n return expiration",
"def expiry_date(self, today):\n three_years_ago = today + relativedelta(years=-3)\n three_years_in_the_future = today + relativedelta(years=+3)\n\n return date.fromordinal(random.randint(three_years_ago.toordinal(),\n three_years_in_the_future.toordinal()))",
"def get_cur_date(self):\n tmp = self.soup.find('small', text=re.compile('market', re.IGNORECASE)).text.split('Market')[0].strip()\n\n # assign year\n self.year = Settings.year.search(tmp).group(0)\n\n # assign day\n self.day = Settings.day.search(tmp).group(0)\n\n months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n\n # iterate over months and flag if match found\n for ii, mo in enumerate(months, 1):\n more = re.compile(mo, re.IGNORECASE)\n if more.search(tmp):\n self.month = ii\n break",
"def test_date_accept_this_month(self):\n spi_search = \"find date this month\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today(), '%Y-%m')\n self._compare_searches(inv_search, spi_search)",
"def date_now_plus_year():\n return (datetime.date.today() + datetime.timedelta(days=365))",
"def new_token_expiry_date():\n\treturn timezone.now() + datetime.timedelta(days=TOKEN_VALID_DATE)",
"def get_expiry_date(name: str, date_str: str):\n if 'IO' == name:\n # 沪深300, 到期月份的第三个星期五,遇国家法定假日顺延\n dates = THIRD_FRIDAYS[THIRD_FRIDAYS > date_str]\n day_str = get_next_trading_day_str(dates[0])\n elif name in ['cu', 'al', 'zn', 'au', 'ru']:\n # 上期所,标的期货合约到期日前一个月的倒数第 5 个交易日\n dates = LAST_BUSINESS_DAY[LAST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-1], -5)\n elif name in ['m', 'c', 'i', 'pg', 'l', 'v', 'pp']:\n # 大商所,标的期货合约到期日前一个月的第 5 个交易日\n dates = FIRST_BUSINESS_DAY[FIRST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-1], 5)\n elif 'SR' == name and date_str < '2019-09-01':\n # 郑商所,2019-09-01 之前为标的期货合约到期日前两个月的倒数第 5 个交易日\n dates = LAST_BUSINESS_DAY[LAST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-2], -5)\n elif name in ['CF', 'SR', 'RM', 'MA', 'TA', 'ZC']:\n # 郑商所,标的期货合约到期日前一个月的第 3 个交易日\n dates = FIRST_BUSINESS_DAY[FIRST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-1], 3)\n else:\n raise ValueError(f\"options contract not supported: {name}\")\n return day_str",
"def remaining_months_purchased(self) -> int:\n if not self.last_expiration:\n return 0\n start_date = _today()\n end_date = self.last_expiration\n return (end_date.year - start_date.year) * 12 + (\n end_date.month - start_date.month\n )",
"def get_default():\n today = datetime.date.today()\n if today.month == 1:\n return YearMonth(today.year - 1, 12)\n return YearMonth(today.year, today.month - 1)",
"def show_cal(request, year=None, month=None):\n if year == None:\n # get the current comic as a starting point\n lToday = Comic.objects.filter(published=True).order_by('-date')[0].date\n year = lToday.year\n month = lToday.month\n\n return calendar(request, year, month)",
"def year_month(cls,\n year: typing.Union[int, str],\n month: typing.Union[int, str])->str:\n yearstr: str\n if isinstance(year, int):\n yearstr = str(year)\n else:\n yearstr = year\n\n monthstr: str\n if isinstance(month, int):\n monthstr = str(month)\n else:\n monthstr = month\n if len(monthstr) == 1:\n monthstr = \"0\" + monthstr\n return cls.DATE_AND_TIMES_SIGIL + yearstr + \"-\" + monthstr + \"-01T00:00:00/10\"",
"def monthly_schedule(self,month):\n response = requests.get(f'http://company.com/{self.lname}/{month}')\n if response.ok:\n return response.text\n else:\n return 'Bad Response!'",
"def get_year(x):\n return x[\"SALE DATE\"].year",
"def _get_months(self, cr, uid, context):\n months=[(str(n),str(n)) for n in range(1,13)]\n return months",
"def month(self):\n return self._months"
] | [
"0.6451516",
"0.629649",
"0.6171842",
"0.6081973",
"0.6059073",
"0.59521425",
"0.59468853",
"0.59468853",
"0.5846049",
"0.58428335",
"0.5817317",
"0.5718091",
"0.57034725",
"0.56085",
"0.55998755",
"0.5556354",
"0.5548732",
"0.5462002",
"0.5388835",
"0.537513",
"0.5374864",
"0.5353485",
"0.535175",
"0.53460747",
"0.5325459",
"0.5325137",
"0.5322625",
"0.5318996",
"0.5302839",
"0.52989644"
] | 0.72779393 | 0 |
Call function to get 3 or 4 digit cvv | def get_cvv():
return get_or_append_details('cvv', 'Please enter your CVV. Typically this is the 3 digit number on the back of your card') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_cvv(\n cvk: bytes,\n pan: str,\n expiry: str,\n service_code: str,\n) -> str:\n\n if len(cvk) != 16:\n raise ValueError(\"CVK must be a double length DES key\")\n\n if len(pan) > 19 or not _tools.ascii_numeric(pan):\n raise ValueError(\"PAN must be less than 19 digits\")\n\n if len(expiry) != 4 or not _tools.ascii_numeric(expiry):\n raise ValueError(\"PAN expiry must be 4 digits long\")\n\n if len(service_code) != 3 or not _tools.ascii_numeric(service_code):\n raise ValueError(\"Service code must be 3 digits long\")\n\n block = (pan + expiry + service_code).ljust(32, \"0\")\n result = _des.encrypt_tdes_ecb(cvk[:8], _binascii.a2b_hex(block[:16]))\n result = _tools.xor(result, _binascii.a2b_hex(block[16:]))\n result = _des.encrypt_tdes_ecb(cvk, result)\n return \"\".join(\n [\n c\n for c in result.hex()\n if c in {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"0\"}\n ][:3]\n )",
"def _get_vul(self):\n if (self.cve):\n return u\"CVE-%s\" % self.cve \n else:\n return u\"%s\" % self.cert_id",
"def calibV(self):\n # clear buffer in case of errors\n self.flushInput()\n \n if (self.model == 'GDS'):\n self.write(':CHAN'+str(ch)+':SCAL?\\n')\n # returns V/div, turn it into multiplicative factor\n # between digitizer and actual volts\n vmult = float(self.readline()) * 10./255.\n # GDS includes vertical offset in the data returned.\n voff = 0.\n elif (self.model == 'TDS'):\n self.write('WFMPre:YMUlt?\\n')\n # formula I am using later is from TDS manual, so this\n # is straightforward.\n vmult = float(self.readline())\n self.write('WFMPre:YOFf?\\n')\n voff = float(self.readline())\n \n # clear buffer in case of errors\n self.flushInput()\n\n return (vmult, voff)",
"def get_rvt_file_version(rvt_file):\n file_info = get_basic_info(rvt_file, cleaned_str=True)\n re_version = re.compile(r\"Format: (\\d{4})\")\n found = re.findall(re_version, file_info)\n if found:\n rvt_file_version = found[0]\n else:\n re_version = re.compile(r\"Autodesk Revit (\\d{4})\")\n rvt_file_version = re.findall(re_version, file_info)[0]\n return rvt_file_version",
"def cvv(i):\r\n\r\n return '{}'.format(_random.randint(111, (999 if i == 3 else 9999)))",
"def _get_vul(self):\n if (self.cve):\n return u\"CVE-%s\" % self.cve\n else:\n return u\"%s\" % self.cert_id",
"def get_cve_pattern(self, with_score):\n if with_score:\n # please note that in graph DB, the CVE entries have the following format:\n # CVE-2012-1150:5.0\n # don't ask me why, but the score is stored in one field together with ID itself\n # the : character is used as a separator\n return r\"CVE-(\\d{4})-\\d{4,}:(\\d+\\.\\d+)\"\n else:\n return r\"CVE-(\\d{4})-\\d{4,}\"",
"def test_get_id():\n vc = vtec.parse(EX1)\n assert vc[0].get_id(2005) == \"2005-KJAN-TO-W-0130\"",
"def get_vspk_version(cls, version):\n return (\"v%s\" % version).replace(\".\", \"_\")",
"def calc_Cinv_storage(vol, gV):\n if vol>0:\n InvCa = 7224.8 * vol ** (-0.522) * vol * gV.EURO_TO_CHF / 60\n else:\n InvCa = 0\n\n return InvCa # CHF/a",
"def _get_det_id(self, source):\n match = re.match(r\"Camp\\.0:pnCCD\\.(\\d)\", source)\n number = str.zfill(match.groups()[0], 4)\n return \"pnccd_\" + number",
"def test_badformat():\n res = vtec.contime(\"AABBCCTHHMMZ\")\n assert res is None",
"def get_vcard(self, netid):\n self.setQuery(\"\"\"Select ?vcard where {\n ?who <http://vivo.dartmouth.edu/ontology/netId> \"%s\" .\n ?who <http://purl.obolibrary.org/obo/ARG_2000028> ?vcard .\n }\"\"\" % (netid))\n\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n return g['results']['bindings'][0]['vcard']['value']\n except:\n return None",
"def get_vol(data):\n vol = re.search(\"[^a-zA-Z](v\\s*o\\s*l\\s*\\.?\\s*[^a-zA-Z\\n,]+)\",\n data, re.IGNORECASE)\n # Check if the search result contains at least a digit\n if has_num(vol.group()):\n return ''.join([num for num in vol.group() if num.isdigit()])\n return '(nf)'",
"def test_get_id():\n vc = vtec.parse(\"/O.NEW.KJAN.TO.W.0130.050829T1651Z-050829T1815Z/\")\n assert vc[0].get_id(2005) == '2005-KJAN-TO-W-0130'",
"def get_card_val(card):\n\n if card == '1':\n return 1\n if card == '2':\n return 2\n if card == '3':\n return 3\n else:\n return 4",
"def get_crx_version(crx_path):\n # TODO: This approach has some issues with catching some outliers that don't match the regular pattern\n ver_str = path.basename(crx_path).split('.crx')[0].split('_', 1)[1]\n return ver_str.replace('_', '.')",
"def read_vcp_code(self, code: int) -> Tuple[int, int]:\n if code is None:\n _LOGGER.error('vcp code to send is None. ignored.')\n return 0, 0\n \n api_call = ctypes.windll.Dxva2.GetVCPFeatureAndVCPFeatureReply\n api_in_vcp_code = wintypes.BYTE(code)\n api_out_current_value = wintypes.DWORD()\n api_out_max_value = wintypes.DWORD()\n \n if not api_call(self._phy_monitor_handle, api_in_vcp_code, None,\n ctypes.byref(api_out_current_value), ctypes.byref(api_out_max_value)):\n _LOGGER.error('get vcp command failed: ' + hex(code))\n _LOGGER.error(ctypes.WinError())\n return api_out_current_value.value, api_out_max_value.value",
"def get_id_version(crx_path):\n crx_id, ver_str = path.basename(crx_path).split('.crx')[0].split('_', 1)\n ver_str = ver_str.replace('_', '.')\n return crx_id, ver_str",
"def gtVcf(ref, alt, gt):\n\tif gt == ref: return \"0\"\n\tif gt == alt: return \"1\"\n\treturn \".\"",
"def get_version_string():\n vl = TopicTreeExtractCVS.get_version_number()\n\n return '''TopicTreeExtractCVS {0}.{1}.{2}\nNew BSD License.\nCopyright (C) 2017 Hitoshi Yamauchi\n'''.format(vl[0], vl[1], vl[2])",
"def test_getnumber(self):\n convert = cnv()\n\n convert.setnum('einhundertdreiundzwanzig')\n self.assertEqual(convert.getnum(), 123)",
"def get_vep_format(vep_vcf):\n fmt = None\n with gzip.open(vep_vcf) as f:\n for line in f:\n line = line.decode().strip()\n if line[0]!='#':\n break\n elif line[:6]=='##INFO' and 'CSQ' in line:\n fmt = line.split('Format: ')[1].replace('\">','').split('|')\n break\n if fmt is None:\n raise ValueError('CSQ format not found in VCF header')\n return fmt",
"def get_v(r_div_R, z_div_L, Pi_div_DLP, k, alpha_ast, Bp, Bm, gp, gm, membrane_geometry):\n return CT.get_v(r_div_R, z_div_L, Pi_div_DLP, k, alpha_ast, Bp, Bm, gp, gm, membrane_geometry)",
"def get_volume_name(self, vid):\n return \"cv-{0}\".format(vid)",
"def get_four_digit_code(self):\n return (\n self.subhead[\"New_Code\"].str[0:2] + \".\" + self.subhead[\"New_Code\"].str[2:4]\n )",
"def validate_CPF(value):\n\t\tif value in EMPTY_VALUES:\n\t\t\treturn u''\n\t\tif not value.isdigit():\n\t\t\tvalue = re.sub(\"[-\\.]\", \"\", value)\n\t\torig_value = value[:]\n\t\ttry:\n\t\t\tint(value)\n\t\texcept ValueError:\n\t\t\treturn False\n\t\tif len(value) != 11 or value == \"00000000000\" or value == \"11111111111\" or value == \"22222222222\" or value == \"33333333333\" or \\\n\t\t\tvalue == \"44444444444\" or value == \"55555555555\" or value == \"66666666666\" or value == \"77777777777\" or \\\n\t\t\tvalue == \"88888888888\" or value == \"99999999999\":\n\t\t\t\n\t\t\treturn False\n\n\t\torig_dv = value[-2:]\n\n\t\tnew_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(10, 1, -1))])\n\t\tnew_1dv = DV_maker(new_1dv % 11)\n\t\tvalue = value[:-2] + str(new_1dv) + value[-1]\n\t\tnew_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(11, 1, -1))])\n\t\tnew_2dv = DV_maker(new_2dv % 11)\n\t\tvalue = value[:-1] + str(new_2dv)\n\t\tif value[-2:] != orig_dv:\n\t\t\treturn False \n\n\n\t\treturn orig_value",
"def trilha_cvv1(self):\n return self._trilha_cvv1",
"def validate_CPF(value):\n\n if value in EMPTY_VALUES:\n return u''\n\n if len(value) != 11:\n raise ValidationError(cpf_error_messages['max_digits'])\n\n if not value.isdigit():\n raise ValidationError(cpf_error_messages['digits_only'])\n\n if len(set(list(value))) == 1:\n raise ValidationError(cpf_error_messages['invalid'])\n\n orig_dv = value[-2:]\n new_1dv = sum([i * int(value[idx]) for idx, i\n in enumerate(range(10, 1, -1))])\n\n new_1dv = dv_maker(new_1dv % 11)\n value = value[:-2] + str(new_1dv) + value[-1]\n new_2dv = sum([i * int(value[idx]) for idx, i\n in enumerate(range(11, 1, -1))])\n\n new_2dv = dv_maker(new_2dv % 11)\n value = value[:-1] + str(new_2dv)\n\n if value[-2:] != orig_dv:\n raise ValidationError(cpf_error_messages['invalid'])\n\n return value",
"def vcrit(Te):\n vcrit = 3.0*np.sqrt(np.pi)/4.*(2.*eV2J/me)**(1.5)*(me/mi)*np.sqrt(Te**3.)\n return vcrit"
] | [
"0.59738284",
"0.5607687",
"0.55734557",
"0.55584395",
"0.5479949",
"0.53915036",
"0.537697",
"0.53422475",
"0.5305803",
"0.52978706",
"0.52827394",
"0.52690667",
"0.52661955",
"0.52412796",
"0.5229631",
"0.521806",
"0.51853895",
"0.5098367",
"0.5095909",
"0.50661993",
"0.50566435",
"0.50505733",
"0.50434345",
"0.5038577",
"0.5032154",
"0.5031765",
"0.50172436",
"0.50133806",
"0.5002626",
"0.49901593"
] | 0.80665404 | 0 |
Construct a JSON reference resolver. The resolved specs are in the `specs` member after a call to `resolve_references` has been made. If a URL is given, it is used as a base for calculating the absolute URL of relative file references. | def __init__(self, specs, url = None, **options):
import copy
self.specs = copy.deepcopy(specs)
self.url = url
self.__reclimit = options.get('recursion_limit', 1)
self.__reclimit_handler = options.get('recursion_limit_handler',
default_reclimit_handler)
self.__reference_cache = options.get('reference_cache', {})
if self.url:
self.parsed_url = _url.absurl(self.url)
self._url_key = _url.urlresource(self.parsed_url)
# If we have a url, we want to add ourselves to the reference cache
# - that creates a reference loop, but prevents child resolvers from
# creating a new resolver for this url.
if self.specs:
self.__reference_cache[self._url_key] = self.specs
else:
self.parsed_url = self._url_key = None
self.__resolve_types = options.get('resolve_types', RESOLVE_ALL)
self.__encoding = options.get('encoding', None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve_references(self):\n self.specs = self._resolve_partial(self.parsed_url, self.specs, ())",
"def parse_resolve(cls, url):\n loc = cls.parse(url)\n if loc.path and loc.path != '/':\n # If true ref name contains slash, a prefix of path might be a suffix of\n # ref. Try to resolve it.\n ref_prefix = None\n if loc.treeish.startswith('refs/'):\n ref_prefix = loc.treeish + '/'\n refs = get_refs(loc.hostname, loc.project, ref_prefix)\n if not refs:\n raise TreeishResolutionError('could not resolve treeish in %s' % url)\n\n treeishes = set(refs.keys())\n # Add branches and tags without a prefix.\n for ref in refs:\n for prefix in ('refs/tags/', 'refs/heads/'):\n if ref.startswith(prefix):\n treeishes.add(ref[len(prefix):])\n break\n loc = cls.parse(url, treeishes=treeishes)\n return loc",
"def parse(self): # noqa: F811\n # If we have a file name, we need to read that in.\n if self.url and self.url != _PLACEHOLDER_URL:\n from .util.url import fetch_url\n encoding = self.options.get('encoding', None)\n self.specification = fetch_url(self.url, encoding = encoding)\n\n # If we have a spec string, try to parse it.\n if self._spec_string:\n from .util.formats import parse_spec\n self.specification = parse_spec(self._spec_string, self.url)\n\n # Perform some sanitization in lenient mode.\n if not self.options.get('strict', True):\n from .util import stringify_keys\n self.specification = stringify_keys(self.specification)\n\n # If we have a parsed spec, convert it to JSON. Then we can validate\n # the JSON. At this point, we *require* a parsed specification to exist,\n # so we might as well assert.\n assert self.specification, 'No specification parsed, cannot validate!'\n\n self._validate()",
"def new_ref(s, url, start_pt=None, path=None ):\n return Ref( url, start_pt, path, store=s )",
"def _dereference(self, ref_url, obj_path, recursions):\n # In order to start dereferencing anything in the referenced URL, we have\n # to read and parse it, of course.\n contents = _url.fetch_url(ref_url, self.__reference_cache, self.__encoding)\n\n # In this inner parser's specification, we can now look for the referenced\n # object.\n value = contents\n if len(obj_path) != 0:\n from prance.util.path import path_get\n try:\n value = path_get(value, obj_path)\n except (KeyError, IndexError, TypeError) as ex:\n raise _url.ResolutionError('Cannot resolve reference \"%s\": %s'\n % (ref_url.geturl(), str(ex)))\n\n # Deep copy value; we don't want to create recursive structures\n import copy\n value = copy.deepcopy(value)\n\n # Now resolve partial specs\n value = self._resolve_partial(ref_url, value, recursions)\n\n # That's it!\n return value",
"def resolver():\n if RESOLVER:\n return RESOLVER\n path = str(pathlib.Path(__file__).parents[1].joinpath(\"schema\", \"app.json\"))\n with open(path) as stream:\n schema = json.load(stream)\n globals()[\"RESOLVER\"] = RefResolver(\n \"https://schema.timeflux.io/app.json\", None\n ).from_schema(schema)\n return RESOLVER",
"def resolving(self, ref):\r\n\r\n full_uri = urljoin(self.resolution_scope, ref)\r\n uri, fragment = urldefrag(full_uri)\r\n if not uri:\r\n uri = self.base_uri\r\n\r\n if uri in self.store:\r\n document = self.store[uri]\r\n else:\r\n try:\r\n document = self.resolve_remote(uri)\r\n except Exception as exc:\r\n raise RefResolutionError(exc)\r\n\r\n old_base_uri, self.base_uri = self.base_uri, uri\r\n try:\r\n with self.in_scope(uri):\r\n yield self.resolve_fragment(document, fragment)\r\n finally:\r\n self.base_uri = old_base_uri",
"def __init__(\n self,\n parsed_url: ParsedUrl,\n output_dir: str,\n git_ref: Optional[str] = None,\n spec: Optional[str] = None,\n ):\n super().__init__(parsed_url, output_dir, spec)\n self._git_ref = git_ref",
"def resolve_uri(base_uri, ref_uri, strict=True):\n if ref_uri is None:\n return None\n base_scheme, base_auth, base_path, base_query, base_fragment = parse_uri(base_uri)\n ref_scheme, ref_auth, ref_path, ref_query, ref_fragment = parse_uri(ref_uri)\n if not strict and ref_scheme == base_scheme:\n reference_scheme = None\n else:\n reference_scheme = ref_scheme\n if reference_scheme is not None:\n target_scheme = reference_scheme\n target_auth = ref_auth\n target_path = remove_dot_segments(ref_path)\n target_query = ref_query\n else:\n if ref_auth is not None:\n target_auth = ref_auth\n target_path = remove_dot_segments(ref_path)\n target_query = ref_query\n else:\n if not ref_path:\n target_path = base_path\n if ref_query is not None:\n target_query = ref_query\n else:\n target_query = base_query\n else:\n if ref_path.startswith(b\"/\"):\n target_path = remove_dot_segments(ref_path)\n else:\n target_path = merge_paths(base_auth, base_path, ref_path)\n target_path = remove_dot_segments(target_path)\n target_query = ref_query\n target_auth = base_auth\n target_scheme = base_scheme\n target_fragment = ref_fragment\n return build_uri(scheme=target_scheme, authority=target_auth, path=target_path,\n query=target_query, fragment=target_fragment)",
"def resolve_xref(self, env, fromdocname, builder, typ, target,\n node, contnode):\n if node.get('json:name'):\n objdef = self.get_object(node['json:name'])\n if objdef:\n return node_utils.make_refnode(builder, fromdocname,\n objdef.docname, objdef.key,\n contnode)\n\n if typ in self.REF_TYPES:\n try:\n ref = nodes.reference(internal=False)\n ref['refuri'], ref['reftitle'] = self.REF_TYPES[target]\n ref.append(contnode)\n return ref\n except KeyError:\n pass",
"def _load_ref(self, ref_uri, session):\n if self.verbose:\n print(\"Resolving $ref URI {}\".format(ref_uri), file=sys.stderr)\n parsed_ref_uri = self._parse_ref_uri(ref_uri)\n ref_file = parsed_ref_uri.netloc + parsed_ref_uri.path\n if not ref_file: # must be relative to current doc\n pass # is already in cache\n elif ref_file not in self.cache:\n self.cache[ref_uri] = self._load_for_cache(parsed_ref_uri, session)\n ref_json = self.cache[ref_uri]\n expr = jsonpath_rw.parse(\"$\" + \".\".join(parsed_ref_uri.fragment.split(\"/\")))\n for match in expr.find(ref_json):\n return match.value # return first match only\n # If we reach here, resolution failed\n raise RefResolutionException('Could not resolve reference URI \"{}\"'.format(ref_uri))",
"def __init__(self, url = None, spec_string = None, lazy = False, **kwargs):\n assert url or spec_string and not (url and spec_string), \\\n 'You must provide either a URL to read, or a spec string to '\\\n 'parse, but not both!'\n\n # Keep the parameters around for later use\n self.url = None\n if url:\n from .util.url import absurl\n from .util.fs import abspath\n import os\n self.url = absurl(url, abspath(os.getcwd()))\n else:\n self.url = _PLACEHOLDER_URL\n\n self._spec_string = spec_string\n\n # Initialize variables we're filling later\n self.specification = None\n self.version = None\n self.version_name = None\n self.version_parsed = ()\n self.valid = False\n\n # Add kw args as options\n self.options = kwargs\n\n # Verify backend\n from .util import default_validation_backend\n self.backend = self.options.get('backend', default_validation_backend())\n if self.backend not in BaseParser.BACKENDS.keys():\n raise ValueError('Backend may only be one of %s!'\n % (BaseParser.BACKENDS.keys(), ))\n\n # Start parsing if lazy mode is not requested.\n if not lazy:\n self.parse()",
"def resolve_references(path, schema):\n if isinstance(schema, dict):\n # do $ref first\n if '$ref' in schema:\n # Pull the referenced filepath from the schema\n referenced_file = schema['$ref']\n\n # Referenced filepaths are relative, so take the current path's\n # directory and append the relative, referenced path to it.\n inner_path = os.path.join(os.path.dirname(path), referenced_file)\n\n # Then convert the path (which may contiain '../') into a\n # normalised, absolute path\n inner_path = os.path.abspath(inner_path)\n\n # Load the referenced file\n ref = load_file(\"file://\" + inner_path)\n\n # Check that the references in *this* file are valid\n result = resolve_references(inner_path, ref)\n\n # They were valid, and so were the sub-references. Delete\n # the reference here to ensure we don't pass over it again\n # when checking other files\n del schema['$ref']\n else:\n result = {}\n\n for key, value in schema.items():\n result[key] = resolve_references(path, value)\n return result\n elif isinstance(schema, list):\n return [resolve_references(path, value) for value in schema]\n else:\n return schema",
"def lookup(self, url):\n return {'url': url}",
"def resolve_reference(self, key, filetype=None):\n return self.__resolve_reference(\n key,\n self.get_metainfo().get(key), # TODO: use Java method with metaKey instead of getting metainfo here\n checked_filetype(filetype)\n )",
"def load_resolved_schema(spec_path, file_name=None, schema_obj=None, path_prefix=True):\r\n\r\n # Only one of file_name or schema_obj must be set\r\n assert bool(file_name) != bool(schema_obj)\r\n\r\n if path_prefix:\r\n spec_path = os.path.join(spec_path, \"APIs/schemas/\")\r\n base_path = os.path.abspath(spec_path)\r\n if not base_path.endswith(\"/\"):\r\n base_path = base_path + \"/\"\r\n if os.name == \"nt\":\r\n base_uri_path = \"file:///\" + base_path.replace('\\\\', '/')\r\n else:\r\n base_uri_path = \"file://\" + base_path\r\n\r\n loader = jsonref.JsonLoader(cache_results=False)\r\n\r\n if file_name:\r\n json_file = str(Path(base_path) / file_name)\r\n with open(json_file, \"r\") as f:\r\n schema = jsonref.load(f, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n elif schema_obj:\r\n # Work around an exception when there's nothing to resolve using an object\r\n if \"$ref\" in schema_obj:\r\n schema = jsonref.JsonRef.replace_refs(schema_obj, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n else:\r\n schema = schema_obj\r\n\r\n return schema",
"def _ParseAllDeps(self, solution_urls, solution_deps_content):\n deps = {}\n for solution in self.GetVar(\"solutions\"):\n custom_vars = solution.get(\"custom_vars\", {})\n solution_deps = self._ParseSolutionDeps(\n solution[\"name\"],\n solution_deps_content[solution[\"name\"]],\n custom_vars)\n\n # If a line is in custom_deps, but not in the solution, we want to append\n # this line to the solution.\n if \"custom_deps\" in solution:\n for d in solution[\"custom_deps\"]:\n if d not in solution_deps:\n solution_deps[d] = solution[\"custom_deps\"][d]\n\n for d in solution_deps:\n if \"custom_deps\" in solution and d in solution[\"custom_deps\"]:\n # Dependency is overriden.\n url = solution[\"custom_deps\"][d]\n if url is None:\n continue\n else:\n url = solution_deps[d]\n # if we have a From reference dependent on another solution, then\n # just skip the From reference. When we pull deps for the solution,\n # we will take care of this dependency.\n #\n # If multiple solutions all have the same From reference, then we\n # should only add one to our list of dependencies.\n if type(url) != str:\n if url.module_name in solution_urls:\n # Already parsed.\n continue\n if d in deps and type(deps[d]) != str:\n if url.module_name == deps[d].module_name:\n continue\n else:\n parsed_url = urlparse.urlparse(url)\n scheme = parsed_url[0]\n if not scheme:\n # A relative url. Fetch the real base.\n path = parsed_url[2]\n if path[0] != \"/\":\n raise gclient_utils.Error(\n \"relative DEPS entry \\\"%s\\\" must begin with a slash\" % d)\n # Create a scm just to query the full url.\n scm = gclient_scm.CreateSCM(solution[\"url\"], self._root_dir,\n None)\n url = scm.FullUrlForRelativeUrl(url)\n if d in deps and deps[d] != url:\n raise gclient_utils.Error(\n \"Solutions have conflicting versions of dependency \\\"%s\\\"\" % d)\n if d in solution_urls and solution_urls[d] != url:\n raise gclient_utils.Error(\n \"Dependency \\\"%s\\\" conflicts with specified solution\" % d)\n # Grab the dependency.\n deps[d] = url\n return deps",
"def _load_ref(self, doc_uri, doc, ref_uri):\n if self.verbose:\n print('Resolving $ref URI {}'.format(ref_uri), file=sys.stderr)\n parsed_ref_uri = self._parse_ref_uri(ref_uri)\n ref_file = parsed_ref_uri.netloc + parsed_ref_uri.path\n if not ref_file: # must be relative to current doc\n pass # is already in cache\n elif ref_file not in self.cache:\n self.cache[ref_uri] = self._load_for_cache(\n doc_uri, doc, parsed_ref_uri)\n ref_json = self.cache[ref_uri]\n expr = jsonpath_rw.parse(\n '$' + '.'.join(parsed_ref_uri.fragment.split('/')))\n for match in expr.find(ref_json):\n return match.value # return first match only\n # If we reach here, resolution failed\n raise RefResolutionException(\n 'Could not resolve reference URI \"{}\"'.format(ref_uri))",
"def test_references_url() -> None:\n\n soup = generate_case(\"references_url\")\n\n tests.html_schema_doc_asserts.assert_property_names(soup, [\"firstName\"])\n tests.html_schema_doc_asserts.assert_descriptions(soup, [\"Testing $ref with URL\", \"The person's first name.\"])\n tests.html_schema_doc_asserts.assert_types(soup, [\"object\", \"string\"])",
"def _resolve_dict_entry(self, base_obj, obj, session):\n # Interpret '$ref' key if present in obj, continue to resolve\n # recursively if necessary.\n if base_obj:\n objs = [self._resolve_dict_entry(self.dict_class(), base_obj, session), obj]\n else:\n objs = [obj]\n imported = set()\n while \"$ref\" in objs[-1]:\n last = objs[-1]\n if last[\"$ref\"] in imported:\n raise RefResolutionException(\n \"Detected recursion when including {}\".format(last[\"$ref\"])\n )\n imported.add(last[\"$ref\"])\n objs.append(self._load_ref(last[\"$ref\"], session))\n # Merge objs, values on the left have higher precedence.\n result = self.dict_class()\n for obj in reversed(objs):\n for k, v in obj.items():\n if k != \"$ref\":\n if k in result:\n result[k] = self._resolve(v, result[k], session)\n else:\n result[k] = self._resolve(v, type(v)(), session)\n return result",
"def load_referenced_json_config(filepath):\n import jsonref\n from copy import deepcopy\n with open(filepath, \"r\") as f:\n config = jsonref.load(f)\n\n # this dereferences the dictionary produced by jsonref.load. See here https://github.com/gazpachoking/jsonref/issues/9\n config = deepcopy(config)\n return config",
"def _resolve_dict_entry(self, doc_uri, main_doc, obj):\n # Interpret '$ref' key if present in obj\n if '$ref' in obj:\n result = self._load_ref(doc_uri, main_doc, obj['$ref'])\n else:\n result = self.dict_class()\n # Merge values from obj with result\n for k, v in obj.items():\n if k != '$ref':\n result[k] = self._resolve(doc_uri, main_doc, v)\n return result",
"def json_load(self, json):\n #print('json to load ' + str(json))\n for key in self.__dict__.keys():\n if key in json:\n if isinstance(getattr(self, key), BaseResource):\n getattr(self, key).json_load(json[key])\n elif is_reference_type(json[key]):\n ref = ReferenceType()\n ref.json_load(json[key])\n setattr(self, key, ref)\n else:\n setattr(self, key, json[key])",
"def JSONReference(self, default=None):\n return self.data.get('$ref', default)",
"def schema_resolve_refs(schema, ref_resolver=None, root=None):\n # FIXME more stable implementation that only attempts to resolve {\"$ref\"} objects where they are allowed.\n if isinstance(schema, dict):\n if len(schema) == 1 and \"$ref\" in schema and isinstance(schema[\"$ref\"], six.string_types):\n reference = schema[\"$ref\"]\n if reference.startswith(\"#\"):\n # TODO should also resolve any paths within the reference, which would need to be deferred.\n return root\n return ref_resolver(reference)\n\n resolved = {}\n for k, v in schema.items():\n resolved[k] = schema_resolve_refs(v,\n ref_resolver=ref_resolver,\n root=root if root is not None else resolved)\n return resolved\n if isinstance(schema, (list, tuple)):\n return [schema_resolve_refs(v, ref_resolver=ref_resolver, root=root) for v in schema]\n return schema",
"def resolve_schema_references(self, definition):\n # type: (Generator, Dict) -> None\n if \"$ref\" in definition:\n schema_reference = definition.pop(\"$ref\")\n section, name = schema_reference.split(\"/\")[-2:]\n referenced_definition = self.parser.specification[section][name]\n definition.update(referenced_definition)\n\n for value in definition.values():\n if isinstance(value, dict):\n self.resolve_schema_references(value)",
"def jsonresolver_loader(url_map):\n url_map.add(Rule(\n \"/api/taxonomies/<string:code>/<path:slug>\",\n endpoint=get_taxonomy_term,\n host=current_app.config.get('SERVER_NAME')\n ))",
"def make_url_reference(self, uri):\n date = helpers.today_as_WbTime()\n ref = WD.Reference(\n source_test=self.wd.make_simple_claim(u'P854', uri),\n source_notest=self.wd.make_simple_claim(u'P813', date))\n return ref",
"def parse():\n with urlopen(MDN_SITEMAP) as f:\n xml = ElementTree.parse(f)\n refs = defaultdict(dict)\n for loc in xml.iterfind('{{{ns}}}url/{{{ns}}}loc'.format(ns=SITEMAP_NS)):\n url = loc.text\n if 'JavaScript/Reference/Global_Objects/' not in url:\n continue\n url_suffix = url[81:]\n parts = url_suffix.split('/')\n if len(parts) == 1:\n name = parts[0]\n if name[0].isupper():\n ref_type = 'class'\n else:\n ref_type = 'data'\n elif len(parts) == 2:\n cls, attr = parts\n with urlopen('{url}$json'.format(url=url)) as f:\n metadata = json.loads(f.read().decode('utf-8'))\n name = '{0}.{1}'.format(cls, attr)\n if 'Method' in metadata['tags']:\n ref_type = 'function'\n elif 'Property' in metadata['tags']:\n ref_type = 'attribute'\n else:\n fmt = 'Unknown ref_type for {0}. Tags: {1}'\n log.warning(fmt.format(url, ', '.join(metadata['tags'])))\n continue\n else:\n log.warning('Skipping URL (too many parts): {0}'.format(url))\n continue\n refs[ref_type][name] = url_suffix\n return dict(refs)",
"def resolve_fragment(self, document, fragment):\r\n\r\n fragment = fragment.lstrip(u\"/\")\r\n parts = unquote(fragment).split(u\"/\") if fragment else []\r\n\r\n for part in parts:\r\n part = part.replace(u\"~1\", u\"/\").replace(u\"~0\", u\"~\")\r\n\r\n if isinstance(document, Sequence):\r\n # Array indexes should be turned into integers\r\n try:\r\n part = int(part)\r\n except ValueError:\r\n pass\r\n try:\r\n document = document[part]\r\n except (TypeError, LookupError):\r\n raise RefResolutionError(\r\n \"Unresolvable JSON pointer: %r\" % fragment\r\n )\r\n\r\n return document"
] | [
"0.65303266",
"0.56752896",
"0.5565178",
"0.5432406",
"0.5358694",
"0.5330516",
"0.53255826",
"0.5246696",
"0.5213265",
"0.51325446",
"0.5081862",
"0.50505877",
"0.5048943",
"0.49645126",
"0.4942218",
"0.49079823",
"0.48965055",
"0.4876572",
"0.48686698",
"0.48592854",
"0.48441303",
"0.4834116",
"0.47945544",
"0.47881305",
"0.47868094",
"0.47769564",
"0.4771068",
"0.47339898",
"0.47143188",
"0.4692377"
] | 0.6960167 | 0 |
Resolve JSON pointers/references in the spec. | def resolve_references(self):
self.specs = self._resolve_partial(self.parsed_url, self.specs, ()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve_schema_references(self, definition):\n # type: (Generator, Dict) -> None\n if \"$ref\" in definition:\n schema_reference = definition.pop(\"$ref\")\n section, name = schema_reference.split(\"/\")[-2:]\n referenced_definition = self.parser.specification[section][name]\n definition.update(referenced_definition)\n\n for value in definition.values():\n if isinstance(value, dict):\n self.resolve_schema_references(value)",
"def resolve(self, spec: \"ModelSpec\"):",
"def _dereference(self, ref_url, obj_path, recursions):\n # In order to start dereferencing anything in the referenced URL, we have\n # to read and parse it, of course.\n contents = _url.fetch_url(ref_url, self.__reference_cache, self.__encoding)\n\n # In this inner parser's specification, we can now look for the referenced\n # object.\n value = contents\n if len(obj_path) != 0:\n from prance.util.path import path_get\n try:\n value = path_get(value, obj_path)\n except (KeyError, IndexError, TypeError) as ex:\n raise _url.ResolutionError('Cannot resolve reference \"%s\": %s'\n % (ref_url.geturl(), str(ex)))\n\n # Deep copy value; we don't want to create recursive structures\n import copy\n value = copy.deepcopy(value)\n\n # Now resolve partial specs\n value = self._resolve_partial(ref_url, value, recursions)\n\n # That's it!\n return value",
"def resolve_fragment(self, document, fragment):\r\n\r\n fragment = fragment.lstrip(u\"/\")\r\n parts = unquote(fragment).split(u\"/\") if fragment else []\r\n\r\n for part in parts:\r\n part = part.replace(u\"~1\", u\"/\").replace(u\"~0\", u\"~\")\r\n\r\n if isinstance(document, Sequence):\r\n # Array indexes should be turned into integers\r\n try:\r\n part = int(part)\r\n except ValueError:\r\n pass\r\n try:\r\n document = document[part]\r\n except (TypeError, LookupError):\r\n raise RefResolutionError(\r\n \"Unresolvable JSON pointer: %r\" % fragment\r\n )\r\n\r\n return document",
"def schema_resolve_refs(schema, ref_resolver=None, root=None):\n # FIXME more stable implementation that only attempts to resolve {\"$ref\"} objects where they are allowed.\n if isinstance(schema, dict):\n if len(schema) == 1 and \"$ref\" in schema and isinstance(schema[\"$ref\"], six.string_types):\n reference = schema[\"$ref\"]\n if reference.startswith(\"#\"):\n # TODO should also resolve any paths within the reference, which would need to be deferred.\n return root\n return ref_resolver(reference)\n\n resolved = {}\n for k, v in schema.items():\n resolved[k] = schema_resolve_refs(v,\n ref_resolver=ref_resolver,\n root=root if root is not None else resolved)\n return resolved\n if isinstance(schema, (list, tuple)):\n return [schema_resolve_refs(v, ref_resolver=ref_resolver, root=root) for v in schema]\n return schema",
"def _resolve_dict_entry(self, doc_uri, main_doc, obj):\n # Interpret '$ref' key if present in obj\n if '$ref' in obj:\n result = self._load_ref(doc_uri, main_doc, obj['$ref'])\n else:\n result = self.dict_class()\n # Merge values from obj with result\n for k, v in obj.items():\n if k != '$ref':\n result[k] = self._resolve(doc_uri, main_doc, v)\n return result",
"def maybe_resolve(object, resolve):\n if isinstance(object, dict) and object.get('$ref'):\n return resolve(object['$ref'])\n return object",
"def resolve_references(path, schema):\n if isinstance(schema, dict):\n # do $ref first\n if '$ref' in schema:\n # Pull the referenced filepath from the schema\n referenced_file = schema['$ref']\n\n # Referenced filepaths are relative, so take the current path's\n # directory and append the relative, referenced path to it.\n inner_path = os.path.join(os.path.dirname(path), referenced_file)\n\n # Then convert the path (which may contiain '../') into a\n # normalised, absolute path\n inner_path = os.path.abspath(inner_path)\n\n # Load the referenced file\n ref = load_file(\"file://\" + inner_path)\n\n # Check that the references in *this* file are valid\n result = resolve_references(inner_path, ref)\n\n # They were valid, and so were the sub-references. Delete\n # the reference here to ensure we don't pass over it again\n # when checking other files\n del schema['$ref']\n else:\n result = {}\n\n for key, value in schema.items():\n result[key] = resolve_references(path, value)\n return result\n elif isinstance(schema, list):\n return [resolve_references(path, value) for value in schema]\n else:\n return schema",
"def resolve_nested_schema(self, schema):\n try:\n schema_instance = resolve_schema_instance(schema)\n # If schema is a string and is not found in registry,\n # assume it is a schema reference\n except marshmallow.exceptions.RegistryError:\n return schema\n schema_key = make_schema_key(schema_instance)\n if schema_key not in self.refs:\n name = self.schema_name_resolver(schema)\n if not name:\n try:\n json_schema = self.schema2jsonschema(schema_instance)\n except RuntimeError:\n raise APISpecError(\n \"Name resolver returned None for schema {schema} which is \"\n \"part of a chain of circular referencing schemas. Please\"\n \" ensure that the schema_name_resolver passed to\"\n \" MarshmallowPlugin returns a string for all circular\"\n \" referencing schemas.\".format(schema=schema)\n )\n if getattr(schema, \"many\", False):\n return {\"type\": \"array\", \"items\": json_schema}\n return json_schema\n name = get_unique_schema_name(self.spec.components, name)\n self.spec.components.schema(name, schema=schema)\n return self.get_ref_dict(schema_instance)",
"def resolve(self):\n for reference in self._references:\n if reference.target is None:\n definition = self._definitions.get(reference.name)\n if definition is None:\n msg = message_factory.get_message(\n 'vapi.data.structref.structure.not.defined',\n reference.name)\n logger.debug(msg)\n raise CoreException(msg)\n reference.target = definition",
"def dereference_json_pointer(root: n.SerializableType, ptr: str) -> Dict[str, Any]:\n cursor = root\n components = ptr.lstrip(\"#\").lstrip(\"/\").split(\"/\")\n for component in components:\n component = decode_json_pointer(component)\n if isinstance(cursor, List):\n if component == \"-\":\n # \"-\" specifies the imaginary element *after* the last element of an array.\n # We don't need to support this, so... we don't.\n raise NotImplementedError('\"-\" list components not supported')\n\n component_int = int(component)\n cursor = cursor[component_int]\n elif not isinstance(cursor, Dict):\n raise ValueError(\"Invalid entry type\")\n else:\n cursor = cursor[component]\n\n assert isinstance(cursor, Dict)\n return cursor",
"def JSONReference(self, default=None):\n return self.data.get('$ref', default)",
"def check_references(swagger: Dict):\n events = set()\n\n ref_jspath = JSPATH_REFERENCES\n\n for _, reference, path in get_elements(swagger, ref_jspath):\n # handle only local references\n if reference.startswith(\"#/\"):\n # decompose reference (error if not possible)\n try:\n rt, obj = reference[2:].split(\"/\")\n except ValueError:\n events.add(\n ReferenceInvalidSyntax(\n path=path, reason=f\"reference {reference} not of the form '#/section/item'\"\n )\n )\n continue\n\n if rt not in REFERENCE_SECTIONS:\n events.add(\n ReferenceInvalidSection(\n path=path,\n reason=f\"Reference {reference} not referring to one of the sections {REFERENCE_SECTIONS}\",\n )\n )\n\n # resolve reference (error if not possible)\n try:\n swagger[rt][obj]\n except KeyError:\n events.add(\n ReferenceNotFoundValidationError(\n path=path, reason=f\"reference '#/{rt}/{obj}' does not exist\"\n )\n )\n\n return events",
"def _resolve(self):\n pass",
"def resolve_xref(self, env, fromdocname, builder, typ, target,\n node, contnode):\n if node.get('json:name'):\n objdef = self.get_object(node['json:name'])\n if objdef:\n return node_utils.make_refnode(builder, fromdocname,\n objdef.docname, objdef.key,\n contnode)\n\n if typ in self.REF_TYPES:\n try:\n ref = nodes.reference(internal=False)\n ref['refuri'], ref['reftitle'] = self.REF_TYPES[target]\n ref.append(contnode)\n return ref\n except KeyError:\n pass",
"def json_load(self, json):\n #print('json to load ' + str(json))\n for key in self.__dict__.keys():\n if key in json:\n if isinstance(getattr(self, key), BaseResource):\n getattr(self, key).json_load(json[key])\n elif is_reference_type(json[key]):\n ref = ReferenceType()\n ref.json_load(json[key])\n setattr(self, key, ref)\n else:\n setattr(self, key, json[key])",
"def test_get_json_spec(self):\n pass",
"def test_read_json_schema():\n json_schema = os.path.join(TEST_DATA_PATH, 'example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)",
"def test_references() -> None:\n soup = generate_case(\"references\")\n\n tests.html_schema_doc_asserts.assert_property_names(\n soup,\n [\n \"a_gift\",\n \"file_prefix\",\n \"anchor_with_slash\",\n \"propertyA\",\n \"anchor_no_slash\",\n \"anchor_nested_reference\",\n \"same_file_anchor_with_slash\",\n \"same_file_anchor_no_slash\",\n \"same_file_nested_reference\",\n \"other_file_anchor\",\n \"with_wrap\",\n \"other_file_dot_anchor\",\n \"other_file_dot_dot_anchor\",\n \"other_file_only\",\n \"not_a_string\",\n \"multi_hierarchy_reference\",\n \"propertyA\",\n ],\n )\n tests.html_schema_doc_asserts.assert_descriptions(\n soup,\n [\n \"Testing $ref\",\n \"A gift, or is it?\",\n \"A gift, or is it?\",\n \"Description for object_def/items/propertyA\",\n \"Description for array_def\",\n \"Description for string_def\",\n \"The delivery is a gift, no prices displayed\",\n \"The delivery is a gift, no prices displayed\",\n \"The delivery is a gift, no prices displayed\",\n \"Test schema with a not\",\n \"Contents of propertyA in final.json\",\n ],\n )\n tests.html_schema_doc_asserts.assert_types(\n soup,\n [\n \"object\", # root\n \"string\", # a_gift\n \"string\", # file_prefix\n \"object\", # anchor_with_slash\n \"string\", # anchor_with_slash -> propertyA\n \"array of string\", # anchor_no_slash\n \"string\", # anchor_no_slash items\n \"string\", # anchor_nested_reference\n \"string\", # same_file_anchor_with_slash\n \"object\", # same_file_anchor_no_slash\n \"string\", # same_file_nested_reference\n \"object\", # other_file_anchor\n \"boolean\", # other_file_anchor -> with_wrap\n \"object\", # other_file_dot_anchor\n \"object\", # other_file_dot_dot_anchor\n \"object\", # other_file_only\n \"string\", # not_a_string, not\n \"object\", # multi_hierarchy_reference\n \"string\", # multi_hierarchy_reference -> propertyA\n ],\n )",
"def resolve(self):\n pass # pragma: no cover",
"def _get_json_fields_translate_references(self, xblock, old_course_id, published):\r\n def get_translation(location):\r\n \"\"\"\r\n Convert the location and add to loc mapper\r\n \"\"\"\r\n return self.loc_mapper.translate_location(location, published, add_entry_if_missing=True)\r\n\r\n result = {}\r\n for field_name, field in xblock.fields.iteritems():\r\n if field.is_set_on(xblock):\r\n field_value = getattr(xblock, field_name)\r\n if isinstance(field, Reference) and field_value is not None:\r\n result[field_name] = get_translation(field_value)\r\n elif isinstance(field, ReferenceList):\r\n result[field_name] = [\r\n get_translation(ele) for ele in field_value\r\n ]\r\n elif isinstance(field, ReferenceValueDict):\r\n result[field_name] = {\r\n key: get_translation(subvalue)\r\n for key, subvalue in field_value.iteritems()\r\n }\r\n else:\r\n result[field_name] = field.read_json(xblock)\r\n\r\n return result",
"def resolve_all_refs(s):\n for ref in list_of_all_unpointed_refs():\n ref.resolve()",
"def addresses_in_spec_path(self, spec_path):",
"def resolve_spec(self, spec):\n try:\n address = Address.parse(spec)\n except ValueError as e:\n raise self.InvalidAddressError(e)\n _, addressable = self.resolve(address)\n return addressable",
"def load_resolved_schema(spec_path, file_name=None, schema_obj=None, path_prefix=True):\r\n\r\n # Only one of file_name or schema_obj must be set\r\n assert bool(file_name) != bool(schema_obj)\r\n\r\n if path_prefix:\r\n spec_path = os.path.join(spec_path, \"APIs/schemas/\")\r\n base_path = os.path.abspath(spec_path)\r\n if not base_path.endswith(\"/\"):\r\n base_path = base_path + \"/\"\r\n if os.name == \"nt\":\r\n base_uri_path = \"file:///\" + base_path.replace('\\\\', '/')\r\n else:\r\n base_uri_path = \"file://\" + base_path\r\n\r\n loader = jsonref.JsonLoader(cache_results=False)\r\n\r\n if file_name:\r\n json_file = str(Path(base_path) / file_name)\r\n with open(json_file, \"r\") as f:\r\n schema = jsonref.load(f, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n elif schema_obj:\r\n # Work around an exception when there's nothing to resolve using an object\r\n if \"$ref\" in schema_obj:\r\n schema = jsonref.JsonRef.replace_refs(schema_obj, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n else:\r\n schema = schema_obj\r\n\r\n return schema",
"def _backrefs_to_schema(backrefs: process_helper.TArtifactsIter) -> types.Schema:\n return {\n types.OpenApiProperties.TYPE: \"object\",\n types.ExtensionProperties.BACKREFS: {\n property_name: schema for _, property_name, schema in backrefs\n },\n }",
"def test_remote_ref(tmp_path, _clean_remote_schemas_store):\n # Create file\n directory = tmp_path / \"base\"\n directory.mkdir()\n schemas_file = directory / \"original.json\"\n remote_schemas_file = directory / \"remote.json\"\n remote_schemas_file.write_text('{\"Table\": {\"key\": \"value\"}}')\n # Set up remote schemas store\n ref.set_context(path=str(schemas_file))\n schemas = {\"RefTable\": {\"$ref\": \"remote.json#/Table\"}}\n model_factory = mock.MagicMock()\n\n define_all.define_all(model_factory=model_factory, schemas=schemas)",
"def handle_ref(self, data, **kwargs):\n\n # Backward compatibility: branch and ref were both used. Let's keep branch as the exposed field\n # even if interally it gets converted to \"ref\" later.\n if data.get(\"ref\"):\n data[\"branch\"] = data[\"ref\"]\n del data[\"ref\"]\n\n return data",
"def resolve():\n while _TO_RESOLVE:\n obj = _TO_RESOLVE.pop()\n annotations(obj)",
"def test_invoke_resolves_jsonref(processor_args, resolved):\n\n testapp = holocron.Application(\n {\"extra\": [{\"luke\": \"skywalker\"}], \"is_yoda_master\": True}\n )\n\n def processor(app, items, **args):\n assert args == resolved\n yield from items\n\n testapp.add_processor(\"processor\", processor)\n testapp.add_pipe(\"test\", [{\"name\": \"processor\", \"args\": processor_args}])\n\n with pytest.raises(StopIteration):\n next(testapp.invoke(\"test\"))"
] | [
"0.63346666",
"0.61350095",
"0.61242133",
"0.60123867",
"0.5895384",
"0.58681685",
"0.5828402",
"0.56767946",
"0.56145304",
"0.5577053",
"0.5550483",
"0.54556185",
"0.54530203",
"0.54383546",
"0.54287094",
"0.5406919",
"0.5402807",
"0.53680676",
"0.53565043",
"0.53061473",
"0.5266063",
"0.52417517",
"0.5230754",
"0.51994413",
"0.51918983",
"0.5191425",
"0.5177895",
"0.5170725",
"0.51675737",
"0.51416147"
] | 0.68938255 | 0 |
Iterate over a partial spec, dereferencing all references within. Yields the resolved path and value of all items that need substituting. | def _dereferencing_iterator(self, base_url, partial, path, recursions):
from .iterators import reference_iterator
for _, refstring, item_path in reference_iterator(partial):
# Split the reference string into parsed URL and object path
ref_url, obj_path = _url.split_url_reference(base_url, refstring)
if self._skip_reference(ref_url):
continue
# The reference path is the url resource and object path
ref_path = (_url.urlresource(ref_url), tuple(obj_path))
# Count how often the reference path has been recursed into.
from collections import Counter
rec_counter = Counter(recursions)
next_recursions = recursions + (ref_path,)
if rec_counter[ref_path] >= self.__reclimit:
# The referenced value may be produced by the handler, or the handler
# may raise, etc.
ref_value = self.__reclimit_handler(self.__reclimit, ref_url,
next_recursions)
else:
# The referenced value is to be used, but let's copy it to avoid
# building recursive structures.
ref_value = self._dereference(ref_url, obj_path, next_recursions)
# Full item path
full_path = path + item_path
# First yield parent
yield full_path, ref_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _resolve_partial(self, base_url, partial, recursions):\n # Gather changes from the dereferencing iterator - we need to set new\n # values from the outside in, so we have to post-process this a little,\n # sorting paths by path length.\n changes = dict(tuple(self._dereferencing_iterator(base_url, partial, (),\n recursions)))\n\n paths = sorted(changes.keys(), key = len)\n\n # With the paths sorted, set them to the resolved values.\n from prance.util.path import path_set\n for path in paths:\n value = changes[path]\n if len(path) == 0:\n partial = value\n else:\n path_set(partial, list(path), value, create = True)\n\n return partial",
"def resolve_references(self):\n self.specs = self._resolve_partial(self.parsed_url, self.specs, ())",
"def itervaluerefs(self):\n for value in self.itervalues():\n yield ref(value)",
"def __iter__(self) -> Iterator[str]:\n for fixup in self._fixup.values():\n yield fixup.var",
"def __iter__(self):\n element = self\n\n while element.HasField(\"pathtype\"):\n yield element\n\n if element.HasField(\"nested_path\"):\n element = element.nested_path\n else:\n break",
"def __iter__(self) -> Iterator[Tuple[str, str]]:\n for fixup in self._mapping._fixup.values():\n yield fixup.var, fixup.value",
"def __iter__(self):\n for path in self._paths: yield autopaths.Path(path.complete_path)",
"def __iter__(self) -> Iterator[str]:\n for fixup in self._mapping._fixup.values():\n yield fixup.value",
"def path_it(d):\n for p in _path_walk([], d):\n yield p",
"def __iter__(self):\n\n for lit in self.fvals:\n yield lit",
"def __iter__(self):\n if self.path_is_string:\n if self.path:\n yield self.path\n else:\n for part in self.path:\n yield part",
"def __iter__(self):\n for path_id in self._path_ids:\n yield (path_id, getattr(self, path_id))",
"def py__iter__(self, contextualized_node=None):\n for node in self.get_tree_entries():\n if node == ':' or node.type == 'subscript':\n # TODO this should probably use at least part of the code\n # of infer_subscript_list.\n yield LazyKnownValue(Slice(self._defining_context, None, None, None))\n else:\n yield LazyTreeValue(self._defining_context, node)\n for addition in check_array_additions(self._defining_context, self):\n yield addition",
"def __iter__(self):\n for i in self.ref:\n yield PythonBytecodeInPreproc(i)",
"def _resolve_paths(d, path):\n try:\n if len(path) == 0:\n yield (), d\n elif len(path) == 1:\n yield (path[0],), d[path[0]]\n else:\n if path[0] == '*':\n keys = d.keys()\n else:\n keys = [path[0]]\n for key in keys:\n for p, v in CombinatorialTree._resolve_paths(d[key], path[1:]):\n if v is not None:\n yield (key,) + p, v\n except KeyError:\n yield None, None",
"def genSubstitutions(molecule, fr, to):\n for m in re.finditer(fr, molecule):\n yield molecule[:m.start()] + to + molecule[m.end():]",
"def iter_relocations(self):\n for i in range(self.num_relocations()):\n yield self.get_relocation(i)",
"def iter_locations(self):\n for alt_loc in self.locations:\n yield self.locations[alt_loc]",
"def resolve_all(self,**bindings):\n for solution in resolve(self.expressions, bindings, namespace=self.namespace):\n yield solution",
"def __items(self, partial_ngram):\n #An n-gram is constructed element by element and passed on to each of the child nodes.\n #When the current node is a terminating node, it will yield the complete n-gram which was passed to it by its parents paired with the value of this node.\n \n #If this is a terminating node then yield the n-gram constructed so far together with this node's value.\n if self.end_of_ngram:\n yield (partial_ngram, self.value)\n #For each next element, construct the new partial n-gram and pass it to that element's child node, yielding every n-gram/value pair it yields.\n for ele in self.children:\n for item in self.children[ele].__items(partial_ngram+(ele,)):\n yield item",
"def _expand_spec(spec, **kwargs):\n fixed_params = {}\n variable_params = {}\n for k, v in spec.items():\n if isinstance(v, list):\n variable_params[k] = v\n elif isinstance(v, dict):\n # Try handling as distribution\n res = sample_values(v)\n if res is not None:\n variable_params[k] = res\n else:\n fixed_params[k] = v\n else:\n fixed_params[k] = v\n\n params = list(ParameterGrid(variable_params))\n [p.update(fixed_params) for p in params]\n return params",
"def flat_components_map(self, include_self=False, path=None):\r\n if path is None:\r\n path = []\r\n if include_self:\r\n yield path, self\r\n for name, component in self.components_map():\r\n path2 = path + [name]\r\n if isinstance(component, Composite):\r\n for fullpath, x in component.flat_components_map(include_self, path2):\r\n yield fullpath, x\r\n else:\r\n yield path2, component",
"def specs_to_addresses(self, specs, relative_to=''):\n for spec in specs:\n yield self.spec_to_address(spec, relative_to=relative_to)",
"def iteratePrimSpecs(parentPrims):\n for parentPrim in parentPrims:\n for primSpec in parentPrim.nameChildren:\n for each in iteratePrimSpecs({primSpec, }):\n yield each\n yield parentPrim",
"def itervalues(self):\r\n for sleek_ref in self.data.itervalues():\r\n try:\r\n yield sleek_ref()\r\n except SleekRefDied:\r\n pass",
"def load_values(self, values: Context) -> None:\n for name, refers_to in values.items():\n self.logger.info(f\"load_values {name!r} : {refers_to!r}\")\n if not self.extended_name_path.match(name):\n raise ValueError(f\"Invalid name {name}\")\n\n context = self\n\n # Expand \"name1.name2....\": refers_to into [\"name1\", \"name2\", ...]: refers_to\n # Update NameContainer(\"name1\", NameContainer(\"name2\", NameContainer(..., refers_to)))\n *path, final = self.ident_pat.findall(name)\n for name in path:\n ref = context.setdefault(name, Referent())\n if ref.container is None:\n ref.container = NameContainer(parent=self.parent)\n context = ref.container\n context.setdefault(final, Referent()) # No annotation.\n context[final].value = refers_to",
"def resolve_schema_references(self, definition):\n # type: (Generator, Dict) -> None\n if \"$ref\" in definition:\n schema_reference = definition.pop(\"$ref\")\n section, name = schema_reference.split(\"/\")[-2:]\n referenced_definition = self.parser.specification[section][name]\n definition.update(referenced_definition)\n\n for value in definition.values():\n if isinstance(value, dict):\n self.resolve_schema_references(value)",
"def __iter__(self):\n for (_,_,path) in self.frontierpq:\n yield path",
"def _iter_variant_extracted_paths(root, path, variants):\n for variant in sorted(variants, key=len, reverse=True):\n inner_path = os.path.join(*[str(request) for request in variant])\n resolved_path = os.path.join(root, inner_path)\n\n if filer.in_directory(path, resolved_path, follow=False):\n yield path.replace(inner_path + os.sep, \"\")",
"def resolve_all(self):\r\n def resolve_variable(x, passthrough=(gof.Variable,)):\r\n if isinstance(x, passthrough):\r\n return x\r\n elif isinstance(x, _RComponent):\r\n return x.r\r\n else:\r\n raise Exception('The following thing is not of the following types', x,\r\n passthrough + (_RComponent,))\r\n # return self.resolve(x).r\r\n\r\n def resolve_inputs():\r\n if isinstance(self.inputs, (io.In, gof.Variable, basestring)):\r\n inputs = [self.inputs]\r\n else:\r\n inputs = list(self.inputs)\r\n self.inputs = [resolve_variable(input,\r\n passthrough=(gof.Variable, io.In)) for input in inputs]\r\n\r\n def resolve_outputs():\r\n if isinstance(self.outputs, (io.Out, gof.Variable, basestring, type(None))):\r\n output = self.outputs\r\n self.outputs = resolve_variable(output,\r\n passthrough=(gof.Variable, io.Out, type(None)))\r\n else:\r\n outputs = list(self.outputs)\r\n self.outputs = [resolve_variable(output,\r\n passthrough=(gof.Variable, io.Out)) for output in outputs]\r\n\r\n def resolve_updates():\r\n updates = self.updates\r\n self.updates = {}\r\n for k, v in updates.iteritems():\r\n k, v = resolve_variable(k), resolve_variable(v)\r\n self.updates[k] = v\r\n\r\n resolve_inputs()\r\n resolve_outputs()\r\n resolve_updates()"
] | [
"0.58574283",
"0.56865025",
"0.5679534",
"0.56764805",
"0.5475917",
"0.546941",
"0.54071206",
"0.5371878",
"0.53358024",
"0.53343445",
"0.5330462",
"0.52939177",
"0.52759284",
"0.5187627",
"0.5166277",
"0.51503074",
"0.5030503",
"0.50293756",
"0.4980426",
"0.49454945",
"0.49296132",
"0.49215594",
"0.491575",
"0.4904287",
"0.48961347",
"0.48204005",
"0.48174396",
"0.48137188",
"0.4788423",
"0.47784555"
] | 0.6692858 | 0 |
Dereference the URL and object path. Returns the dereferenced object. | def _dereference(self, ref_url, obj_path, recursions):
# In order to start dereferencing anything in the referenced URL, we have
# to read and parse it, of course.
contents = _url.fetch_url(ref_url, self.__reference_cache, self.__encoding)
# In this inner parser's specification, we can now look for the referenced
# object.
value = contents
if len(obj_path) != 0:
from prance.util.path import path_get
try:
value = path_get(value, obj_path)
except (KeyError, IndexError, TypeError) as ex:
raise _url.ResolutionError('Cannot resolve reference "%s": %s'
% (ref_url.geturl(), str(ex)))
# Deep copy value; we don't want to create recursive structures
import copy
value = copy.deepcopy(value)
# Now resolve partial specs
value = self._resolve_partial(ref_url, value, recursions)
# That's it!
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_object(self, path):\n if path == \"/\":\n return self.target\n\n parts = path[1:].split(\"/\")\n last = self.target\n for part in parts:\n if type(last) == dict:\n last = last[part]\n else:\n last = getattr(last, \"get_\" + part)()\n return last",
"def __get__(self, obj, objtype):\n raw_path = super(Path,self).__get__(obj,objtype)\n return self._resolve(raw_path)",
"def getObject(self):\n parent = aq_parent(self)\n obj = None\n try:\n obj = parent.unrestrictedTraverse(self.getPath()) \n except:\n log.error(\"Unable to get object from brain. Path: {0}. Catalog may be out of sync.\".format(self._result.uid))\n return obj",
"def traverse(object, path, default=None, request=None):",
"def getObject(self):\n # try getting the remote object by unique id\n remote_obj = self._getObjectByUid()\n if remote_obj is not None:\n return remote_obj\n\n utool = getUtility(IURLTool)\n return utool.getPortalObject().restrictedTraverse(self.remote_url)",
"def lookup_obj(self,):\n return self._lookup_obj",
"def get_object(uri):\n bucket_name, key = split_uri(uri)\n return get_client().get_object(bucket_name, key)",
"def get_obj_from_path(path):\n (username, namespace, reponame, objtype, objid) = pagure.utils.parse_path(\n path\n )\n session = _get_session()\n repo = pagure.lib.query.get_authorized_project(\n session, reponame, user=username, namespace=namespace\n )\n\n if repo is None:\n raise PagureEvException(\"Project '%s' not found\" % reponame)\n\n # find the appropriate object getter function from OBJECTS\n try:\n getfunc = OBJECTS[objtype]\n except KeyError:\n raise PagureEvException(\"Invalid object provided: '%s'\" % objtype)\n\n return getfunc(repo, objid)",
"def dereference(self, reference: Union[str, DIDUrl]):\n if isinstance(reference, str):\n reference = DIDUrl.parse(reference)\n\n if reference not in self._index:\n raise ResourceIDNotFound(\"ID {} not found in document\".format(reference))\n return self._index[reference]",
"def getZopeObj(self, path):\n return self.getObjByPath(path)",
"def get(obj, path):\n right = path\n cur = obj\n while right:\n left, right = partition(right)\n if isinstance(cur, dict):\n cur = cur.get(left)\n elif isinstance(cur, (list, tuple)):\n left = int(left)\n cur = cur[left] if left < len(cur) else None\n return cur",
"def get_object(selenium, obj):\n return _get_ui_service(selenium, obj).get_obj_from_info_page(obj)",
"def deref(obj):\n try:\n return obj._obj.value # byref\n except AttributeError:\n try:\n return obj.value # plain ctypes\n except AttributeError:\n return obj # plain python",
"def get_from_uri(self, url, skip_cache=False, *args, **kwargs):\n\n cleaned_url = handle_slash(url, self.model._meta['add_slash'])\n\n if skip_cache:\n cached_response = None\n else:\n cached_response = self.get_from_cache('GET', cleaned_url)\n\n if cached_response:\n response = cached_response\n else:\n response = self._request('GET', cleaned_url, *args, **kwargs)\n\n self.validate_get_response(response)\n self.handle_get_response(response)\n\n # should this be handled by handle_get_response? i think probably.\n obj = self.obj_from_response(response)\n\n obj._full_url = cleaned_url\n\n return obj",
"def get(self, obj):",
"def pathlookup(obj_or_path_tuple, depth=None, include_origin=True):",
"def get_object(self, path: str) -> Object:\n objects_found = [item for item in self._objects.values() if item.path == path]\n if len(objects_found) == 0:\n raise ClientError(\n \"ObjectNotFoundException\", f\"Object with id={path} not found\"\n )\n return objects_found[0]",
"def getObjByPath(self, path):\n return getObjByPath(self, path)",
"def get(self, path=None, ref=None):\r\n params = base.get_params(('ref', ), locals())\r\n url = self.get_url()\r\n\r\n if path:\r\n url = '{0}/{1}'.format(url, path)\r\n\r\n return http.Request('GET', url, params), parsers.parse_json",
"def getPath(obj):",
"def getDmdObj(self, path):\n if path.startswith(\"/\"): path = path[1:]\n return self.getDmd().getObjByPath(path)",
"def resolve_path(self, path):\n if path:\n if path[0] == '/':\n #zope objects case\n try: return self.unrestrictedTraverse(path)\n except: pass\n else:\n #aliss (python) objects case\n try: return self.get_aliss_object(path)\n except: pass\n #case of no path\n pass",
"def find_object_by_path(cls, object_path):\n # XXX: ideally this would be per-connection method.\n with cls._object_path_map_lock:\n return cls._object_path_to_object_map[object_path]",
"def get_object(self, oid):\n return self.request('get', safeformat('registry/objects/{:int}', oid))",
"def maybe_resolve(object, resolve):\n if isinstance(object, dict) and object.get('$ref'):\n return resolve(object['$ref'])\n return object",
"def referenced_by(self, refobj):\n try:\n ref = cmds.referenceQuery(refobj, referenceNode=True)\n return ref\n except RuntimeError as e:\n if str(e).endswith(\"' is not from a referenced file.\\n\"):\n return None\n else:\n raise e",
"def get_object(self, url_id, user_id):\n try:\n return Link.objects.get(id=url_id, user=user_id)\n except Link.DoesNotExist:\n return None",
"def uri_to_value( obj, uri ):\n key = uri.lstrip('/')\n if '/' in uri:\n key,uri = uri.split('/',1)\n else:\n uri = None\n\n if key.startswith( '_' ):\n obj = getattr( obj, key[1:] )\n else:\n obj = obj[ key ]\n\n if not uri:\n return obj\n else:\n return uri_to_value( obj, uri )",
"def _load_ref(self, ref_uri, session):\n if self.verbose:\n print(\"Resolving $ref URI {}\".format(ref_uri), file=sys.stderr)\n parsed_ref_uri = self._parse_ref_uri(ref_uri)\n ref_file = parsed_ref_uri.netloc + parsed_ref_uri.path\n if not ref_file: # must be relative to current doc\n pass # is already in cache\n elif ref_file not in self.cache:\n self.cache[ref_uri] = self._load_for_cache(parsed_ref_uri, session)\n ref_json = self.cache[ref_uri]\n expr = jsonpath_rw.parse(\"$\" + \".\".join(parsed_ref_uri.fragment.split(\"/\")))\n for match in expr.find(ref_json):\n return match.value # return first match only\n # If we reach here, resolution failed\n raise RefResolutionException('Could not resolve reference URI \"{}\"'.format(ref_uri))",
"def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')"
] | [
"0.6393271",
"0.60683155",
"0.594546",
"0.58757573",
"0.5748134",
"0.57280153",
"0.5727585",
"0.5699852",
"0.561777",
"0.5597113",
"0.55593705",
"0.55497825",
"0.55468255",
"0.5499838",
"0.5483868",
"0.5482506",
"0.5443118",
"0.5417317",
"0.5408477",
"0.5357134",
"0.531767",
"0.5239376",
"0.52299",
"0.5208951",
"0.51871985",
"0.5155152",
"0.5126127",
"0.51147676",
"0.5108363",
"0.51070285"
] | 0.78929824 | 0 |
Resolve a (partial) spec's references. | def _resolve_partial(self, base_url, partial, recursions):
# Gather changes from the dereferencing iterator - we need to set new
# values from the outside in, so we have to post-process this a little,
# sorting paths by path length.
changes = dict(tuple(self._dereferencing_iterator(base_url, partial, (),
recursions)))
paths = sorted(changes.keys(), key = len)
# With the paths sorted, set them to the resolved values.
from prance.util.path import path_set
for path in paths:
value = changes[path]
if len(path) == 0:
partial = value
else:
path_set(partial, list(path), value, create = True)
return partial | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve_references(self):\n self.specs = self._resolve_partial(self.parsed_url, self.specs, ())",
"def resolve(self, spec: \"ModelSpec\"):",
"def resolve_all_refs(s):\n for ref in list_of_all_unpointed_refs():\n ref.resolve()",
"def resolving(self, ref):\r\n\r\n full_uri = urljoin(self.resolution_scope, ref)\r\n uri, fragment = urldefrag(full_uri)\r\n if not uri:\r\n uri = self.base_uri\r\n\r\n if uri in self.store:\r\n document = self.store[uri]\r\n else:\r\n try:\r\n document = self.resolve_remote(uri)\r\n except Exception as exc:\r\n raise RefResolutionError(exc)\r\n\r\n old_base_uri, self.base_uri = self.base_uri, uri\r\n try:\r\n with self.in_scope(uri):\r\n yield self.resolve_fragment(document, fragment)\r\n finally:\r\n self.base_uri = old_base_uri",
"def schema_resolve_refs(schema, ref_resolver=None, root=None):\n # FIXME more stable implementation that only attempts to resolve {\"$ref\"} objects where they are allowed.\n if isinstance(schema, dict):\n if len(schema) == 1 and \"$ref\" in schema and isinstance(schema[\"$ref\"], six.string_types):\n reference = schema[\"$ref\"]\n if reference.startswith(\"#\"):\n # TODO should also resolve any paths within the reference, which would need to be deferred.\n return root\n return ref_resolver(reference)\n\n resolved = {}\n for k, v in schema.items():\n resolved[k] = schema_resolve_refs(v,\n ref_resolver=ref_resolver,\n root=root if root is not None else resolved)\n return resolved\n if isinstance(schema, (list, tuple)):\n return [schema_resolve_refs(v, ref_resolver=ref_resolver, root=root) for v in schema]\n return schema",
"def _dereference(self, ref_url, obj_path, recursions):\n # In order to start dereferencing anything in the referenced URL, we have\n # to read and parse it, of course.\n contents = _url.fetch_url(ref_url, self.__reference_cache, self.__encoding)\n\n # In this inner parser's specification, we can now look for the referenced\n # object.\n value = contents\n if len(obj_path) != 0:\n from prance.util.path import path_get\n try:\n value = path_get(value, obj_path)\n except (KeyError, IndexError, TypeError) as ex:\n raise _url.ResolutionError('Cannot resolve reference \"%s\": %s'\n % (ref_url.geturl(), str(ex)))\n\n # Deep copy value; we don't want to create recursive structures\n import copy\n value = copy.deepcopy(value)\n\n # Now resolve partial specs\n value = self._resolve_partial(ref_url, value, recursions)\n\n # That's it!\n return value",
"def resolve_references_as_possible(s):\n refs = []\n resolved = []\n\n # ask all graphs for REFs\n for graph in s.graphs.values():\n refs.extend( graph.list_of_all_unpointed_refs() )\n\n # try to resolve all REFs\n for ref in refs:\n if ref.try_to_point():\n resolved.append(ref)\n\n # for REFs that link up,\n for ref in resolved:\n s.resolve_single_ref( ref )",
"def resolve(self):\n for reference in self._references:\n if reference.target is None:\n definition = self._definitions.get(reference.name)\n if definition is None:\n msg = message_factory.get_message(\n 'vapi.data.structref.structure.not.defined',\n reference.name)\n logger.debug(msg)\n raise CoreException(msg)\n reference.target = definition",
"def resolve_partial_ref_prefix(self, path):\n\n project, remainingPath = self.resolve_project_prefix(path)\n if not project:\n return None, None, None\n\n refPrefix = remainingPath.as_posix() + '/'\n\n # Resolve to most recently created reference for accurate directory dates\n refs = self.cache.list_project_refs(project, self.tagRefs)\n refs = sorted(refs, key=lambda ref: -iso8601.parse_date(ref.commit['committed_date']).timestamp())\n\n for ref in refs:\n if ref.name.startswith(refPrefix):\n return project, ref, refPrefix\n\n return None, None, None",
"def resolve_spec(self, spec):\n try:\n address = Address.parse(spec)\n except ValueError as e:\n raise self.InvalidAddressError(e)\n _, addressable = self.resolve(address)\n return addressable",
"def resolve_references(path, schema):\n if isinstance(schema, dict):\n # do $ref first\n if '$ref' in schema:\n # Pull the referenced filepath from the schema\n referenced_file = schema['$ref']\n\n # Referenced filepaths are relative, so take the current path's\n # directory and append the relative, referenced path to it.\n inner_path = os.path.join(os.path.dirname(path), referenced_file)\n\n # Then convert the path (which may contiain '../') into a\n # normalised, absolute path\n inner_path = os.path.abspath(inner_path)\n\n # Load the referenced file\n ref = load_file(\"file://\" + inner_path)\n\n # Check that the references in *this* file are valid\n result = resolve_references(inner_path, ref)\n\n # They were valid, and so were the sub-references. Delete\n # the reference here to ensure we don't pass over it again\n # when checking other files\n del schema['$ref']\n else:\n result = {}\n\n for key, value in schema.items():\n result[key] = resolve_references(path, value)\n return result\n elif isinstance(schema, list):\n return [resolve_references(path, value) for value in schema]\n else:\n return schema",
"def resolve(self, spec):\r\n with ParseContext.temp():\r\n return Pants(spec).resolve()",
"def resolve_schema_references(self, definition):\n # type: (Generator, Dict) -> None\n if \"$ref\" in definition:\n schema_reference = definition.pop(\"$ref\")\n section, name = schema_reference.split(\"/\")[-2:]\n referenced_definition = self.parser.specification[section][name]\n definition.update(referenced_definition)\n\n for value in definition.values():\n if isinstance(value, dict):\n self.resolve_schema_references(value)",
"def resolve_ref(ref):\n if ref == DIRTY:\n return ref\n try:\n return git_rev_parse(ref)\n except CommandFailure:\n for remote in git_remote():\n try:\n return git_rev_parse('{remote}/{ref}'.format(**locals()))\n except CommandFailure:\n continue\n return None",
"def maybe_resolve(object, resolve):\n if isinstance(object, dict) and object.get('$ref'):\n return resolve(object['$ref'])\n return object",
"def resolve_reference(ref, rel):\n\n # Find out which module we should be looking in.\n modname = None\n relpath = None\n rel_parts = rel.split('.')\n for i in range(len(rel_parts), 0, -1):\n try_modname = '.'.join(rel_parts[:i])\n if idb.has_module(try_modname):\n modname = try_modname\n relpath = rel_parts[i:]\n break\n\n if not modname:\n return None\n\n refpath = ref.replace('::', '.').split('.')\n\n # Say `rel` is \"panda3d.core.NodePath.node\",\n # and `ref` is \"PandaNode.final\", then we will try these in this order:\n # - panda3d.core::NodePath.node.PandaNode.final\n # - panda3d.core::NodePath.PandaNode.final\n # - panda3d.core::PandaNode.final\n\n for i in range(len(relpath), -1, -1):\n search = relpath[:i] + refpath\n ifunc = idb.lookup_function(modname, search)\n if ifunc:\n # Grab the mangled function name.\n func_name = idb.get_function_name(ifunc, mangle=True)\n return ('meth', '.'.join(relpath[:i] + refpath[:-1] + [func_name]))\n\n itype = idb.lookup_type(modname, search)\n if itype:\n # Grab the original type name.\n type_name = idb.get_type_name(itype, mangle=False, scoped=True)\n return ('class', type_name)",
"def resolve_fragment(self, document, fragment):\r\n\r\n fragment = fragment.lstrip(u\"/\")\r\n parts = unquote(fragment).split(u\"/\") if fragment else []\r\n\r\n for part in parts:\r\n part = part.replace(u\"~1\", u\"/\").replace(u\"~0\", u\"~\")\r\n\r\n if isinstance(document, Sequence):\r\n # Array indexes should be turned into integers\r\n try:\r\n part = int(part)\r\n except ValueError:\r\n pass\r\n try:\r\n document = document[part]\r\n except (TypeError, LookupError):\r\n raise RefResolutionError(\r\n \"Unresolvable JSON pointer: %r\" % fragment\r\n )\r\n\r\n return document",
"def _resolve_dependency(self, chunks):\n chunks = self._concatenate_inner(chunks, True)\n chunks = self._concatenate_inner(chunks, False)\n return chunks",
"def resolve_ref(ref):\n if isinstance(ref, weakref.ref):\n ref = ref()\n return ref",
"def resolve_anyref(\n self, refdoc: str, node: pending_xref, contnode: Element,\n ) -> Element | None:\n stddomain = self.env.get_domain('std')\n target = node['reftarget']\n results: list[tuple[str, Element]] = []\n # first, try resolving as :doc:\n doc_ref = stddomain.resolve_xref(self.env, refdoc, self.app.builder,\n 'doc', target, node, contnode)\n if doc_ref:\n results.append(('doc', doc_ref))\n # next, do the standard domain (makes this a priority)\n results.extend(stddomain.resolve_any_xref(self.env, refdoc, self.app.builder,\n target, node, contnode))\n for domain in self.env.domains.values():\n if domain.name == 'std':\n continue # we did this one already\n try:\n results.extend(domain.resolve_any_xref(self.env, refdoc, self.app.builder,\n target, node, contnode))\n except NotImplementedError:\n # the domain doesn't yet support the new interface\n # we have to manually collect possible references (SLOW)\n for role in domain.roles:\n res = domain.resolve_xref(self.env, refdoc, self.app.builder,\n role, target, node, contnode)\n if res and len(res) > 0 and isinstance(res[0], nodes.Element):\n results.append((f'{domain.name}:{role}', res))\n # now, see how many matches we got...\n if not results:\n return None\n if len(results) > 1:\n def stringify(name: str, node: Element) -> str:\n reftitle = node.get('reftitle', node.astext())\n return f':{name}:`{reftitle}`'\n candidates = ' or '.join(stringify(name, role) for name, role in results)\n logger.warning(__(\"more than one target found for 'any' cross-\"\n 'reference %r: could be %s'), target, candidates,\n location=node)\n res_role, newnode = results[0]\n # Override \"any\" class with the actual role type to get the styling\n # approximately correct.\n res_domain = res_role.split(':')[0]\n if (len(newnode) > 0 and\n isinstance(newnode[0], nodes.Element) and\n newnode[0].get('classes')):\n newnode[0]['classes'].append(res_domain)\n newnode[0]['classes'].append(res_role.replace(':', '-'))\n return newnode",
"def resolve_all_refs(s):\n refs = []\n # ask all graphs for REFs\n for graph in s.graphs.values():\n refs.extend( graph.list_of_all_unpointed_refs() )\n\n # resolve collected refs\n for ref in refs:\n ref.resolve()\n\n return len( refs )",
"def _resolve(self):\n pass",
"def find_real_ref(ref, ref_list):\n for r in reversed(ref_list):\n if r == ref:\n return r\n return None",
"def resolve(self,**bindings):\n for solution in self.resolve_all(**bindings):\n return solution",
"def get_actual_ref(self, ref):\n while ref in self.reference_map:\n ref = self.reference_map[ref]\n return ref",
"def addresses_in_spec_path(self, spec_path):",
"def resolve ( self, *deps, **kw ):\n result = self.do_resolve ( deps, **kw )\n # result := ( list<resolved>, list<unresolvable> )\n return None if result is None else result [0]",
"def _resolve_references(self, args: Optional[list], kwargs: Optional[dict]) -> Tuple[list, dict]:\n def resolve_id(action, id_):\n if isinstance(id_, Id):\n if id_.value not in self._vars:\n raise InterpException(f\"argument {id_.value} was not a known variable\")\n else:\n var = self._vars[id_.value]\n if isinstance(var, InterpretableWrapper):\n action(var._obj)\n else:\n action(var)\n else:\n action(id_)\n\n resolved_args = []\n for a in [] if not args else args:\n resolve_id(lambda x: resolved_args.append(x), a)\n\n resolved_kwargs = {}\n\n def setter(k):\n def method(v):\n resolved_kwargs[k] = v\n return method\n\n for k, v in {}.items() if not kwargs else kwargs.items():\n resolve_id(setter(k), v)\n\n return resolved_args, resolved_kwargs",
"def resolve_xref(self, env, fromdocname, builder, typ, target,\n node, contnode):\n if node.get('json:name'):\n objdef = self.get_object(node['json:name'])\n if objdef:\n return node_utils.make_refnode(builder, fromdocname,\n objdef.docname, objdef.key,\n contnode)\n\n if typ in self.REF_TYPES:\n try:\n ref = nodes.reference(internal=False)\n ref['refuri'], ref['reftitle'] = self.REF_TYPES[target]\n ref.append(contnode)\n return ref\n except KeyError:\n pass",
"def resolve(self):\n raise NotImplementedError"
] | [
"0.8131186",
"0.6489531",
"0.6228663",
"0.6109309",
"0.60549843",
"0.6046081",
"0.59280676",
"0.5861704",
"0.58215785",
"0.58063215",
"0.5793763",
"0.57419795",
"0.5669969",
"0.56401956",
"0.5603574",
"0.5599089",
"0.5540529",
"0.548241",
"0.5451723",
"0.5440825",
"0.5419424",
"0.5319423",
"0.5287133",
"0.5283119",
"0.5276385",
"0.5274274",
"0.5269659",
"0.5261944",
"0.52287835",
"0.5216321"
] | 0.66698265 | 1 |
Test result of func count_ways(0). | def test_count_ways_null():
assert f.count_ways(0) == 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_count_ways_positive(inputs, outputs):\n assert f.count_ways(inputs) == outputs",
"def count_ways(n):\n if n < 0:\n return 0\n elif n == 0:\n return 1\n else:\n total = 0\n for i in range(1, min(n, 3) + 1):\n total += count_ways(n - i)\n return total",
"def test_count_0(self):\n self.assertEqual(count(0), 0, 'Between 0 and 0, there is 0 lucky numbers.')",
"def test_markow_chain():\n amount = len(markow_chain(SNULL, TIMESTEPS, PROBABILITYMATRIX))\n assert TIMESTEPS == amount",
"def test_step_count(self):\n inp = [(0, 0), (1, 1), (1, 2)]\n expected = 2\n actual = get_num_steps(inp)\n self.assertEqual(expected, actual)",
"def test_count_1719(self):\n value: int = 2645\n result: int = 1113\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_361_077(self):\n value: int = 361_077\n result: int = 188_065\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_770(self):\n value: int = 770\n result: int = 306\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_459(self):\n value: int = 459\n result: int = 148\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def count_stair_ways(n):\n if n == 1:\n return 1\n if n == 2:\n return 2\n return count_stair_ways(n - 1) + count_stair_ways(n - 2)",
"def test_count_9(self):\n value: int = 9\n result: int = 2\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_ways_wrong_value_raise(inputs, outputs):\n with pytest.raises(outputs):\n f.count_ways(inputs)",
"def test_calculate_count(request):\n print(\"\\n--Starting:\", request.node.name)\n\n net = ModelRoadwayNetwork.read(\n link_file=STPAUL_LINK_FILE,\n node_file=STPAUL_NODE_FILE,\n shape_file=STPAUL_SHAPE_FILE,\n fast=True,\n )\n\n net.add_counts()\n assert \"AADT\" in net.links_df.columns\n print(net.links_df[net.links_df.drive_access == 1].AADT.value_counts())\n ## todo write an assert that actually tests something",
"def number_of_ways(n):\r\n return number_of_ways_helper([1, 5, 10, 25], n)",
"def obstacle_count(self):\n found_something = False\n count = 0\n starting_postion = self.get_heading()\n self.right(primary=60, counter=60)\n time.sleep(0.5)\n while self.get_heading() != starting_postion:\n if self.read_distance() < 250 and not found_something:\n found_something = True\n count += 1\n print (\"I found something\")\n elif self.read_distance() > 250 and found_something:\n found_something = False\n print(\"I have a clear view\")\n self.stop()\n\n print(\"I have found this many things: %d\" % count)\n return count",
"def wayCounterHelper(steps,memos):\n\tif steps<0:\n\t\treturn 0\n\telif steps==0:\n\t\treturn 1\n\telse:\n\t\tif steps in memos:\n\t\t\treturn memos[steps]\n\t\telse:\n\t\t\tmemos[steps] = wayCounterHelper(steps-1,memos)\\\n\t\t\t\t+wayCounterHelper(steps-2,memos)+wayCounterHelper(steps-3,memos)\n\t\t\treturn memos[steps]",
"def test_returns_zero_for_empty_grid(self):\n grid = []\n result = num_islands(grid)\n self.assertEqual(result, 0)",
"def count():",
"def test_count_72(self):\n value: int = 72\n result: int = 21\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_2645(self):\n value: int = 1719\n result: int = 723\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_20(self):\n value: int = 20\n result: int = 4\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_66(self):\n value: int = 66\n result: int = 18\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_361_080(self):\n value: int = 361_080\n result: int = 188_067\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_triangle_count_06(self):\n body = {\"direction\": \"OUT\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0",
"def count_paths_with_zero_intervals(self):\n zeros = []\n for path in self.paths:\n # print(\"Checking path {}\".format(path))\n has_zero = 0\n for arc in path:\n # lb = self.arc_info[arc][\"lower_bound\"]\n # ub = self.arc_info[arc][\"upper_bound\"]\n # print(\"{} {} interval\".format(lb,ub))\n if (self.arc_info[arc][\"upper_bound\"] -\n self.arc_info[arc][\"lower_bound\"]) == 0:\n has_zero = 1\n zeros.append(has_zero)\n print(zeros)\n return(sum(zeros))",
"def test_triangle_count_04(self):\n body = {\"direction\": \"OUT\"}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0",
"def test_count_173(self):\n value: int = 173\n result: int = 55\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_35(self):\n value: int = 35\n result: int = 6\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_361_087(self):\n value: int = 361_087\n result: int = 188_067\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_triangle_count_07(self):\n body = {\"direction\": \"OUT\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 0}\n else:\n assert 0"
] | [
"0.7972175",
"0.686043",
"0.6760794",
"0.61788404",
"0.6081778",
"0.6073252",
"0.6062703",
"0.606255",
"0.6061677",
"0.60572267",
"0.6021797",
"0.6009044",
"0.6005339",
"0.60044366",
"0.5997331",
"0.5973426",
"0.59645575",
"0.5960583",
"0.5955983",
"0.59351563",
"0.59320086",
"0.58960545",
"0.5867288",
"0.5865524",
"0.58357173",
"0.58322334",
"0.58219504",
"0.58158875",
"0.58057404",
"0.57768327"
] | 0.8269839 | 0 |
Timestamp is the timestamp of the reading, direction is sent or received, size is the est. size of the pkt, direction_count provides a total ordering of packets going in self.direction | def __init__(self, timestamp, direction, size, direction_count):
self.timestamp = timestamp
self.direction = direction
self.size = size
self.direction_count = direction_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def touch_packet (self, byte_count, now=None):\n if now is None: now = time.time()\n self.byte_count += byte_count\n self.packet_count += 1\n self.last_touched = now",
"def read_packetlen(self):\n packetlen = int(struct.unpack('!I', b\"\".join(self.__input))[0])\n self.__input = []\n self.set_terminator(packetlen)\n self.found_terminator = self.read_milter_data",
"def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct(packet):\n\t\t\t\t# this populates packet.data with {name: value}\n\t\t\t\tpacket.data[name] = self.unpack(datatype)\n\n\t\t\t# I believe the following are packet-type specific fixes for variable-length packets.\n\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tpacket.data['x2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['y2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['z2'] = self.unpack('short')\n\t\t\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = self.unpack_array_fast('short', packet.data['data_size'])\n#\t\t\t\tbtype = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tmetadata = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tpacket.data[\"blocks\"] = []\n#\t\t\t\tfor i in zip(coords, btype, metadata):\n#\t\t\t\t\tblock = {}\n#\t\t\t\t\tblock[\"x\"] =\t\ti[0] >> 12\n#\t\t\t\t\tblock[\"z\"] = 0x0F & i[0] >> 8\n#\t\t\t\t\tblock[\"y\"] = 0xFF & i[0]\n#\t\t\t\t\tblock[\"type\"] = i[1]\n#\t\t\t\t\tblock[\"metadata\"] = i[2]\n#\t\t\t\t\tpacket.data[\"blocks\"].append(block)\n#\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\trecords = self.unpack_array_fast('byte', packet.data['data_size']*3)\n\t\t\t\ti = 0\n\t\t\t\tpacket.data[\"blocks\"] = []\n\t\t\t\twhile i < packet.data['data_size']*3:\n\t\t\t\t\tpacket.data[\"blocks\"].append(dict(zip(('x','y','z'), records[i:i+3])))\n\t\t\t\t\ti+=3\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data[\"slots_data\"] = self.unpack_array('slot', packet.data[\"data_size\"])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\t#0x82:\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tpacket.data[\"text\"] = []\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"text\"].append(packet.data[\"line_%s\" % (i+1)])\n\t\t\t\t\t\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data[\"data\"] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\n\t\t\t# Sets packet.original to the byte string that the packet was decoded from.\n\t\t\tpacket.original = backup[:len(backup) - len(self.buff)]\n\n\t\t\treturn packet\n\n\t\texcept IncompleteData:\n\t\t\tself.buff = backup\n\t\t\treturn None\n\t\texcept Exception, ex:\n\t\t\tself.buff = backup\n\t\t\tex.args += (self.buff[20:],)\n\t\t\traise",
"def getPacketCount(self):\n return 1",
"def ingest_packet(self, pkt, pkt_receive_timestamp):\n #*** Packet length on the wire:\n self.packet_length = len(pkt)\n #*** Read into dpkt:\n eth = dpkt.ethernet.Ethernet(pkt)\n eth_src = _mac_addr(eth.src)\n eth_dst = _mac_addr(eth.dst)\n eth_type = eth.type\n #*** We only support IPv4 (TBD: add IPv6 support):\n if eth_type != 2048:\n self.logger.error(\"Non IPv4 packet, eth_type is %s\", eth_type)\n return 0\n ip = eth.data\n self.ip_src = socket.inet_ntop(socket.AF_INET, ip.src)\n self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n #*** We only support TCP:\n if ip.p != 6:\n self.logger.error(\"Non TCP packet, ip_proto=%s\",\n ip.p)\n return 0\n proto = 'tcp'\n tcp = ip.data\n self.tcp_src = tcp.sport\n self.tcp_dst = tcp.dport\n self.tcp_seq = tcp.seq\n self.tcp_acq = tcp.ack\n self.tcp_flags = tcp.flags\n self.payload = tcp.data\n #*** Generate a hash unique to flow for packets in either direction\n self.fcip_hash = _hash_5tuple(self.ip_src, self.ip_dst, self.tcp_src,\n self.tcp_dst, proto)\n #*** Check to see if we already know this identity:\n db_data = {'hash': self.fcip_hash}\n self.fcip_doc = self.fcip.find_one(db_data)\n if not self.fcip_doc:\n #*** Get flow direction (which way is TCP initiated). Client is\n #*** the end that sends the initial TCP SYN:\n if _is_tcp_syn(tcp.flags):\n self.logger.debug(\"Matched TCP SYN first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 'verified-SYN'\n elif _is_tcp_synack(tcp.flags):\n self.logger.debug(\"Matched TCP SYN+ACK first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_dst\n self.server = self.ip_src\n self.packet_direction = 's2c'\n self.verified_direction = 'verified-SYNACK'\n else:\n self.logger.debug(\"Unmatch state first pkt, tcp_flags=%s\",\n tcp.flags)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 0\n #*** Neither direction found, so add to FCIP database:\n self.fcip_doc = {'hash': self.fcip_hash,\n 'ip_A': self.ip_src,\n 'ip_B': self.ip_dst,\n 'port_A': self.tcp_src,\n 'port_B': self.tcp_dst,\n 'proto': proto,\n 'finalised': 0,\n 'packet_count': 1,\n 'latest_timestamp' : pkt_receive_timestamp,\n 'packet_timestamps': [pkt_receive_timestamp,],\n 'tcp_flags': [tcp.flags,],\n 'packet_lengths': [self.packet_length,],\n 'client': self.client,\n 'server': self.server,\n 'packet_directions': [self.packet_direction,],\n 'verified_direction': self.verified_direction,\n 'suppressed': 0}\n self.logger.debug(\"FCIP: Adding record for %s to DB\",\n self.fcip_doc)\n db_result = self.fcip.insert_one(self.fcip_doc)\n self.packet_count = 1\n\n elif self.fcip_doc['finalised']:\n #*** The flow is already finalised just increment packet count:\n self.fcip_doc['packet_count'] += 1\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count']},})\n self.packet_count = self.fcip_doc['packet_count']\n\n else:\n #*** We've found the flow in the FCIP database, now update it:\n self.logger.debug(\"FCIP: found existing record %s\", self.fcip_doc)\n #*** Rate this packet as c2s or s2c direction:\n if self.client == self.ip_src:\n self.packet_direction = 'c2s'\n elif self.client == self.ip_dst:\n self.packet_direction = 's2c'\n else:\n self.packet_direction = 'unknown'\n #*** Increment packet count. 
Is it at max?:\n            self.fcip_doc['packet_count'] += 1\n            self.packet_count = self.fcip_doc['packet_count']\n            if self.fcip_doc['packet_count'] >= self.max_packet_count:\n                #*** TBD:\n                self.fcip_doc['finalised'] = 1\n                self.logger.debug(\"Finalising...\")\n            #*** Read suppressed status to variable:\n            self.suppressed = self.fcip_doc['suppressed']\n            #*** Read verified_direction status to variable:\n            self.verified_direction = self.fcip_doc['verified_direction']\n            #*** Add packet timestamps, tcp flags etc:\n            self.fcip_doc['latest_timestamp'] = pkt_receive_timestamp\n            self.fcip_doc['packet_timestamps'].append(pkt_receive_timestamp)\n            self.fcip_doc['tcp_flags'].append(tcp.flags)\n            self.fcip_doc['packet_lengths'].append(self.packet_length)\n            self.fcip_doc['packet_directions'].append(self.packet_direction)\n            #*** Write updated FCIP data back to database:\n            db_result = self.fcip.update_one({'hash': self.fcip_hash},\n                {'$set': {'packet_count': self.fcip_doc['packet_count'],\n                    'finalised': self.fcip_doc['finalised'],\n                    'packet_timestamps': self.fcip_doc['packet_timestamps'],\n                    'tcp_flags': self.fcip_doc['tcp_flags'],\n                    'packet_lengths': self.fcip_doc['packet_lengths'],\n                    'packet_directions': self.fcip_doc['packet_directions']\n                },})\n        #*** Tests:\n        self.logger.debug(\"max_packet_size is %s\", self.max_packet_size())\n        self.logger.debug(\"max_interpacket_interval is %s\",\n            self.max_interpacket_interval())\n        self.logger.debug(\"min_interpacket_interval is %s\",\n            self.min_interpacket_interval())",
"def Parse(self):\n prev_percent_read = 0\n for packet in TS.next_packet(self._filename):\n #check_packet_formedness(packet)\n pei = TS.get_transport_error_indicator(packet)\n pusi = TS.get_payload_start(packet)\n pid = TS.get_pid(packet)\n tsc = TS.get_tsc(packet)\n\n # per .ts packet handler\n if self.OnTSPacket:\n self.OnTSPacket(packet)\n\n # Update a progress callback\n self._read_size += TS.PACKET_SIZE\n percent_read = ((self._read_size / float(self._total_filesize)) * 100)\n new_percent_read = int(percent_read * 100)\n if new_percent_read != prev_percent_read and self.Progress:\n self.Progress(self._read_size, self._total_filesize, percent_read)\n prev_percent_read = new_percent_read\n\n adaptation_field_control = TS.get_adaptation_field_control(packet)\n continuity_counter = TS.get_continuity_counter(packet)\n\n # put together PES from payloads\n payload = TS.get_payload(packet)\n if pusi == True:\n if not ES.pes_packet_check_formedness(payload):\n if pid in self._elementary_streams:\n self._elementary_streams[pid] = None\n continue\n pes_id = ES.get_pes_stream_id(payload)\n self._elementary_streams[pid] = payload\n else:\n if pid in self._elementary_streams:\n # TODO: check packet sequence counter\n if not self._elementary_streams[pid]:\n self._elementary_streams[pid] = \"\"\n self._elementary_streams[pid] += payload\n else:\n # TODO: throw. this situaiton means out of order packets\n pass\n if pid in self._elementary_streams and ES.pes_packet_complete(self._elementary_streams[pid]):\n # TODO: handle packet contents here (callback)\n es = self._elementary_streams[pid]\n if self.OnESPacket:\n header_size = ES.get_pes_header_length(es)\n self.OnESPacket(pid, es, header_size)",
"def SendPacketsSendSize(self) -> int:",
"def _get_data(self, read_size):\n return self._pipe.recv_bytes()",
"def read_raw_packet(self):\n\n size = 0\n\n # Read our two-byte header from the debugger...\n while not size:\n size = (self._get_next_byte() << 16) | self._get_next_byte()\n\n # ... and read our packet.\n packet = bytearray([self._get_next_byte() for _ in range(size)])\n\n # Return our packet.\n # TODO: extract and provide status flags\n # TODO: generate a timestamp on-device\n return packet, datetime.now(), None",
"def diff(self, other):\n if self is other or self == other:\n # headers are equal, we need 1 byte\n return 0\n\n # difference should be computed for packets with same object_id\n assert self.object_id == other.object_id\n\n if self.stream_id == other.stream_id:\n if self.type == other.type and self.length == other.length:\n # difference only in timestamp, 4 bytes\n return 1\n\n # stream_id matches, we need 8 bytes\n return 2\n \n # we need full header encoded, 12 bytes\n return 3",
"def pack( self, key, timestamp = None, label = None, description = None, table = None, pad = False ) : \r\n\r\n duration = 1\r\n if timestamp is None :\r\n timestamp = ms_localtime() \r\n\r\n #\r\n # bugfix : as it seems that NetStation\r\n #\r\n # (a) does not clean the internal buffer for the \"event\" data\r\n # and\r\n # (b) ignores the \"total packet length\" from the \"event\" message header\r\n # when reading the \"label\" / \"description\" / \"key/data\" information , \r\n #\r\n # we have to append a fake \"tail\" to our message if the case if it is incomplete --\r\n # -- otherwise either garbage or the information from the previous datagram\r\n # would be erroneously recognized as belonging to ours . \r\n #\r\n # nb. this also means that the 'label' / 'description' / 'key/data' entries\r\n # cannot be optional . \r\n # \r\n\r\n if not is_32_bit_int_compatible( timestamp ) :\r\n \r\n raise Eggog( \"only 'small' 32-bit integer values less than %d are accepted as timestamps, not %s\" % ( 0xffffFFFF, timestamp ) ) \r\n \r\n if label is None : label = ''\r\n if description is None : description = ''\r\n\r\n label_str = pstring( label ) \r\n description_str = pstring( description ) \r\n\r\n if table is None or len( table.keys() ) <= 0 :\r\n # explicitly state that the number of keys is zero ( see above comment ) \r\n table_str = struct.pack( 'B', 0 ) \r\n else : \r\n table_str = self._pack_dict(table, pad)\r\n\r\n size = len( label_str ) + len( description_str ) + len( table_str ) \r\n \r\n ## # debug \r\n ## print \"+size: \", size \r\n\r\n header_str = self._make_event_header( size, timestamp, duration, key ) \r\n\r\n ## # debug \r\n ## print \"'%s', '%s', '%s', '%s'\" % ( header_str, label_str, description_str, table_str ) \r\n\r\n result_str = _cat( header_str, label_str, description_str, table_str ) \r\n \r\n return result_str",
"def length(self):\n return struct.unpack('<H', self.pkt.payload[2:4])[0]",
"def recv_ts(self) -> int:\n pass",
"def send_packet(self):\n amountfreed = 0\n bitstransmitted = 0\n # If we are at or have passed the time at which we should send the next\n # packet, we should try to send the next packet.\n if (self.next_packet_send_time <= globals.systime):\n # If there is nothing currently in the buffer, we have nothing to\n # send at this time.\n if (len(self.buffer) == 0):\n self.next_packet_send_time = \\\n self.next_packet_send_time + globals.dt\n\n # Otherwise, It's time to send the packet at the front of the buffer\n else:\n packet_to_send = self.buffer.pop(0)\n amountfreed = packet_to_send.get_size()\n # Updates buffersize to reflect that we removed the packet\n # at the front of the buffer from the buffer.\n self.buffersize = self.buffersize - amountfreed\n\n # Time represents the amount of time in the previous dt that we\n # were transmitting. (i.e. between the previous systime and the\n # current)\n time = self.next_packet_send_time - (globals.systime - globals.dt)\n # bitstransmitted represents the number of bits that were\n # transmitted in the previous dt\n bitstransmitted = time * self.rate\n\n # Now we need to add the packet that we removed from the\n # buffer to the lists that keep track of the propegation of the\n # packets.\n self.packets_in_transmission.append(packet_to_send)\n self.packet_arrival_times.append(self.next_packet_send_time + self.delay)\n\n # If there are still packets in the buffer, update the time\n # to send the next packet to be when it would finish transmitting\n if (len(self.buffer) > 0):\n next_packet_size = self.buffer[0].get_size()\n self.next_packet_send_time = self.next_packet_send_time + \\\n next_packet_size * (1/self.rate)\n # If we finished transmitting a packet and immediately\n # started sending another, we transmitted the entire time\n # step.\n bitstransmitted = globals.dt * self.rate\n\n # the buffer is empty so we will just set the time to try to\n # send the next packet to be the next time step.\n else:\n self.next_packet_send_time = self.next_packet_send_time + \\\n globals.dt\n\n # in one of two cases: either buffer is empty or we used link to capacity\n # in last dt.\n else:\n # if the buffer is nonempty, we must have been transmitting for\n # the entire duration of the last timestep.\n if (len(self.buffer) != 0):\n bitstransmitted = globals.dt * self.rate\n else:\n pass\n\n # Now, we compute and update the effective rate of the link.\n rate = 0\n self.lrsteps.append(bitstransmitted)\n if(globals.systime <= self.lrwindow):\n if (globals.systime != 0):\n rate = sum(self.lrsteps)/(globals.systime + globals.dt)\n # when the time is 0, we will just set the rate to be 0.\n else:\n pass\n else:\n self.lrsteps.pop(0)\n rate = sum(self.lrsteps)/self.lrwindow\n self.effectiverate = rate\n\n # If we are tracking this HalfLink, we will also record its current\n # rate.\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.LINKRATE\n dict = globals.statistics[key][globals.systime] = rate\n\n # Now we will check if any packets should be arriving at their\n # destination.\n if (len(self.packet_arrival_times) > 0):\n # If the time has passed the arrival time at the front of the list\n # of packet_arrival_times, we should remove the first item of the\n # list of packet_arrival_times, as well as the corresponding first\n # element of the list of packets_in_transmission and we should send\n # that packet to its destination.\n if (self.packet_arrival_times[0] <= globals.systime):\n packet_to_send = 
self.packets_in_transmission.pop(0)\n                self.packet_arrival_times.pop(0)\n                dest_type = ''\n                if self.destination[0] == 'H':\n                    dest_type = 'hosts'\n                else:\n                    dest_type = 'routers'\n                receiver = globals.idmapping[dest_type][self.destination]\n                receiver.receive_packet(packet_to_send, self.id)\n        return amountfreed",
"def length(self):\n return struct.unpack('<H', self.pkt.payload[6:8])[0]",
"def get_count(self):\n return unpack(os.read(self.fd, 8))",
"def next_bytes(self, count):\r\n bts = self.b_form[self._ip:self._ip + count]\r\n self._ip += count\r\n return bts",
"def update_link_statistics(self):\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.BUFFEROCCUPANCY\n globals.statistics[key][globals.systime] = self.buffersize",
"def _read_length(self):\n msg_length = struct.unpack('!I', self.received_data[0])[0]\n self.l.debug('msg_length = %d', msg_length)\n self.set_terminator(msg_length)\n self.process_data = self._read_message\n self.received_data = []",
"def delta_bytes(self):\n return sum(self.fcip_doc['packet_lengths'])",
"def __len__(self):\n return len(self.buffer)",
"def __len__(self):\n return len(self.buffer)",
"def _read_len(self):\n\n read = self.socket.recv(4)\n if len(read) == 0:\n # if we read 0 bytes and self.message is empty, it means client\n # closed the connection\n if len(self.message) != 0:\n logging.error(\"can't read frame size from socket\")\n self.close()\n return\n self.message += read\n if len(self.message) == 4:\n self.len, = struct.unpack(b'!i', self.message)\n if self.len < 0:\n logging.error(\"negative frame size, it seems client\"\\\n \" doesn't use FramedTransport\")\n self.close()\n elif self.len == 0:\n logging.error(\"empty frame, it's really strange\")\n self.close()\n else:\n self.len += 4 # Include message length\n self._set_status(WAIT_MESSAGE)",
"def length(self):\n return struct.unpack('<B', self.pkt.payload[1:2])[0]",
"def _handle_ordered_packet(self, packet):\n pass",
"def _read_len(self):\r\n read = self.socket.recv(4 - len(self.message))\r\n if len(read) == 0:\r\n # if we read 0 bytes and self.message is empty, it means client close \r\n # connection\r\n if len(self.message) != 0:\r\n logging.error(\"can't read frame size from socket\")\r\n self.close()\r\n return\r\n self.message += read\r\n if len(self.message) == 4:\r\n self.len, = struct.unpack('!i', self.message)\r\n if self.len < 0:\r\n logging.error(\"negative frame size, it seems client\"\\\r\n \" doesn't use FramedTransport\")\r\n self.close()\r\n elif self.len == 0:\r\n logging.error(\"empty frame, it's really strange\")\r\n self.close()\r\n else:\r\n self.message = ''\r\n self.status = WAIT_MESSAGE",
"def _process_frame(self, timestamp: Timestamp, frame: SerialFrame) -> None:\n assert frame.data_specifier == self._specifier.data_specifier, \"Internal protocol violation\"\n self._statistics.frames += 1\n\n transfer: typing.Optional[pyuavcan.transport.TransferFrom]\n if frame.source_node_id is None:\n transfer = TransferReassembler.construct_anonymous_transfer(timestamp, frame)\n if transfer is None:\n self._statistics.errors += 1\n _logger.debug(\"%s: Invalid anonymous frame: %s\", self, frame)\n else:\n transfer = self._get_reassembler(frame.source_node_id).process_frame(\n timestamp, frame, self._transfer_id_timeout\n )\n if transfer is not None:\n self._statistics.transfers += 1\n self._statistics.payload_bytes += sum(map(len, transfer.fragmented_payload))\n _logger.debug(\"%s: Received transfer: %s; current stats: %s\", self, transfer, self._statistics)\n try:\n self._queue.put_nowait(transfer)\n except asyncio.QueueFull: # pragma: no cover\n # TODO: make the queue capacity configurable\n self._statistics.drops += len(transfer.fragmented_payload)",
"def data_received(self, data):\n self.buffered += data\n while True:\n if self.have_length:\n if len(self.buffered) < self.message_length:\n break\n self._decode_message(self.buffered[:self.message_length])\n self.have_length = False\n self.buffered = self.buffered[self.message_length:]\n self.message_length = 0\n else:\n if len(self.buffered) < 4:\n break\n (self.message_length,) = struct.unpack_from(\">I\", self.buffered)\n self.buffered = self.buffered[4:]\n self.have_length = True",
"def readPacket(self, timingOrder):\n keep = any(s.startswith(self.dev.devName) for s in timingOrder)\n return self.dev.read(self.nPackets) if keep else \\\n self.dev.discard(self.nPackets)",
"def __len__(self):\n if self._buffer is not None:\n if self._header.value_type in b'ZBH':\n return len(self._buffer)\n else:\n return 1\n else:\n return 0"
] | [
"0.5552777",
"0.5473483",
"0.5385493",
"0.5235498",
"0.51582557",
"0.51522464",
"0.51505923",
"0.51322556",
"0.51171005",
"0.5098781",
"0.508691",
"0.5086687",
"0.5083064",
"0.5064236",
"0.505681",
"0.504536",
"0.50425094",
"0.5032256",
"0.5010765",
"0.5007731",
"0.50073075",
"0.50073075",
"0.49963135",
"0.4981447",
"0.49797094",
"0.49769938",
"0.49677962",
"0.4959072",
"0.4958419",
"0.49577713"
] | 0.6326216 | 0 |
This method decomposes the file | def decompose(self, file_name):
print("[+] Decompose started...")
with open(file_name, "rb") as image_file:
# We check if the directory chunks doesn't exist, then, we create it
if not path.exists("./chunks/"):
makedirs("chunks")
to_print = b64.b64encode(image_file.read()).decode('utf-8')
size = len(to_print)
re_size = self.verify_size_content(self.divide(size))
content = ""
i = 0
print("[+] FILENAME: " + str(file_name))
print("[+] " + str(re_size))
print("[+] SIZE: " + str(size))
while to_print:
content = to_print[:re_size['chunck']]
title = md5(content[:300].encode()).hexdigest()
self.map[i] = title
self.chunk_array.append({title: content})
print("> chunck: " + title)
system("mkdir ../chunks/")
# Optionnal, to saved the chunks
with open("../chunks/" + title, "w+") as file:
file.write(content)
# Optionnal, to saved the chunks
to_print = to_print[re_size['chunck']:]
i += 1
print("[+] Decompose done.")
print("-------") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decode(self, file):\n raise NotImplementedError()",
"def decompose(self):\r\n contents = [i for i in self.contents]\r\n for i in contents:\r\n if isinstance(i, Tag):\r\n i.decompose()\r\n else:\r\n i.extract()\r\n self.extract()",
"def decompress_file(in_file: str, out_file: str) -> None:\n with open(in_file, \"rb\") as f:\n num_nodes = f.read(1)[0]\n buf = f.read(num_nodes * 4)\n node_lst = bytes_to_nodes(buf)\n # use generate_tree_general or generate_tree_postorder here\n tree = generate_tree_postorder(node_lst, num_nodes - 1)\n size = bytes_to_int(f.read(4))\n with open(out_file, \"wb\") as g:\n text = f.read()\n g.write(decompress_bytes(tree, text, size))",
"def read (self, file):\n\t\tself.unpack (file.read (self.size()))",
"def preprocess (self, filecontents):\n\t\treturn filecontents",
"def unpack(file_path, extraction_path, remove):\n print(file_path)\n Archive(file_path).extractall(extraction_path, auto_create_dir=True)\n # remove original compressed file???\n if remove is True:\n os.remove(file_path)",
"def uncompress(in_file, out_file):\n with open(in_file, \"rb\") as f:\n num_nodes = f.read(1)[0]\n buf = f.read(num_nodes * 4)\n node_lst = bytes_to_nodes(buf)\n # use generate_tree_general or generate_tree_postorder here\n tree = generate_tree_general(node_lst, num_nodes - 1)\n size = bytes_to_size(f.read(4))\n with open(out_file, \"wb\") as g:\n text = f.read()\n g.write(generate_uncompressed(tree, text, size))",
"def decode(fh):\n # (dmrs { ... })*",
"def Decompress(inputFilePath, outputFilePath):\n # TODO: Add tests for this function\n compressedString = FilePathIntoString(inputFilePath)\n dictionary = ExtractTextDictionaryFromString(compressedString)\n\n compressedWords = SplitIntoWords(compressedString)\n\n # Remove the dictionary from the first word\n firstWord = compressedWords[0]\n firstWordArray = firstWord.split(\"\\n\")\n compressedWords[0] = firstWordArray[len(firstWordArray) - 1]\n\n uncompressedWords = UncompressWordArray(compressedWords, dictionary)\n outputString = WordArrayToString(uncompressedWords)\n WriteToFile(outputFilePath, outputString)",
"def decode_file(self, filename):\n num_bytes = os.stat(filename)[6]\n data = array.array('B')\n\n with open(filename, 'rb') as f:\n data.fromfile(f, num_bytes)\n\n return self.decode_data(data)",
"def decompress(self, file):\n\t\t\n\t\tbit_string = \"\"\n\n\t\tbyte = file.read(1)\n\t\twhile(len(byte) > 0):\n\t\t\tbyte = ord(byte)\n\t\t\tbits = bin(byte)[2:].rjust(8, '0')\n\t\t\tbit_string += bits\n\t\t\tbyte = file.read(1)\n\n\t\tencoded_text = self.remove_padding(bit_string)\n\n\t\tdecompressed_text = self.decode_text(encoded_text)\n\t\t\n\t\tprint(\"Decompressed\")\n\t\treturn decompressed_text",
"def prepare_input(self):\n super().prepare_input()\n input_file = self._input_filepath.open(encoding='utf-8')\n input_formatted_file = Path(self.__input_formatted_filepath).open('w', encoding='utf8')\n for line in input_file.readlines():\n for token in line.split():\n if token.endswith('.') or token.endswith(','):\n input_formatted_file.write('{0}\\n{1}\\n'.format(token[:-1], token[-1]))\n else:\n input_formatted_file.write('{}\\n'.format(token))",
"def revert(self):\n headerdump = self.file.readp(0, 16)\n if sum(headerdump):\n dictat,dictlen = struct.unpack(\"<QQ\", headerdump)\n dictblob = self.file.readp(dictat, dictlen)\n self.keys = pickle.loads(dictblob)\n self.buffered = {}\n self.cache = {}\n self.awaitingpunch = []\n\n else:\n self.keys = {}\n self.buffered = {}\n self.cache = {}\n self.awaitingpunch = []",
"def fullunzip_output(self: object, controller: FullDecoder) -> Iterator[str]:\n yield controller.huff_decoder.seq.read()\n yield controller.huff_decoder.header\n yield controller.huff_decoder.unicode\n yield controller.huff_decoder.binary \n yield controller.huff_decoder.decompressed\n for line in controller.bw_decoder.bwm:\n yield line\n yield controller.bw_decoder.original",
"def unformat_file(path: str, out_dir: str):\n\n data_dir = get_data_dir()\n path = Path(path)\n out_dir = Path(out_dir)\n if not path.exists() and out_dir.exists() and out_dir.is_dir():\n return\n\n if path.is_dir():\n path.mkdir(exist_ok=True)\n for filename in path.iterdir():\n unformat_file(filename, str(out_dir))\n\n else:\n dataobj = frontmatter.load(str(path))\n\n try:\n # get relative path of object in `data` dir\n datapath = path.parent.resolve().relative_to(data_dir)\n except ValueError:\n datapath = Path()\n\n # create subdir if doesn't exist\n (out_dir / datapath).mkdir(exist_ok=True)\n new_path = out_dir / datapath / f\"{dataobj.metadata['title']}.md\"\n with new_path.open(\"w\") as f:\n f.write(dataobj.content)\n\n current_app.logger.info(\n f\"Unformatted and moved {str(path)} to {str(new_path.resolve())}\"\n )\n path.unlink()",
"def raw_file_structure(self):\n return None",
"def load_and_clean_file(self, path):\n pass",
"def unpack(file, path='.'):\n assert isfile(file)\n assert isdir(path)\n\n for implementor in [GzipTarredFile, ZippedFile, Bzip2TarredFile]:\n if implementor.is_valid(file):\n with console.cd(path):\n return [implementor(file).extract(), implementor]\n else:\n raise InvalidFile, 'compressed file format unknown: %s' % file",
"def parse_dat(\n dat_path: str,\n verbose=False,\n):\n check_fourcc(\"TLZC\", dat_path)\n dat_path = Path(dat_path)\n binary_file = open(dat_path, \"rb\")\n g = BinaryReader(binary_file)\n g.word(4)\n g.i(5)\n\n dec_path = dat_path.parent / f\"{dat_path.name}.dec\"\n with open(dec_path, \"wb\") as dec_file:\n data = g.read(g.fileSize() - g.tell())\n data = zlib.decompress(data)\n dec_file.write(data)\n\n g.close()\n logger.info(f\"Parse DAT as {dat_path.name}.dec completed.\")",
"def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n decoded_lines = decoded_text.split('\\n')\n\n # Remove titles of Wikipedia articles if desired\n if self.remove_headers:\n filtered_lines = []\n for line in decoded_lines:\n line_strip = line.strip()\n if len(line_strip) > 0:\n if line_strip[0] != '=' and line_strip[-1] != '=':\n filtered_lines.append(line)\n decoded_lines = filtered_lines\n\n eol = self.eol or ''\n if self.split_by_line:\n text = [(line.lstrip() + eol,) for line in decoded_lines]\n else:\n text = [(eol.join(decoded_lines),)]\n\n return text",
"async def transform(self, file):\n\t\tpass",
"def close_file_deserializer(self):\n if self._input_file:\n self._input_file.close()\n self._input_file = None",
"def load(self, filename):\n # XXX Hay que comprobar los datos leidos y lanzar excepcion\n f = open(filename)\n prelaciones = []\n asig = []\n rec = []\n l = f.readline()\n while l:\n # Activities and following activities\n if l[0:21] == 'PRECEDENCE RELATIONS:':\n f.readline()\n l = f.readline()\n while l[0] != '*':\n data = l.split()\n prel = (data[0], data[3:])\n prelaciones.append(prel)\n l = f.readline()\n\n # Activity duration and resource units needed\n if l[0] == '-':\n l = f.readline()\n while l[0] != '*':\n asig.append(l.split())\n l = f.readline()\n\n # Name, type and unit of resources\n if l[0:22] == 'RESOURCEAVAILABILITIES':\n l = f.readline()\n while l[0] != '*':\n rec.append(l.split())\n l = f.readline()\n\n l = f.readline()\n \n # Create data structure\n cont = 1\n activities = []\n for prelacion in prelaciones:\n activities.append([cont, prelacion[0], prelacion[1], '', '', '', '', '', ('Beta')])\n cont += 1 \n\n # Update activities duration\n for n in range(len(asig)): \n activities[n][6] = float(asig[n][2])\n\n # Update resources\n i = 1\n m = 0\n resources = []\n if len(rec) < 2:\n raise InvalidFileFormatException()\n\n for n in range(len(rec[1])):\n # Renewable\n if rec[0][m]=='R' or rec[0][m][0]=='R':\n if rec[0][m]=='R':\n row=[rec[0][m]+rec[0][i], 'Renewable', '', rec[1][n]] \n m+=2\n else:\n row=[rec[0][m], 'Renewable', '', rec[1][n]] \n m+=1 \n # Non Renewable\n elif rec[0][m]=='N' or rec[0][m][0]=='N':\n if rec[0][m]=='N':\n row=[rec[0][m]+rec[0][i], 'Non renewable', rec[1][n], '']\n m+=2\n else:\n row=[rec[0][m], 'Non renewable', rec[1][n], ''] \n m+=1\n # Double constrained\n elif rec[0][m]=='D' or rec[0][m][0]=='D':\n if rec[0][m]=='D':\n row=[rec[0][m]+rec[0][i], 'Double constrained', rec[1][n], rec[1][n]]\n m+=2\n else:\n row=[rec[0][m], 'Double constrained', rec[1][n], rec[1][n]] \n m+=1\n \n resources.append(row)\n i += 2\n # Note: Unlimited resources are not present on PSPLIB projects and so \n # not taken into account here\n\n # Resources needed per activity\n asignation = []\n for n in range(len(asig)): \n for m in range(3, 3+len(rec[1])): #len(self.rec[1]): number of resources \n if asig[n][m] != '0': #unused resources are not shown\n i = m-3\n row = [asig[n][0], resources[i][0], asig[n][m]] \n asignation.append(row)\n \n return (activities, [], resources, asignation)",
"def extract_file(self):\n shutil.unpack_archive(os.path.join(self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))",
"def decompressFile(infile, outfile):\n decoder = Decoder(infile)\n for data in decoder.bytes():\n outfile.write(data)",
"def decode():\r\n # Open the file with binary instructions\r\n with open(file_name) as file:\r\n lines = file.readlines()\r\n with open(PATH + file_name, \"w\") as file_write:\r\n for line in lines:\r\n file_write.write(line + \"\\n\")\r\n\r\n # Read the instructions\r\n instructions, instruction_names = [], []\r\n parse_instr_bin_list(lines, instructions, instruction_names)\r\n\r\n # Print formatted binary instructions and their names\r\n instr_print(instructions, instruction_names)\r\n\r\n # Write to each of MPS-Files parsed hex-instructions\r\n write_mps(instructions)\r\n\r\n # Write to Mapping-PROM linked addresses\r\n write_mapping_prom(instruction_names)",
"def file_reader(filename = 'conv_params'):\n\n with open(filename) as f:\n info = f.readlines()\n info = [i.strip() for i in info] # each element in info is a string of a line from the file\n info = [i.split() for i in info] # split each whitespace delimited element into a list of lists\n info = [[i.split('-') for i in j] for j in info] # note info is 3 layers deep\n\n info[2] = info[2][0] # makes default E just a single string of the number\n info[3] = info[3][0]\n\n return info",
"def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))",
"def decompress(infile, path, members=None):\n with open(infile, 'rb') as inf, open(path, 'w', encoding='utf8') as tof:\n decom_str = gzip.decompress(inf.read()).decode('utf-8')\n tof.write(decom_str)",
"def extract_file(self):\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), f\"{self.root}\")\n os.remove(os.path.join(self.root, self.resources))"
] | [
"0.61689585",
"0.58359903",
"0.5817161",
"0.5712472",
"0.5661358",
"0.56584007",
"0.56018007",
"0.55656415",
"0.55426884",
"0.5491138",
"0.5484127",
"0.5476499",
"0.54438716",
"0.5438515",
"0.53901505",
"0.5349641",
"0.532227",
"0.5321122",
"0.5308797",
"0.53050524",
"0.5296115",
"0.528828",
"0.5285168",
"0.5265034",
"0.5255768",
"0.5247288",
"0.52319103",
"0.52301764",
"0.5227968",
"0.5227725"
] | 0.61009246 | 1 |
Load the NER model and evaluate it in the test data. | def evaluate_model():
# Get the processed data (in proper format to evaluate the NER model)
data = get_json_from_file_path(PROCESSED_DATA_PATH)
# Split the dataset for training and test as we did for training
train_data, test_data = train_test_split(data, train_size=0.7,
random_state=4)
# Load the model trained
try:
ner_model = spacy.load(OUTPUT_MODEL_PATH)
except Exception as err:
msg = f'Could not load the model. Error: {err}'
raise Exception(msg)
# Compute evaluation scores
print('Computing metrics...')
scores = evaluate(ner_model, test_data)
# General metrics of the model
f_score = scores.get('ents_f')
precision = scores.get('ents_p')
recall = scores.get('ents_r')
print('\nScoring:')
print(f'F-score: {f_score}')
print(f'Precision: {precision}')
print(f'Recall: {recall}')
# Get the specific scores for each entity
scores_per_entity = scores.get('ents_per_type')
# Get the F-score of the entities
f_scores_of_entities = []
for entity_scores in scores_per_entity.values():
f_scores_of_entities.append(entity_scores['f'])
# Compute the macro averaged F-score
macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)
print(f'Macro averaged F-score: {macro_avg_f_score}')
print('\nScores per entity;')
print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))
for key, value in scores_per_entity.items():
entity = key
f, p, r = value['f'], value['p'], value['r']
print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eval(self):\n self.train(mode=False)",
"def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)",
"def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)",
"def evaluate_model():\n\n print '\\n\\tevaluate result'\n os.system('./conlleval.pl -d \\'\\t\\' < ' + encoded_test + ' >> ' + result_file)\n print '\\t--done\\n'",
"def train_ner(model, new_model_name, output_dir, n_iter, train_data):\r\n random.seed(0)\r\n if model is not None:\r\n nlp = spacy.load(model) # load existing spaCy model\r\n print(\"Loaded model '%s'\" % model)\r\n else:\r\n nlp = spacy.blank(\"en\") # create blank Language class\r\n print(\"Created blank 'en' model\")\r\n # Add entity recognizer to model if it's not in the pipeline\r\n # nlp.create_pipe works for built-ins that are registered with spaCy\r\n if \"ner\" not in nlp.pipe_names:\r\n ner = nlp.create_pipe(\"ner\")\r\n nlp.add_pipe(ner)\r\n # otherwise, get it, so we can add labels to it\r\n else:\r\n ner = nlp.get_pipe(\"ner\")\r\n\r\n ner.add_label(LABEL) # add new entity label to entity recognizer\r\n # Adding extraneous labels shouldn't mess anything up\r\n ner.add_label(\"EXTRA\")\r\n \r\n \r\n if model is None:\r\n optimizer = nlp.begin_training()\r\n else:\r\n optimizer = nlp.resume_training()\r\n move_names = list(ner.move_names)\r\n # get names of other pipes to disable them during training\r\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != \"ner\"]\r\n with nlp.disable_pipes(*other_pipes): # only train NER\r\n sizes = compounding(1.0, 4.0, 1.001)\r\n # batch up the examples using spaCy's minibatch\r\n for itn in range(n_iter):\r\n random.shuffle(train_data)\r\n batches = minibatch(train_data, size=sizes)\r\n losses = {}\r\n for batch in batches:\r\n texts, annotations = zip(*batch)\r\n nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses)\r\n print(\"Losses\", losses)\r\n\r\n # save model to output directory\r\n if output_dir is not None:\r\n output_dir = Path(output_dir)\r\n if not output_dir.exists():\r\n output_dir.mkdir()\r\n nlp.meta[\"name\"] = new_model_name # rename model\r\n nlp.to_disk(output_dir)\r\n print(\"Saved model to\", output_dir)",
"def test_train_after_load(self):\n model = PoincareModel(self.data, burn_in=0, negative=3)\n model.train(epochs=1)\n model.save(testfile())\n loaded = PoincareModel.load(testfile())\n model.train(epochs=1)\n loaded.train(epochs=1)\n self.models_equal(model, loaded)",
"def test_load_model():\n spectrum_binner, test_generator = get_test_binner_and_generator()\n\n model_file = TEST_RESOURCES_PATH / \"testmodel.hdf5\"\n model = load_model(model_file)\n assert model.spectrum_binner.__dict__ == spectrum_binner.__dict__, \"Expected different spectrum binner\"\n\n # Test model layer shapes\n assert model.model.layers[2].to_json() == model.base.to_json(), \\\n \"Expected based model to be identical to part of main model.\"\n\n # Test base model inference\n X, y = test_generator.__getitem__(0)\n embeddings = model.base.predict(X[0])\n assert isinstance(embeddings, np.ndarray), \"Expected numpy array\"\n assert embeddings.shape[0] == test_generator.settings[\"batch_size\"] == 32, \\\n \"Expected different batch size\"\n assert embeddings.shape[1] == model.base.output_shape[1] == 200, \\\n \"Expected different embedding size\"",
"def train_and_eval(model_dir, model_type, train_steps, train_data, test_data, train_embeddings_file_name, test_embeddings_file_name, positive_labels, combination_method, method):\n \n index_map, weights = wvd.load(train_embeddings_file_name)\n #Get positive labels\n positive_labels = positive_labels.split(',')\n \n print(\"reading data...\")\n train_file_name = train_data \n df_train = pd.read_table(train_file_name, dtype={'node1':str, 'node2':str})\n df_train = df_train.sample(frac=1)\n\n # remove NaN elements\n df_train = df_train.dropna(how='any', axis=0)\n \n df_train[LABEL_COLUMN] = (\n df_train[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n\n model_dir = tempfile.mkdtemp() if not model_dir else model_dir\n print(\"model directory = %s\" % model_dir)\n \n train_x, _, train_y, _ = get_input(df_train, weights, index_map, combination_method)\n \n print(\"\\nBuilding model...\")\n m = build_estimator(model_dir, model_type, weights, index_map, combination_method)\n \n print(\"\\nTraining model...\")\n if model_type == \"regressor\":\n m.fit(train_x, train_y, n_epoch=train_steps, show_metric=True, snapshot_epoch=False)\n \n print(\"\\nTesting model...\")\n index_map, weights = wvd.load(test_embeddings_file_name)\n \n print(\"reading data...\")\n test_file_name = test_data\n df_test = pd.read_table(test_file_name, dtype={'node1':str, 'node2':str})\n df_test = df_test.sample(frac=1)\n\n # remove NaN elements\n df_test = df_test.dropna(how='any', axis=0)\n \n df_test[LABEL_COLUMN] = (\n df_test[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n \n if model_type == \"regressor\":\n test_x, test_original_y, test_index_y, test_original_x = get_input(df_test, weights, index_map, combination_method, data_purpose='test')\n node_sets = get_node_sets(test_original_x, test_original_y)\n \n print(\"\\nPredicting:\")\n model_predictions = m.predict(test_x)\n model_predictions = list(model_predictions)\n #Covert back to 1 and 0\n predictions = []\n model_predictions_probs = []\n for prediction in model_predictions:\n predictions.append(prediction[1]) #non-thresholded value of positve class\n model_predictions_probs.append(prediction[1])\n \n k = int(len([i for i in test_original_y if i == 1]) * 0.3)\n do_evaluations([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method)\n #Uncomment to log ranked links\n #log_predictions([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n # positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method,\n # outfilename=combination_method, method=method)",
"def test_fer_model(img_folder, model=\"/path/to/model\"):\n preds = None\n ### Start your code here\n\n if not os.path.exists(model):\n print (\"Model Loading Error: can't find the model.\\n\")\n return None\n\n if not os.path.exists(img_folder):\n print (\"Data Loading Error: can't find the data.\\n\")\n return None\n\n with open(model, 'rb') as model_file:\n model = load(model_file)\n data = load_FER2013_samples(img_folder)\n preds = model.predict(data)\n print (preds)\n ### End of code\n return preds",
"def test(self, test_loader):\n\n self.model.eval()\n with torch.no_grad():\n return self.tester(test_loader, verbose=False)",
"def main(model=None, output_dir=None, n_iter=20):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n # otherwise, get it so we can add labels\n else:\n ner = nlp.get_pipe('ner')\n\n # add labels\n for _, annotations in TRAIN_DATA:\n for ent in annotations.get('entities'):\n ner.add_label(str(ent[2]))\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in TRAIN_DATA:\n nlp.update(\n [text], # batch of texts\n [annotations], # batch of annotations\n drop=0.5, # dropout - make it harder to memorise data\n sgd=optimizer, # callable to update weights\n losses=losses)\n print(losses)\n \n # test the trained model\n for text, _ in TRAIN_DATA:\n doc = nlp(text)\n print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n \n # save model to output directory\n if output_dir is not None:\n print(output_dir)\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n for text, _ in TRAIN_DATA:\n doc = nlp2(text)\n print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])",
"def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')",
"def load_model():\n global obj\n obj = NutritionTableDetector()\n print(\"Weights Loaded!\")",
"def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)",
"def main(model=None, output_dir=None, n_iter=10):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # add the parser to the pipeline if it doesn't exist\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'parser' not in nlp.pipe_names:\n parser = nlp.create_pipe('parser')\n nlp.add_pipe(parser, first=True)\n # otherwise, get it, so we can add labels to it\n else:\n parser = nlp.get_pipe('parser')\n\n # add labels to the parser\n for _, annotations in TRAIN_DATA:\n for dep in annotations.get('deps', []):\n parser.add_label(dep)\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'parser']\n with nlp.disable_pipes(*other_pipes): # only train parser\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in TRAIN_DATA:\n nlp.update([text], [annotations], sgd=optimizer, losses=losses)\n print(losses)\n\n # test the trained model\n test_text = \"It was back in 2007 that hip-hop bible XXL launched its first ever Freshman Class, a list of ten up-and-coming artists poised to change the rap game for good. The last decade has seen more than a hundred stars spotlighted as part of the list and its accompanying annual cover feature, but this year features a history-making entry: Stefflon Don. The talented star has already built a strong reputation for herself in the UK; her unique blend of hard-hitting raps and smooth, dancehall beats has galvanized the scene, earning her critical acclaim and a series of impressive chart positions. Now, she seems ready to achieve the unthinkable: global stardom. Earlier this year, her infectious hit “Hurtin’ Me” – featuring former XXL Freshman French Montana – ascended the Billboard charts, peaking at no. 7 and confirming her US fanbase; but could she truly become the first artist to crack the US? And, more importantly, why has it taken so long for UK rappers to achieve Stateside success?\"\n doc = nlp(test_text)\n print('Dependencies', [(t.text, t.dep_, t.head.text) for t in doc])\n sentence_spans = list(doc.sents)\n displacy.serve(sentence_spans, style='dep')\n\n # save model to output directory\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n doc = nlp2(test_text)\n print('Dependencies', [(t.text, t.dep_, t.head.text) for t in doc])",
"def test_model():\n test_text = \"what is the price of jug?\"\n model = spacy.load(\"../model/custom_ner_model\")\n doc = model(test_text)\n for ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)",
"def evaluate(model, tokenizer, dataset, lines, output_test_file, batch_size=32):\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=batch_size)\n\n print(\"*** Evaluating ***\")\n eval_loss = 0.0\n num_steps = 0\n preds = None\n out_label_ids = None\n for i, batch in enumerate(dataloader):\n if i % 200 == 199:\n print(\"=\", end=\"\")\n if i % 5000 == 4999:\n print(\"[Step \" + str(i+1) + \" / \" + str(len(dataloader)) + \"] \" )\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n labels = batch[3]\n outputs = model(input_ids=batch[0], attention_mask=batch[1], labels=labels)\n tmp_eval_loss, logits = outputs[:2]\n eval_loss += tmp_eval_loss.mean().item()\n \n num_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = labels.detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)\n \n eval_loss = eval_loss / num_steps\n \n preds_label = np.argmax(preds, axis=1)\n \n accuracy = (preds_label == out_label_ids).mean()\n output_dir = os.path.dirname(output_test_file)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n with open(output_test_file, \"w\") as writer:\n all_logits = preds.tolist()\n for i, logit in enumerate(all_logits):\n line = '<CODESPLIT>'.join(\n [item.encode('ascii', 'ignore').decode('ascii') for item in lines[i]])\n\n writer.write(line + '<CODESPLIT>' + '<CODESPLIT>'.join([str(l) for l in logit]) + '\\n')\n print(\"Accuracy =\", str(accuracy))\n\n return accuracy",
"def load_model(self, tmp_dir):\n if self.inf_learner is None:\n self.log_options()\n model_uri = self.backend_opts.model_uri\n model_path = download_if_needed(model_uri, tmp_dir)\n self.inf_learner = load_learner(\n dirname(model_path), basename(model_path))",
"def evaluate(path_to_config, path_to_model):\n\n config, paths, session_id = setup(path_to_config, 1)\n assert isinstance(config, ExperimentConfig)\n logger = logging.getLogger(\"%s.main\" % config.name)\n\n logger.info(\"Evaluating network on test data\")\n\n network = Network(config, paths, session_id, 0)\n network.build()\n network.evaluate(DATA_TYPE_TEST, model_path=path_to_model)",
"def _set_eval(self):\n\n if self.model.__dict__['training']:\n self.model.eval()",
"def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )",
"def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )",
"def load_and_predict(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n\n path = filedialog.askopenfilename()\n with open(path, 'rb') as file:\n Trainer.model = pickle.load(file)\n\n scale = DataLoader.data['out'].max() - DataLoader.data['out'].min()\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(DataLoader.data)\n data_scaled = pd.DataFrame(scaler.transform(DataLoader.data), columns=DataLoader.data.columns)\n\n Trainer.y_pred = batch_predict(Trainer.model, data_scaled.drop(columns=['out']))\n Trainer.y_true = data_scaled['out']\n\n self.test_rmse = scale * math.sqrt(mean_squared_error(Trainer.y_pred, Trainer.y_true))\n print(self.test_rmse)\n self.r_squared = np.corrcoef(Trainer.y_pred * scale, data_scaled['out'] * scale)[0, 1] ** 2\n print(self.r_squared)\n\n models = Trainer.model.get_models()\n param_string = f'Component Function Trained Parameters:\\n'\n for i in range(len(models)):\n param_string += \"length scale: {:.4f}\".format(models[i].kernel_.k1.length_scale) + ' ' + \\\n \"noise level: {:.4e}\".format(models[i].kernel_.k2.noise_level) + '\\n'\n param_string += f'\\nRMSE on the test set: {self.test_rmse}\\n'\n param_string += f'R^2 value on the test set: {self.r_squared}'\n display_params = ttk.Label(self, text=param_string, width=40)\n display_params.grid(row=24 + 7, column=0, columnspan=2, sticky=tk.W + tk.E)",
"def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n left_vocab_filepath = os.path.join(args.model, 'left.vocab')\n left_vocab = Vocab(vocab_filepath=left_vocab_filepath)\n right_vocab_filepath = os.path.join(args.model, 'right.vocab')\n right_vocab = Vocab(vocab_filepath=right_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n if checkpoint['encoder']['model_type'] == 'transformer':\n encoder = TEncoder(input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n dropout=checkpoint['encoder']['dropout'],\n num_attention_heads=checkpoint['encoder']['num_attention_heads'])\n else:\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['decoder']['model_type'] == 'transformer':\n decoder = TDecoder(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n dropout=checkpoint['decoder']['dropout'],\n num_attention_heads=checkpoint['decoder']['num_attention_heads'])\n elif checkpoint['decoder']['with_attention']:\n decoder = Attention(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n pairs = putils.convert_to_seq_pairs(args.data)\n indexed_pairs = putils.index_pairs(pairs, left_vocab.char2idx,\n right_vocab.char2idx)\n if dataset_params['reverse']:\n indexed_pairs = [(y, x) for x, y in 
indexed_pairs]\n source_vocab = right_vocab\n target_vocab = left_vocab\n else:\n source_vocab = left_vocab\n target_vocab = right_vocab\n if args.random > 0:\n random.shuffle(indexed_pairs)\n for seq_num in range(args.random):\n seq = indexed_pairs[seq_num]\n print('-'*80)\n input_str = ' '.join(\n ''.join([source_vocab.idx2char[idx] for idx in seq[0] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n gold_str = ' '.join(\n ''.join([target_vocab.idx2char[idx] for idx in seq[1] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n predicted_idxx = decode(seq[0], args.itemize, encoder, decoder,\n dataset_params['max_seq_len'])\n pred_str = ' '.join(\n ''.join([target_vocab.idx2char[idx] for idx in predicted_idxx\n if idx not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n print('>', input_str)\n print('=', gold_str)\n print('<', pred_str)\n else:\n _evaluate(indexed_pairs, args.itemize, encoder, decoder,\n target_vocab.idx2char, dataset_params['max_seq_len'])",
"def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)",
"def test_training(self):\n self.classifier.train(\"test\", self.message)",
"def train_and_evaluate(OUTPUT_DIR,do_train = True,do_eval=True):\n\n\t\n\tBATCH_SIZE = 32\n\tLEARNING_RATE = 2e-5\n\tNUM_TRAIN_EPOCHS = 5.0\n\n\t#in this steps lr will be low and training will be slow\n\tWARMUP_PROPORTION = 0.1\n\n\n\n\tif os.path.exists(OUTPUT_DIR) and os.listdir(OUTPUT_DIR) and do_train:\n\t\traise ValueError(\"Output directory ({}) already exists and is not empty.\".format(OUTPUT_DIR))\n\tif not os.path.exists(OUTPUT_DIR):\n\t\tos.makedirs(OUTPUT_DIR)\n\t\t\n\t#create train and test data\n\n\ttrain_sents,train_labels,test_sents,test_labels = create_train_test(\"ADE/DRUG-AE.rel\",\"ADE/negative_data_AE.rel\")\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\n\tif do_train:\n\n\t\ttrain_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(train_sents, train_labels)]\n\t\tnum_train_examples = len(train_examples)\n\n\t\tnum_train_steps = int(math.ceil(num_train_examples / BATCH_SIZE * NUM_TRAIN_EPOCHS))\n\t\tnum_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)\n\n\t\tmodel = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\",num_labels = num_labels)\n\t\tmodel.to(device)\n\n\t\tparam_optimizer = list(model.named_parameters())\n\t\tno_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\t\toptimizer_grouped_parameters = [\n\t\t\t{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n\t\t\t{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n\t\t\t]\n\n\t\toptimizer = BertAdam(optimizer_grouped_parameters,lr=LEARNING_RATE,warmup=WARMUP_PROPORTION,t_total=num_train_steps)\n\n\t\tglobal_step = 0\n\t\tnb_tr_steps = 0\n\t\ttr_loss = 0\n\n\t\ttrain_features = convert_examples_to_features(\n\t\t\ttrain_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\n\t\tlogger.info(\"***** Running training *****\")\n\t\tlogger.info(\" Num examples = %d\", num_train_examples)\n\t\tlogger.info(\" Batch size = %d\", BATCH_SIZE)\n\t\tlogger.info(\" Num steps = %d\", num_train_steps)\n\n\n\t\tall_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n\n\t\ttrain_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\t\ttrain_sampler = RandomSampler(train_data)\n\n\t\ttrain_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)\n\n\t\tmodel.train()\n\t\t# for name, param in model.named_parameters():\n\t\t# if param.requires_grad:\n\t\t# print(name)\n\t\t# return\n\t\tfor _ in trange(int(NUM_TRAIN_EPOCHS), desc=\"Epoch\"):\n\t\t\ttr_loss = 0\n\t\t\tnb_tr_examples, nb_tr_steps = 0, 0\n\t\t\tfor step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n\t\t\t\tbatch = tuple(t.to(device) for t in batch)\n\t\t\t\tinput_ids, input_mask, segment_ids, label_id = batch\n\t\t\t\tloss = model(input_ids, segment_ids, input_mask, label_id)\n\t\t\t\tloss.backward()\n\n\t\t\t\ttr_loss += loss.item()\n\t\t\t\tnb_tr_examples += input_ids.size(0)\n\t\t\t\tnb_tr_steps += 1\n\t\t\t\toptimizer.step()\n\t\t\t\toptimizer.zero_grad()\n\t\t\t\tglobal_step += 
1\n\t\t\tprint(tr_loss)\n\n\t\t# Save a trained model and the associated configuration\n\t\tmodel_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\ttorch.save(model_to_save.state_dict(), output_model_file)\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\twith open(output_config_file, 'w') as f:\n\t\t\tf.write(model_to_save.config.to_json_string())\n\t\tlabel_map = {i : label for i, label in enumerate(label_list,1)} \n\t\tmodel_config = {\"bert_model\":\"bert-base-uncased\",\"do_lower\":True,\"max_seq_length\":MAX_SEQ_LENGTH,\"num_labels\":num_labels,\"label_map\":label_map}\n\t\tjson.dump(model_config,open(os.path.join(OUTPUT_DIR,\"model_config.json\"),\"w\"))\n\n\telse:\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\tconfig = BertConfig(output_config_file)\n\t\tmodel = BertForSequenceClassification(config, num_labels=num_labels)\n\t\tmodel.load_state_dict(torch.load(output_model_file))\n\n\tmodel.to(device)\n\n\tif do_eval:\n\n\t\tEVAL_BATCH_SIZE = 32\n\n\t\teval_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(test_sents, test_labels)]\n\t\tnum_eval_examples = len(eval_examples)\n\n\t\teval_features = convert_examples_to_features(\n\t\t\teval_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\t\tlogger.info(\"***** Running evaluation *****\")\n\t\tlogger.info(\" Num examples = %d\", num_eval_examples)\n\t\tlogger.info(\" Batch size = %d\", EVAL_BATCH_SIZE)\n\t\tall_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n\t\teval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) \n\t\t# # Run prediction for full data\n\t\teval_sampler = SequentialSampler(eval_data)\n\t\teval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=EVAL_BATCH_SIZE)\n\t\tmodel.eval()\n\n\t\teval_loss, eval_accuracy = 0, 0\n\t\tnb_eval_steps, nb_eval_examples = 0, 0\n\t\ty_true = []\n\t\ty_pred = []\n\t\tlabel_map = {i : label for i, label in enumerate(label_list,1)}\n\t\tfor input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n\t\t\tinput_ids = input_ids.to(device)\n\t\t\tinput_mask = input_mask.to(device)\n\t\t\tsegment_ids = segment_ids.to(device)\n\t\t\tlabel_ids = label_ids.to(device)\n\n\t\t\twith torch.no_grad():\n\t\t\t\tlogits = model(input_ids, segment_ids, input_mask)\n\t\t\t\t\n\t\t\tlogits = torch.argmax(F.log_softmax(logits,dim=1),dim=1)\n\t\t\tlogits = logits.detach().cpu().numpy()\n\t\t\tlabel_ids = label_ids.to('cpu').numpy()\n\t\t\ty_pred.extend(logits)\n\t\t\ty_true.extend(label_ids)\n\t\tprint(len(y_pred))\n\t\tprint(len(y_true))\n\t\treport = classification_report(y_true, y_pred)\n\t\toutput_eval_file = os.path.join(OUTPUT_DIR, \"eval_results.txt\")\n\t\twith open(output_eval_file, \"w\") as writer:\n\t\t\tlogger.info(\"***** Eval results *****\")\n\t\t\tlogger.info(\"\\n%s\", report)\n\t\t\twriter.write(report)",
"def test_load_model():\n model = BERTopic(language=\"Dutch\", embedding_model=None, n_components=12)\n model.save(\"test\")\n loaded_model = BERTopic.load(\"test\")\n assert type(model) == type(loaded_model)\n assert model.language == loaded_model.language\n assert model.embedding_model == loaded_model.embedding_model\n assert model.top_n_words == loaded_model.top_n_words\n assert model.n_neighbors == loaded_model.n_neighbors\n assert model.n_components == loaded_model.n_components",
"def model(self):\n filePath = self.config['data_path']['train_data']\n data = self.loadCSV(filePath)\n cleandata = self.preprocess(data)\n X, y = self.dataSplit(cleandata)\n X = self.CountVect(X, self.config['transform_path']['transform_model_path'])\n X_train, X_test, y_train, y_test = self.TrainTestSplit(X, y)\n self.MultinomialNB(X_train, X_test, y_train, y_test, self.config['nlp_path']['model_path'])"
] | [
"0.65599304",
"0.6550237",
"0.6496291",
"0.6476324",
"0.6436608",
"0.63972366",
"0.6394532",
"0.63934684",
"0.6388684",
"0.6336536",
"0.63256794",
"0.6304074",
"0.62766826",
"0.62613994",
"0.62345725",
"0.6198054",
"0.6170204",
"0.61625904",
"0.6160035",
"0.6158422",
"0.6153318",
"0.61411905",
"0.61411905",
"0.6126203",
"0.6124813",
"0.61056477",
"0.6097083",
"0.6079961",
"0.6079941",
"0.6074047"
] | 0.70876956 | 0 |