Dataset columns:

  query             string (lengths 9 to 9.05k)
  document          string (lengths 10 to 222k)
  metadata          dict
  negatives         sequence (length 30)
  negative_scores   sequence (length 30)
  document_score    string (lengths 4 to 10)
  document_rank     string (2 classes)
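Each row pairs a natural-language query with a positive code document and thirty mined negative documents, and the per-row metadata marks the training objective as a (query, document, negatives) triplet. Below is a minimal sketch of how rows with this schema could be flattened into training triplets; the dataset identifier and the use of the Hugging Face datasets loader are illustrative assumptions, not part of this card.

# Illustration only: the dataset path is hypothetical, and loading via the
# Hugging Face "datasets" library is an assumption about how these rows are stored.
from datasets import load_dataset

ds = load_dataset("path/to/this-dataset", split="train")  # hypothetical identifier

def to_triplets(row):
    # One (query, positive, negative) triple per mined negative, matching the
    # "triplet" objective declared in row["metadata"].
    for negative in row["negatives"]:
        yield row["query"], row["document"], negative

triplets = [t for row in ds for t in to_triplets(row)]

The negative_scores and document_score fields look like retrieval similarities and could, for example, be used to keep only the hardest negatives before training.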
View all verb and tense quiz options.
def quiz_selection():
    verbs = crud.get_verbs()
    tenses = crud.get_tenses()
    return render_template("verb-conjugation.html", verbs=verbs, tenses=tenses)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ask_all():\n for q in Question.all_questions:\n print 'from', q.file\n q.ask()", "def show_results(self):\n print(\"Survey results:\")\n for response in self.responses:\n print('- ' + response)", "def get_answers(self):\r\n pass", "def option_show_advanced_dialog(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionShowAdvancedDialog/')))", "def answers_all(self):\n return self.answer_set.all()", "def get_all_teas_select(self):\n self.tView.all_teas_display(self.manyTea)\n self.tView.prompt_display(2)", "def display_options(self):\n print()\n options = list(self.get_commands().values())\n options.sort(key=lambda op: int(op.name))\n\n for option in options:\n print(f'{\"%3d\" % int(option.name)}. {option.description}')", "def help_opt(self):\n print(OPTIONS)", "def display(self):\n\n print('\\n')\n for key, val in self.option.items():\n print(key, val, '\\n') # make it more confortable to read\n self.get_choice() # launch automaticly the choice method after display", "def show_question(self):\n print(self.question)", "def get(self,request,format=None):\n answers = SingleWordQuizAnswer.objects.filter(user=request.user.info)\n serializer = SingleWordQuizAnswerSerializer(answers,many=True)\n return Response(data=serializer.data,status=status.HTTP_200_OK)", "async def _opt_all(self, ctx):\n all_options = self.database.get_all_guild_options()\n out = \"```\"\n for options in all_options:\n out += f\"Guild: {self.bot.get_guild(options.id)}\\n\"\n for item in options.__slots__[1:]:\n option = getattr(options, item)\n if option is None:\n continue\n out += f\" {item}: {option}\\n\"\n out += \"```\"\n if out == \"``````\":\n await ctx.send(\"No options available.\")\n return\n await ctx.send(out)", "def get_all_teas(self):\n self.tView.all_teas_display(self.manyTea)\n self.tView.prompt_display(0)", "def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()", "def display_other_options():\n print(\"> - Next Song page.\")\n print(\"< - Previous song page.\")\n print(\"q - to quit\")", "def __debug_print_questions__(self):\n for k in sorted(self.questions.keys()):\n print(\"Question: %s\" %k)\n for a in self.questions[k].answers:\n print(\"\\t%s\" % a)", "def printCurrentOptions(self):\n if self.comm.rank == 0:\n print('+---------------------------------------+')\n print('| All %s Options: |' % self.name)\n print('+---------------------------------------+')\n # Need to assemble a temporary dictionary\n tmpDict = {}\n for key in self.options:\n tmpDict[key] = self.getOption(key)\n pp(tmpDict)", "def list(self, request, format=None):\n queryset = Vocab.objects.all()\n serializer = VocabSerializer(queryset, context={\"request\": request})\n return Response(serializer.data)", "def get(self,request,format=None):\n answers = MultipleQuizAnswer.objects.filter(user=request.user.info)\n serializer = MultipleQuizAnswerSerializer(answers,many=True)\n return Response(data=serializer.data,status=status.HTTP_200_OK)", "def print_options(self):\n for option in self._options.items():\n print \"{0} = {1}\".format(option[0], option[1])", "def options(self, *args, **kwargs):\n self.request(\"options\", *args, **kwargs)", "def verbs(self):\n return self._verbs", "async def contest(self, ctx):\n\t\tawait ctx.send_help('contest')", "async def 
advanced(self, ctx):\n await ctx.send(f'Testing advanced')", "def show_question(self, text, option0, option1, option2=\"\"):\r\n\r\n raise NotImplementedError", "def answers(self):\n return self.answer_set.filter(active=True)", "def get_answers(self):\r\n return self.answer_values", "def show_menu():\r\n print(\"Write a number of the next options:\")\r\n for key, value in enumerate(options):\r\n print(\"{}. {}\".format(key, value))", "def get(self,request,format=None):\n questions_easy = SingleWordQuiz.objects.filter(difficulty=1)\n random_ques_easy = random.choices(questions_easy,k=5)\n\n questions_medium = SingleWordQuiz.objects.filter(difficulty=2)\n random_ques_med = random.choices(questions_medium,k=3)\n\n questions_hard = SingleWordQuiz.objects.filter(difficulty=3)\n random_ques_hard = random.choices(questions_hard,k=2)\n\n final_list = list(chain(random_ques_easy, random_ques_med, random_ques_hard))\n\n serializer = SingleWordQuizSerializer(final_list,many=True)\n return Response(data=serializer.data,status=status.HTTP_200_OK)", "def get_queryset(self):\n return Question.objects.all().order_by(\"-allVote\") #แสดงคำถาม" ]
[ "0.600085", "0.58309793", "0.565951", "0.5643114", "0.55537456", "0.55362535", "0.55034494", "0.5461608", "0.5451248", "0.5445065", "0.5440097", "0.5396626", "0.5365166", "0.535084", "0.53193617", "0.5317761", "0.5285466", "0.5230963", "0.5222562", "0.52065843", "0.51965123", "0.51887774", "0.5184815", "0.51581126", "0.5154902", "0.51447594", "0.5135638", "0.51201904", "0.50484693", "0.5028082" ]
0.68374383
0
Return an RFC 3339-compliant timestamp.
def rfc3339(self):
    if self._nanosecond == 0:
        return _to_rfc3339(self)
    nanos = str(self._nanosecond).rjust(9, "0").rstrip("0")
    return "{}.{}Z".format(self.strftime(_RFC3339_NO_FRACTION), nanos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rfc3339_time() -> Text:\n\n return datetime.datetime.utcnow().isoformat('T') + 'Z'", "def _timestamp():\n moment = time.time()\n moment_us = repr(moment).split(\".\")[1]\n return time.strftime(\"%Y-%m-%d-%H-%M-%S-{}\".format(moment_us), time.gmtime(moment))", "def get_timestamp(self):\n p = self._get_sub_text('timestamp')\n if not p:\n return None\n else:\n return xep_0082.datetime(p)", "def _get_timestamp():\n return '{}Z'.format(\n datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]\n )", "def forge_timestamp(value) -> int:\n assert isinstance(value, str)\n dt = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n return calendar.timegm(dt.utctimetuple())", "def timestamp():\n return datetime.now().strftime(\"%Y%m%dT%H%M%S\")", "def get_timestamp():\n return time.strftime('%Y-%m-%d %H:%M:%S')", "def timestamp(self):\n if self.has_timestamp():\n return parse_windows_timestamp(struct.unpack_from(str(\"<Q\"), self.raw_data()[-8:])[0])\n raise ValueError('value does not have a timestamp')", "def _get_timestamp():\n return str(int(time.time()))", "def timestamp():\n\tn = datetime.datetime.now()\n\treturn \"%04d-%02d-%02dT%02d:%02d:%02d\" % (\n\t\tn.year, n.month, n.day, n.hour, n.minute, n.second\n\t)", "def _generate_timestamp():\n\t\treturn strftime(\"%Y%m%dT%H%M%S\")", "def get_timestamp():\n import time\n timestamp = time.strftime('%Y-%m-%d: %H:%M:%S', time.localtime(time.time()))\n return timestamp", "def timestamp(self):\n return parse_windows_timestamp(self.unpack_qword(0x4))", "def strtotimestamp(s):\n return utctotimestamp(parse_datetime(s))", "def timestamp(self) -> datetime.datetime.timestamp:\n timestamp = datetime.datetime.utcfromtimestamp(int(self._timestamp) / 1000)\n return timestamp", "def get_timestamp():\n timestamp = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())\n return timestamp", "def timestamp_(tstamp: Optional[int]) -> Optional[str]:\n if tstamp is None:\n return None\n\n dtm = datetime.datetime.fromtimestamp(tstamp)\n return dtm.isoformat()", "def get_timestamp():\n return int(time.time())", "def timestamp():\n return datetime.datetime.now().strftime(\"%Y-%m-%d-T%H-%M-%S\")", "def rfc3339date(date):\n if not date: return ''\n date = date + datetime.timedelta(seconds=-time.timezone)\n if time.daylight:\n date += datetime.timedelta(seconds=time.altzone)\n return date.strftime('%Y-%m-%dT%H:%M:%SZ')", "def __timestamp():\n today = time.time()\n return struct.pack(b'=L', int(today))", "def _make_timestamp(self):\r\n\t\tlogger.debug(\"Get a timestamp\")\r\n\t\treturn time.mktime(datetime.today().timetuple())", "def get_timestamp():\n now, s=get_date()\n return (now, \"%s%s%s%s\" % (s, str(now.hour).zfill(2), str(now.minute).zfill(2), str(now.second).zfill(2)))", "def timestamp():\n tmptz = time.timezone\n sign_str = '+'\n if tmptz > 0:\n sign_str = '-'\n tmptz_hours = int(tmptz / 3600)\n\n return str(\"%s%s%02d:%02d\" % (time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime()), sign_str, abs(tmptz_hours),\n int(tmptz / 60 - tmptz_hours * 60)))", "def get_datetime_timestamp():\n KST = datetime.timezone(datetime.timedelta(hours=9))\n return datetime.datetime.now(tz=KST).strftime(\"%Y%m%d%H%M%S\")[2:]", "def timestamp(self):\n return parser.get_timestamp(self)", "def timestamp():\n return datetime.utcnow().strftime(\"%F %T\")", "def timestamp(fmt, timestruct=None):\n return _time.strftime(fmt, timestruct or _time.gmtime())", "def _to_rfc3339(value, ignore_zone=True):\n if not ignore_zone and value.tzinfo is not None:\n # Convert to UTC and remove 
the time zone info.\n value = value.replace(tzinfo=None) - value.utcoffset()\n\n return value.strftime(_RFC3339_MICROS)", "def get_timestamp() -> int:\n\n return int(time.time() * 1000)" ]
[ "0.7412164", "0.7067743", "0.70454866", "0.680817", "0.6691189", "0.66868603", "0.664912", "0.66445935", "0.66431254", "0.6610651", "0.65642494", "0.65453845", "0.65272963", "0.65122104", "0.649016", "0.6486672", "0.64797294", "0.6453291", "0.64185005", "0.6393736", "0.6376092", "0.6356407", "0.6355241", "0.6340536", "0.6324196", "0.63107383", "0.62930936", "0.62781864", "0.6278085", "0.6217622" ]
0.75104326
0
Saves the result dictionary as JSON to a well known location in the working space folder. Relative to the working space folder, the JSON is stored in 'output/results.json'. Folders are created as needed.
def save_result(working_space: str, result: dict) -> None:
    result_path = os.path.join(working_space, 'output')
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    result_path = os.path.join(result_path, 'result.json')
    logging.info("Storing result at location: '%s'", result_path)
    logging.debug("Result: %s", str(result))
    with open(result_path, 'w') as out_file:
        json.dump(result, out_file, indent=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, result_dir):\n path = os.path.join(result_dir, self._filename)\n\n util.write_json(path, {\n 'results': self._results,\n 'params': self._params,\n 'requirements': self._env.requirements,\n 'commit_hash': self._commit_hash,\n 'date': self._date,\n 'python': self._python\n }, self.api_version)", "def save_results(results):\n json.dump(results, open(\"results.json\", \"w\"))", "def save_result(self, results: Dict[str, Dict[str, Any]]) -> None:\n if self.out_dir:\n os.makedirs(self.out_dir, exist_ok=True)\n with open(self.eval_result_file, 'w') as f:\n json.dump(results, f, indent=2)\n else:\n raise ValueError(f'Invalid output dir: {self.out_dir}')\n\n if self.verbose:\n print(f\"======\\nPanoptic nuScenes {self.task} evaluation for {self.eval_set}\")\n print(json.dumps(results, indent=4, sort_keys=False))\n print(\"======\")", "def save_results(self, export_json_path):\n with open(export_json_path, 'w') as f:\n json.dump(self.results, f)", "def save(self, directory):\n os.makedirs(directory, exist_ok=True)\n\n summarized_res = self.get_summarized_results()\n detailed_res = self.get_detailed_results()\n\n with open(os.path.join(directory, '%s_summarized_results.json' % self.name), 'w') as f:\n json.dump(summarized_res, f, sort_keys=True, indent=2)\n\n with open(os.path.join(directory, '%s_detailed_results.json' % self.name), 'w') as f:\n json.dump(detailed_res, f, sort_keys=True, indent=2)", "def write_result(dict, out_path):\n with open(out_path, 'w') as f:\n json.dump(dict, f)", "def save(self):\n output = self.prepare_results()\n\n override_name = output[\"config\"][\"sysconfig\"].get(\"output_filename\", None)\n scenario_name = (\n override_name if override_name else output[\"config\"][\"scenario\"][\"name\"]\n )\n filename = f\"{scenario_name}_{output['timestamp']}.json\"\n log.info(\n \"Saving evaluation results to path \"\n f\"{self.scenario_output_dir}/{filename} \"\n \"inside container.\"\n )\n output_path = os.path.join(self.scenario_output_dir, filename)\n with open(output_path, \"w\") as f:\n json_utils.dump(output, f)\n if os.path.getsize(output_path) > 2**27:\n log.warning(\n \"Results json file exceeds 128 MB! 
\"\n \"Recommend checking what is being recorded!\"\n )", "def save_result(res, name):\n with open('dist/'+name+'.json','w') as fp:\n json.dump(res, fp)", "def write_result_to_file(self):\n self.__test_result[Result.__RUN] = self.__run\n with open(self.__json_file_path, \"w+\") as outfile:\n json.dump(self.__test_result, outfile,\n ensure_ascii=False, indent=2)", "def __saveGithubResults(self):\n\t\tself.__debugInfo(\"Saving JSON results into file {}\".format(self.output_file))\n\t\ttry:\n\t\t\twith open(self.output_file, 'w') as wfile:\n\t\t\t\tjson.dump(self.final_results, wfile)\n\t\texcept Exception as exception:\n\t\t\traise MsgException('Output file could not be written', exception)", "def save_results(coordinates, rs_directory):\n results = coordinates\n with open(os.path.join(rs_directory, 'results.json'), \"w\") as file:\n json.dump(results, file)", "def save_to_file(result, date):\n try:\n os.mkdir('/Users/yueyang/Downloads/serp-626-75-json', mode=0o744)\n except FileExistsError:\n # print('Directory already exists.')\n pass\n\n filename = '{0}.json'.format(date) #datetime.today().strftime('%m-%d-%Y'), query)\n with open(os.path.join('/Users/yueyang/Downloads/serp-626-75-json', filename), 'w') as f:\n json.dump(result, f, indent=4)\n print('Saved search results to {0}'.format(f.name))", "def create_result_json(json_object, result_json_file):\n write_json_to_file(json_object, result_json_file)", "def generate_json_results_file_for_json(\n results_json, builder_name, build_number,\n results_directory, chrome_revision, master_name):\n if not os.path.exists(results_directory):\n os.makedirs(results_directory)\n json_results_file_path = os.path.abspath(\n os.path.join(results_directory, FULL_RESULTS_FILENAME))\n results_json['builder_name'] = builder_name\n results_json['build_number'] = build_number\n results_json['chromium_revision'] = chrome_revision\n results_json['master_name'] = master_name\n with open(json_results_file_path, 'w') as f:\n json.dump(results_json, f)\n return [(FULL_RESULTS_FILENAME, json_results_file_path)]", "def dump_json(self):\n # JSON output not requested\n if not self.json_results:\n return\n\n # Are we writing to a file or stdout?\n if self.json_results == \"-\":\n json_results_fd = sys.stdout\n else:\n try:\n json_results_fd = open(\n os.path.expanduser(\n os.path.expandvars(\n self.json_results)), \"wb\")\n\n except Exception as err:\n self.message(\n \"[-] Problem opening file '%s' to write JSON results to: %s\" %\n (self.json_results, err))\n self.message(\n \"[!] 
Defaulting to writing JSON results to stdout instead\")\n json_results_fd = sys.stdout\n\n try:\n json.dump(self.results, json_results_fd)\n except Exception as err:\n self.message(\n \"[-] Problem writing JSON output to %s : %s\" %\n (self.json_results, err))\n\n if self.json_results != \"-\":\n self.message(\"[+] Written JSON results to %s\" %\n (os.path.abspath(self.json_results)))", "def save_outputs(self):\n write_pickled(join(self.output_folder, \"results.pkl\"), self.get_results())", "def __init_output_folder():\n try:\n os.makedirs(Result.__json_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e", "def dump_to_file(final_results):\n\t#Add prefix result\n\tif final_results[\"Results\"][\"Test passed\"] == True:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_PASSED.json\"\n\telse:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_FAILED.json\"\n\twith open(ouput_filepath, 'w') as fp:\n\t\tjson.dump(final_results, fp)\n\treturn ouput_filepath", "def store_json_convertible_result(result: dict, filepath: str):\n\n with open(filepath, \"w\") as file:\n file.write(json.dumps(result, ensure_ascii=False, indent=3))", "def save_results_internal_json(self, results_internal_dict: Dict):\r\n filename = f\"{self.search_internal_path}/results_internal.json\"\r\n\r\n with open_(filename, \"w+\") as f:\r\n json.dump(results_internal_dict, f, indent=4)", "def save(self):\n\n\t\tdirectory = os.path.dirname(self.path)\n\n\t\tif not os.path.exists(directory):\n\t\t\tos.makedirs(directory)\n\n\t\twith open(self.path, \"w\") as f:\n\t\t\tf.write(\n\t\t\t\tjson.dumps(\n\t\t\t\t\tself.dump(),\n\t\t\t\t\tindent=4,\n\t\t\t\t\tsort_keys=True\n\t\t\t\t)\n\t\t\t)", "def save_fingerprint_result_to_file(result):\n file_path = os.path.join(helpers.get_json_output_directory(), str(int(time.time())) + '.json')\n with open(file_path, 'w') as outfile:\n json.dump(result, outfile)", "def save(self):\n pickle_save(self.results, 'results', self.main_dir)", "def save_json(self):\n if not settings.save_module_result:\n return False\n logger.log('TRACE', f'Save the subdomain results found by '\n f'{self.source} module as a json file')\n path = settings.result_save_dir.joinpath(self.domain, self.module)\n path.mkdir(parents=True, exist_ok=True)\n name = self.source + '.json'\n path = path.joinpath(name)\n with open(path, mode='w', errors='ignore') as file:\n result = {'domain': self.domain,\n 'name': self.module,\n 'source': self.source,\n 'elapse': self.elapse,\n 'find': len(self.subdomains),\n 'subdomains': list(self.subdomains),\n 'infos': self.infos}\n json.dump(result, file, ensure_ascii=False, indent=4)\n return True", "def save_fit_results(self, save_path: str = \"./fit_results.json\"):\n assert (\n self._fit_src_dst_results or self._fit_dst_src_results\n ), \"There are no fit results to be saved, \\\n call fit method first or load the results from the file\"\n assert save_path.endswith(\".json\"), self.JSON_ASSERTION\n\n wrapped_results = {\n \"fit_src_dst_results\": self._fit_src_dst_results,\n \"fit_dst_src_results\": self._fit_dst_src_results,\n }\n\n with open(save_path, \"w\") as fjson:\n json.dump(wrapped_results, fjson)", "def send_results_file_json(**kwargs):\n try:\n logging.debug(\"Opening json output file for 
writing\")\n with open(kwargs[\"output_file_json\"], \"w\") as file_json_open:\n logging.info(\n \"Writing to output json file: \" + kwargs[\"output_file_json\"]\n )\n file_json_open.write(kwargs[\"results_dataset_json\"])\n return True\n except IOError:\n logging.exception(\"Error writing results to json output file\")\n return False", "def saveScanResults(self, out_path=None) -> None:\n\t\tscan_dir = Path(config.PKG_SCAN_DIR)\n\t\tif not scan_dir.is_dir():\n\t\t\tscan_dir.mkdir()\n\t\t\tlogger.info(f\"Created scan results directory at {config.PKG_SCAN_DIR}\")\n\n\t\tjson_results = self.installed_packages.toJSON()\n\n\t\tscan_file = Path(config.PKG_SCAN_FILE)\n\t\tresults_path = scan_dir / scan_file\n\t\twith open(results_path, \"w+\") as f:\n\t\t\tf.write(json_results)\n\n\t\tlogger.info(f\"Scan results saved to:{results_path}\")", "def save_result(self):\n self.logger.info(f'Saving results to {self.db_loc}s24_{self.year}.json')\n open(f'{self.db_loc}s24_{self.year}.json', 'w').write(json.dumps(self.db, indent=4, ensure_ascii=False))", "def save_json_file(article_data, dir_path):\n current_date = str(datetime.now().date())\n output_path = os.path.join(dir_path, current_date)\n os.makedirs(output_path, exist_ok=True)\n filepath = os.path.join(output_path, article_data['source'] + '.json')\n if not os.path.isfile(filepath):\n with open(filepath, 'w') as fp:\n json_data = {\n 'results': {\n article_data['id']: article_data\n }\n }\n json.dump(json_data, fp)\n else:\n with open(filepath, 'r') as fp:\n json_data = json.load(fp)\n json_data['results'][article_data['id']] = article_data\n\n with open(filepath, 'w') as fp2:\n json.dump(json_data, fp2)", "def write_results_to_file(test_results_dict, results_json_file, test_summary, summary_file):\n try:\n logging.info(\"Removing previous version of results file...\")\n if os.path.exists(results_json_file):\n os.remove(results_json_file)\n if os.path.exists(summary_file):\n os.remove(summary_file)\n except Exception as e:\n logging.error(\"Deleting file failed with error '{ERROR}'\".format(ERROR=e))\n\n try:\n logging.info(\"Writing test results to JSON file '{FILE}'...\".format(FILE=results_json_file))\n with open(results_json_file, 'w', encoding='utf-8') as results_json:\n json.dump(test_results_dict, results_json, ensure_ascii=False, indent=4)\n\n logging.info(\"Writing test summary to file '{FILE}'...\".format(FILE=summary_file))\n f = open(summary_file, \"w\")\n f.write(test_summary)\n f.close()\n except Exception as e:\n logging.error(\"Writing test results to files failed with error '{ERROR}'\".format(ERROR=e))" ]
[ "0.79232246", "0.7569962", "0.7325321", "0.7315301", "0.7306659", "0.72789645", "0.7265616", "0.719532", "0.71553546", "0.706384", "0.7049616", "0.70123094", "0.6973604", "0.69650054", "0.69412196", "0.69017756", "0.68920434", "0.68472093", "0.6750217", "0.6739409", "0.6656393", "0.665546", "0.6640913", "0.6629772", "0.6620404", "0.65992296", "0.65944237", "0.65303564", "0.6463829", "0.6451175" ]
0.7699649
1
Generates a 6-character alphanumeric code used to verify that the user has purchased a cartridge.
def create_secret_code():
    characters = string.ascii_uppercase + string.digits
    size = 6
    return ''.join(random.choice(characters) for _ in range(size))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_new_code():\n code = ''.join(random.choice(string.digits) for i in range(6))\n return code", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def generate_code(self):\n code = ''.join(\n random.choices(string.ascii_lowercase + string.digits, k=5))\n self.code = '{}{}'.format(self.user.id, code)", "def generate_verification_code():\n new_ver_code = str(random.randint(1000000, 9999999))\n return new_ver_code", "def make_ticket_code(prefix, code_dict):\r\n while (True): # continue until we find a unique code\r\n letters = random.sample(_CHAR_LIST, 4) # generate 4 random letters\r\n code = prefix + ''.join(letters) # turn letters to string\r\n if not code_dict.has_key(code): # code is unique\r\n return code", "def __generate_random_string():\n return uuid4().hex[:6].upper()", "def generate_verification_code(self, size=10, chars=string.digits):\n return \"\".join(random.choice(chars) for _ in range(size))", "def _generate_cart_id():\n cart_id = ''\n characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()'\n cart_id_length = 50\n cart_id = ''.join([ _generate(characters) for y in range(cart_id_length)])\n\n return cart_id", "def generate_code(_=None):\n chars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\n rand = random.SystemRandom()\n return \"\".join(rand.choice(chars) for x in range(30))", "def _generate_cart_id():\n cart_id = ''\n characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()'\n cart_id_length = 50\n for y in range(cart_id_length):\n cart_id += characters[random.randint(0, len(characters) - 1)]\n return cart_id", "def createOTP():\n\t code = []\n\t for i in range(6):\n\t\t code.append(random.randint(0,9))\n\t return \"\".join(str(code) for c in code)", "def activation_code_generaor(size=6,candidate_chars=upper_case+lower_case+digits):\n\tcode = ''.join([random.choice(candidate_chars) for i in xrange(size)])#random.choice(list) picks an element from list randomly\n\treturn code", "def genSCID():\n scid_hex = getRandomBytes(8)\n scid_hex = getSHA256Hex(scid_hex)\n scid_hex = scid_hex[0:8]\n return scid_hex", "def generate_key():\r\n\t\treturn ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(123))", "def generate_random_coupon_code(suffix=None):\n code = fake.password(length=8, special_chars=False, digits=True, upper_case=True, lower_case=False)\n if suffix:\n code += suffix\n\n return code", "def gen_api_key():\r\n m = hashlib.sha256()\r\n m.update(get_random_word(12))\r\n return unicode(m.hexdigest()[:12])", "def generate_product_number():\n return str(uuid.uuid4())", "def default_code():\n return uuid.uuid4().hex", "def create_hash() -> str:\n length = 6\n char = string.ascii_uppercase + string.digits + string.ascii_lowercase\n\n # Generate a new ID, until one is found that is unique\n while True:\n hash = \"\".join(random.choice(char) for _ in range(length))\n\n if not utils.cache_is_hash_taken(hash):\n return hash", "def generate_id(self):\n unique_id = \"\"\n\n while len(unique_id) < self.id_length:\n ascii_number = self.get_random_bits()\n\n if self.is_approved_ascii(ascii_number):\n random_char = chr(ascii_number)\n\n if not self.is_excluded_char(random_char):\n unique_id += chr(ascii_number)\n\n return unique_id", "def generate_password(self, length):\n items = [\"a\", \"e\", \"i\", \"o\", \"u\", \"1\", \"2\", \"4\", \"5\", \"7\", \"8\", \"9\"]\n\n new_password = \"\"\n 
while(len(new_password) < length):\n item = items[randint(0, len(items) - 1)]\n new_password += item\n return new_password", "def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))", "def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))", "def _generate_id() -> str:\n return \"\".join(sample(\"abcdefghjkmopqrstuvqxyz\", 16))", "def gen_secret() -> str:\n r = random.randrange(0, 255) # INSECURE, just for demo\n r = hex(r)[2:]\n if len(r) == 1:\n return f'0{r}'\n return r", "def generate_player_id() -> string:\n while True:\n # code will have uppercase letters and numbers\n code_options = string.ascii_uppercase + string.digits\n generated_player_id = ''.join(secrets.choice(code_options) for i in range(5))\n if Player.objects.filter(player_id=generated_player_id).count() == 0:\n break\n return generated_player_id", "def new_barcode(num_digits=5, chars=string.digits+string.uppercase):\n return 'FLIM-'+(''.join([random.choice(chars) for _ in xrange(num_digits)]))", "def generate_game_code() -> int:\n while True:\n # code will only contain digits\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return int(generated_game_code)", "def generate_passwd(length=6):\n ret = ''\n if length < 6 :\n length = 6\n elif length > 10 :\n length = 10\n for x in xrange(length) :\n if x == 3 :\n ret += '-'\n ret += chr(random.randrange(ord('a'),ord('z'),1))\n return ret", "def generate_code(self):\n seeds = \"1234567890\"\n random_str = []\n for i in range(4):\n random_str.append(choice(seeds))\n\n return \"\".join(random_str)" ]
[ "0.74614185", "0.7150063", "0.7099597", "0.6996119", "0.6931389", "0.6922186", "0.68661886", "0.67869914", "0.6760706", "0.6746724", "0.67371583", "0.6697226", "0.65777385", "0.651033", "0.6507484", "0.6506442", "0.64953274", "0.64928424", "0.6449761", "0.643318", "0.64188457", "0.64012927", "0.64012927", "0.6398607", "0.6395929", "0.63925916", "0.6383545", "0.6375396", "0.6354404", "0.6349416" ]
0.7415497
1
Remove outliers from ``Series``.
def filter_outliers(data: pd.Series, std: int = 3) -> pd.Series:
    return data[(data - data.mean()).abs() <= (std * data.std())]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_outliers(self, data, sd_val):\n data = data.dropna()\n data = data[(np.abs(stats.zscore(data)) < float(sd_val)).all(axis=1)]\n return data", "def rem_outliers(s):\n s_mean = s.mean()\n s_std = s.std()\n s_min = s_mean - 3 * s_std\n s_max = s_mean + 3 * s_std\n return s.loc[(s_min < s.loc[:]) & (s.loc[:] < s_max)].index.to_list()", "def _remove_outliers(self, data, nsigma):\n while (np.abs(np.ma.fix_invalid(data)-np.nanmean(data)) > nsigma *\n np.nanstd(data)).any():\n\n data[np.where(np.abs(np.ma.fix_invalid(data)-np.nanmean(data)) >\n nsigma * np.nanstd(data))] = np.nan\n\n return data", "def drop_outliers_for(feature, samples):\n return [s for s in samples if not feature.is_outlier(s)]", "def remove_outliers(data):\n upper_boundary = np.quantile(data, 0.992)\n lower_boundary = np.quantile(data, 0.008)\n selection = data[(data > lower_boundary) & (data < upper_boundary)]\n standard_dev = np.std(selection)\n median = np.median(selection)\n data[(median + 4.5 * standard_dev < data) | (data < median - 4.5 * standard_dev)] = median\n return data", "def filter_outliers(self, df, outlier):\n return df[~outlier].reset_index(drop=True)", "def remove_outliers(df, var):\n import numpy as np\n \n df = df.copy()\n \n # remove outliers\n Q1 = np.nanquantile(df[var] ,0.25)\n Q3 = np.nanquantile(df[var], 0.75)\n IQR = Q3 - Q1\n \n lower_end = Q1 - 1.5 * IQR \n high_end = Q3 + 1.5 * IQR \n \n df_filtered = df.drop(df[(df[var] < lower_end) | (df[var] > high_end)].index)\n \n return df_filtered", "def remove_outliers(data_frame, attribute, n):\n\tr = robjects.r\n\trobjects.globalenv[\"dat\"] = data_frame\n\tnew_frame = r(\"dat[!(abs(dat$\"+attribute+\" - mean(dat$\"+attribute+ \\\n\t\t\t\t\t\t\"))/sd(dat$\"+attribute+\")) >\" +str(n)+\",]\")\n\treturn new_frame", "def remove_outliers(value, remove_outlier):\n try:\n if len(value) > 0:\n percent = float(remove_outlier)\n value = value.dropna().astype(\"float64\")\n q75, q25 = np.percentile(\n value, [percent, 100 - percent], interpolation=\"linear\"\n )\n iqr = q75 - q25\n value = value[value >= (q25 - 1.5 * iqr)]\n value = value[value <= (q75 + 1.5 * iqr)]\n value.reset_index(drop=True)\n return value\n except:\n raise", "def filter_outliers(data): \n \n idx_out = find_outliers_IQR(data)\n \n cleaned = data[~idx_out].copy()\n\n # print(f'There were {idx_out.sum()} outliers.')\n \n return cleaned", "def remove_outliers(df, std_threshold: float = 3):\n\n df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]\n return df", "def remove_outliers(self, std_tol=1.5):\r\n from lsst.analysis import outlier\r\n for tnum in numpy.unique(self.data[\"tiles\"]):\r\n self.decimate(outlier.valid(self, self.data[\"tiles\"]==tnum, std_tol=std_tol))", "def reject_outliers(self, data, m=2):\n std = np.std(data)\n return data[abs(data - np.median(data)) < m * std]", "def remove_rf_outliers(X, y):\n summary = np.percentile(y, [25, 50, 75])\n high_lim = summary[0] - 1.5 * (summary[2] - summary[1])\n low_lim = summary[2] + 1.5 * (summary[2] - summary[1])\n\n data = np.hstack((X, y[None].T))\n\n data = data[~(data[:, -1] >= low_lim)]\n data = data[~(data[:, -1] <= high_lim)]\n\n # remove last instances\n data = data[:-(data.shape[0] % 1000), :]\n\n return data[:, :-1], data[:, -1]", "def remove_outliers(X, lo, hi):\n\t\n\tx1 = np.array(X)\n\ty1 = x1[np.where(x1 > lo)]\n\ty2 = y1[np.where(y1 <= hi)]\n\n\treturn y2", "def outlier(arr, as_nan=True, thresh=0.05, show=False, report=False):\n if len(arr) < 3:\n return arr\n if show:\n plt.subplot(1,2,1) # Plot part 
1 first\n plt.plot(np.random.random(len(arr)), thing1, 'o', color='blue',\n markeredgecolor='none', alpha=0.4)\n plt.title('With outliers')\n \n med_res = [(np.median(arr)-i)**2 for i in arr] \n med_res_ix = [u for u in med_res] # Create index\n arr_copy = [u for u in arr] # The copy will be edited first\n stds = []\n med_res.sort(reverse=True) # Largest to smallest\n # print(med_res[:10])\n numPts = max([int(len(arr)*thresh), 2])\n # print('Testing largest %i residuals' %numPts)\n \n # Pretend to remove 10% of points\n for i in range(numPts): #for i in range(int(len(arr)*.1)): #\n stds.append(np.std(arr_copy))\n rm_ix = med_res_ix.index(med_res[i])\n try:\n rm = arr[rm_ix]\n except:\n print('tried to remove ix %i but arr is len %i'\n %(rm_ix, len(arr)))\n try: \n arr_copy.pop(arr_copy.index(rm))\n except:\n print('tried to remove %f but not in arr_copy' %rm)\n \n # Find the greatest d(std)\n dstd = np.diff(stds)\n dstd = [abs(i) for i in dstd]\n rm_to = list(dstd).index(max(dstd))+1 # len(diff) = len(arr)-1\n\n #print('Mean d(std): %.3f, removing all above %.3f (%i pts)'\n # %(np.mean(dstd), dstd[rm_to-1], rm_to))\n \n for i in range(rm_to):\n arr[med_res_ix.index(med_res[i])] = np.nan\n \n if show: # Show\n plt.subplot(1,2,2)\n plt.plot(np.random.random(len(arr)), arr, 'o',\n color='red', markeredgecolor='none', alpha=0.4)\n plt.title('Without outliers')\n plt.show()\n if as_nan:\n return arr\n return [i for i in arr if not pd.isnull(i)] # Else just eliminate it.", "def remove_outliers(self, matrix):\n input = matrix[:, :-1]\n row_incides_to_delete = []\n for j, column in enumerate(input.transpose()):\n self.feature_means.append(np.mean(column))\n self.feature_stds.append(np.std(column))\n\n for i, row in enumerate(input):\n cell = input[i, j]\n if cell > self.feature_means[j] + 3 * self.feature_stds[j] or cell < self.feature_means[j] - 3 * \\\n self.feature_stds[j]:\n row_incides_to_delete.append(i)\n matrix = np.delete(matrix, row_incides_to_delete, 0)\n return matrix, len(list(set(row_incides_to_delete)))", "def removeOutliers(self):\n #With the DSFPlate object, we can just use self.wells.pop() to remove outliers\n visited = []\n discard = []\n for well in self.wells:\n if well not in visited:\n reps = []\n reps += self.originalPlate.repDict[well]\n pairs = combinations(reps,2)\n distMatrix = [[0 for x in range(len(reps))] for y in range(len(reps))]\n for pair in pairs:\n dist = sqrDiffWellFluoro(self.wells[pair[0]].fluorescence,self.wells[pair[1]].fluorescence)\n distMatrix[reps.index(pair[0])][reps.index(pair[1])] = dist\n distMatrix[reps.index(pair[1])][reps.index(pair[0])] = dist\n keep = rh.discardBad(reps,distMatrix,SIMILARITY_THRESHOLD)\n for rep in reps:\n visited.append(rep)\n if rep not in keep:\n discard.append(rep)\n for well in discard:\n self.wells[well].fluorescence = None\n self.delCurves.append(well)\n return", "def reject_outliers(data, m):\n d = np.abs(data - np.nanmedian(data))\n mdev = np.nanmedian(d)\n s = d/mdev if mdev else 0.\n return np.where(s < m)", "def remove_outliers(self, periods=None, in_place=False):\n###############################################################################\n\n # import\n import numpy as np\n\n if self.outliers:\n if periods == None:\n data = np.delete(self.data, self.outliers, axis=0)\n else:\n lindex = self.lindex_in_periods(periods)\n ldelete = np.intersect1d(lindex, self.outliers)\n data = np.delete(self.data, ldelete, axis=0)\n\n\n else:\n data = np.copy(self.data)\n\n new_Gts = self.copy()\n new_Gts.outliers = []\n 
new_Gts.data = data\n\n if in_place:\n self.data = new_Gts.data.copy()\n del new_Gts\n self.outliers = []\n\n return (self)\n else:\n return (new_Gts)", "def drop_outliers(ps, method=1,center=\"mean\",variable=None):\n s = ps.describe().T\n Q1,median,Q3,mean,std = s[\"25%\"],s[\"50%\"],s[\"75%\"],s['mean'], s['std']\n if method == 1:\n IQR = Q3 - Q1\n if IQR == 0:\n print \"IQR == 0. \",variable, \"needs a closer look\"\n return ps\n else:\n ix = ps[(ps < (Q1 - 1.5 * IQR)) | (ps > (Q3 + 1.5 * IQR))].index.tolist()\n return ps\n elif method == 2:\n if center == \"mean\":\n ix = ps[abs(ps - mean) > 2.5 * std].index.tolist()\n ps.loc[ix] = mean\n return ps\n elif center == \"median\":\n ix = ps[abs(ps - median) > 2.5 * std].index.tolist()\n ps.loc[ix] = median\n return ps\n else:\n print \"unknonw center\"\n return ps\n else:\n print \"unknonw method\"\n return ps", "def reject_outliers(data, m=2., std=None):\n median = np.median(data)\n keep = []\n if std is None:\n std = np.std(data)\n for item in data:\n if abs(item - median) > m * std:\n pass\n else:\n keep.append(item)\n return keep", "def replace_outliers(series, period=5000, min_period=0, k=5):\n log_series = np.log(series)\n std_rolling = log_series.rolling(window=period, min_periods=min_period).std()\n mmed = log_series.rolling(window=period, min_periods=min_period).median()\n# ma = log_series.rolling(window=period, min_periods=min_period).mean()\n threshold = k * std_rolling\n\n index_values_to_replace = np.abs(log_series - mmed) > threshold\n new_series = series.copy()\n new_series[index_values_to_replace] = np.exp(mmed)\n return new_series", "def remove_outliers(pairs, constants):\n outlier_threshold = constants.OUTLIER_THRESHOLD\n new_pairs = []\n for pair in pairs:\n pair.calculate_outlier()\n if pair.outlier_indicator <= outlier_threshold:\n new_pairs.append(pair)\n return new_pairs", "def drop_outliers(target_df, settings):\n target_df.sort_values(list(target_df.columns), inplace=True)\n startlen = target_df.shape[0]\n if settings[\"drop_outlier_above\"] < 1:\n target_df = target_df.iloc[: int(np.floor(startlen * settings[\"drop_outlier_above\"])), :]\n if settings[\"drop_outlier_below\"] > 0:\n target_df = target_df.iloc[int(np.floor(startlen * settings[\"drop_outlier_below\"])) :, :]\n return target_df", "def remove_outliers(a, constant=1.5):\n if not isinstance(a, np.ndarray):\n a = np.array(list(a))\n\n upper_quartile = np.percentile(a, 75)\n lower_quartile = np.percentile(a, 25)\n IQR = (upper_quartile - lower_quartile) * constant\n quartile_set = (lower_quartile - IQR, upper_quartile + IQR)\n return [y for y in a.tolist() if y >= quartile_set[0] and y <= quartile_set[1]]", "def reject_outliers_arg(data,nSigma):\n criterion = ( (data[:] < (data[:].mean() + data[:].std() * nSigma)) &\n (data[:] > (data[:].mean() - data[:].std() * nSigma)) )\n ind = np.array(np.where(criterion))[0]\n \n return ind", "def drop_outliers(data, cols, t=1.5):\n iqr_d = iqr(data, cols, t)\n for col in cols:\n return data[~((data[col]< iqr_d[\"low_b\"][col]) | (data[col]> iqr_d[\"upp_b\"][col]))]", "def is_outlier(incoming_data):\r\n outlier_df = \\\r\n incoming_data[incoming_data.apply(\r\n lambda x: np.abs(x - x.mean()) / x.std() > 3).all(axis=1)]\r\n return not outlier_df.empty", "def remove_outliers(self, data, min_p= 25, max_p= 75, cut= '', skewed= False):\n data_c = [ d for d in data if d ]\n q25, q75 = np.nanpercentile(data_c, min_p), np.nanpercentile(data_c, max_p)\n cut_off = (q75 - q25) * cut\n lower, upper = q25-cut_off, q75+cut_off\n\n 
if skewed==True:\n q50 = np.nanpercentile(data_c, 50)\n lower , upper = q25-(q50-q25)*cut , q75+(q75-q50)*cut\n\n median = np.nanmedian(data_c)\n cleaned, outliers = [],[]\n\n for d in np.asarray(data):\n if d >= lower and d <= upper:\n cleaned.append(d)\n outliers.append(np.nan)\n elif np.isnan(d):\n cleaned.append(np.nan)\n outliers.append(np.nan)\n else:\n cleaned.append(np.nan)\n outliers.append(d)\n return cleaned, outliers, lower, upper, median" ]
[ "0.745088", "0.7391351", "0.7311255", "0.7210623", "0.712991", "0.71262974", "0.7124151", "0.7059068", "0.7044938", "0.70288086", "0.7001559", "0.69933045", "0.6866978", "0.6784392", "0.6720005", "0.6652681", "0.6555574", "0.6537433", "0.64253753", "0.6419967", "0.63965493", "0.63895774", "0.6379469", "0.6363781", "0.6356182", "0.63507116", "0.63101214", "0.6308166", "0.6307972", "0.6296622" ]
0.7444352
1
Tests behavior if interface.get_vessel_list throws a DoesNotExistError.
def test_get_vessel_list_throws_DoesNotExistError():
    interface.get_vessel_list = mock_get_vessel_list_throws_DoesNotExistError
    interface.release_vessels = mock_release_vessels
    response = c.post('/html/del_resource', good_data, follow=True)
    assert(response.status_code == 200)
    assert("Unable to remove" in response.content)
    assert(response.template[0].name == 'control/myvessels.html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_url_vessel_empty_list(self):\n url = reverse('vessel-list')\n response = self.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_vessel_list_throws_InvalidRequestError():\r\n interface.get_vessel_list = mock_get_vessel_list_throws_InvalidRequestError\r\n interface.release_vessels = mock_release_vessels\r\n response = c.post('/html/del_resource', good_data, follow=True)\r\n \r\n assert(response.status_code == 200)\r\n assert(\"Unable to remove\" in response.content)\r\n assert(response.template[0].name == 'control/myvessels.html')", "def helper_test_vessel_non_empty_list(self):\n url = reverse('vessel-list')\n response = self.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json_data = json.loads(response.content)\n is_empty = True\n if type(json_data) == list:\n is_empty = len(json_data) == 0\n\n self.assertEqual(is_empty, False)", "def test_url_create_vessel(self):\n self.helper_test_create_vessel()\n self.helper_test_create_vessel_duplicated()\n self.helper_test_vessel_non_empty_list()", "def test_visibility_of_not_available_2(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertNotContains(response, partner.get_absolute_url())", "def test_release_vessels_throws_InvalidRequestError():\r\n interface.get_vessel_list = mock_get_vessel_list\r\n interface.release_vessels = mock_release_vessels_throws_InvalidRequestError\r\n response = c.post('/html/del_resource', good_data, follow=True)\r\n \r\n assert(response.status_code == 200)\r\n assert(\"Unable to remove\" in response.content)\r\n assert(response.template[0].name == 'control/myvessels.html')", "def test_list_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)", "def test_if_app_can_search_for_existing_list_without_products(self):\n add_list=self.client.post('/shoppinglists/', \n data=self.shopllist,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(searchforlists.status_code,200) \n self.assertIn(\"No list found\",str(searchforlists.data))", "def test_visibility_of_not_available_4(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertContains(response, partner.get_absolute_url())", "def test_service_not_ready_catalog(self):\n conn = self._get_conn()\n\n server_id = str(uuid.uuid4())\n server_name = self.getUniqueString('name')\n fake_server = fakes.make_fake_server(server_id, server_name)\n\n self.register_uris(\n [\n dict(\n method='GET',\n uri='https://compute.example.com/v2.1/',\n exc=requests.exceptions.ConnectionError,\n ),\n self.get_nova_discovery_mock_dict(),\n dict(\n method='GET',\n uri=self.get_mock_url(\n 'compute', 'public', append=['servers', 'detail']\n ),\n json={'servers': [fake_server]},\n ),\n ]\n )\n\n self.assertRaises(\n exceptions.ServiceDiscoveryException, getattr, conn, 'compute'\n )\n\n # Nova has empty adapter config, so these default\n adap = conn.compute\n 
self.assertIsNone(adap.region_name)\n self.assertEqual('compute', adap.service_type)\n self.assertEqual('public', adap.interface)\n self.assertIsNone(adap.endpoint_override)\n\n s = next(adap.servers())\n self.assertEqual(s.id, server_id)\n self.assertEqual(s.name, server_name)\n self.assert_calls()", "def test_list_all_vessels(self):\n response = self.client.get(self.base_url)\n self.assertEqual(len(json.loads(response.content)), len(Vessel.objects.all()))", "async def test_get_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.get('x')", "def test_visibility_of_not_available_1(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n with self.assertRaises(Http404):\n # We must explicitly pass kwargs to the view even though they are\n # implied by the URL.\n _ = PartnersDetailView.as_view()(request, pk=partner.pk)", "def test_create_vessel_invalid_input(self):\n url = reverse('vessel-create')\n payload = json.dumps({\n \"code2\": \"MV101\"\n })\n response = self.post(url, payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def helper_test_create_vessel_duplicated(self):\n url = reverse('vessel-create')\n payload = json.dumps({\n \"code\": \"MV101\"\n })\n response = self.post(url, payload)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)", "def test_api_get_bucketlist_by_id_not_exist(self):\n res = self.client().get(f\"/bucketlist/99\")\n self.assertEqual(res.status_code, 404)", "def test_no_list_listings(self):\n pool_name = p_n()\n fs_name = fs_n()\n StratisCli.pool_create(pool_name, block_devices=DISKS)\n StratisCli.fs_create(pool_name, fs_name)\n\n self.assertEqual(StratisCli.pool_list(), StratisCli.pool_list(False))\n self.assertEqual(StratisCli.fs_list(), StratisCli.fs_list(False))\n self.assertEqual(StratisCli.blockdev_list(),\n StratisCli.blockdev_list(False))", "def test_no_listings(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context[\"listings\"], [])", "def test_cannot_get_service_from_store_that_does_not_exist(self):\n get_response = self.client.get('/navyget-api/v1/store/5a2bc733791e4bbc9a26f7a5/service/', headers=self.my_header)\n self.assertEqual(get_response.status, \"404 NOT FOUND\")\n self.assertIn(\"That Store does not exist.\", str(get_response.data))", "def test_visibility_of_not_available_3(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n\n # This should not raise Http404.\n response = PartnersDetailView.as_view()(request, pk=partner.pk)\n self.assertEqual(response.status_code, 200)", "def test_get_non_existent_flavor(self):\n try:\n self.flavors_client.get_flavor_details(999)\n self.fail('No exception thrown for a non-existent flavor id')\n except ItemNotFound:\n pass", "def test_list_failure(self, mock_get):\n mock_response = Mock(name='response')\n mock_response.json.side_effect = ValueError('No JSON object could be decoded')\n mock_get.return_value = mock_response\n\n with self.assertRaises(ValueError):\n # Call the method\n self.policies.list()", "def test_list_failure(self, mock_get):\n mock_response = 
Mock(name='response')\n mock_response.json.side_effect = ValueError('No JSON object could be decoded')\n mock_get.return_value = mock_response\n\n with self.assertRaises(ValueError):\n # Call the method\n self.policies.list()", "async def test_select_not_supported(hass: HomeAssistant):\n\n entity_registry = mock_registry(hass)\n device_registry = mock_device_registry(hass)\n\n vehicle_type = \"zoe_40\"\n not_supported_exception = exceptions.NotSupportedException(\n \"err.tech.501\",\n \"This feature is not technically supported by this gateway\",\n )\n\n with patch(\"homeassistant.components.renault.PLATFORMS\", [SELECT_DOMAIN]):\n await setup_renault_integration_vehicle_with_side_effect(\n hass, vehicle_type, not_supported_exception\n )\n await hass.async_block_till_done()\n\n mock_vehicle = MOCK_VEHICLES[vehicle_type]\n check_device_registry(device_registry, mock_vehicle[\"expected_device\"])\n\n assert len(entity_registry.entities) == 0", "def test_volumes_get(self):\n pass", "def test_no_bucket_returned_by_given_id(self):\n with self.client:\n token = self.get_user_token()\n\n response = self.client.get(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['bucket'], list)\n self.assertTrue(response.content_type == 'application/json')", "def test_domain_list_fails(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._fail_domain_list = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def test_get_list_empty(self):\r\n result = self.get_json(self.LIST_URI)\r\n self.assertEqual(result[\"count\"], 0)\r\n self.assertIsNone(result[\"next\"])\r\n self.assertIsNone(result[\"previous\"])\r\n self.assertEqual(result[\"results\"], [])", "def test_virtualservice_get(self):\n pass", "def test_list_subnets(self):\n print(self.the_client.list_subnets())" ]
[ "0.72203964", "0.71185136", "0.7066029", "0.679958", "0.5962365", "0.590518", "0.58169925", "0.57935566", "0.5749535", "0.5715016", "0.5686451", "0.56774145", "0.5670005", "0.5656361", "0.56513494", "0.5625519", "0.5612344", "0.56053144", "0.55029345", "0.54844", "0.54839444", "0.5467571", "0.5467571", "0.545885", "0.5446316", "0.5444354", "0.5443072", "0.54094285", "0.5399811", "0.53788465" ]
0.75056046
0
Tests behavior if interface.get_vessel_list throws an InvalidRequestError.
def test_get_vessel_list_throws_InvalidRequestError():
    interface.get_vessel_list = mock_get_vessel_list_throws_InvalidRequestError
    interface.release_vessels = mock_release_vessels
    response = c.post('/html/del_resource', good_data, follow=True)
    assert(response.status_code == 200)
    assert("Unable to remove" in response.content)
    assert(response.template[0].name == 'control/myvessels.html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_url_vessel_empty_list(self):\n url = reverse('vessel-list')\n response = self.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_vessel_list_throws_DoesNotExistError():\r\n interface.get_vessel_list = mock_get_vessel_list_throws_DoesNotExistError\r\n interface.release_vessels = mock_release_vessels\r\n response = c.post('/html/del_resource', good_data, follow=True)\r\n \r\n assert(response.status_code == 200)\r\n assert(\"Unable to remove\" in response.content)\r\n assert(response.template[0].name == 'control/myvessels.html')", "def helper_test_vessel_non_empty_list(self):\n url = reverse('vessel-list')\n response = self.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json_data = json.loads(response.content)\n is_empty = True\n if type(json_data) == list:\n is_empty = len(json_data) == 0\n\n self.assertEqual(is_empty, False)", "def test_url_create_vessel(self):\n self.helper_test_create_vessel()\n self.helper_test_create_vessel_duplicated()\n self.helper_test_vessel_non_empty_list()", "def test_release_vessels_throws_InvalidRequestError():\r\n interface.get_vessel_list = mock_get_vessel_list\r\n interface.release_vessels = mock_release_vessels_throws_InvalidRequestError\r\n response = c.post('/html/del_resource', good_data, follow=True)\r\n \r\n assert(response.status_code == 200)\r\n assert(\"Unable to remove\" in response.content)\r\n assert(response.template[0].name == 'control/myvessels.html')", "def test_create_vessel_invalid_input(self):\n url = reverse('vessel-create')\n payload = json.dumps({\n \"code2\": \"MV101\"\n })\n response = self.post(url, payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def test_aws_service_api_network_subnets_get(self):\n pass", "def test_list_subnets(self):\n print(self.the_client.list_subnets())", "def test_list_all_vessels(self):\n response = self.client.get(self.base_url)\n self.assertEqual(len(json.loads(response.content)), len(Vessel.objects.all()))", "def test_get_offers(self):\n pass", "def test_get_filtered_list_fail(self):\n (flexmock(errata.requests)\n .should_receive(\"get\")\n .and_return(flexmock(status_code=404, text=\"_irrelevant_\")))\n\n self.assertRaises(exceptions.ErrataToolError, errata.get_filtered_list)", "def test_ipam_vrfs_list(self):\n pass", "def test_list_failure(self, mock_get):\n mock_response = Mock(name='response')\n mock_response.json.side_effect = ValueError('No JSON object could be decoded')\n mock_get.return_value = mock_response\n\n with self.assertRaises(ValueError):\n # Call the method\n self.policies.list()", "def test_list_failure(self, mock_get):\n mock_response = Mock(name='response')\n mock_response.json.side_effect = ValueError('No JSON object could be decoded')\n mock_get.return_value = mock_response\n\n with self.assertRaises(ValueError):\n # Call the method\n self.policies.list()", "def test_list_host_subnet(self):\n pass", "def test_aws_service_api_volumes_get(self):\n pass", "def test_get_operations_list_with_correct_data(self):\n ops = self.client.get_operations_list(self.agent_id)\n self.assertIsInstance(ops, list)", "def test_ip_list_get_when_blank_arguments_provided(err_msg, args, mock_client):\n with pytest.raises(Exception) as err:\n ip_list_get_command(mock_client, 
args)\n assert str(err.value) == err_msg", "def test_get_request_on_bucketlist_resource(self):\n\n response = self.client.get(\"/bucketlists/\")\n self.assertEqual(response.status_code, 401)", "def test_get_eligible_shipment_services_old(self):\n pass", "def test_aws_service_api_interfaces_get(self):\n pass", "def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?", "def test_unique_vessel_code(self):\n data = {'code': test_utils.VESSEL_CODE}\n response = self.client.post(self.base_url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_ipam_vlans_list(self):\n pass", "def test_get_list(self):\n pass", "def test_aws_service_api_vms_get(self):\n pass", "def test_virtualservice_get(self):\n pass", "def test_get_eligible_shipment_services(self):\n pass", "def test_list_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)" ]
[ "0.6953142", "0.67237854", "0.66492444", "0.6389677", "0.620747", "0.5982903", "0.5849792", "0.58475804", "0.57987154", "0.57366586", "0.557864", "0.55655384", "0.5556224", "0.54853606", "0.54853606", "0.5484565", "0.5474907", "0.54696727", "0.54495025", "0.54492813", "0.5443718", "0.5439627", "0.5422106", "0.5397862", "0.5390763", "0.5335061", "0.53168315", "0.5304224", "0.5304091", "0.52833927" ]
0.75849336
0
Tests behavior if interface.release_vessels throws an InvalidRequestError.
def test_release_vessels_throws_InvalidRequestError():
    interface.get_vessel_list = mock_get_vessel_list
    interface.release_vessels = mock_release_vessels_throws_InvalidRequestError
    response = c.post('/html/del_resource', good_data, follow=True)
    assert(response.status_code == 200)
    assert("Unable to remove" in response.content)
    assert(response.template[0].name == 'control/myvessels.html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_vessel_list_throws_InvalidRequestError():\r\n interface.get_vessel_list = mock_get_vessel_list_throws_InvalidRequestError\r\n interface.release_vessels = mock_release_vessels\r\n response = c.post('/html/del_resource', good_data, follow=True)\r\n \r\n assert(response.status_code == 200)\r\n assert(\"Unable to remove\" in response.content)\r\n assert(response.template[0].name == 'control/myvessels.html')", "def test_releaseresourcesrequest_equality_with_other_objects():\n constructor_args = dict(\n interface=\"https://schema.skao.int/ska-low-mccs-releaseresources/2.0\",\n subarray_id=1,\n release_all=True,\n )\n request = ReleaseResourcesRequest(**constructor_args)\n\n assert request != 1\n assert request != object()", "def test_create_vessel_invalid_input(self):\n url = reverse('vessel-create')\n payload = json.dumps({\n \"code2\": \"MV101\"\n })\n response = self.post(url, payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_invalid_release(self):\n self.assertRaises(HTTPError, lambda: self.d.release(0).title)", "def test_get_vessel_list_throws_DoesNotExistError():\r\n interface.get_vessel_list = mock_get_vessel_list_throws_DoesNotExistError\r\n interface.release_vessels = mock_release_vessels\r\n response = c.post('/html/del_resource', good_data, follow=True)\r\n \r\n assert(response.status_code == 200)\r\n assert(\"Unable to remove\" in response.content)\r\n assert(response.template[0].name == 'control/myvessels.html')", "def test_publish_non_release_fails(self):\n aws = FakeAWS(routing_rules={}, s3_buckets={})\n self.assertRaises(\n NotARelease,\n self.publish_docs,\n aws, '0.3.0-444-gf05215b', '0.3.0-444-gf05215b',\n environment=Environments.STAGING)", "def ValidateLicense(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_invalid_version(self):\n\n params_82 = {'ReQuEsT': \"DescribeCoverage\", 'SeRvIcE': \"WCS\", \"BOGUS\": \"SSS\", 'Version': \"0.0.0.0\"}\n response = self.query_server(params_82)\n soup = BeautifulSoup(response.text, 'xml')\n self.assertTrue(\n soup.find('ServiceExceptionReport'),\n msg=\"The server should return an exception if an invalid version is submitted with a DescribeCoverage request.\"\n )", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def test_archive_400_doesnt_create_a_new_version(self):\n company = CompanyFactory()\n assert Version.objects.get_for_object(company).count() == 0\n\n url = reverse('api-v4:company:archive', kwargs={'pk': company.id})\n response = self.api_client.post(url)\n\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert Version.objects.get_for_object(company).count() == 0", "def test_unique_vessel_code(self):\n data = {'code': test_utils.VESSEL_CODE}\n response = self.client.post(self.base_url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "def test_subscriber_access_if_vsg2_goes_down(self):", "def test_invalid_params(self):\n req = '{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": 1}'\n resp = '{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: \\'params\\' must be an array or object\"}, \"id\": null}'\n status = 400\n r_status, r_resp = self.exec_handler(req)\n 
self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def test_missing_version(self):\n\n params_82 = {'ReQuEsT': \"DescribeCoverage\", 'SeRvIcE': \"WCS\", \"BOGUS\": \"SSS\"}\n response = self.query_server(params_82)\n soup = BeautifulSoup(response.text, 'xml')\n self.assertTrue(\n soup.find('ServiceExceptionReport'),\n msg=\"The server should return an exception if the version is not included in a DescribeCoverage request.\")", "def testInvalidRequest(self):\n self.mgr.sendGoProRequest(141)\n self.assertFalse(self.v.message_factory.gopro_set_request_encode.called)", "def test_release_tag_for_invalid_version(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version foo.bar.ba\"):\n release_tag()", "def test_kyc_put_request_legal(self):\n pass", "def test_subscriber_access_if_vsg1_goes_down(self):", "def check_signature_validity(\n request: Request, policy: RequestPolicy, logger: Logger\n) -> None:\n if not policy.signature_validity_match_zsk_policy:\n logger.warning(\n \"KSR-POLICY-SIG-VALIDITY: Disabled by policy (signature_validity_match_zsk_policy)\"\n )\n return\n\n logger.debug(\"Verifying RequestBundles validity parameters:\")\n num = 0\n for bundle in request.bundles:\n num += 1\n validity = bundle.expiration - bundle.inception\n logger.debug(\n \"{num:<2} {inception:29} {expiration:30} {validity}\".format(\n num=num,\n inception=fmt_timestamp(bundle.inception),\n expiration=fmt_timestamp(bundle.expiration),\n validity=validity,\n )\n )\n\n for bundle in request.bundles:\n validity = bundle.expiration - bundle.inception\n\n if validity < request.zsk_policy.max_signature_validity:\n _validity_str = fmt_timedelta(validity)\n _overlap_str = fmt_timedelta(request.zsk_policy.min_signature_validity)\n raise KSR_POLICY_SIG_VALIDITY_Violation(\n f\"Bundle validity {_validity_str} < claimed \"\n f\"min_signature_validity {_overlap_str} (in bundle {bundle.id})\"\n )\n\n if validity > request.zsk_policy.max_signature_validity:\n _validity_str = fmt_timedelta(validity)\n _overlap_str = fmt_timedelta(request.zsk_policy.max_signature_validity)\n raise KSR_POLICY_SIG_VALIDITY_Violation(\n f\"Bundle validity {_validity_str} > claimed \"\n f\"max_signature_validity {_overlap_str} (in bundle {bundle.id})\"\n )\n\n _num_bundles = len(request.bundles)\n _min_str = fmt_timedelta(request.zsk_policy.min_signature_validity)\n _max_str = fmt_timedelta(request.zsk_policy.max_signature_validity)\n logger.info(\n f\"KSR-POLICY-SIG-VALIDITY: All {_num_bundles} bundles have {_min_str} <= validity >= {_max_str}\"\n )", "def test_add_400_doesnt_create_a_new_version(self):\n assert Version.objects.count() == 0\n\n response = self.api_client.post(\n reverse('api-v4:company:collection'),\n data={'name': 'Acme'},\n )\n\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert Version.objects.count() == 0", "def test_version_control_error(self):\n data = {\n \"version_control\": \"gitgitgit\",\n \"scm_repo\": \"A\",\n \"scm_branch\": \"A\",\n \"scm_commit\": \"A\",\n \"repo\": \"A\",\n \"branch\": \"A\",\n \"enabled\": 0\n }\n\n resp = self.client.post(\"/tracking\", json=data, content_type=\"application/json\", headers=self.auth)\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(ResponseCode.INPUT_PARAMETERS_ERROR, resp_dict.get(\"code\"), msg=\"Error in status code return\")\n\n self.assertIn(\"msg\", resp_dict, msg=\"Error in data format 
return\")\n self.assertEqual(\n ResponseCode.CODE_MSG_MAP.get(ResponseCode.INPUT_PARAMETERS_ERROR),\n resp_dict.get(\"msg\"),\n msg=\"Error in status code return\"\n )\n\n self.assertIn(\"data\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(resp_dict.get(\"data\"), None, msg=\"Error in data information return\")", "def test__enable_tunnel_request__value_error(self, params):\n\n self.assertRaises(ValueError, params['server']._enable_tunnel_request,\n 'test')", "def test_invalid_version_fields(self):\n self.assertRaises(ValueError, versions.Version, version='1234', name='foo')", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'signature' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.signature)\n api_client.not_implemented.assert_called_with(\"signature\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def test_release_tag_for_empty(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version \"):\n release_tag()", "def validate_release_request(self, doi: Doi):\n # For release requests, need to check if there are any other DOI records\n # using the same PDS identifier\n if doi.doi:\n self._check_for_preexisting_doi(doi)\n\n self._check_node_id(doi)\n self._check_identifier_fields(doi)\n self._check_lidvid_field(doi)\n self._check_field_title_duplicate(doi)\n self._check_field_title_content(doi)\n\n # Release requests require a valid URL assigned, so check for that here\n self._check_field_site_url(doi)", "def test_create_vehicle_with_too_long_license_plate(self):\n payload = {\n 'user': self.user,\n 'type': 'VSL',\n 'license_plate': 'AA-1234-BB'\n }\n\n res = self.client.post(VEHICLE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_invalid_request(self):\n req = '{\"jsonrpc\": \"2.0\", \"method\": 1, \"params\": \"bar\"}'\n resp = '{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Method must be a string\"}, \"id\": null}'\n status = 400\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def test_releaseresourcesrequest_object_equality():\n constructor_args = dict(\n interface=\"https://schema.skao.int/ska-low-mccs-releaseresources/2.0\",\n subarray_id=1,\n release_all=True,\n )\n request = ReleaseResourcesRequest(**constructor_args)\n\n # objects with same property values are considered equal\n other = ReleaseResourcesRequest(**constructor_args)\n assert request == other\n\n # objects where any property differs are considered unequal\n different_args = dict(\n interface=\"https://schema.skao.int/ska-low-mccs-releaseresources/999.0\",\n subarray_id=2,\n release_all=False,\n )\n for k, v in different_args.items():\n other_args = dict(constructor_args)\n other_args[k] = v\n assert request != ReleaseResourcesRequest(**other_args)" ]
[ "0.6750871", "0.6093183", "0.6056714", "0.5971273", "0.59207636", "0.58044356", "0.578226", "0.5749196", "0.5587508", "0.5565045", "0.54786885", "0.54742163", "0.54639006", "0.5455582", "0.54409415", "0.5428984", "0.5417074", "0.54011023", "0.53716034", "0.5360595", "0.53510076", "0.53300387", "0.53224003", "0.5320689", "0.5318226", "0.53116107", "0.53074163", "0.53046453", "0.5279906", "0.52503234" ]
0.7573449
0
Save the buffer contents in a file with the given filename
def save_as(self, filename): # Join together the buffer contents so it can be written to a file contents = "" for line in self.buffer.get_lines(): contents += line + '\n' # Attempt to open or create the file and write the contents to it try: with open(filename, 'w') as f: f.write(contents) self.filename = filename self.has_changes = False self.message = "Successfully saved: '{}'".format(filename) return True except: self.message = "Error writing file. File not saved!" return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _Write(buf, filename):\n with open(filename, 'wb') as f:\n f.write(buf)", "def save_file(self, filename):\r\n \r\n f = open(filename,'w')\r\n f.write(self.body)\r\n f.close", "def write(self, filename=None):\n # Take filename and expand tilde.\n if filename is not None:\n self.filename = filename\n assert self.filename\n filename = os.path.expanduser(self.filename)\n\n # Write it.\n with codecs.open(filename, 'w', self.encoding) as f:\n f.write(self.buffer.text)\n\n self._file_content = self.buffer.text", "def write_to_file(self, filename: str) -> None:", "def save_file(self, file_name, text):\n\n with open(file_name, 'w') as content_file:\n content = content_file.write(text)", "def _save_to_buffer(self):\n self._save_to_resource()", "def save_file(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'wb') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def write_bytes_to_file(bytes, filename):\n try:\n with open(filename, mode=\"bx\") as file:\n file.write(bytes)\n except FileExistsError:\n os.remove(filename)\n ResourceHandler.write_bytes_to_file(bytes, filename)\n except Exception as e:\n print(e)", "def write_file(data, filename):\n file = open(filename, \"wb\")\n file.write(data)\n file.close()", "def save(self, filename):\n pass", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def save(self, filename):\n o = open(filename, 'w')\n o.write(self.write())\n o.close()", "def save(self, fname, snver=None):\n self._io.save(fname)", "def filewrite(self, filename, data):\n try:\n filedata = data.decode(\"utf-8\")\n except Exception:\n filedata = data\n lock = FileLock(filename)\n lock.acquire()\n with open(filename, 'w+') as f:\n f.write(filedata)\n lock.release()", "def save_file_(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'w') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def to_file(self, filename=None):\n name = None\n if filename is not None:\n name = filename\n elif self.name:\n name = self.name\n\n if name:\n #f = open(self.name, 'w')\n f = codecs.open(name, 'w', encoding='utf-8')\n self.seek(0)\n f.write(self.read())\n f.close()\n else:\n print \"No log_name for this log\"", "def write_to_file(filename, content):\n with open(filename, 'w') as f:\n f.write(content)", "def save(self, filename):\n \n raise NotImplementedError(\"not implemented!\")", "def save2File(self, contents, filename):\n self.setup()\n fullpath = os.path.join(self.output_path, filename)\n f = open(fullpath, 'w')\n f.write(contents) # python will convert \\n to os.linesep\n f.close() # you can omit in most cases as the destructor will call it\n url = \"file://\" + fullpath\n return url", "def spit(filename, contents):\n with open(filename, 'w') as file:\n file.write(contents)", "def write_data_to_file(data, filename):\n with open(filename, 'wb') as outfile:\n outfile.write(data)", "def store(self, filename):", "def writefile(filename, content):\n with open(Path(os.path.expanduser(filename)), 'w') as outfile:\n outfile.write(content)", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def saveIntoFile(self, fname, data, mode='a'):\n\t\tg = open(fname, mode)\n\t\tg.write(data)\n\t\tg.close()", "def filewrite(self, filename):\n io.write(self, filename)", "def save_memory(self, filename):\n \n\n with open(filename + '/obses.npy', 'wb') as f:\n np.save(f, self.obses)\n \n with open(filename + '/actions.npy', 'wb') as f:\n np.save(f, 
self.actions)\n\n with open(filename + '/next_obses.npy', 'wb') as f:\n np.save(f, self.next_obses)\n \n with open(filename + '/rewards.npy', 'wb') as f:\n np.save(f, self.rewards)\n \n with open(filename + '/not_dones.npy', 'wb') as f:\n np.save(f, self.not_dones)\n \n with open(filename + '/not_dones_no_max.npy', 'wb') as f:\n np.save(f, self.not_dones_no_max)\n\n with open(filename + '/index.txt', 'w') as f:\n f.write(\"{}\".format(self.idx))\n\n print(\"save buffer to {}\".format(filename))", "def save_as(self, filename):\n raise NotImplementedError(\n \"Saving ring buffers to other formats is not yet implemented.\")\n\n if filename[-3:] == 'zip':\n pass # TODO\n elif filename[-2:] == 'h5':\n pass # TODO\n elif filename[-4:] == 'fits':\n pass # TODO\n elif filename[-3:] == 'npz':\n self.save_as_numpy(filename)", "def save(self, filename):\n result = self.render()\n\n with open(filename, 'w') as f:\n f.write(result)" ]
[ "0.74837494", "0.707368", "0.7064249", "0.6976132", "0.66752696", "0.6646077", "0.6633993", "0.6593267", "0.65920824", "0.6573828", "0.6536589", "0.6536589", "0.65348256", "0.6523011", "0.65186787", "0.648745", "0.64630646", "0.64508384", "0.64131004", "0.63882536", "0.63727534", "0.6367972", "0.634913", "0.63372624", "0.6324357", "0.6295742", "0.6285579", "0.6284933", "0.6270566", "0.62424064" ]
0.7714931
0
Return whether or not a file name has been set
def has_filename(self): if self.filename == "untitled": return False else: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_new_file(self):\n return self.filename is None", "def filename(self) -> Optional[str]:\n ...", "def isBasedInHiddenFile(self):\n #type: () -> Optional[bool]\n return (\n None if self.realFileName is None #if before\n else self.realFileName != self.fileName\n )", "def file_missing(filename):\n return not os.path.isfile(filename)", "def fileProcessed(self,fileInstance):\n if hasattr(fileInstance,\"name\"): name=fileInstance.name\n elif hasattr(fileInstance,\"url\"): name=fileInstance.url\n if name in self.emptyFileFlag: return self.emptyFileFlag[name]\n else: return False", "def has_file(self, name):\n return bool(self.input(name).__class__.__name__ == 'cgi_FieldStorage')", "def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False", "def test_is_check_filename_False(self):\n self.assertFalse(check_filename('sample.txt'))", "def has_file(self) -> bool:\n return self._file is not None", "def valid_file(fname):\r\n try:\r\n if os.stat(fname).st_size > 0: # if filename contains data\r\n return \"0\"\r\n else:\r\n return \"Selected file is empty....please reenter\"\r\n except OSError:\r\n return \"Can not find the file....please reenter\"", "def nofile(filename):\n if not os.path.isfile(filename):\n return True\n else:\n return False", "def is_file_exists(self):\n pass", "def is_filename_safe(value):\n return value == str_to_filename(value)", "def checkFilename(self):\r\n \r\n #all this should be in the view\r\n\r\n print(\"working directory \", self.path) \r\n print(\"If you'd like to use another directory/folder, please include the full path with the filename.\")\r\n #should i let users change working directory or just put it in the file path\r\n print(\"checking filename \", self.filename)\r\n\r\n if not os.path.isfile(self.filename):\r\n print(\"this is not an existing file\")\r\n createYN = (input(\"create it? 
y/n \")).upper()\r\n if createYN=='Y':\r\n self.createFile()\r\n self.getHeaderDict()\r\n\r\n else: # create file = NO\r\n headerDict = {} #create an empty dictionary\r\n self.loadDictRow(keystring = '') #this will create keys but not values\r\n\r\n else:\r\n \"\"\"\r\n Check to see if the first row is headers, and second row is Test Router\r\n \"\"\"\r\n print(\"this is an existing file\")\r\n self.getHeaderDict()", "def is_file(self):\n return self.tipo == 'file' or self.tipo is None", "def _isvalid_file(filename):\r\n thisisavalidfile = True\r\n if (filename[0] == \".\") or (filename[0] == \"_\") or not ((filename.split(\".\")[-1] == \"txt\") or (filename.split(\".\")[-1] == \"csv\")):\r\n thisisavalidfile = False\r\n\r\n return thisisavalidfile", "def test_is_check_filename(self):\n self.assertTrue(check_filename('sample.csv'))", "def is_present(self):\n return self.file_is_present()", "def has_file(self, name):\n return name in self.files", "def filename(self):\r\n\t\treturn None", "def testFilenameReturn(self):\n self.assertEqual(\n self.filename,\n self.mr.filename\n )\n\n self.mr._filename = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.filename\n )", "def _check_template_name(self, template):\n filename = os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), template, '__init__.ini')\n if self._check_file_exists(filename) and self._check_access(filename, os.R_OK):\n return True\n else:\n return False", "def is_file(self):\n return not self.is_directory", "def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False", "def check_base_filename(self, record):\n time_tuple = time.localtime()\n\n if self.file_name_format:\n pass\n\n if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(\n self._get_format_filename()):\n return 1\n else:\n return 0", "def checkPath(self, filename):\r\n if (not os.path.exists(filename)):\r\n filename = os.getenv('MDLROOT')+'/'+filename\r\n if (not os.path.exists(filename)):\r\n print \"[MDL] ERROR, FILE\", filename, \"DOES NOT EXIST.\"\r\n sys.exit(1)\r\n return filename", "def file_is_present(self, key=None):\n return os.path.isfile(self.file_path(key))", "def names_singleton(self):\r\n if self.stream:\r\n return True\r\n else:\r\n return os.path.isfile(self.object_name)", "def is_filename(name):\n test = re.search(\"[A-Za-z0-9_-]+\\.xml$\", name)\n if test:\n return True\n else:\n return False", "def check_file_exist(self):\n return False" ]
[ "0.6959931", "0.69592226", "0.6951416", "0.68595546", "0.68428236", "0.68309575", "0.68021667", "0.67782605", "0.6746299", "0.66894406", "0.6678487", "0.6630654", "0.65806156", "0.6525225", "0.65238625", "0.65157396", "0.6504091", "0.65025294", "0.6500659", "0.64631623", "0.64626735", "0.6443444", "0.6439251", "0.6434229", "0.64267844", "0.64091045", "0.63857216", "0.6384094", "0.63748497", "0.63739455" ]
0.8064465
0
Add a character to the buffer at the cursor's current position
def add_char(self, char): if self.pos >= self.line_length(): self.buffer.append_char(char, self.line) else: self.buffer.insert_char(char, self.line, self.pos) self.pos += 1 self.has_changes = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addChar (self, c) :\r\n # Notice the \\n so we can notice when new lines begin\r\n if (c=='\\n') :\r\n self.lineNumber_ += 1\r\n self.charNumber_ = 0\r\n \r\n # Keep the last 1024 or so characters\r\n if (self.data_.full()) :\r\n self.data_.get()\r\n self.data_.put(c)\r\n self.charNumber_ += 1", "def append(self, char):\n self.sequence += char", "def add_char(self, coord, char, modify=False):\n if modify:\n range_y, range_x = self._map_dims\n new_coord = [coord[0]+range_y[0]-1, coord[1]+range_x[0]-1]\n self._screen.addch(new_coord[0], new_coord[1], char)\n self._screen.refresh()\n return new_coord\n else:\n self._screen.addch(coord[0], coord[1], char)\n self._screen.refresh()\n return coord", "def insertChar(self, ch):\n word, cx = self.edCursor.getPos()\n string = word.string[:cx] + ch + word.string[cx:]\n word.setString(string)\n # Re-render from tline:\n self.rsubject.linify(word.tline)\n self.edCursor.setPos(word, cx+1)", "def addch(self, posy, posx, character, color_pair):\r\n if posy < 0 or posy > self.height - 1:\r\n return\r\n if posx < 0 or posx > self.width - 1:\r\n return\r\n if posx == self.width - 1 and posy == self.height - 1:\r\n return\r\n self.win.addch(posy, posx, character, color_pair)", "def _push(self, char: str) -> None:\n if (\n char != \".\"\n or self._get_buffer(self._char_buffer_index(self._chars - 1) + 1)\n & 0b01000000\n ):\n self.scroll()\n self._put(\" \", self._chars - 1)\n self._put(char, self._chars - 1)", "def addch(self, stdscr, y, x, text):\n stdscr.addch(y, x, text, curses.color_pair(self.i))", "def addChar(self, char):\n self.guessedChars.append(char)", "def _push(self, char: str) -> None:\n if char in \":;\":\n self._put(char)\n else:\n if (\n char != \".\"\n or self._get_buffer(self._adjusted_index(self._chars - 1)) & 0b10000000\n ):\n self.scroll()\n self._put(\" \", self._chars - 1)\n self._put(char, self._chars - 1)", "def _put(self, char: str, index: int = 0) -> None:\n if not 0 <= index < self._chars:\n return\n if not 32 <= ord(char) <= 127:\n return\n if char == \".\":\n self._set_buffer(\n self._adjusted_index(index * 2 + 1),\n self._get_buffer(self._adjusted_index(index * 2 + 1)) | 0b01000000,\n )\n return\n character = ord(char) * 2 - 64\n self._set_buffer(self._adjusted_index(index * 2), CHARS[1 + character])\n self._set_buffer(self._adjusted_index(index * 2 + 1), CHARS[character])", "def write_char(self, char, token, string_index=None,\n set_cursor_position=False, z_index=False):\n assert len(char) == 1\n\n char_obj = Char(char, token, z_index)\n char_width = char_obj.get_width()\n\n # In case there is no more place left at this line, go first to the\n # following line. (Also in case of double-width characters.)\n if self._x + char_width > self.size.columns:\n self._y += 1\n self._x = 0\n\n insert_pos = self._y, self._x # XXX: make a Point of this?\n\n if string_index is not None:\n self._cursor_mappings[string_index] = insert_pos\n\n if set_cursor_position:\n self.cursor_position = Point(y=self._y, x=self._x)\n\n # Insertion of newline\n if char == '\\n':\n self._y += 1\n self._x = 0\n self._line_number += 1\n\n # Insertion of a 'visible' character.\n else:\n if char_obj.z_index >= self._buffer[self._y][self._x].z_index:\n self._buffer[self._y][self._x] = char_obj\n\n # When we have a double width character, store this byte in the\n # second cell. 
So that if this character gets deleted afterwarsd,\n # the ``output_screen_diff`` will notice that this byte is also\n # gone and redraw both cells.\n if char_width > 1:\n self._buffer[self._y][self._x+1] = Char(six.unichr(0))\n\n # Move position\n self._x += char_width\n\n return insert_pos", "def writechar(self, char: int, /) -> None:", "def _put_chr_at(self, char, row, col, color, adjustment_x=.19, adjustment_y=.19):\n self._goto_piece_xy(row, col, adjustment_x, adjustment_y)\n self.pen.color(color)\n self.pen.write(char, font=(\"Courier\", round(self.square_side_size * .7),\n \"normal\"))", "def write_at_pos(self, y, x, char_obj):\n # Add char to buffer\n if x < self.size.columns:\n if char_obj.z_index >= self._buffer[y][x].z_index:\n self._buffer[y][x] = char_obj", "def advance(self):\n self.pos += 1\n if self.pos < len(self.text):\n self.current_char = self.text[self.pos]\n else:\n self.current_char = None", "def insert(self, character):\n if not hasattr(character, 'character'):\n character = Character(character)\n self.characters.insert(self.cursor.position, character)\n self.cursor.forward()", "def _insChar(self, char, pos, color):\n char, vertices, glyph = self._extractGlyph(char, glm.vec4(color))\n if not self.text:\n off, kern = self._updateMetric(pos, char)\n if char in self.NO_GLYPH_CHARS:\n self.colors.insert(pos, [char, None])\n else:\n vertices['vtx'] += off + glyph['offset']\n self.allVertices = np.hstack(vertices)\n self.allIndices = self._baseInd\n self.colors.insert(pos, [char, color])\n self.text += char\n else:\n self.logger.debug(\"Inserting %r at %d\" % (char, pos))\n nonGlyph = countInSet(self.text[:pos], self.NO_GLYPH_CHARS)\n # Arrange vertices\n if pos < len(self.text):\n self.allVertices = self.allVertices[:(pos - nonGlyph) * 4]\n self.allIndices = self.allIndices[:pos - nonGlyph]\n\n # Set the metric\n off, kern = self._updateMetric(pos, char)\n if char in self.NO_GLYPH_CHARS:\n color = None\n else:\n vertices['vtx'] += off + kern + glyph['offset']\n if self.allVertices is None:\n self.allVertices = np.hstack(vertices)\n else:\n self.allVertices = np.append(self.allVertices, vertices)\n if self.allIndices is None:\n self.allIndices = self._baseInd\n else:\n self.allIndices = np.vstack((self.allIndices,\n self._baseInd + (pos - nonGlyph) * 4))\n\n self.colors.insert(pos, [char, color])\n if pos < len(self.text):\n self.text = self.text[:pos] + char + self.text[pos:]\n self._updateGlyphs(pos, char)\n else:\n self.text += char", "def _(event):\n # Take the current cursor position as the start of this selection.\n buff = event.current_buffer\n if buff.text:\n buff.start_selection(selection_type=SelectionType.CHARACTERS)", "def update_text(self,ch):\n self.text += chr(ch)\n self.update()", "def point(self, x, y, char):\n assert len(char) == 1\n assert x >= 0\n assert x < self.cols\n assert y >= 0\n assert y < self.lines\n\n self.canvas[y][x] = char", "def _put(self, char: str, index: int = 0) -> None:\n # pylint: disable=too-many-return-statements\n if not 0 <= index < self._chars:\n return\n index = self._adjusted_index(index)\n if self._chardict and char in self._chardict:\n self._set_buffer(index, self._chardict[char])\n return\n char = char.lower()\n if char == \".\":\n self._set_buffer(index, self._get_buffer(index) | 0b10000000)\n return\n if char in \"abcdefghijklmnopqrstuvwxy\":\n character = ord(char) - 97 + 10\n elif char == \"-\":\n character = 36\n elif char in \"0123456789\":\n character = ord(char) - 48\n elif char == \" \":\n self._set_buffer(index, 
0x00)\n return\n elif char == \":\":\n self._set_buffer(4, 0x02)\n return\n elif char == \";\":\n self._set_buffer(4, 0x00)\n return\n elif char in \"lL\":\n self._set_buffer(index, 0b00111000)\n return\n elif char in \"oO\":\n self._set_buffer(index, 0b00111111)\n return\n else:\n return\n self._set_buffer(index, NUMBERS[character])", "def append_lexeme(self, char: str):\r\n\r\n self._lexeme_buffer += char", "def test_forwardCharacterAtEndOfBuffer(self):\n s = 'hello world'\n n = len(s)\n self.widget.buffer = s\n self.widget.cursor = n\n self.widget.keystrokeReceived('\\x06', None) # C-f\n self.assertEqual(self.widget.buffer, s)\n self.assertEqual(self.widget.cursor, n)", "def InsertText(self, pos, text):\n self.stc.InsertText(pos, text)\n if self.IsInsertMode():\n self.buffer += text", "def next(self):\n self.pos += 1\n self.current_char = None if self.pos >= len(self.input) else self.input[self.pos]", "def insert(self, string: str) -> None:\n self.buffer.insert(string, self.index)\n self.index += len(string)", "def def_char(self, offset, data):\n self.send((\"\\x1b\\x26\\x01%c%c\\x05\") % ((offset&0xff), (offset&0xff)))\n time.sleep(0.01)\n for i in data:\n self.send((\"%c\")%i)", "def _next_char(self):\n self.current_position += 1\n if self.current_position >= len(self.stream):\n self.current_char = \"\\0\"\n self.EOF = True\n else:\n self.current_char = self.stream[self.current_position]\n if self.current_char == \"\\n\":\n self.line_number += 1\n self.line_start_position = self.current_position", "def write_char(self, char=' '):\n integer = ord(char)\n self.instruction(integer, True)", "def characters(self, content):\n if self._current_tag:\n self._buffer.append(content)" ]
[ "0.72998464", "0.720256", "0.7192313", "0.71792465", "0.7026925", "0.6762037", "0.6726603", "0.6712365", "0.6637423", "0.662271", "0.6604135", "0.65804297", "0.6580041", "0.6575565", "0.646021", "0.6420522", "0.63929373", "0.6347639", "0.6338991", "0.63316494", "0.6328756", "0.6317574", "0.6296167", "0.6294507", "0.62547696", "0.62331486", "0.6204134", "0.6202941", "0.6199576", "0.61800075" ]
0.80837435
0
Delete the character behind the cursor or join the line to the one above
def backspace(self): # If the position is at the beginning of a line that is not the first # line then join the line to the end of the line above it. if self.pos == 0 and self.line > 0: self.pos = self.buffer.line_length(self.line - 1) self.buffer.join_lines(self.line - 1, self.line) self.line -= 1 elif not (self.pos == 0 and self.line == 0): # Delete the character before the cursor and move the position back 1 self.buffer.delete_char(self.line, self.pos - 1) self.pos -= 1 self.has_changes = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_forward():\r\n point().delete_right_char()", "def clear_line_after(self) -> \"Cursor\":\n self._output.write(\"\\x1b[K\")\n\n return self", "def backspace(self):\n if self.current_index > 0:\n self.current_index -= 1\n self.line[self.current_index] = gamestate.PlayerPeg.empty", "def backspace(entry):\n entry.delete_symbol()", "def _(event):\n pos = line.document.find_start_of_previous_word(count=event.arg)\n if pos:\n deleted = line.delete_before_cursor(count=-pos)\n line.set_clipboard(ClipboardData(deleted))", "def delete(self):\n # If the position in the buffer is on a char\n if self.pos < self.buffer.line_length(self.line):\n self.buffer.delete_char(self.line, self.pos)\n self.has_changes = True", "def _(event):\n deleted = line.delete_before_cursor(count=-line.document.get_start_of_line_position())\n line.set_clipboard(ClipboardData(deleted))", "def delete_backward():\r\n point().delete_left_char()\r\n set_point(point().offset(-1))", "def wrap_cursor_back(event):\n b = event.cli.current_buffer\n b.cursor_up(count=1)\n relative_end_index = b.document.get_end_of_line_position()\n b.cursor_right(count=relative_end_index)", "def delete_chr(text):\n \"\"\" if the user try to delete an empty line it will not allowed him:)\"\"\"\n if len(text.getText())<1:\n text.setText(\"\")\n return text\n else:\n text.setText(text.getText()[:-10]) # 10 is the length of the word \"Backspace\" + 1 letter i delete\n return text", "def clear_line(self) -> \"Cursor\":\n self._output.write(\"\\x1b[2K\")\n\n return self", "def key_C(buf, input_line, cur, count):\n weechat.command(\"\", \"/input delete_end_of_line\")\n set_mode(\"INSERT\")", "def test_backward_delete_char__middle_of_line(self):\n before_b = \"\"\"\\\n first line\n last line\n \"\"\"\n after_b = \"\"\"\\\n firstline\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.6\", \"1.6\"),\n after_sel=(\"1.5\", \"1.5\"),\n command_name=\"backward-delete-char\",\n )", "def deleteLastChar (self) :\r\n c = self.data_.drop();\r\n # Notice the \\n so we can notice when new lines begin\r\n if (c=='\\n') :\r\n self.lineNumber_ -= 1\r\n # Find last \\n ... if we can\r\n index_of_last_newline = -1\r\n for ii in xrange(0, len(self.data_)) :\r\n if (self.data_.peek(len(self.data_)-ii-1)=='\\n') :\r\n index_of_last_newline = ii\r\n break \r\n \r\n self.charNumber_ = index_of_last_newline\r\n if (index_of_last_newline==-1) : self.charNumber = 80\r\n else :\r\n self.charNumber_-=1;", "def delete(self):\n del self.characters[self.cursor.position]", "def clear_line(string):\n for character in string:\n #backtrack-whitespace-backtrack\n sys.stdout.write(\"\\b \\b\")", "def fast_backspace(self, pad, linepad, *args):\n coordinates1 = map(int, pad.index(GUI.INSERT).split('.'))\n coordinates = str(coordinates1[0]) + '.0'\n r = pad.get(coordinates, GUI.INSERT)\n if len(str(r)) % 4 == 0:\n return\n if len(set(list(r))) == 1 and r[0] == u' ':\n coordinates = str(coordinates1[0]) + '.' 
+ str(max(0, coordinates1[1] - 3))\n pad.delete(coordinates, GUI.INSERT)\n self.linenumber(pad, linepad)", "def cmd_D(self):\n node = self.start\n while node is not None:\n if node == self.cursor:\n line = node.element\n node.element = line[0:self.delta] + \"\\n\"\n break\n node = node.next\n self.get_text()", "def test_backward_delete_char_last_char(self):\n before_b = \"\"\"\\\n first line\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n last lin\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.9\", \"2.9\"),\n after_sel=(\"2.8\", \"2.8\"),\n command_name=\"backward-delete-char\",\n )", "def _delChar(self, pos):\n nonGlyph = countInSet(self.text[:pos], self.NO_GLYPH_CHARS)\n\n self.allVertices = self.allVertices[:(pos - nonGlyph) * 4]\n self.allIndices = self.allIndices[:pos - nonGlyph]\n self.colors.pop(pos)\n self._string_metric = self._string_metric[:pos]\n self.text = self.text[:pos] + self.text[pos + 1:]\n self._updateGlyphs(pos)", "def scratch(line):\n if line.count('~~') >= 2:\n for i in range(0, line.count('~~') - line.count('~~') % 2):\n if i % 2 == 0:\n line = line.replace('~~', '<del>', 1)\n else:\n line = line.replace('~~', '</del>', 1)\n return line", "def backward_character():\r\n set_point(point().offset(-1))", "def _(event):\n if system_line.text:\n system_line.delete_before_cursor()\n else:\n # If no text after the prompt, cancel.\n system_line.reset()\n event.input_processor.pop_input_mode()", "def resetCursor():\n print(\"\\u001b[?0l\", end='')", "def refresh(self):\n\t\thead = self.head\n\t\ttail = self.tail or ' '\n\n\t\twidth = self.get_width()\n\t\tmax_head = width - 2\n\t\tif max_head <= 0:\n\t\t\traise ValueError(\"Cannot display line: terminal too narrow\")\n\n\t\t# if line is too long, strip from left to ensure there's room for cursor at the end\n\t\thead = head[-max_head:]\n\t\t# if line is still too long, cut off tail\n\t\tmax_tail = width - len(head)\n\t\tassert max_tail >= 2, \"logic error: max_tail = {!r}\".format(max_tail)\n\t\ttail = tail[:max_tail]\n\n\t\tselected, tail = tail[0], tail[1:]\n\t\tif self.encoding and not PY3:\n\t\t\thead, tail, selected = [s.encode(self.encoding) for s in (head, tail, selected)]\n\n\t\tself.output.write(\n\t\t\t escapes.SAVE_CURSOR\n\t\t\t+ escapes.set_cursor(1,999)\n\t\t\t+ escapes.CLEAR_LINE\n\t\t\t+ head\n\t\t\t+ escapes.INVERTCOLOURS + selected + escapes.UNFORMAT\n\t\t\t+ tail\n\t\t\t+ escapes.LOAD_CURSOR\n\t\t)\n\t\tself.output.flush()", "def erase(self, x, y):\n self.console.draw_char(x, y, ' ', bg=None)", "def handle_backspace(self, peer, row, col):\n\n # If the peer has selected text, delete that\n \n if peer.hasSelection():\n \n peer.deleteSelection()\n\n # Treat as if 1 char was deleted\n\n if peer is self.marker:\n \n self.root.last_col += 1\n\n else:\n\n # Move the cursor left one for a backspace\n\n if row > 0 and col > 0:\n\n index = \"{}.{}\".format(row, col-1)\n\n self.delete(index)\n\n elif row > 1 and col == 0:\n\n index = \"{}.end\".format(row-1,)\n\n self.delete(index)\n\n col = int(self.index(index).split('.')[1])\n\n # peer.move(row-1, col)\n\n return", "def cut_line(self):\r\n self.parachute.pop(0)", "def test_backward_delete_char(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first lie\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.9\", \"1.9\"),\n after_sel=(\"1.8\", \"1.8\"),\n 
command_name=\"backward-delete-char\",\n )", "def delete_line(self) -> str:\n row: int = self.buffer.get_row(self.index)\n column: int = self.buffer.get_column(self.index)\n line_end: int = self.buffer.get_line_end(self.index)\n\n line_start: int = self.buffer.get_line_start(self.index)\n new_line_start: int\n new_column: int = 0\n if line_end == len(self.buffer):\n previous_line_start: int = 0\n try:\n previous_line_start = self.buffer.reverse_index('\\n', start=line_start)\n except ValueError:\n previous_line_start = 0\n new_line_start = previous_line_start\n for new_column, character in enumerate(\n self.buffer[previous_line_start:previous_line_start + column + 1]):\n if character == '\\n':\n break\n else:\n new_line_start = line_start\n next_line_start: int = line_end + 1\n for new_column, character in enumerate(self.buffer[next_line_start:next_line_start +\n column + 1]):\n if character == '\\n':\n break\n\n new_index: int = new_line_start + new_column\n self.index = new_index\n\n line: str = self.buffer.delete_row(row)\n\n return line" ]
[ "0.7527465", "0.729241", "0.7284611", "0.72036695", "0.70729005", "0.6992967", "0.6897935", "0.6848832", "0.6840378", "0.6833158", "0.6825223", "0.6706762", "0.6673771", "0.6631402", "0.6623533", "0.66097546", "0.65821445", "0.6568563", "0.65677965", "0.6565864", "0.6565159", "0.6556927", "0.65469146", "0.65379393", "0.6490962", "0.648815", "0.64524895", "0.6421374", "0.63884103", "0.63740563" ]
0.76642966
0
Delete the character under the cursor
def delete(self): # If the position in the buffer is on a char if self.pos < self.buffer.line_length(self.line): self.buffer.delete_char(self.line, self.pos) self.has_changes = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n del self.characters[self.cursor.position]", "def delete_forward():\r\n point().delete_right_char()", "def backspace(entry):\n entry.delete_symbol()", "def _delChar(self, pos):\n nonGlyph = countInSet(self.text[:pos], self.NO_GLYPH_CHARS)\n\n self.allVertices = self.allVertices[:(pos - nonGlyph) * 4]\n self.allIndices = self.allIndices[:pos - nonGlyph]\n self.colors.pop(pos)\n self._string_metric = self._string_metric[:pos]\n self.text = self.text[:pos] + self.text[pos + 1:]\n self._updateGlyphs(pos)", "def delete_chr(text):\n \"\"\" if the user try to delete an empty line it will not allowed him:)\"\"\"\n if len(text.getText())<1:\n text.setText(\"\")\n return text\n else:\n text.setText(text.getText()[:-10]) # 10 is the length of the word \"Backspace\" + 1 letter i delete\n return text", "def erase(self, x, y):\n self.console.draw_char(x, y, ' ', bg=None)", "def _(event):\n deleted = line.delete_before_cursor(count=-line.document.get_start_of_line_position())\n line.set_clipboard(ClipboardData(deleted))", "def _(event):\n pos = line.document.find_start_of_previous_word(count=event.arg)\n if pos:\n deleted = line.delete_before_cursor(count=-pos)\n line.set_clipboard(ClipboardData(deleted))", "def key_C(buf, input_line, cur, count):\n weechat.command(\"\", \"/input delete_end_of_line\")\n set_mode(\"INSERT\")", "def delete_brackets_or_quotes(event):\n buffer = event.cli.current_buffer\n before = buffer.document.char_before_cursor\n after = buffer.document.current_char\n\n if any(\n [before == b and after == a for (b, a) in [\"()\", \"[]\", \"{}\", \"''\", '\"\"']]\n ):\n buffer.delete(1)\n\n buffer.delete_before_cursor(1)", "def edKey(self, key):\n # Any key press except cursor keys (?) causes the selection to be deleted!\n if isinstance(key, types.IntType):\n rawkey = (key & (SHIFT-1))\n if (rawkey in (1,2,3,4)):\n # First check for shift+cursor-key, because these alter the selection\n if (key & SHIFT): # SHIFT + cursor key\n # if there is not already a selection, set the first marker before\n # moving the cursor\n if not self.selection.selectionMark2:\n w, cx = self.edCursor.getPos()\n # Hack to handle empty words which are about to be deleted\n if (w.string == u\"\") and (len(w.tline.twords) > 1):\n # Don't start selection, just move cursor\n self.cursorKey(rawkey)\n return\n self.selection.selectionMark = (w, cx)\n self.cursorKey(rawkey)\n self.selection.setMark2(self.edCursor.getPos())\n\n else: # cursor key without SHIFT\n # clear selection and move cursor.\n self.selection.clearSelection()\n self.cursorKey(rawkey)\n return\n\n # All other keys are ignored if this widget is read-only\n if not self.editable: return\n\n self.cursorX = None\n # If there is a selection this must be deleted\n if self.delete() and (rawkey in (8,9)): return\n\n # Get cursor position\n word, cx = self.edCursor.getPos()\n tline = word.tline\n\n if (rawkey == 10): # space\n if (key & SHIFT):\n self.insertChar(FixedSpace)\n return\n s1 = word.string[:cx]\n s2 = word.string[cx:]\n word.setString(s1)\n # Create a new TWord with the second half of the split:\n nw = TWord(s2)\n wx = tline.twords.index(word)\n tline.insert(nw, wx+1)\n nw.setCanvas(self.canvas)\n\n # Re-render from this word, noting that it became shorter:\n self.rsubject.renderShortened(word)\n self.edCursor.setPos(nw, 0)\n return\n\n if (rawkey == 7): # line break\n s1 = word.string[:cx]\n s2 = word.string[cx:]\n word.setString(s1)\n # Create a new TWord with the second half of the split:\n nw = TWord(s2)\n # And a new 
Paragraph, copying the properties of the old one:\n para = Paragraph(tline.para)\n # And a new TextLine:\n ntl = TextLine(para, [nw])\n lx = self.rsubject.tlines.index(tline) + 1\n self.rsubject.tlines.insert(lx, ntl)\n nw.setCanvas(self.canvas)\n # Move words following the split:\n wx = tline.twords.index(word)\n for w in tline.twords[wx+1:]:\n ntl.insert(w)\n del(tline.twords[wx+1:])\n # Now move subsequent lines to new paragraph\n while True:\n lx += 1\n if (len(self.rsubject.tlines) <= lx) or \\\n (self.rsubject.tlines[lx].para != tline.para):\n break\n self.rsubject.tlines[lx].para = para\n\n # Re-render from this word, noting that it became shorter:\n self.rsubject.renderShortened(word)\n # Set cursor to start of new word.\n self.edCursor.setPos(nw, 0)\n return\n\n if (rawkey == 8) or (rawkey == 9): # delete / backspace\n if (rawkey == 9):\n # backspace: take one step back and then do as delete.\n if (cx == 0): # at start of word\n para0 = tline.para\n # if stepping back works ...\n if not self.edCursor.step(False): return\n # Get new cursor position\n word, cx = self.edCursor.getPos()\n tline = word.tline\n para = tline.para # needed for deletion test below\n else:\n cx -= 1\n s = word.string\n if (len(s) == cx): # at end of word\n # Join words\n wx = tline.twords.index(word) + 1\n if (wx >= len(tline.twords)): # at end of line\n # If we arrived at the end of a paragraph with\n # backspace, and the step backwards didn't skip\n # to the previous paragraph, do nothing!\n # That is necessary because of the\n # automatic deletion of words which become empty\n # when the cursor leaves them.\n if (rawkey == 9) and (para == para0): return\n # If at end of paragraph, join paragraphs\n nl = self.rsubject.nextLine(tline)\n if nl:\n para0 = tline.para\n para = nl.para\n if (para != para0):\n nl2 = nl\n while True:\n nl2.setPara(para0)\n nl2 = self.rsubject.nextLine(nl2)\n if (not nl2) or (nl2.para != para): break\n # Next line is (now) in same paragraph.\n # Move first word of next line to current line:\n tline.insert(nl.twords[0])\n del(nl.twords[0])\n if not nl.twords:\n # Line now empty, delete it\n self.rsubject.deleteTLine(nl)\n else:\n nl.y = None # to ensure re-rendering\n else:\n # Nothing to delete\n return\n\n nw = tline.twords[wx]\n del(tline.twords[wx])\n word.setString(s + nw.string)\n # The removed word must be 'freed'\n nw.delete()\n # Re-render from tline:\n self.rsubject.linify(tline)\n else:\n # Not at end of word, the word will be shortened.\n s = s[:cx] + s[cx+1:]\n word.setString(s)\n # Re-render from this word, noting that it became shorter:\n self.rsubject.renderShortened(word)\n # Reset cursor to start of new word/paragraph.\n self.edCursor.setPos(word, cx)\n\n self.deleteCount +=1\n if (self.deleteCount >= DELETECOUNT):\n self.saveText()\n return\n\n # Anything else is ignored\n return\n\n # All other keys are ignored if this widget is read-only\n if not self.editable: return\n\n # character key\n self.cursorX = None\n # If there is a selection this must be deleted\n # This must also reset the cursor appropriately\n self.delete()\n self.insertChar(key)", "def handle_backspace(self, peer, row, col):\n\n # If the peer has selected text, delete that\n \n if peer.hasSelection():\n \n peer.deleteSelection()\n\n # Treat as if 1 char was deleted\n\n if peer is self.marker:\n \n self.root.last_col += 1\n\n else:\n\n # Move the cursor left one for a backspace\n\n if row > 0 and col > 0:\n\n index = \"{}.{}\".format(row, col-1)\n\n self.delete(index)\n\n elif row > 1 and col == 
0:\n\n index = \"{}.end\".format(row-1,)\n\n self.delete(index)\n\n col = int(self.index(index).split('.')[1])\n\n # peer.move(row-1, col)\n\n return", "def ctrl_d(self):\n if self.index < len(self.string):\n self.string.pop(self.index)", "def delete_word(event):\n get_by_name(\"backward-kill-word\").call(event)", "def _(event):\n if system_line.text:\n system_line.delete_before_cursor()\n else:\n # If no text after the prompt, cancel.\n system_line.reset()\n event.input_processor.pop_input_mode()", "def delete_text(self, color, coord, coord_):\n\n pygame.draw.rect(self.game_display, color, ((coord), (coord_)))\n pygame.display.update()", "def backspace(self):\n if self.current_index > 0:\n self.current_index -= 1\n self.line[self.current_index] = gamestate.PlayerPeg.empty", "def key_cc(buf, input_line, cur, count):\n weechat.command(\"\", \"/input delete_line\")\n set_mode(\"INSERT\")", "def backspace(self):\n # If the position is at the beggining of a line that is not the first\n # line then join the line to the end of the line above it.\n if self.pos == 0 and self.line > 0:\n self.pos = self.buffer.line_length(self.line - 1)\n self.buffer.join_lines(self.line - 1, self.line)\n self.line -= 1\n elif not (self.pos == 0 and self.line == 0):\n # Delete the character before the cursor and move the position back 1\n self.buffer.delete_char(self.line, self.pos - 1)\n self.pos -= 1\n \n self.has_changes = True", "def test_delete(self):\n s = 'hello world'\n n = 5\n self.widget.buffer = s\n self.widget.cursor = n\n self.widget.keystrokeReceived(ServerProtocol.DELETE, None)\n self.failUnless(self.painted)\n self.assertEqual(self.widget.buffer, s[:n] + s[n + 1:])\n self.assertEqual(self.widget.cursor, n)", "def cancel(self):\n end = self.start\n start = self.start + f'-{self.chars}c'\n self.text.tag_delete('found', 1.0, tk.END)\n self.text.tag_delete('found.focus', 1.0, tk.END)\n self.text.tag_add(tk.SEL, start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.focus_set()\n self.destroy()", "def cancel(self):\n end = self.start\n start = self.start + f'-{self.chars}c'\n self.text.tag_delete('found', 1.0, tk.END)\n self.text.tag_delete('found.focus', 1.0, tk.END)\n self.text.tag_add(tk.SEL, start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.focus_set()\n self.destroy()", "def clear_line_after(self) -> \"Cursor\":\n self._output.write(\"\\x1b[K\")\n\n return self", "def get_delete_token(self):\n # Insert word choose\n return ''", "def deleteLastChar (self) :\r\n c = self.data_.drop();\r\n # Notice the \\n so we can notice when new lines begin\r\n if (c=='\\n') :\r\n self.lineNumber_ -= 1\r\n # Find last \\n ... 
if we can\r\n index_of_last_newline = -1\r\n for ii in xrange(0, len(self.data_)) :\r\n if (self.data_.peek(len(self.data_)-ii-1)=='\\n') :\r\n index_of_last_newline = ii\r\n break \r\n \r\n self.charNumber_ = index_of_last_newline\r\n if (index_of_last_newline==-1) : self.charNumber = 80\r\n else :\r\n self.charNumber_-=1;", "def delete_backward():\r\n point().delete_left_char()\r\n set_point(point().offset(-1))", "def clear_line(self) -> \"Cursor\":\n self._output.write(\"\\x1b[2K\")\n\n return self", "def test_delete_char(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n firstline\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.5\", \"1.5\"),\n after_sel=(\"1.5\", \"1.5\"),\n command_name=\"delete-char\",\n )", "def erase(self):\n output = Output(self.stdout)\n\n output.cursor_backward(self._cursor_pos.x)\n output.cursor_up(self._cursor_pos.y)\n output.erase_down()\n output.reset_attributes()\n output.flush()\n\n self.reset()", "def clearEditCursor(self, event):\n self.editMode = False\n self.updateCursor(\"arrow\")\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n self.paintBackground(x, y, self.checkFree(x, y))" ]
[ "0.82906467", "0.73986584", "0.7359406", "0.7227849", "0.7168641", "0.6955541", "0.66195863", "0.6606934", "0.65643644", "0.6553981", "0.65507", "0.65390956", "0.652098", "0.6479685", "0.6413434", "0.63415545", "0.6319842", "0.63145006", "0.6278936", "0.6263064", "0.6234896", "0.6234896", "0.6204025", "0.6193025", "0.6186285", "0.6146446", "0.6139618", "0.6109183", "0.61076045", "0.6080154" ]
0.74214077
1
Move the cursor up a line or within the line if a wrap width is given
def up(self, wrap = None): len_current = self.line_length() # If there is line wrapping if wrap: # If the position is in the top wrap of the line move it into the # last wrap of the line above it. Take into account shorter lines if self.pos < wrap and self.line > 0: len_next = self.line_length(-1) wraps_next = int(len_next / wrap) columns_next = len_next % wrap self.line -= 1 if self.pos > columns_next: self.pos = (wraps_next * wrap) + columns_next else: self.pos = (wraps_next * wrap) + self.pos # If the position is in the wraps of the current line elif self.pos >= wrap: self.pos = self.pos - wrap # If there is no line wrapping move to the same position or lower in # the next line up. elif self.line > 0: len_next = self.line_length(-1) self.line -= 1 if self.pos > len_next: self.pos = len_next
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def down(self, wrap = None):\n len_current = self.line_length()\n \n # If there is line wrapping\n if wrap:\n wraps_current = int(len_current / wrap)\n columns_current = len_current % wrap\n \n # If the position is not in the bottom wrap of the line move it down a\n # wrap. Take into account shorter wraps below.\n if len_current > wrap and self.pos < wraps_current * wrap:\n pos_wrap = int(self.pos / wrap)\n if pos_wrap + 1 == wraps_current and self.pos % wrap > columns_current:\n self.pos = (wraps_current * wrap) + columns_current\n else:\n self.pos = self.pos + wrap\n \n # If the position is in the bottom wrap move it to the first wrap of\n # the next line. Take into acount shorter lines below.\n elif self.line < self.buffer.size() - 1:\n len_next = self.line_length(1)\n self.line += 1\n if self.pos % wrap > len_next:\n self.pos = len_next\n else:\n self.pos = self.pos % wrap\n \n # If no wrapping is being done move the line down one and adjust the\n # position if the next line is shorter.\n elif self.line < self.buffer.size() - 1:\n len_next = self.line_length(1)\n self.line += 1\n if self.pos > len_next:\n self.pos = len_next", "def wrap_cursor_forward(event):\n b = event.cli.current_buffer\n relative_begin_index = b.document.get_start_of_line_position()\n b.cursor_left(count=abs(relative_begin_index))\n b.cursor_down(count=1)", "def wrap_cursor_back(event):\n b = event.cli.current_buffer\n b.cursor_up(count=1)\n relative_end_index = b.document.get_end_of_line_position()\n b.cursor_right(count=relative_end_index)", "def linewrap(width = None):\n wrapper = TextWrapper()\n wrapper.width = width or 251\n wrapper.replace_whitespace = True\n wrapper.break_long_words = False\n wrapper.break_on_hyphens = False\n return wrapper.wrap", "def move_up(self) -> None:\n try:\n line_start: int = self.buffer.reverse_index('\\n', end=self.index) + 1\n except ValueError:\n return\n\n previous_line_start: int\n try:\n previous_line_start = self.buffer.reverse_index('\\n', end=line_start - 1) + 1\n except ValueError:\n previous_line_start = 0\n\n previous_line_length = line_start - previous_line_start\n column: int = self.index - line_start\n if previous_line_length <= column:\n previous_line_end = line_start - 1\n self.index = previous_line_end\n else:\n self.index = previous_line_start + column", "def _scroll_when_linewrapping(\n self, ui_content: UIContent, width: int, height: int\n ) -> None:\n scroll_offsets_bottom = self.scroll_offsets.bottom\n scroll_offsets_top = self.scroll_offsets.top\n\n # We don't have horizontal scrolling.\n self.horizontal_scroll = 0\n\n def get_line_height(lineno: int) -> int:\n return ui_content.get_height_for_line(lineno, width, self.get_line_prefix)\n\n # When there is no space, reset `vertical_scroll_2` to zero and abort.\n # This can happen if the margin is bigger than the window width.\n # Otherwise the text height will become \"infinite\" (a big number) and\n # the copy_line will spend a huge amount of iterations trying to render\n # nothing.\n if width <= 0:\n self.vertical_scroll = ui_content.cursor_position.y\n self.vertical_scroll_2 = 0\n return\n\n # If the current line consumes more than the whole window height,\n # then we have to scroll vertically inside this line. (We don't take\n # the scroll offsets into account for this.)\n # Also, ignore the scroll offsets in this case. 
Just set the vertical\n # scroll to this line.\n line_height = get_line_height(ui_content.cursor_position.y)\n if line_height > height - scroll_offsets_top:\n # Calculate the height of the text before the cursor (including\n # line prefixes).\n text_before_height = ui_content.get_height_for_line(\n ui_content.cursor_position.y,\n width,\n self.get_line_prefix,\n slice_stop=ui_content.cursor_position.x,\n )\n\n # Adjust scroll offset.\n self.vertical_scroll = ui_content.cursor_position.y\n self.vertical_scroll_2 = min(\n text_before_height - 1, # Keep the cursor visible.\n line_height\n - height, # Avoid blank lines at the bottom when scrolling up again.\n self.vertical_scroll_2,\n )\n self.vertical_scroll_2 = max(\n 0, text_before_height - height, self.vertical_scroll_2\n )\n return\n else:\n self.vertical_scroll_2 = 0\n\n # Current line doesn't consume the whole height. Take scroll offsets into account.\n def get_min_vertical_scroll() -> int:\n # Make sure that the cursor line is not below the bottom.\n # (Calculate how many lines can be shown between the cursor and the .)\n used_height = 0\n prev_lineno = ui_content.cursor_position.y\n\n for lineno in range(ui_content.cursor_position.y, -1, -1):\n used_height += get_line_height(lineno)\n\n if used_height > height - scroll_offsets_bottom:\n return prev_lineno\n else:\n prev_lineno = lineno\n return 0\n\n def get_max_vertical_scroll() -> int:\n # Make sure that the cursor line is not above the top.\n prev_lineno = ui_content.cursor_position.y\n used_height = 0\n\n for lineno in range(ui_content.cursor_position.y - 1, -1, -1):\n used_height += get_line_height(lineno)\n\n if used_height > scroll_offsets_top:\n return prev_lineno\n else:\n prev_lineno = lineno\n return prev_lineno\n\n def get_topmost_visible() -> int:\n \"\"\"\n Calculate the upper most line that can be visible, while the bottom\n is still visible. We should not allow scroll more than this if\n `allow_scroll_beyond_bottom` is false.\n \"\"\"\n prev_lineno = ui_content.line_count - 1\n used_height = 0\n for lineno in range(ui_content.line_count - 1, -1, -1):\n used_height += get_line_height(lineno)\n if used_height > height:\n return prev_lineno\n else:\n prev_lineno = lineno\n return prev_lineno\n\n # Scroll vertically. 
(Make sure that the whole line which contains the\n # cursor is visible.\n topmost_visible = get_topmost_visible()\n\n # Note: the `min(topmost_visible, ...)` is to make sure that we\n # don't require scrolling up because of the bottom scroll offset,\n # when we are at the end of the document.\n self.vertical_scroll = max(\n self.vertical_scroll, min(topmost_visible, get_min_vertical_scroll())\n )\n self.vertical_scroll = min(self.vertical_scroll, get_max_vertical_scroll())\n\n # Disallow scrolling beyond bottom?\n if not self.allow_scroll_beyond_bottom():\n self.vertical_scroll = min(self.vertical_scroll, topmost_visible)", "def copy_line(\n line: StyleAndTextTuples,\n lineno: int,\n x: int,\n y: int,\n is_input: bool = False,\n ) -> tuple[int, int]:\n if is_input:\n current_rowcol_to_yx = rowcol_to_yx\n else:\n current_rowcol_to_yx = {} # Throwaway dictionary.\n\n # Draw line prefix.\n if is_input and get_line_prefix:\n prompt = to_formatted_text(get_line_prefix(lineno, 0))\n x, y = copy_line(prompt, lineno, x, y, is_input=False)\n\n # Scroll horizontally.\n skipped = 0 # Characters skipped because of horizontal scrolling.\n if horizontal_scroll and is_input:\n h_scroll = horizontal_scroll\n line = explode_text_fragments(line)\n while h_scroll > 0 and line:\n h_scroll -= get_cwidth(line[0][1])\n skipped += 1\n del line[:1] # Remove first character.\n\n x -= h_scroll # When scrolling over double width character,\n # this can end up being negative.\n\n # Align this line. (Note that this doesn't work well when we use\n # get_line_prefix and that function returns variable width prefixes.)\n if align == WindowAlign.CENTER:\n line_width = fragment_list_width(line)\n if line_width < width:\n x += (width - line_width) // 2\n elif align == WindowAlign.RIGHT:\n line_width = fragment_list_width(line)\n if line_width < width:\n x += width - line_width\n\n col = 0\n wrap_count = 0\n for style, text, *_ in line:\n new_buffer_row = new_buffer[y + ypos]\n\n # Remember raw VT escape sequences. (E.g. FinalTerm's\n # escape sequences.)\n if \"[ZeroWidthEscape]\" in style:\n new_screen.zero_width_escapes[y + ypos][x + xpos] += text\n continue\n\n for c in text:\n char = _CHAR_CACHE[c, style]\n char_width = char.width\n\n # Wrap when the line width is exceeded.\n if wrap_lines and x + char_width > width:\n visible_line_to_row_col[y + 1] = (\n lineno,\n visible_line_to_row_col[y][1] + x,\n )\n y += 1\n wrap_count += 1\n x = 0\n\n # Insert line prefix (continuation prompt).\n if is_input and get_line_prefix:\n prompt = to_formatted_text(\n get_line_prefix(lineno, wrap_count)\n )\n x, y = copy_line(prompt, lineno, x, y, is_input=False)\n\n new_buffer_row = new_buffer[y + ypos]\n\n if y >= write_position.height:\n return x, y # Break out of all for loops.\n\n # Set character in screen and shift 'x'.\n if x >= 0 and y >= 0 and x < width:\n new_buffer_row[x + xpos] = char\n\n # When we print a multi width character, make sure\n # to erase the neighbours positions in the screen.\n # (The empty string if different from everything,\n # so next redraw this cell will repaint anyway.)\n if char_width > 1:\n for i in range(1, char_width):\n new_buffer_row[x + xpos + i] = empty_char\n\n # If this is a zero width characters, then it's\n # probably part of a decomposed unicode character.\n # See: https://en.wikipedia.org/wiki/Unicode_equivalence\n # Merge it in the previous cell.\n elif char_width == 0:\n # Handle all character widths. 
If the previous\n # character is a multiwidth character, then\n # merge it two positions back.\n for pw in [2, 1]: # Previous character width.\n if (\n x - pw >= 0\n and new_buffer_row[x + xpos - pw].width == pw\n ):\n prev_char = new_buffer_row[x + xpos - pw]\n char2 = _CHAR_CACHE[\n prev_char.char + c, prev_char.style\n ]\n new_buffer_row[x + xpos - pw] = char2\n\n # Keep track of write position for each character.\n current_rowcol_to_yx[lineno, col + skipped] = (\n y + ypos,\n x + xpos,\n )\n\n col += 1\n x += char_width\n return x, y", "def _(event):\n buffer = event.current_buffer\n\n if buffer.document.is_cursor_at_the_end_of_line:\n buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=False)\n else:\n buffer.cursor_position += buffer.document.get_end_of_line_position()", "def test_move_lines_up(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n line 1\n first line\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.2\", \"2.2\"),\n after_sel=(\"1.2\", \"1.2\"),\n command_name=\"move-lines-up\",\n )", "def motion_w(input_line, cur, count):\n pos = get_pos(input_line, REGEX_MOTION_LOWERCASE_W, cur, True, count)\n if pos == -1:\n return len(input_line), False, False\n return cur + pos, False, False", "def word_wrap(self):\n textArea = self.get_current()\n if self.wrap.get() == 0:\n textArea.config(wrap='none')\n elif self.wrap.get() == 1:\n textArea.config(wrap='word')", "def __newLineBelow(self):\n focusWidget = QApplication.focusWidget()\n if (\n focusWidget == e5App().getObject(\"Shell\") or\n focusWidget == self.quickFindtextCombo\n ):\n return\n else:\n aw = self.activeWindow()\n if aw:\n aw.newLineBelow()", "def place(self, line):\n # easy enough\n return self.leader + line", "def wrapTextAt( text, linewidth=78 ):\n ansistring = ansistring = stringExtends.ansiStringClass( \"\" )\n if ( text is not None ) and isinstance( text, ( str,unicode ) ):\n ansistring.Text = text\n\n line_width = 78\n if (linewidth is not None) and isinstance( linewidth, (int, float) ):\n line_width = linewidth\n\n r = \"\"\n for line in ansistring.ansiTextWrap( line_width ):\n r += line + \"\\n\"\n\n r = r[:-1]\n return r", "def right(self):\n if self.pos < self.buffer.line_length(self.line):\n self.pos += 1", "def motion_W(input_line, cur, count):\n pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_W, cur, True, count)\n if pos == -1:\n return len(input_line), False, False\n return cur + pos, False, False", "def move_up(self, step: int = 1) -> None:\n if self.cursor_pos.x == 0:\n self.cursor_pos = Point(self.height - step, self.cursor_pos.y)\n else:\n self.cursor_pos = Point(self.cursor_pos.x-step, self.cursor_pos.y)", "def test_move_lines_up_into_docstring(self):\n before_b = '''\\\n #@@language python\n def test():\n \"\"\" a\n b\n c\n \"\"\"\n print 1\n \n print 2\n '''\n after_b = '''\\\n #@@language python\n def test():\n \"\"\" a\n b\n c\n print 1\n \"\"\"\n \n print 2\n '''\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.1\", \"7.1\"),\n after_sel=(\"6.1\", \"6.1\"),\n command_name=\"move-lines-up\",\n )", "def wordwrap():\n file = open(sys.argv[2])\n width = int(sys.argv[1]) \n line = file.readline()\n while line:\n line = line.strip()\n line1,line = wordwrap_on(line,width)\n print line1\n if line=='' :\n line = file.readline()", "def key_a(buf, input_line, cur, count):\n set_cur(buf, input_line, cur 
+ 1, False)\n set_mode(\"INSERT\")", "def _(event):\n system_line.cursor_right()", "def current_line_preserved():\n\n current_line = get_current_line_number() + 1\n yield\n vim.command('{0}'.format(current_line))", "def test_move_lines_down(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line c\n line a\n line b\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.3\", \"4.3\"),\n after_sel=(\"4.3\", \"5.3\"),\n command_name=\"move-lines-down\",\n )", "def key_I(buf, input_line, cur, count):\n pos, _, _ = motion_carret(input_line, cur, 0)\n set_cur(buf, input_line, pos)\n set_mode(\"INSERT\")", "def enter(self):\n if self.pos < self.line_length():\n # If the position is not at the end of the line split the line\n self.buffer.split_line(self.line, self.pos)\n else:\n self.buffer.insert_line(\"\", self.line + 1)\n \n self.line += 1\n self.pos = 0\n self.has_changes = True", "def key_A(buf, input_line, cur, count):\n set_cur(buf, input_line, len(input_line), False)\n set_mode(\"INSERT\")", "def tidyout(line, linewidth, currentpos, lineend, autoStyle=True):\n openStyle = ''\n closeStyle = ''\n url=False\n if autoStyle:\n # format things tha look like bio sequences\n if len(line) > 40:\n if re.search('^[A-Z]+$',line.upper()) != None:\n # looks like a sequence\n openStyle = '<pre>'\n closeStyle = '</pre>'\n lineend = '<br/>'\n \n\n # format things that look like URL's\n if re.search('^http[s]*\\:\\/',line) != None:\n url=True\n result = '<a href=\"%s\" target=urllinkout> %s </a>'%(line,line)\n \n\n if not url:\n result=openStyle\n for i in range(0,len(line)):\n if currentpos == linewidth:\n result += lineend\n currentpos = 0 \n result += line[i:i+1]\n currentpos += 1\n\n #print currentpos\n result += closeStyle\n \n return (result,currentpos)", "def set_cur(buf, input_line, pos, cap=True):\n if cap:\n pos = min(pos, len(input_line) - 1)\n weechat.buffer_set(buf, \"input_pos\", str(pos))", "def motion_l(input_line, cur, count):\n return cur + max(count, 1), False, False", "def goto_line(editor, lineno, scroll=False):\n count = editor.blockCount()\n if lineno > count:\n lineno = count\n lineno = lineno-1\n pos = editor.document(\n ).findBlockByNumber(\n lineno).position()\n\n goto_position(editor, pos)\n \n if scroll:\n bar = editor.verticalScrollBar()\n bar.setValue(max(0, bar.value()-2))" ]
[ "0.7428185", "0.6528294", "0.62725735", "0.6061254", "0.6049975", "0.5999769", "0.5879037", "0.5761063", "0.57182825", "0.5612058", "0.56017333", "0.5597055", "0.5573323", "0.5563547", "0.55090386", "0.5465161", "0.5446503", "0.5443915", "0.5440159", "0.54241407", "0.5423677", "0.5409705", "0.5386582", "0.53726655", "0.53505623", "0.53475666", "0.53194505", "0.5307533", "0.528821", "0.5282" ]
0.8001269
0
Move the cursor down a line or within the line if wrap width is given
def down(self, wrap = None):
    len_current = self.line_length()

    # If there is line wrapping
    if wrap:
        wraps_current = int(len_current / wrap)
        columns_current = len_current % wrap

        # If the position is not in the bottom wrap of the line move it down a
        # wrap. Take into account shorter wraps below.
        if len_current > wrap and self.pos < wraps_current * wrap:
            pos_wrap = int(self.pos / wrap)
            if pos_wrap + 1 == wraps_current and self.pos % wrap > columns_current:
                self.pos = (wraps_current * wrap) + columns_current
            else:
                self.pos = self.pos + wrap

        # If the position is in the bottom wrap move it to the first wrap of
        # the next line. Take into account shorter lines below.
        elif self.line < self.buffer.size() - 1:
            len_next = self.line_length(1)
            self.line += 1
            if self.pos % wrap > len_next:
                self.pos = len_next
            else:
                self.pos = self.pos % wrap

    # If no wrapping is being done move the line down one and adjust the
    # position if the next line is shorter.
    elif self.line < self.buffer.size() - 1:
        len_next = self.line_length(1)
        self.line += 1
        if self.pos > len_next:
            self.pos = len_next
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def up(self, wrap = None):\n len_current = self.line_length()\n \n # If there is line wrapping\n if wrap:\n \n # If the position is in the top wrap of the line move it into the\n # last wrap of the line above it. Take into account shorter lines\n if self.pos < wrap and self.line > 0:\n len_next = self.line_length(-1)\n wraps_next = int(len_next / wrap)\n columns_next = len_next % wrap\n self.line -= 1\n if self.pos > columns_next:\n self.pos = (wraps_next * wrap) + columns_next\n else:\n self.pos = (wraps_next * wrap) + self.pos\n \n # If the position is in the wraps of the current line\n elif self.pos >= wrap:\n self.pos = self.pos - wrap\n \n # If there is no line wrapping move to the same position or lower in\n # the next line up.\n elif self.line > 0:\n len_next = self.line_length(-1)\n self.line -= 1\n if self.pos > len_next:\n self.pos = len_next", "def wrap_cursor_forward(event):\n b = event.cli.current_buffer\n relative_begin_index = b.document.get_start_of_line_position()\n b.cursor_left(count=abs(relative_begin_index))\n b.cursor_down(count=1)", "def linewrap(width = None):\n wrapper = TextWrapper()\n wrapper.width = width or 251\n wrapper.replace_whitespace = True\n wrapper.break_long_words = False\n wrapper.break_on_hyphens = False\n return wrapper.wrap", "def _scroll_when_linewrapping(\n self, ui_content: UIContent, width: int, height: int\n ) -> None:\n scroll_offsets_bottom = self.scroll_offsets.bottom\n scroll_offsets_top = self.scroll_offsets.top\n\n # We don't have horizontal scrolling.\n self.horizontal_scroll = 0\n\n def get_line_height(lineno: int) -> int:\n return ui_content.get_height_for_line(lineno, width, self.get_line_prefix)\n\n # When there is no space, reset `vertical_scroll_2` to zero and abort.\n # This can happen if the margin is bigger than the window width.\n # Otherwise the text height will become \"infinite\" (a big number) and\n # the copy_line will spend a huge amount of iterations trying to render\n # nothing.\n if width <= 0:\n self.vertical_scroll = ui_content.cursor_position.y\n self.vertical_scroll_2 = 0\n return\n\n # If the current line consumes more than the whole window height,\n # then we have to scroll vertically inside this line. (We don't take\n # the scroll offsets into account for this.)\n # Also, ignore the scroll offsets in this case. Just set the vertical\n # scroll to this line.\n line_height = get_line_height(ui_content.cursor_position.y)\n if line_height > height - scroll_offsets_top:\n # Calculate the height of the text before the cursor (including\n # line prefixes).\n text_before_height = ui_content.get_height_for_line(\n ui_content.cursor_position.y,\n width,\n self.get_line_prefix,\n slice_stop=ui_content.cursor_position.x,\n )\n\n # Adjust scroll offset.\n self.vertical_scroll = ui_content.cursor_position.y\n self.vertical_scroll_2 = min(\n text_before_height - 1, # Keep the cursor visible.\n line_height\n - height, # Avoid blank lines at the bottom when scrolling up again.\n self.vertical_scroll_2,\n )\n self.vertical_scroll_2 = max(\n 0, text_before_height - height, self.vertical_scroll_2\n )\n return\n else:\n self.vertical_scroll_2 = 0\n\n # Current line doesn't consume the whole height. 
Take scroll offsets into account.\n def get_min_vertical_scroll() -> int:\n # Make sure that the cursor line is not below the bottom.\n # (Calculate how many lines can be shown between the cursor and the .)\n used_height = 0\n prev_lineno = ui_content.cursor_position.y\n\n for lineno in range(ui_content.cursor_position.y, -1, -1):\n used_height += get_line_height(lineno)\n\n if used_height > height - scroll_offsets_bottom:\n return prev_lineno\n else:\n prev_lineno = lineno\n return 0\n\n def get_max_vertical_scroll() -> int:\n # Make sure that the cursor line is not above the top.\n prev_lineno = ui_content.cursor_position.y\n used_height = 0\n\n for lineno in range(ui_content.cursor_position.y - 1, -1, -1):\n used_height += get_line_height(lineno)\n\n if used_height > scroll_offsets_top:\n return prev_lineno\n else:\n prev_lineno = lineno\n return prev_lineno\n\n def get_topmost_visible() -> int:\n \"\"\"\n Calculate the upper most line that can be visible, while the bottom\n is still visible. We should not allow scroll more than this if\n `allow_scroll_beyond_bottom` is false.\n \"\"\"\n prev_lineno = ui_content.line_count - 1\n used_height = 0\n for lineno in range(ui_content.line_count - 1, -1, -1):\n used_height += get_line_height(lineno)\n if used_height > height:\n return prev_lineno\n else:\n prev_lineno = lineno\n return prev_lineno\n\n # Scroll vertically. (Make sure that the whole line which contains the\n # cursor is visible.\n topmost_visible = get_topmost_visible()\n\n # Note: the `min(topmost_visible, ...)` is to make sure that we\n # don't require scrolling up because of the bottom scroll offset,\n # when we are at the end of the document.\n self.vertical_scroll = max(\n self.vertical_scroll, min(topmost_visible, get_min_vertical_scroll())\n )\n self.vertical_scroll = min(self.vertical_scroll, get_max_vertical_scroll())\n\n # Disallow scrolling beyond bottom?\n if not self.allow_scroll_beyond_bottom():\n self.vertical_scroll = min(self.vertical_scroll, topmost_visible)", "def wrap_cursor_back(event):\n b = event.cli.current_buffer\n b.cursor_up(count=1)\n relative_end_index = b.document.get_end_of_line_position()\n b.cursor_right(count=relative_end_index)", "def _(event):\n buffer = event.current_buffer\n\n if buffer.document.is_cursor_at_the_end_of_line:\n buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=False)\n else:\n buffer.cursor_position += buffer.document.get_end_of_line_position()", "def copy_line(\n line: StyleAndTextTuples,\n lineno: int,\n x: int,\n y: int,\n is_input: bool = False,\n ) -> tuple[int, int]:\n if is_input:\n current_rowcol_to_yx = rowcol_to_yx\n else:\n current_rowcol_to_yx = {} # Throwaway dictionary.\n\n # Draw line prefix.\n if is_input and get_line_prefix:\n prompt = to_formatted_text(get_line_prefix(lineno, 0))\n x, y = copy_line(prompt, lineno, x, y, is_input=False)\n\n # Scroll horizontally.\n skipped = 0 # Characters skipped because of horizontal scrolling.\n if horizontal_scroll and is_input:\n h_scroll = horizontal_scroll\n line = explode_text_fragments(line)\n while h_scroll > 0 and line:\n h_scroll -= get_cwidth(line[0][1])\n skipped += 1\n del line[:1] # Remove first character.\n\n x -= h_scroll # When scrolling over double width character,\n # this can end up being negative.\n\n # Align this line. 
(Note that this doesn't work well when we use\n # get_line_prefix and that function returns variable width prefixes.)\n if align == WindowAlign.CENTER:\n line_width = fragment_list_width(line)\n if line_width < width:\n x += (width - line_width) // 2\n elif align == WindowAlign.RIGHT:\n line_width = fragment_list_width(line)\n if line_width < width:\n x += width - line_width\n\n col = 0\n wrap_count = 0\n for style, text, *_ in line:\n new_buffer_row = new_buffer[y + ypos]\n\n # Remember raw VT escape sequences. (E.g. FinalTerm's\n # escape sequences.)\n if \"[ZeroWidthEscape]\" in style:\n new_screen.zero_width_escapes[y + ypos][x + xpos] += text\n continue\n\n for c in text:\n char = _CHAR_CACHE[c, style]\n char_width = char.width\n\n # Wrap when the line width is exceeded.\n if wrap_lines and x + char_width > width:\n visible_line_to_row_col[y + 1] = (\n lineno,\n visible_line_to_row_col[y][1] + x,\n )\n y += 1\n wrap_count += 1\n x = 0\n\n # Insert line prefix (continuation prompt).\n if is_input and get_line_prefix:\n prompt = to_formatted_text(\n get_line_prefix(lineno, wrap_count)\n )\n x, y = copy_line(prompt, lineno, x, y, is_input=False)\n\n new_buffer_row = new_buffer[y + ypos]\n\n if y >= write_position.height:\n return x, y # Break out of all for loops.\n\n # Set character in screen and shift 'x'.\n if x >= 0 and y >= 0 and x < width:\n new_buffer_row[x + xpos] = char\n\n # When we print a multi width character, make sure\n # to erase the neighbours positions in the screen.\n # (The empty string if different from everything,\n # so next redraw this cell will repaint anyway.)\n if char_width > 1:\n for i in range(1, char_width):\n new_buffer_row[x + xpos + i] = empty_char\n\n # If this is a zero width characters, then it's\n # probably part of a decomposed unicode character.\n # See: https://en.wikipedia.org/wiki/Unicode_equivalence\n # Merge it in the previous cell.\n elif char_width == 0:\n # Handle all character widths. 
If the previous\n # character is a multiwidth character, then\n # merge it two positions back.\n for pw in [2, 1]: # Previous character width.\n if (\n x - pw >= 0\n and new_buffer_row[x + xpos - pw].width == pw\n ):\n prev_char = new_buffer_row[x + xpos - pw]\n char2 = _CHAR_CACHE[\n prev_char.char + c, prev_char.style\n ]\n new_buffer_row[x + xpos - pw] = char2\n\n # Keep track of write position for each character.\n current_rowcol_to_yx[lineno, col + skipped] = (\n y + ypos,\n x + xpos,\n )\n\n col += 1\n x += char_width\n return x, y", "def right(self):\n if self.pos < self.buffer.line_length(self.line):\n self.pos += 1", "def _(event):\n system_line.cursor_right()", "def word_wrap(self):\n textArea = self.get_current()\n if self.wrap.get() == 0:\n textArea.config(wrap='none')\n elif self.wrap.get() == 1:\n textArea.config(wrap='word')", "def next_line(self, context, line):", "def goto_line(editor, lineno, scroll=False):\n count = editor.blockCount()\n if lineno > count:\n lineno = count\n lineno = lineno-1\n pos = editor.document(\n ).findBlockByNumber(\n lineno).position()\n\n goto_position(editor, pos)\n \n if scroll:\n bar = editor.verticalScrollBar()\n bar.setValue(max(0, bar.value()-2))", "def wordwrap():\n file = open(sys.argv[2])\n width = int(sys.argv[1]) \n line = file.readline()\n while line:\n line = line.strip()\n line1,line = wordwrap_on(line,width)\n print line1\n if line=='' :\n line = file.readline()", "def _(event):\n system_line.cursor_left()", "def place(self, line):\n # easy enough\n return self.leader + line", "def _scroll_without_linewrapping(\n self, ui_content: UIContent, width: int, height: int\n ) -> None:\n cursor_position = ui_content.cursor_position or Point(x=0, y=0)\n\n # Without line wrapping, we will never have to scroll vertically inside\n # a single line.\n self.vertical_scroll_2 = 0\n\n if ui_content.line_count == 0:\n self.vertical_scroll = 0\n self.horizontal_scroll = 0\n return\n else:\n current_line_text = fragment_list_to_text(\n ui_content.get_line(cursor_position.y)\n )\n\n def do_scroll(\n current_scroll: int,\n scroll_offset_start: int,\n scroll_offset_end: int,\n cursor_pos: int,\n window_size: int,\n content_size: int,\n ) -> int:\n \"Scrolling algorithm. Used for both horizontal and vertical scrolling.\"\n # Calculate the scroll offset to apply.\n # This can obviously never be more than have the screen size. 
Also, when the\n # cursor appears at the top or bottom, we don't apply the offset.\n scroll_offset_start = int(\n min(scroll_offset_start, window_size / 2, cursor_pos)\n )\n scroll_offset_end = int(\n min(scroll_offset_end, window_size / 2, content_size - 1 - cursor_pos)\n )\n\n # Prevent negative scroll offsets.\n if current_scroll < 0:\n current_scroll = 0\n\n # Scroll back if we scrolled to much and there's still space to show more of the document.\n if (\n not self.allow_scroll_beyond_bottom()\n and current_scroll > content_size - window_size\n ):\n current_scroll = max(0, content_size - window_size)\n\n # Scroll up if cursor is before visible part.\n if current_scroll > cursor_pos - scroll_offset_start:\n current_scroll = max(0, cursor_pos - scroll_offset_start)\n\n # Scroll down if cursor is after visible part.\n if current_scroll < (cursor_pos + 1) - window_size + scroll_offset_end:\n current_scroll = (cursor_pos + 1) - window_size + scroll_offset_end\n\n return current_scroll\n\n # When a preferred scroll is given, take that first into account.\n if self.get_vertical_scroll:\n self.vertical_scroll = self.get_vertical_scroll(self)\n assert isinstance(self.vertical_scroll, int)\n if self.get_horizontal_scroll:\n self.horizontal_scroll = self.get_horizontal_scroll(self)\n assert isinstance(self.horizontal_scroll, int)\n\n # Update horizontal/vertical scroll to make sure that the cursor\n # remains visible.\n offsets = self.scroll_offsets\n\n self.vertical_scroll = do_scroll(\n current_scroll=self.vertical_scroll,\n scroll_offset_start=offsets.top,\n scroll_offset_end=offsets.bottom,\n cursor_pos=ui_content.cursor_position.y,\n window_size=height,\n content_size=ui_content.line_count,\n )\n\n if self.get_line_prefix:\n current_line_prefix_width = fragment_list_width(\n to_formatted_text(self.get_line_prefix(ui_content.cursor_position.y, 0))\n )\n else:\n current_line_prefix_width = 0\n\n self.horizontal_scroll = do_scroll(\n current_scroll=self.horizontal_scroll,\n scroll_offset_start=offsets.left,\n scroll_offset_end=offsets.right,\n cursor_pos=get_cwidth(current_line_text[: ui_content.cursor_position.x]),\n window_size=width - current_line_prefix_width,\n # We can only analyse the current line. 
Calculating the width off\n # all the lines is too expensive.\n content_size=max(\n get_cwidth(current_line_text), self.horizontal_scroll + width\n ),\n )", "def wrapTextAt( text, linewidth=78 ):\n ansistring = ansistring = stringExtends.ansiStringClass( \"\" )\n if ( text is not None ) and isinstance( text, ( str,unicode ) ):\n ansistring.Text = text\n\n line_width = 78\n if (linewidth is not None) and isinstance( linewidth, (int, float) ):\n line_width = linewidth\n\n r = \"\"\n for line in ansistring.ansiTextWrap( line_width ):\n r += line + \"\\n\"\n\n r = r[:-1]\n return r", "def move_down(self) -> None:\n try:\n next_newline_index: int = self.buffer.index('\\n', start=self.index)\n except ValueError:\n return\n\n if next_newline_index == self.buffer.end:\n return\n\n down_index: int\n column: int = self.buffer.get_column(self.index)\n down_index = next_newline_index + 1 + column\n\n if down_index > self.buffer.end:\n down_index = self.buffer.end\n else:\n start: int = next_newline_index + 1\n end: int = down_index\n try:\n next_next_newline_index: int = self.buffer.index('\\n', start=start, end=end)\n down_index = next_next_newline_index\n except ValueError:\n pass\n\n self.index = down_index", "def enter(self):\n if self.pos < self.line_length():\n # If the position is not at the end of the line split the line\n self.buffer.split_line(self.line, self.pos)\n else:\n self.buffer.insert_line(\"\", self.line + 1)\n \n self.line += 1\n self.pos = 0\n self.has_changes = True", "def __newLineBelow(self):\n focusWidget = QApplication.focusWidget()\n if (\n focusWidget == e5App().getObject(\"Shell\") or\n focusWidget == self.quickFindtextCombo\n ):\n return\n else:\n aw = self.activeWindow()\n if aw:\n aw.newLineBelow()", "def move_to_line_end(self) -> None:\n self.index = self.buffer.get_line_end(self.index)", "def next_line():\r\n set_point(point().next_line())", "def move_right(self) -> None:\n if not self.buffer:\n return\n\n if self.index == self.buffer.end:\n return\n\n if self.buffer[self.index] != '\\n':\n self.index += 1", "def print_line(line, highlight=False):\n global lineno\n try:\n if highlight:\n line += \" \" * (win.getmaxyx()[1] - len(line))\n win.addstr(lineno, 0, line, curses.A_REVERSE)\n else:\n win.addstr(lineno, 0, line, 0)\n except curses.error:\n lineno = 0\n win.refresh()\n raise\n else:\n lineno += 1", "def set_line_width(self, val):\n self.lwidth = val", "def scroll(self, direction):\n # next cursor position after scrolling\n next_line = self.line + direction\n\n # Up direction scroll overflow\n # current cursor position is 0, but top position is greater than 0\n if (direction == self.UP) and (self.top > 0 and self.line == 0):\n self.top += direction\n \n # Down direction scroll overflow\n # next cursor position touch the max lines, but absolute position of max lines could not touch the bottom\n elif (direction == self.DOWN) and (next_line == self.max_lines -1) and (self.top + self.max_lines < self.bottom):\n self.top += direction\n \n # Scroll up\n # current cursor position or top position is greater than 0\n elif (direction == self.UP) and (self.top > 0 or self.line > 0):\n self.line = next_line\n \n # Scroll down\n # next cursor position is above max lines, and absolute position of next cursor could not touch the bottom\n elif (direction == self.DOWN) and (next_line < self.max_lines) and (self.top + next_line < self.bottom):\n self.line = next_line", "def test_move_lines_down(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last 
line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line c\n line a\n line b\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.3\", \"4.3\"),\n after_sel=(\"4.3\", \"5.3\"),\n command_name=\"move-lines-down\",\n )", "def current_line_preserved():\n\n current_line = get_current_line_number() + 1\n yield\n vim.command('{0}'.format(current_line))", "def test_wrap_word():\n line = \"n\" * 81\n assert wrap_line(line) == \"n\" * 80 + \"\\nn\"", "def go_to(self, value=None):\n self.go_to_this_line = self.line_number.get()\n self.my_text.mark_set(INSERT, str(float(self.go_to_this_line)))\n self.current_area()\n self.my_text.see(INSERT)\n self.searcher.destroy()" ]
[ "0.7282259", "0.65697646", "0.63280654", "0.6278724", "0.6258118", "0.6026035", "0.6025714", "0.59524655", "0.58828807", "0.5844057", "0.5785511", "0.57741666", "0.5715548", "0.5617951", "0.55869836", "0.5586518", "0.55736643", "0.5569546", "0.55602664", "0.5536436", "0.5534042", "0.54956543", "0.5455707", "0.5417402", "0.541698", "0.53993046", "0.5397739", "0.53975534", "0.5394596", "0.5374796" ]
0.7542303
0
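A small worked example of the wrap arithmetic that the `down` method in the record above relies on; the concrete numbers (wrap width 10, a 23-character line) are illustrative and not taken from the record.

# Illustrative numbers only: wrap width 10 and a 23-character line, which
# renders as wrap rows of 10, 10 and 3 characters.
wrap = 10
len_current = 23

wraps_current = int(len_current / wrap)   # 2: index of the bottom wrap row
columns_current = len_current % wrap      # 3: columns used on the bottom row

# Cursor at pos 7 (top row): not in the bottom row, so down() moves it one
# wrap row lower, to column 7 of the middle row.
pos = 7
assert len_current > wrap and pos < wraps_current * wrap
assert pos + wrap == 17

# Cursor at pos 15 (middle row, column 5): the bottom row only has 3 columns,
# so the cursor is clamped to the end of the bottom row instead.
pos = 15
pos_wrap = int(pos / wrap)                # 1
assert pos_wrap + 1 == wraps_current and pos % wrap > columns_current
assert (wraps_current * wrap) + columns_current == 23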
Move cursor right once. Won't move up if the line beginning is reached
def right(self):
    if self.pos < self.buffer.line_length(self.line):
        self.pos += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_right(self, step: int = 1) -> None:\n if self.cursor_pos.y < self.width - 1:\n self.cursor_pos = Point(self.cursor_pos.x, self.cursor_pos.y+step)\n else:\n self.cursor_pos = Point(self.cursor_pos.x, 0)", "def move_right(self) -> None:\n if not self.buffer:\n return\n\n if self.index == self.buffer.end:\n return\n\n if self.buffer[self.index] != '\\n':\n self.index += 1", "def _(event):\n system_line.cursor_right()", "def right(self):\n self.move(1,0)", "def move_right(self):\n if self.change_valid(dx=1):\n self.x += 1", "def move_right(self):\n\n if self.xcor() > 230:\n self.setx(250)\n else:\n new_x = self.xcor() + 40\n self.setx(new_x)", "def wrap_cursor_forward(event):\n b = event.cli.current_buffer\n relative_begin_index = b.document.get_start_of_line_position()\n b.cursor_left(count=abs(relative_begin_index))\n b.cursor_down(count=1)", "def go_right(self):\n self.change_x = 6", "def go_right(self):\n self.change_x = 6", "def wrap_cursor_back(event):\n b = event.cli.current_buffer\n b.cursor_up(count=1)\n relative_end_index = b.document.get_end_of_line_position()\n b.cursor_right(count=relative_end_index)", "def go_right(self):\n self.rect.centerx += 9", "def go_right(self):\n self.change_x = 6\n self.direction = \"R\"", "def moveRight(self):\n if self._position.x != 14:\n self._position.x +=1\n return True\n return False", "def go_right(self):\n self.rect.centerx += self.__dx", "def _(event):\n system_line.cursor_left()", "def right(self):\n if self.head.heading() != LEFT and self.last_direction != LEFT:\n self.head.setheading(RIGHT)", "def move_right(self):\n self.yaw_motor.step_forward()", "def move_right(self):\n self.rect.x += 5 # Moves to the right by 5\n\n # If the player reaches the edge of the screen, they can't go further\n if self.rect.x >= 580:\n self.rect.x = 580", "def right():\n global x, canvas # x é modificado\n canvas.create_line(x, y, x + 10, y)\n x += 10", "def move_right(self):\n\t\tself.set_x_vector(constants.DONKEY_SPEED)", "def move_right(self):\r\n self._time += 1\r\n if self._position < len(self._list) - 1:\r\n self._position += 1\r\n return True\r\n else:\r\n return False", "def move_right(self):\r\n if self.rect.right < BG_WIDTH:\r\n self.rect.right += self.speed", "def move_right(self):\n self._time += 1\n if self._position < len(self._list) - 1:\n self._position += 1\n return True\n else:\n return False", "def move_left(self) -> None:\n if self.index == 0:\n return\n\n if self.buffer[self.index - 1] != '\\n':\n self.index -= 1", "def move_right(self, num):\n self.right_position = num", "def move_right(self):\r\n self.left += self.__speed", "def move_left(self, step: int = 1) -> None:\n if self.cursor_pos.y == 0:\n self.cursor_pos = Point(self.cursor_pos.x, self.width-step)\n else:\n self.cursor_pos = Point(self.cursor_pos.x, self.cursor_pos.y-step)", "def advance(self, distance):\n self.cursor += distance", "def move_to_line_end(self) -> None:\n self.index = self.buffer.get_line_end(self.index)", "def do_STEP(self, parametros):\n if len(cancion.marks)!=0:\n cancion.moveCursor(1,True)" ]
[ "0.76631117", "0.75563663", "0.75281566", "0.7331321", "0.70240384", "0.69982034", "0.6981223", "0.69527495", "0.69527495", "0.69385767", "0.6848111", "0.6813645", "0.67973435", "0.67660815", "0.66800827", "0.66345584", "0.6620433", "0.65905064", "0.65749073", "0.6568575", "0.6560634", "0.6546241", "0.6519655", "0.6493514", "0.64831996", "0.6444557", "0.64274186", "0.63931274", "0.63604075", "0.633049" ]
0.7801659
0
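A minimal sketch of the clamping behaviour of the `right` method above. StubBuffer and StubCursor are hypothetical scaffolding invented for the example; only the guard inside right() comes from the record.

# Hypothetical stand-ins for the buffer/cursor pair; only line_length() is
# needed by right().
class StubBuffer:
    def __init__(self, lines):
        self._lines = lines

    def line_length(self, line):
        return len(self._lines[line])


class StubCursor:
    def __init__(self, buffer):
        self.buffer = buffer
        self.line = 0
        self.pos = 0

    def right(self):
        # Same guard as the record's method: never step past the line length.
        if self.pos < self.buffer.line_length(self.line):
            self.pos += 1


cursor = StubCursor(StubBuffer(["ab"]))
for _ in range(5):
    cursor.right()
assert cursor.pos == 2  # extra calls are ignored at the end of the line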
Return the length of the current line or a line at a given offset
def line_length(self, dLine = 0):
    return self.buffer.line_length(self.line + dLine)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lineOffset(self):\n if self.__lineOffset is None:\n self.__lineOffset = self.__offset - self.__source.rfind(\"\\n\", 0, self.__offset) - 1\n\n return self.__lineOffset", "def __len__(self):\n nlines = self.get_endline() - self.get_startline() + 1\n if nlines < 0:\n nlines = 0\n return nlines", "def line(self) -> int:", "def _loc(self) -> int:\n return len(self.lines)", "def len(self):\n\t\t\n\t\treturn len(self.line)", "def get_line_width(self):\n return self.lwidth", "def offsetline(linen, pattern_result):\n\n if \"nlines\" in pattern_result:\n nlines = pattern_result[\"nlines\"]\n else:\n nlines = 0\n new_linen = linen - nlines - 1\n if new_linen < 0:\n return 0\n else:\n return new_linen", "def get_linecount(self):\n self._update_linetab(len(self.input))\n lcount = len(self.__linepos)\n return lcount - (self.input.endswith('\\n'))", "def get_line_length(file_path):\n with open(file_path, 'rb+') as f:\n return len(f.readline())", "def get_canvas_line_length(self, line_id):\n\n line_coords = self.coords(line_id)\n x1 = line_coords[0]\n y1 = line_coords[1]\n x2 = line_coords[2]\n y2 = line_coords[3]\n length = numpy.sqrt(numpy.square(x2-x1) + numpy.square(y2-y1))\n return length", "def line(self):\n if self.__line is None:\n left = self.__source.rfind(\"\\n\", 0, self.__offset) + 1\n right = self.__source.find(\"\\n\", self.__offset)\n\n self.__line = self.__source[left : right]\n self.__lineOffset = self.__offset - left\n\n return self.__line", "def get_roi_line_len(self):\n return len(self.line_list)", "def length(self):\n total_length = 0\n for location_a, location_b in zip(\n self.locations[:-1], self.locations[1:]):\n total_length += Line(location_a, location_b).length\n return total_length", "def get_height_for_line(self, lineno: int) -> int:\n if self.wrap_lines:\n return self.ui_content.get_height_for_line(\n lineno, self.window_width, self.window.get_line_prefix\n )\n else:\n return 1", "def _get_offset_line(self):\n columns = [col for col in self.df if col.startswith('offset_')]\n offset_lines = []\n if columns != []:\n return self.df[columns]\n else:\n print('No offsets available')\n raise IndexError", "def _get_offset(self, lnum, offset):\n start, end = self._get_linespan(lnum)\n length = end - start\n if offset < 0 or offset >= length:\n raise IndexError(\"offset not in 0..%d\" % length)\n\n return start + offset", "def level_size(level):\n return level.count('\\n') + 1, level.find('\\n')", "def _getOldCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"+\"):\n nb_lines += 1\n return nb_lines", "def get(self, offset: int) -> Position:\n line = bisect_right(self.line_starts, offset) - 1\n character = offset - self.line_starts[line]\n return Position(line=line, character=character)", "def get_length(self, px, py):\n idx = px + py * self.width\n return len(self._rotations_buffer[idx])", "def get_offset():\n try:\n offset = open(offset_file, 'r+')\n except IOError as e:\n offset = open(offset_file, 'a+')\n o = offset.readline()\n if len(o) == 0 or o == \"\\n\":\n o = 0\n return o\n offset.close()", "def _offset_to_line_column(cls, chars_per_byte: int, offset: int, adjust_column: int = 0) -> str:\r\n line = (offset // cls.BYTES_PER_ROW) + 1 # Line is 1-based\r\n column = ((offset % cls.BYTES_PER_ROW) * chars_per_byte)\r\n return f\"{line}.{column + adjust_column}\"", "def indentsize(line):\r\n expline = string.expandtabs(line)\r\n return len(expline) - len(string.lstrip(expline))", "def peek_length(self) -> Optional[int]:\n 
LINE_CUTOFF = 10_000\n count = 0\n with open(self.path, mode='r') as f:\n for _ in f:\n count += 1\n\n return count", "def line_size(line, style):\n logical_extents = ffi.new('PangoRectangle *')\n pango.pango_layout_line_get_extents(line, ffi.NULL, logical_extents)\n width = units_to_double(logical_extents.width)\n height = units_to_double(logical_extents.height)\n ffi.release(logical_extents)\n if style['letter_spacing'] != 'normal':\n width += style['letter_spacing']\n return width, height", "def _getNewCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines", "def lineNumber(self):\n if self.__lineNumber is None:\n self.__lineNumber = self.__source.count(\"\\n\", 0, self.__offset) + 1\n\n return self.__lineNumber", "def get_line_end(self):\n return self._line_end", "def offset_at_position(self, position: Position) -> int:\n lines = self.lines\n pos = position_from_utf16(lines, position)\n row, col = pos.line, pos.character\n return col + sum(utf16_num_units(line) for line in lines[:row])", "def line_width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"line_width\")" ]
[ "0.670328", "0.6597389", "0.652373", "0.6489323", "0.6456836", "0.6371697", "0.633413", "0.6271833", "0.6259438", "0.6247852", "0.62300384", "0.61852074", "0.6146044", "0.6099755", "0.6070542", "0.6052635", "0.6051297", "0.6044246", "0.60049206", "0.59722704", "0.5970488", "0.595474", "0.592504", "0.5912829", "0.5903122", "0.5901826", "0.58960456", "0.5825239", "0.5807277", "0.5805586" ]
0.7565296
0
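A lightweight usage sketch of the dLine offset in the record above, using a plain list of lines as a hypothetical stand-in for the real buffer object.

# Hypothetical stand-in for the buffer: a plain list of lines.
lines = ["first", "second line"]
current_line = 0

def line_length(dLine=0):
    # Mirrors the record: length of the line dLine rows below the cursor.
    return len(lines[current_line + dLine])

assert line_length() == 5     # the current line
assert line_length(1) == 11   # the line one below the cursor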
Creates a live demonstration of the kernelization algorithm
def kernel_stream_demo(args: Dict[str, Any]):
    path = args["<edge_list_file>"]
    read_func = get_read_func_from_edgelist(path)

    # Set up graphs
    kernel_exists = True
    graph = read_func(path)
    edges = list(graph.edges)
    if args["--shuffle"]:
        shuffle(edges)
    k = int(args["<k>"])
    kernel: Graph = Graph()
    maximal_matching: Set[Tuple[Any, Any]] = set()

    # Set up matplotlib
    plot.show()
    figure: Figure = plot.figure("Kernelization Algorithm", figsize=(16, 9))
    # Fix window to x=50 y=50 from top right of screen
    figure.canvas.manager.window.wm_geometry("+50+50")
    figure.subplots_adjust(left=0.05, right=0.95, bottom=0.1, top=0.9)
    figure.suptitle("Kernelization Algorithm")
    graph_axes: Axes = figure.add_subplot(1, 2, 1)
    kernel_axes: Axes = figure.add_subplot(1, 2, 2)

    layout = get_graph_layout(graph)
    delay = float(args["--delay"]) / 1000
    with_labels = args["--label"]

    for i, (u, v) in enumerate(edges):
        # Kernelization algorithm
        is_neighbour = False
        if _in(u, maximal_matching):
            is_neighbour = True
            if kernel.degree[u] < k:
                kernel.add_edge(u, v)
        elif _in(v, maximal_matching):
            is_neighbour = True
            if kernel.degree[v] < k:
                kernel.add_edge(u, v)
        if not is_neighbour:
            maximal_matching.add((u, v))
            kernel.add_edge(u, v)
            if len(maximal_matching) > k:
                kernel_exists = False
                break

        # Graph subplot
        draw_graph(graph_axes, graph, kernel, layout, u, v, with_labels)
        # Kernel subplot
        draw_kernel(
            kernel_axes, kernel, graph, layout, k, maximal_matching, with_labels, i
        )

        try:
            # Wait for update
            plot.pause(delay)
        except TclError:
            # Exception caused when exiting
            break

    if kernel_exists:
        draw_success_text(figure, f"A kernel exists of size {k}")
    else:
        draw_failure_text(figure, f"There is no such kernel of size {k}")

    plot.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
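Stripped of the matplotlib drawing, the per-edge kernelization update in the record above reduces to the sketch below. The _in helper is reimplemented here on the assumption that it tests whether a vertex is an endpoint of some matched edge, and networkx stands in for the Graph type used by the original.

from typing import Any, Set, Tuple

import networkx as nx


def _in(vertex: Any, matching: Set[Tuple[Any, Any]]) -> bool:
    # Assumed semantics: is the vertex an endpoint of any matched edge?
    return any(vertex in edge for edge in matching)


def kernelize_stream(edges, k: int):
    kernel = nx.Graph()
    maximal_matching: Set[Tuple[Any, Any]] = set()
    for u, v in edges:
        if _in(u, maximal_matching):
            # Matched vertices keep at most k incident kernel edges.
            if kernel.degree[u] < k:
                kernel.add_edge(u, v)
        elif _in(v, maximal_matching):
            if kernel.degree[v] < k:
                kernel.add_edge(u, v)
        else:
            maximal_matching.add((u, v))
            kernel.add_edge(u, v)
            if len(maximal_matching) > k:
                return None  # no vertex cover of size k can exist
    return kernel


# A star with 4 leaves has a vertex cover of size 1, so a kernel is found.
star_edges = [(0, i) for i in range(1, 5)]
assert kernelize_stream(star_edges, k=1) is not None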
[ "def main():\n\n dist = \"Euclidean\"\n path = \"\"\n k_v = 2\n error = []\n k_vals = []\n\n for i in range(len(sys.argv)):\n if sys.argv[i] == \"--path\":\n path = sys.argv[i+1]\n if sys.argv[i] == \"--k\":\n k_v = int(sys.argv[i+1])\n if sys.argv[i] == \"[--distance Manhattan]\":\n dist = \"Manhattan\"\n if sys.argv[i] == \"[--distance Minkowski]\":\n dist = \"Minkowski\"\n\n\n training_data = create_data(path)\n\n for k in range(2,10):\n k_vals.append(k)\n if k>2:\n for i in range(len(training_data)):\n training_data[i].remove(training_data[i][-1])\n trained_data, centroids = get_clusters(training_data, k, dist)\n error.append(rms(trained_data, dist))\n plot_error(k_vals, error)\n\n for i in range(len(training_data)):\n training_data[i].remove(training_data[i][-1])\n\n trained_data, centroids = get_clusters(training_data, k_v, dist)\n\n test_clusters(trained_data, centroids)", "def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"k\",\n type=int,\n help=\"the number of neighbors\")\n parser.add_argument(\"--xTrain\",\n default=\"q3xTrain.csv\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"--yTrain\",\n default=\"q3yTrain.csv\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"--xTest\",\n default=\"q3xTest.csv\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"--yTest\",\n default=\"q3yTest.csv\",\n help=\"filename for labels associated with the test data\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = pd.read_csv(args.xTrain)\n yTrain = pd.read_csv(args.yTrain)\n xTest = pd.read_csv(args.xTest)\n yTest = pd.read_csv(args.yTest)\n #create an instance of the model\n knn = Knn(args.k)\n knn.train(xTrain, yTrain['label'])\n # predict the training dataset\n yHatTrain = knn.predict(xTrain)\n trainAcc = accuracy(yHatTrain, yTrain['label'])\n # predict the test dataset\n yHatTest = knn.predict(xTest)\n testAcc = accuracy(yHatTest, yTest['label'])\n print(\"Training Acc:\", trainAcc)\n print(\"Test Acc:\", testAcc)\n\n # runs the KNN from 1 to K to compare the accuracy for different values of K.\n performance(xTrain, yTrain, xTest, yTest, args.k)", "def main():\n # Initializing learning rate\n learning_rate = 0.0005\n # Initializing stopping criteria\n stopping_criteria = 0.01\n # load the data training data from a csv file with an url\n training_x,testing_x, training_y, testing_y,mean,sd= ai.store_data(\"https://github.com/santiagocantu98/K-Nearest-Neightbours/raw/master/diabetes.csv\",\"training\")\n normal_testing = np.copy(testing_x)\n\n # scalates the features of the testing data\n testing_data_scaled,mean,sd = ai.scale_features(testing_x,mean,sd)\n ai.print_scaled_data(testing_data_scaled,\"testing\")\n ai.calculate_euclidean_distance(training_x, training_y , testing_data_scaled, testing_y,normal_testing)", "def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen 
map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def main():\n data = Dummy(n_samples=500, n_dim=3)\n X = data.get_dummy()\n clustering = Kmeans(X, K=5, display=False)\n clustering.run()\n print(f\"Number of iterations: {clustering.num_iterations}\\n\")\n\n \"\"\" Test example of clustering_kmeans with unknown number of clusters K \"\"\"\n clustering = Kmeans(X,)\n clustering.silhouette_find_k()\n print(f\"Number of centroids found: {clustering.num_K}\")", "def find_knn_hyperparams():\n n_neighbors = np.arange(5, 10)\n ps = np.arange(1, 10)\n results = []\n\n for p in ps:\n result = []\n for _ in range(10):\n data = FaceDataset(\"embeddings/known\", n=50)\n train_data, train_labels = data.train()\n test_data, test_labels = data.test()\n accs = []\n for n in n_neighbors:\n clf = KNeighborsClassifier(n_neighbors=n, weights=\"distance\", p=p)\n clf, _ = train(clf, train_data, train_labels)\n acc, _ = test(clf, test_data, test_labels)\n accs.append(acc)\n result.append(accs)\n result = np.mean(result, axis=0)\n results.append(result)\n\n plots = []\n for i in range(len(ps)):\n p = plotly.graph_objs.Scatter(x=n_neighbors, y=results[i], name=\"p={}\".format(ps[i]))\n plots.append(p)\n\n plotly.offline.plot(plots, filename=\"knn.html\")\n print(\"C={}\".format(n_neighbors[np.argmax(results)]))", "def kozakov2015(*args, **kwargs):\n clusters = []\n for sel in args:\n cluster = Cluster(\"\", sel, pm.get_coords(sel))\n clusters.append(cluster)\n\n ensemble = Kozakov2015Ensemble(clusters)\n print(\n textwrap.dedent(\n f\"\"\"\n {ensemble}\n Class {ensemble.klass}\n S {ensemble.strength}\n S0 {ensemble.strength0}\n CD {ensemble.max_center_to_center}\n MD {ensemble.max_dist}\n \"\"\"\n )\n )", "def run_kohonen(data, size_k: int=6, sigma: float=2.0, eta: int=0.9, \n tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #convergence criteria\n eps = 1E-6\n eps_2 = 0.1\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n \n print('start iteration')\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n som_step(centers, data[int(i),:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration {}'.format(t))\n \n if convergence == 1:\n #convergence: 
distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n \n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n \n print('Total iteration : {}'.format(t))\n return centers, error[1:]", "def main(args):\n \n ## Load & Preprocess data \n if args.data_name == 'amsterdam': \n file_name = '../data/amsterdam/test_longitudinal_data.csv'\n ori_data = data_preprocess(file_name, args.max_seq_len)\n \n # Divide the data into training and testing\n divided_data, _ = data_division(ori_data, seed = args.seed, divide_rates = [args.train_rate, 1-args.train_rate])\n \n train_data = np.asarray(divided_data[0])\n test_data = np.asarray(divided_data[1])\n\n print('Finish data loading: ' + str(args.data_name)) \n \n ## Run hider algorithm\n if args.hider_model == 'timegan':\n generated_data = timegan.timegan(train_data)\n elif args.hider_model == 'add_noise':\n generated_data = add_noise.add_noise(train_data, args.noise_size) \n print('Finish hider algorithm training') \n \n ## Define enlarge data and its labels\n enlarge_data = np.concatenate((train_data, test_data), axis = 0)\n enlarge_data_label = np.concatenate((np.ones([train_data.shape[0],]), np.zeros([test_data.shape[0],])), axis = 0)\n \n # Mix the order\n idx = np.random.permutation(enlarge_data.shape[0])\n enlarge_data = enlarge_data[idx]\n enlarge_data_label = enlarge_data_label[idx]\n \n ## Run seeker algorithm\n reidentified_data = knn_seeker(generated_data, enlarge_data)\n \n print('Finish seeker algorithm training') \n \n ## Evaluate the performance\n # 1. Feature prediction\n feat_idx = np.random.permutation(train_data.shape[2])[:args.feature_prediction_no]\n ori_feat_pred_perf = feature_prediction(train_data, test_data, feat_idx)\n new_feat_pred_perf = feature_prediction(generated_data, test_data, feat_idx)\n \n feat_pred = [ori_feat_pred_perf, new_feat_pred_perf]\n \n print('Feature prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_feat_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_feat_pred_perf, 4)))\n \n # 2. One step ahead prediction\n ori_step_ahead_pred_perf = one_step_ahead_prediction(train_data, test_data)\n new_step_ahead_pred_perf = one_step_ahead_prediction(generated_data, test_data)\n \n step_ahead_pred = [ori_step_ahead_pred_perf, new_step_ahead_pred_perf]\n \n print('One step ahead prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_step_ahead_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_step_ahead_pred_perf, 4)))\n \n # 3. 
Reidentification score\n reidentification_score = reidentify_score(enlarge_data_label, reidentified_data)\n \n print('Reidentification score: ' + str(np.round(reidentification_score, 4)))\n \n shutil.rmtree('tmp')\n \n return feat_pred, step_ahead_pred, reidentification_score", "def run_kohonen_dynamicLearningRate(data,fun,size_k: int=6, eta: float=0.1, tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n sigma = fun(t)\n som_step(centers, data[i,:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration {}'.format(t))\n \n if convergence == 1:\n #convergence: distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n\n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n return centers, error[1:]", "def nn_kras():\n return GCN_simple(29, [3], 2, 7, dropout=0)", "def train(x_train, y_train, x_test, y_test):\n\n print(\" Nearest centroid : \", end='')\n run(x_train, y_train, x_test, y_test, NearestCentroid())\n print(\" k-NN classifier (k=3) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=3))\n print(\" k-NN classifier (k=7) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))\n print(\" Naive Bayes (Gaussian) : \", end='')\n run(x_train, y_train, x_test, y_test, GaussianNB())\n print(\" Random Forest (trees= 5) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))\n print(\" Random Forest (trees= 50) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))\n print(\" Random Forest (trees=500) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))\n print(\" Random Forest (trees=1000): \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=1000))\n print(\" LinearSVM (C=0.01) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))\n print(\" LinearSVM (C=0.1) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))\n print(\" LinearSVM (C=1.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))\n print(\" LinearSVM (C=10.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))", "def main():\n \"\"\"\n This is just for testing the functions\n \"\"\"\n\n x1 = np.array([1, 1, 1, 1, -1, -1, 1, 1, 1])\n x2 = np.array([1, -1, 1, 1, 1, 1, 1, -1, 1])\n x3 = np.array([-1, 1, -1, -1, 1, -1, -1, 1, -1])\n train_set = np.vstack((x1, x2))\n train_set = np.vstack((train_set, x3))\n\n\n params = {\n \"epochs\": 100,\n \"neurons\": 
len(x1),\n \"learn_method\": 'classic'\n }\n\n hop = hop_net.HopfieldNet(train_set, **params)\n hop.batch_train()\n show_trained(train_set)\n\n x4d = [1,1,1,1,1,1,1,1,1]\n x5d = [1,1,1,1,-1,-1,1,-1,-1]\n x45d = np.vstack((x4d, x5d))\n test_set = np.vstack((x45d, train_set))\n recalled_set = hop.recall(test_set)\n for i in range(test_set.shape[0]):\n show_tested(test_set[i], recalled_set[i])", "def test_cherenkov_instability( show=False ):\n # Dictionary to record the final value of E\n slope_Erms = {}\n\n for scheme in [ 'standard', 'galilean', 'pseudo-galilean']:\n\n # Choose the correct parameters for the scheme\n if scheme == 'standard':\n v_comoving = 0.\n use_galilean = False\n else:\n v_comoving = 0.9999*c\n if scheme == 'galilean':\n use_galilean = True\n else:\n use_galilean = False\n\n # Initialize the simulation object\n sim = Simulation( Nz, zmax, Nr, rmax, Nm, dt,\n p_zmin, p_zmax, p_rmin, p_rmax, p_nz, p_nr, p_nt, n_e,\n zmin=zmin, initialize_ions=True,\n v_comoving=v_comoving, use_galilean=use_galilean,\n boundaries={'z':'periodic', 'r':'reflective'}, use_cuda=use_cuda )\n\n # Give a relativistic velocity to the particle, with some noise\n sim.ptcl[0].uz[:] = uz_m\n sim.ptcl[0].inv_gamma[:] = 1./np.sqrt( 1 + sim.ptcl[0].uz**2 )\n sim.ptcl[1].uz[:] = uz_m\n sim.ptcl[1].inv_gamma[:] = 1./np.sqrt( 1 + sim.ptcl[1].uz**2 )\n\n # Perform the simulation;\n # record the rms electric field every 50 timestep\n Er_rms = np.zeros(int(N_step/30)+1)\n t = np.zeros(int(N_step/30+1))\n Er_rms[0] = get_Er_rms(sim)\n t[0] += sim.time\n for i in range(int(N_step/30)):\n sim.step( 30, show_progress=False )\n print('Checkpoint %d' %i)\n Er_rms[i+1] = get_Er_rms(sim)\n t[i+1] += sim.time\n print('Calculated RMS')\n\n # Check/plot the results\n if show:\n import matplotlib.pyplot as plt\n # Add a plot\n plt.semilogy( t, Er_rms, '-', label=scheme )\n plt.ylabel('RMS(Er)')\n plt.xlabel('Time')\n else:\n # Registed the final value of the slope of the electric field\n slope_Erms[scheme] = np.log( Er_rms[-1] ) - np.log(Er_rms[-2] )\n\n if show:\n # Show the plot\n plt.legend(loc=0)\n plt.show()\n else:\n # Check that, in the standard case, the electric field is\n # growing much faster, due to the Cherenkov instability\n assert slope_Erms['standard'] > 3.5*slope_Erms['galilean']\n assert slope_Erms['standard'] > 3.5*slope_Erms['pseudo-galilean']", "def eg_sk():\n\n rxs = []\n a = []\n b = []\n c = []\n d = []\n e = []\n f = []\n g = []\n h = []\n i = []\n j = []\n\n for _ in range(1000):\n a.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n b.append(utils.gaussian(10.1, 1))\n\n for _ in range(1000):\n c.append(utils.gaussian(20, 1))\n\n for _ in range(1000):\n d.append(utils.gaussian(30, 1))\n\n for _ in range(1000):\n e.append(utils.gaussian(30.1, 1))\n\n for _ in range(1000):\n f.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n g.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n h.append(utils.gaussian(40, 1))\n\n for _ in range(1000):\n i.append(utils.gaussian(40, 3))\n\n for _ in range(1000):\n j.append(utils.gaussian(10, 1))\n\n for k, v in enumerate([a, b, c, d, e, f, g, h, i, j]):\n rxs.append(creation.RX(v, \"rx{}\".format(k)))\n\n for rx in stats.tiles(stats.scottKnot(rxs)):\n print(\"\", rx[\"rank\"], rx[\"name\"], rx[\"show\"], sep=\"\\t\")", "def demo():\n # declare dummy input vector with two dimensions:\n vectors = numpy.array([[2,4], [0,1], [1,1], [3,2], [4,0], [2,2], [8, 9], [8, 11]])\n\n # compute the distance matrix on the basis of the vectors via sklearn:\n 
dist_matrix = pairwise_distances(vectors, metric='cityblock')\n\n # plot the distance matrix:\n # dist_matrix.draw() this doesn't work anymore\n\n # initialize a temporal VNC clusterer, here with the Ward linkage method:\n clusterer = VNClusterer(dist_matrix, linkage='ward') # could also be a plain Clusterer()\n\n # start the clustering procedure:\n clusterer.cluster(verbose=1)\n\n labels = ['n'+str(i+1) for i in range(len(vectors))]\n # plot the result as a dendrogram\n clusterer.dendrogram.draw(save=True,\n labels=labels,\n title=\"VNC Analysis (Ward's Linkage)\")", "def random_walk(G, k, convergence_threshold, plot):\n #SET: minimum amount of iterations\n min_iterations = 1000\n #SET: minimum amount of walks\n min_walks = 10\n #SET: maximum amount of walks\n max_walks = 5000 \n\n double_zero, double_one, zero_one, one_zero = 0, 0, 0, 0\n RWC_list = []\n convergence = 10000\n i = 0\n\n nodes0 = [node for node in G.nodes(data=True) if node[1]['cluster'] == 0]\n nodes1 = [node for node in G.nodes(data=True) if node[1]['cluster'] == 1]\n\n if nodes0 == [] or nodes1 == []:\n return 'NaN'\n \n degrees0 = sorted([(node[0], G.degree(node[0])) for node in nodes0], key=itemgetter(1), reverse=True)\n degrees1 = sorted([(node[0], G.degree(node[0])) for node in nodes1], key=itemgetter(1), reverse=True)\n\n k_tuples= degrees0[:int(ceil(k*len(nodes0)))] + degrees1[:int(ceil(k*len(nodes1)))]\n k_nodes= [node for (node, degree) in k_tuples]\n \n while convergence > convergence_threshold or i < min_iterations:\n # choose random cluster (choose random between 0,1), prob is 0.5\n begin_cluster = random.choice([0, 1])\n\n # choose random node in cluser\n if begin_cluster == 0:\n current_node = random.choice(nodes0)\n else:\n current_node = random.choice(nodes1)\n\n # choose random edge from cluster (repeat)\n current_node = current_node[0]\n\n j = 0\n while j < max_walks:\n previous_node = current_node\n current_node = random.choice(G.neighbors(current_node))\n #prevent self_loops\n if previous_node == current_node:\n current_node = previous_node\n j+=1\n continue\n #print('{}'.format(current_node))\n if current_node in k_nodes:\n if j < min_walks:\n continue\n else:\n break\n j += 1 \n\n # what cluster end node\n end_cluster = G.node[current_node]['cluster']\n\n #Keep tally of outcomes\n if begin_cluster == 0:\n if end_cluster == 0:\n double_zero += 1\n else:\n zero_one += 1\n else:\n if end_cluster == 0:\n one_zero += 1\n else:\n double_one += 1\n\n #calculate conditional probabilities\n total = double_zero + double_one + zero_one + one_zero\n\n prob00 = (double_zero/total) / 0.5\n prob11 = (double_one/total) / 0.5\n prob10 = (one_zero/total)/ 0.5\n prob01 = (zero_one/total)/ 0.5\n\n rwc = prob00*prob11 - prob10*prob01\n\n #update convergence \n if RWC_list != []:\n convergence = abs(rwc - RWC_list[-1])\n \n i += 1 \n RWC_list.append(rwc)\n\n # Plot RWC scores over time \n if plot == True:\n plt.plot(RWC_list)\n plt.show()\n\n return(rwc)", "def main():\r\n test = TesterNeighbour()\r\n test.setUp()\r\n test.test_result_n()\r\n print(\"result_of_algorithm_test - passed\")", "def main():\n \n\n parser = argparse.ArgumentParser(description='MozartFlow: Observing the flow of music.')\n\n parser.add_argument('-k', '--knn', help='K in K-nearest neighbours algorithm', default=2)\n parser.add_argument('-ll', '--loglevel', help='Set the logging level', type=str, choices=['DEBUG','INFO','WARNING','ERROR','CRITICAL'])\n parser.add_argument('-p', '--path', help='Filepath of the audio file, need to be labeled', 
type=str, default='')\n \n args = parser.parse_args()\n \n logging.basicConfig(level=args.loglevel)\n\n model = Model(args.knn, args.loglevel)\n model.model()\n\n if args.path is not '':\n model.prediction(args.path)\n else:\n print('\\n[-.-] Ain\\'t you testing something! Well, that\\'s a shame. I learned just for you.')\n\n logger.info('\\n\\n-------/------- Created by ------/-------')\n for creator in model.read_yml['_creator']:\n logger.info('Lord {}'.format(creator))", "def train():\n rng = random.PRNGKey(0)\n\n # Get Zachary's karate club graph dataset.\n node_feats, node_labels, sources, targets = get_karate_club_data()\n\n # Create model and optimizer.\n _, initial_params = GNN.init(\n rng, node_x=node_feats, edge_x=None, sources=sources, targets=targets)\n model = nn.Model(GNN, initial_params)\n optimizer = optim.Adam(learning_rate=0.01).create(model)\n\n # Train for 20 iterations.\n for iteration in range(20):\n optimizer, loss = train_step(optimizer, node_feats, sources, targets)\n\n accuracy = eval_step( # Model is stored in `optimizer.target`.\n optimizer.target, node_feats, sources, targets, node_labels)\n\n print('iteration: %d, loss: %.4f, accuracy: %.2f'\n % (iteration+1, loss, accuracy * 100))", "def main() -> None:\n\n # Load pickled (adj, feat) tuple\n with open(os.path.join(NETWORK_DIR, PICKLE_FILE), \"rb\") as file:\n adj, features = pickle.load(file)\n\n g = nx.Graph(adj) # Recreate graph using node indices (0 to num_nodes-1)\n\n # Draw the network\n # nx.draw_networkx(g, with_labels=False, node_size=50, node_color=\"r\")\n # plt.show()\n\n # Preprocessing (train/test split)\n np.random.seed(0) # make sure train-test split is consistent\n adj_sparse = nx.to_scipy_sparse_matrix(g)\n\n # Perform train-test split\n (\n adj_train,\n train_edges,\n train_edges_false,\n val_edges,\n val_edges_false,\n test_edges,\n test_edges_false,\n ) = mask_test_edges(adj_sparse, test_frac=0.3, val_frac=0.1)\n\n # new graph object with only non-hidden edges\n g_train = nx.from_scipy_sparse_matrix(adj_train)\n\n # Inspect train/test split\n print(\"Total nodes:\", adj_sparse.shape[0])\n\n # adj is symmetric, so nnz (num non-zero) = 2 * num_edges\n print(\"Total edges:\", int(adj_sparse.nnz / 2))\n print(\"Training edges (positive):\", len(train_edges))\n print(\"Training edges (negative):\", len(train_edges_false))\n print(\"Validation edges (positive):\", len(val_edges))\n print(\"Validation edges (negative):\", len(val_edges_false))\n print(\"Test edges (positive):\", len(test_edges))\n print(\"Test edges (negative):\", len(test_edges_false))\n\n # Train node2vec (Learn Node Embeddings)\n\n # node2vec settings\n # NOTE: When p = q = 1, this is equivalent to DeepWalk\n\n P = 1 # Return hyperparameter\n Q = 1 # In-out hyperparameter\n WINDOW_SIZE = 10 # Context size for optimization\n NUM_WALKS = 10 # Number of walks per source\n WALK_LENGTH = 80 # Length of walk per source\n DIMENSIONS = 128 # Embedding dimension\n DIRECTED = False # Graph directed/undirected\n WORKERS = 8 # Num. 
parallel workers\n ITER = 1 # SGD epochs\n\n # Preprocessing, generate walks\n\n # create node2vec graph instance\n g_n2v = node2vec.Graph(g_train, DIRECTED, P, Q)\n g_n2v.preprocess_transition_probs()\n walks = g_n2v.simulate_walks(NUM_WALKS, WALK_LENGTH)\n walks = [list(map(str, walk)) for walk in walks]\n\n # Train skip-gram model\n model = Word2Vec(\n walks,\n size=DIMENSIONS,\n window=WINDOW_SIZE,\n min_count=0,\n sg=1,\n workers=WORKERS,\n iter=ITER,\n )\n\n # Store embeddings mapping\n emb_mappings = model.wv\n\n print(emb_mappings)\n\n # Create node embeddings matrix (rows = nodes, columns = embedding features)\n emb_list = []\n for node_index in range(0, adj_sparse.shape[0]):\n node_str = str(node_index)\n node_emb = emb_mappings[node_str]\n emb_list.append(node_emb)\n emb_matrix = np.vstack(emb_list)\n\n def get_edge_embeddings(edge_list):\n \"\"\"\n Generate bootstrapped edge embeddings (as is done in node2vec paper)\n Edge embedding for (v1, v2) = hadamard product of node embeddings for\n v1, v2.\n \"\"\"\n embs = []\n for edge in edge_list:\n node1 = edge[0]\n node2 = edge[1]\n emb1 = emb_matrix[node1]\n emb2 = emb_matrix[node2]\n edge_emb = np.multiply(emb1, emb2)\n embs.append(edge_emb)\n embs = np.array(embs)\n return embs\n\n # Train-set edge embeddings\n pos_train_edge_embs = get_edge_embeddings(train_edges)\n neg_train_edge_embs = get_edge_embeddings(train_edges_false)\n train_edge_embs = np.concatenate(\n [pos_train_edge_embs, neg_train_edge_embs]\n )\n\n # Create train-set edge labels: 1 = real edge, 0 = false edge\n train_edge_labels = np.concatenate(\n [np.ones(len(train_edges)), np.zeros(len(train_edges_false))]\n )\n\n # Val-set edge embeddings, labels\n pos_val_edge_embs = get_edge_embeddings(val_edges)\n neg_val_edge_embs = get_edge_embeddings(val_edges_false)\n val_edge_embs = np.concatenate([pos_val_edge_embs, neg_val_edge_embs])\n val_edge_labels = np.concatenate(\n [np.ones(len(val_edges)), np.zeros(len(val_edges_false))]\n )\n\n # Test-set edge embeddings, labels\n pos_test_edge_embs = get_edge_embeddings(test_edges)\n neg_test_edge_embs = get_edge_embeddings(test_edges_false)\n test_edge_embs = np.concatenate([pos_test_edge_embs, neg_test_edge_embs])\n\n # Create val-set edge labels: 1 = real edge, 0 = false edge\n test_edge_labels = np.concatenate(\n [np.ones(len(test_edges)), np.zeros(len(test_edges_false))]\n )\n\n # Train logistic regression classifier on train-set edge embeddings\n edge_classifier = LogisticRegression(random_state=0)\n edge_classifier.fit(train_edge_embs, train_edge_labels)\n\n # Predicted edge scores: probability of being of class \"1\" (real edge)\n val_preds = edge_classifier.predict_proba(val_edge_embs)[:, 1]\n val_roc = roc_auc_score(val_edge_labels, val_preds)\n val_ap = average_precision_score(val_edge_labels, val_preds)\n\n # Predicted edge scores: probability of being of class \"1\" (real edge)\n test_preds = edge_classifier.predict_proba(test_edge_embs)[:, 1]\n test_roc = roc_auc_score(test_edge_labels, test_preds)\n test_ap = average_precision_score(test_edge_labels, test_preds)\n\n print(\"node2vec Validation ROC score: \", str(val_roc))\n print(\"node2vec Validation AP score: \", str(val_ap))\n print(\"node2vec Test ROC score: \", str(test_roc))\n print(\"node2vec Test AP score: \", str(test_ap))", "def test_function_kg(test_loader, model, settype, S, lk, bk, num_iters, epsilon, num_classes, topk):\r\n\r\n det_boxes = []\r\n det_labels = []\r\n det_scores = []\r\n true_boxes = []\r\n true_labels = []\r\n true_difficulties 
= []\r\n true_areas = []\r\n\r\n with torch.no_grad():\r\n for i, (images, targets) in enumerate(test_loader):\r\n\r\n # Move to default device.\r\n images = [im.to(device) for im in images]\r\n\r\n # Some (1021) images of COCO contain no objects at all. These are filtered out in the data loader, but\r\n # return an empty list, which raises an error in the model, so they are skipped.\r\n if len(images) == 0:\r\n continue\r\n\r\n prediction = model(images)\r\n\r\n for p in range(len(prediction)-1):\r\n true_boxes.append(targets[p]['boxes'].to(device))\r\n true_labels.append(targets[p]['labels'].to(device))\r\n\r\n if settype == 'voc':\r\n true_difficulties.append(targets[p]['difficulties'].to(device))\r\n # true_difficulties.append(torch.zeros(len(targets[p]['boxes'])).to(device))\r\n true_areas.append(torch.zeros(len(targets[p]['boxes'])).to(device))\r\n else:\r\n true_difficulties.append(torch.zeros(len(targets[p]['boxes'])).to(device))\r\n true_areas.append(targets[p]['areas'].to(device))\r\n\r\n boxes_temp = prediction[1][0]['boxes']\r\n labels_temp = prediction[1][0]['labels']\r\n scores_temp = prediction[1][0]['scores']\r\n\r\n new_predictions = torch.zeros((boxes_temp.shape[0], num_classes)).to(device)\r\n\r\n for l in range(new_predictions.shape[0]):\r\n label = labels_temp[l] - 1\r\n new_predictions[l, label] = scores_temp[l]\r\n\r\n p_hat = find_p_hat(boxes_temp, new_predictions, bk, lk, S, num_iters, epsilon)\r\n\r\n predk, boxk, labk, scok = find_top_k(p_hat, boxes_temp, topk)\r\n\r\n det_boxes.append(boxk)\r\n det_labels.append(labk)\r\n det_scores.append(scok)\r\n\r\n del prediction\r\n torch.cuda.empty_cache()\r\n\r\n return det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties, true_areas", "def _main():\n min_args = 6\n max_args = min_args + 3\n if len(sys.argv) not in range(min_args, max_args + 1):\n print(_HELP_STR)\n sys.exit(1)\n\n n_o = int(sys.argv[1])\n d = int(sys.argv[2])\n r_q = int(sys.argv[3])\n q = int(sys.argv[4])\n eps = float(sys.argv[5])\n kern = sys.argv[6] if len(sys.argv) > 6 else 'rbf'\n seed = int(sys.argv[7]) if len(sys.argv) > 7 else 1234\n testtype = sys.argv[8] if len(sys.argv) > 8 else 'inversion'\n kerntypes = ['rbf', 'periodic', 'matern', 'mix']\n testtypes = ['inv', 'opt']\n\n assert n_o > 7\n assert d > 0\n assert r_q > 0\n assert r_q <= d\n assert q > 0\n assert eps > 0\n assert kern in kerntypes\n assert testtype in testtypes\n np.random.seed(seed)\n n = n_o * d\n\n print('n_o {} d {} r_q {} q {} eps {} kern {} seed {} test-type {}'.format(\n n_o, d, r_q, q, eps, kern, seed, testtype))\n\n distrib = scipy.stats.truncnorm(-1, 1)\n coreg_vecs = distrib.rvs(size=(q, r_q, d))\n coreg_diags = np.reciprocal(np.random.gamma(shape=1, scale=1, size=(q, d)))\n noise = np.reciprocal(np.random.gamma(\n shape=(1 + (1 / eps)), scale=1, size=d))\n kernels = gen_kernels(q)\n descriptions = [\n 'rbf only - inv lengthscales in logspace(0, 1, q)',\n 'periodic only - inv lengthscale is 1, periods in logspace(0, 1, q)',\n 'matern32 only - inv lengthscales in logspace(0, 1, q)',\n 'mixed - rbf, periodic, matern varying params added together']\n kdict = {k_name: (k, desc) for k_name, k, desc in\n zip(kerntypes, kernels, descriptions)}\n\n Xs, Ys = np.random.rand(2, d, n_o)\n Xs = np.expand_dims(Xs, Xs.ndim)\n\n dists, grid_dists, interpolant, interpolant_T = prep(\n d, n_o, Xs)\n\n k, desc = kdict[kern]\n print()\n print(desc)\n\n fkern = FunctionalKernel(D=d, lmc_kernels=k,\n lmc_ranks=[len(x) for x in coreg_vecs])\n fkern.noise = noise\n 
fkern.coreg_vecs = coreg_vecs\n fkern.coreg_diags = coreg_diags\n fkern.set_input_dim(1)\n\n run_kernel_benchmark(\n Xs, Ys, fkern, dists, grid_dists, interpolant, interpolant_T, testtype)", "def main():\n # discrete_actions = list(np.linspace(0.00001, 1., 10, dtype=np.float32))\n # discrete_actions = [np.array([action,]) for action in discrete_actions]\n\n discrete_actions = create_discrete_actions_epsilon_kp()\n config = Config()\n experiment = Experiment(\"PathFollower-FeedbackLinearized-v0\", discrete_actions, NUM_RUNS, config)\n experiment.train()\n experiment.plot_rewards()\n experiment.plot_actions()", "def main():\n # Read in trainingSet and testSet as a DataFrame\n trainingOriginal = pd.read_csv(\n filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.train.5fold.csv\")\n testOriginal = pd.read_csv(filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.test.csv\")\n\n # Select only the numeric data\n training = pd.DataFrame(trainingOriginal.select_dtypes(['number']))\n training = pd.concat([training.reset_index(drop=True),\n trainingOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Select only the numeric data\n test = pd.DataFrame(testOriginal.select_dtypes(['number']))\n test = pd.concat([test.reset_index(drop=True),\n testOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Normalize the columns for training and test\n # print training['age'].min()\n # print training['age'].max()\n # print training.head()\n\n # Run max-min normalization on numerical columns for testing and training data\n for i in range(6):\n training.iloc[:, i] = (training.iloc[:, i]- training.iloc[:, i].min())/(training.iloc[:, i].max() - training.iloc[:, i].min())\n test.iloc[:, i] = (test.iloc[:, i]- test.iloc[:, i].min())/(test.iloc[:, i].max() - test.iloc[:, i].min())\n\n # Convert the 'earns' column to boolean as follows\n training['earns'] = training['earns'] == '>50K'\n test['earns'] = test['earns'] == ' >50K'\n\n # Group the training set by the fold attribute as given by the dataset\n trainingForFinal = training\n training = training.groupby('fold')\n\n # Since we want to consider odd k-values from 1 to 39, construct a list with these values\n kList = []\n for i in range(40):\n if i % 2 == 1:\n kList.append(i)\n\n # Empty dictionary to hold performance of each k-values and its accuracy\n performance = {}\n\n # Compute the performance for each k-value\n for k in kList:\n performance = crossValidation(training, k, performance)\n\n # Sort the performance dictionary by its accuracy (value)\n performance = sorted(performance.items(), key=operator.itemgetter(1), reverse=True)\n\n # Open file to write results\n file = open('grid.results.txt', 'w')\n # Write the results to file\n file.write(\"K | Accuracy\\n\")\n for item in performance:\n if item[0] < 10:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n else:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n # Close file\n file.close()\n\n # The best K is the one at the top of the list after the sorting\n bestK = performance[0][0]\n\n print 'Running Test Set with K = ' + str(bestK)\n\n applyModel(test,trainingForFinal,bestK)", "def distortion_of_kmeans_clustering(data_table):\n num_iritations = 5\n singleton_list = []\n for line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n distortion_list = []\n for num in range(20, 5, -1):\n cluster_list = kmeans_clustering(singleton_list,num, num_iritations)\n distortion = compute_distortion(data_table, cluster_list)\n 
distortion_list.append(distortion)\n return distortion_list\n\n#####################################################################\n# Code to load cancer data, compute a clustering and\n# visualize the results\n\n\n# def run_example():\n# \"\"\"\n# Load a data table, compute a list of clusters and\n# plot a list of clusters\n#\n# Set DESKTOP = True/False to use either matplotlib or simplegui\n# \"\"\"\n# data_table = load_data_table(DATA_3108_URL)\n# singleton_list = []\n# for line in data_table:\n# singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n num_clusters = 16\n # cluster_list = sequential_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"sequential clusters\")\n #\n # cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"hierarchical clusters\")\n #\n # cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, num_clusters, 5)\n # print(\"Displaying\", len(cluster_list), \"k-means clusters\")\n\n # draw the clusters using matplotlib or simplegui\n #\n # if DESKTOP:\n # # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, False)\n # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\n\n # else:\n # alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers", "def main(name):\n\n # Create an object from HNR. The constructor of the class will initialize\n # the MNIST dataset and optimizer.\n hnr_demo = HandwrittenNumeralRecognition()\n # Create the default CNN model\n hnr_demo.create_default_model()\n # Compile the model\n hnr_demo.compile()\n # Fit the model\n hnr_demo.fit()\n # Show a summary of the model. It will show the framework of the model\n hnr_demo.show_summary()\n # Evaluate the model by the whole test set\n hnr_demo.evaluate()\n # Save the configuration and the evaluation result (loss and accuracy) of the model\n hnr_demo.write_config(name)\n # Draw a picture of the training process\n hnr_demo.visualization(name, save_fig=True)\n # Save the model as a h5 file\n hnr_demo.save_model(name)\n return", "def fig5(X_r, Y, TRAIN_SIZE=6000):\n \n # Normalize X_r\n X_n = preprocessing.normalize(X_r)\n \n #kNN weighting and k\n weights = [ \"uniform\", \"distance\" ]\n ks = [2,4,8,16,32,64] \n \n # Little lambda functions to standardize feature extraction\n pca = lambda X,Y: PCA(n_components=128).fit(X).transform(X)\n lda = lambda X,Y: LDA().fit(X, Y).transform(X)\n idn = lambda X,Y: X\n \n # Start the plot\n fig, ax = plt.subplots()\n plt.ylabel(\"Error %\")\n plt.xlabel(\"k\")\n \n \n # Try every combination (product) of weights, feature extraction and normalization\n for weight, feat_reduce, X_ in itertools.product(\n weights, [pca, lda, idn], [X_r, X_n]):\n \n # Reset error rate\n errors = []\n \n #Flags to make things easier\n reduction = \"PCA\" if feat_reduce == pca else \"LDA\"\n normalized = \"n\" if X_ is X_n else \"r\"\n \n #Initialize a black (i.e. 
key - cmy_K_) line\n linestyle = \"k\"\n \n # Match the point style used in Vailaya\n if weight == \"uniform\":\n if X_ is X_n:\n linestyle += \"x\"\n else:\n linestyle += \"*\"\n if weight == \"distance\":\n if X_ is X_n:\n linestyle += \"o\"\n else:\n linestyle += \"+\"\n \n # As well as the line style\n if feat_reduce is pca:\n linestyle += \":\" # Dotted\n elif feat_reduce is lda:\n linestyle += \"--\" # Solid\n else:\n linestyle += \"-\" # Dashed\n \n # Loop through all k's \n for k in ks:\n #Initialized classifier parameters\n knn = neighbors.KNeighborsClassifier(warn_on_equidistant=False)\n knn.n_neighbors = k\n knn.weights = weight\n \n #Here's where the lambda's come in handy.\n X = feat_reduce(X_,Y)\n \n # Fit the training set\n knn.fit(X[:TRAIN_SIZE], Y[:TRAIN_SIZE])\n \n # Again ugly code for the predictions\n predictions = []\n for i in range(TRAIN_SIZE, len(X)):\n predictions += [ knn.predict(X[i])[0] ] \n \n # Calculate error rate and append it to error rate list\n error = 1.- float(sum(predictions == Y[TRAIN_SIZE:])) / len(predictions)\n errors += [error]\n \n # Print it just for fun. Also in case error rates need to be exported.\n print weight, reduction, normalized, k, error\n \n # Plot the line for all k values \n ax.plot(ks, errors, linestyle)\n \n # Couldn't specify legends properly\n #ax.legend()", "def train():\n pass", "def main():\n\n # test()\n\n DIM = 10\n SHIPS = [2,3,3,4,5]\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n model = train_Convnet(DIM, SHIPS, device)\n torch.save(model.state_dict(),'./saved_models/convenet.torch' )\n\n conv_model = ModelConvnet(\"Vikram\", DIM, len(SHIPS), device).to(device)\n conv_model.load_state_dict(torch.load('./saved_models/convenet.torch'))\n\n g = Game(conv_model, ModelRandom(\"Betal\", DIM, len(SHIPS), device), Environment(DIM, SHIPS, \"Vikram\"), Environment(DIM, SHIPS, \"Betal\"))\n g.play()" ]
[ "0.6887435", "0.666323", "0.6602766", "0.65625256", "0.64616084", "0.6310591", "0.6247457", "0.6233544", "0.6176472", "0.6066823", "0.60558283", "0.60446703", "0.60367167", "0.6026387", "0.60035414", "0.5995146", "0.5965171", "0.59172124", "0.58743846", "0.5867801", "0.5857124", "0.5849077", "0.5826762", "0.5823305", "0.5822475", "0.5810344", "0.5796249", "0.5794116", "0.5783273", "0.5772697" ]
0.6938872
0
Apply video compression to sample `x`.
def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: def compress_video(x: np.ndarray, video_format: str, constant_rate_factor: int, dir_: str = ""): """ Apply video compression to video input of shape (frames, height, width, channel). """ import ffmpeg video_path = os.path.join(dir_, f"tmp_video.{video_format}") _, height, width, _ = x.shape # numpy to local video file process = ( ffmpeg.input("pipe:", format="rawvideo", pix_fmt="rgb24", s=f"{width}x{height}") .output(video_path, pix_fmt="yuv420p", vcodec="libx264", crf=constant_rate_factor) .overwrite_output() .run_async(pipe_stdin=True, quiet=True) ) process.stdin.write(x.flatten().astype(np.uint8).tobytes()) process.stdin.close() process.wait() # local video file to numpy stdout, _ = ( ffmpeg.input(video_path) .output("pipe:", format="rawvideo", pix_fmt="rgb24") .run(capture_stdout=True, quiet=True) ) return np.frombuffer(stdout, np.uint8).reshape(x.shape) if x.ndim != 5: raise ValueError("Video compression can only be applied to spatio-temporal data.") if self.channels_first: x = np.transpose(x, (0, 2, 3, 4, 1)) # apply video compression per video item x_compressed = x.copy() with TemporaryDirectory(dir=config.ART_DATA_PATH) as tmp_dir: for i, x_i in enumerate(tqdm(x, desc="Video compression", disable=not self.verbose)): x_compressed[i] = compress_video(x_i, self.video_format, self.constant_rate_factor, dir_=tmp_dir) if self.channels_first: x_compressed = np.transpose(x_compressed, (0, 4, 1, 2, 3)) return x_compressed, y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n x = self.upsample(x)\n return x", "def compress_video(x: np.ndarray, video_format: str, constant_rate_factor: int, dir_: str = \"\"):\n import ffmpeg\n\n video_path = os.path.join(dir_, f\"tmp_video.{video_format}\")\n _, height, width, _ = x.shape\n\n # numpy to local video file\n process = (\n ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\", s=f\"{width}x{height}\")\n .output(video_path, pix_fmt=\"yuv420p\", vcodec=\"libx264\", crf=constant_rate_factor)\n .overwrite_output()\n .run_async(pipe_stdin=True, quiet=True)\n )\n process.stdin.write(x.flatten().astype(np.uint8).tobytes())\n process.stdin.close()\n process.wait()\n\n # local video file to numpy\n stdout, _ = (\n ffmpeg.input(video_path)\n .output(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\")\n .run(capture_stdout=True, quiet=True)\n )\n return np.frombuffer(stdout, np.uint8).reshape(x.shape)", "def transform(self, x):\n\n sr = self.audio_settings[\"sr\"]\n\n ###################\n # Waveform \n ###################\n\n if self.cache < 2:\n if self.aug_settings is not None:\n if \"bg_noise\" in self.aug_settings:\n x = self.bg_adder(samples=x, sample_rate=sr)\n\n if \"time_shift\" in self.aug_settings:\n x = time_shift(x, sr, **self.aug_settings[\"time_shift\"])\n\n if \"resample\" in self.aug_settings:\n x, _ = resample(x, sr, **self.aug_settings[\"resample\"])\n \n x = librosa.util.fix_length(x, sr)\n\n ###################\n # Spectrogram\n ###################\n \n x = librosa.feature.melspectrogram(y=x, **self.audio_settings) \n x = librosa.feature.mfcc(S=librosa.power_to_db(x), n_mfcc=self.audio_settings[\"n_mels\"])\n\n\n if self.aug_settings is not None:\n if \"spec_aug\" in self.aug_settings:\n x = spec_augment(x, **self.aug_settings[\"spec_aug\"])\n\n x = torch.from_numpy(x).float().unsqueeze(0)\n return x", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def compress(self, tensor):", "def compress(self, x,f1,f2,f3,f4,f5,f6,f7,f8,outputfile,path,row):\n mse, bpp, x_hat, pack = self._run(\"compress\", x=x,feature1=f1,feature2=f2,feature3=f3,feature4=f4,\n feature5=f5,feature6=f6,feature7=f7,feature8=f8)\n\n # Write a binary file with the shape information and the compressed string.\n packed = tfc.PackedTensors()\n tensors, arrays = zip(*pack)\n packed.pack(tensors, arrays)\n with open(outputfile, \"wb\") as f:\n f.write(packed.string)\n\n x *= 255 # x_hat is already in the [0..255] range\n psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n\n # The actual bits per pixel including overhead.\n x_shape = tf.shape(x)\n num_pixels = tf.cast(tf.reduce_prod(x_shape[:-1]), dtype=tf.float32)\n packed_bpp = len(packed.string) * 8 / num_pixels\n \n for col in range(np.shape(x_hat)[1]):\n img = x_hat[0,col,:,:,:]/255 \n save_img(path,0,img,row,col+1)\n return x_hat, psnr, msssim, packed_bpp", "def encode(self, x):\n return [self.vae[c_idx].encode(x[c_idx])\n for c_idx in range(self.n_channels)]", "def preprocess(x):\n if x.shape[-1] < 16000 * 8:\n raise ValueError(\n \"Cannot preprocess tensor less than 8 seconds in duration.\"\n )\n vad = VadChunk(*get_vad(\"both\"))\n return vad(x)", "def compress_image(filename,k):", "def apply_fourier_transform(chunked_audio):\n pass", "def compress_video(\n original_video: Union[str, os.PathLike],\n original_video_name: Union[str, os.PathLike],\n outdir: Union[str, os.PathLike],\n ctx: click.Context) 
-> None:\n try:\n import ffmpeg\n except (ModuleNotFoundError, ImportError):\n ctx.fail('Missing ffmpeg! Install it via \"pip install ffmpeg-python\"')\n\n print('Compressing the video...')\n resized_video_name = os.path.join(outdir, f'{original_video_name}-compressed.mp4')\n ffmpeg.input(original_video).output(resized_video_name).run(capture_stdout=True, capture_stderr=True)\n print('Success!')", "def compress(self, *args):\n return _osgAnimation.Vec3Packed_compress(self, *args)", "def forward(self, x):\n # encode\n encode_block1 = self.conv_encode1(x)\n encode_pool1 = self.conv_maxpool1(encode_block1)\n encode_block2 = self.conv_encode2(encode_pool1)\n encode_pool2 = self.conv_maxpool2(encode_block2)\n encode_block3 = self.conv_encode3(encode_pool2)\n encode_pool3 = self.conv_maxpool3(encode_block3)\n # Bottleneck\n bottleneck1 = self.bottleneck(encode_pool3)\n # Decode\n decode_block3 = crop_and_concat(\n bottleneck1, encode_block3, crop=True)\n cat_layer2 = self.conv_decode3(decode_block3)\n decode_block2 = crop_and_concat(\n cat_layer2, encode_block2, crop=True)\n cat_layer1 = self.conv_decode2(decode_block2)\n decode_block1 = crop_and_concat(\n cat_layer1, encode_block1, crop=True)\n final_layer = self.final_layer(decode_block1)\n return final_layer", "def _posterize(self, x, bits):\r\n bits = int((bits/MAX_LEVEL) * 4)\r\n shift = tf.cast(8 - bits, x.dtype)\r\n return tf.bitwise.left_shift(tf.bitwise.right_shift(x, shift), shift)", "def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x", "def compress(self, tensor, *args, **kwargs):\n pass", "def compress(block):\n\n # Transform RGB to YCbCr\n yc_bl = np.zeros((8, 8, 3), dtype=np.int8)\n \n for i in range(8):\n for j in range(8):\n rgb_cmp = np.asmatrix(block[i][j])\n y,cb,cr = (np.array((rgb_cmp*yc_mat+yc_pad).astype(np.uint8))[0]-128).astype(np.int8)\n yc_bl[i][j] = np.array([y, cb, cr])\n \n # Switch YCbCr block to 3 block for each Y, Cb, Cr component and calculate DCT for them\n y_dct = sf.dct(yc_bl[:,:,0], norm='ortho')\n cb_dct = sf.dct(yc_bl[:,:,1], norm='ortho')\n cr_dct = sf.dct(yc_bl[:,:,2], norm='ortho')\n \n # From DCT data to quantization data\n y_quant = np.round(y_dct / quant_tbl).astype(np.int8)\n cb_quant = np.round(cb_dct / quant_tbl).astype(np.int8)\n cr_quant = np.round(cr_dct / quant_tbl)).astype(np.int8)\n \n # Convert 8x8 block to zigzag 1x64 block\n y_zz = zig_zag(y_quant)\n cb_zz = zig_zag(cb_quant)\n cr_zz = zig_zag(cr_quant)\n \n # Calc DC and AC, put together to list\n y_cmp, cb_cmp, cr_cmp = dc_and_ac_calc(y_zz, cb_zz, cr_zz)\n \n # Encode using entropy coding\n y_encode = encode(y_cmp)\n cb_encode = encode(cb_cmp)\n cr_encode = encode(cr_cmp)\n \n return [y_encode, cb_encode, cr_encode]", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def shortcut(self, z, x):\n if x.shape != z.shape:\n d = self.downsample(x)\n p = torch.mul(d, 0)\n return z + torch.cat((d, p), dim=1)\n else:\n return z + x", "def compress(self, samples):\n rms = np.sqrt(np.dot(samples, samples) / window)\n power = self.power * (1.0 - 
self.smooth) + rms * self.smooth\n self.power = power\n if power <= 1e-40:\n samples *= 0\n return\n db_in = 10.0 * math.log10(power)\n if db_in <= self.limit:\n samples *= 0\n return\n db_out = self.cf(db_in)\n db_gain = db_out - db_in + self.postgain\n gain = 10**(0.1 * db_gain)\n samples *= gain", "def compression(s):", "def iblrig_video_compression(session_path, command):\n output_files = list(session_path.joinpath(\"raw_video_data\").rglob('_iblrig_*.mp4'))\n rig_avi_files = list(session_path.joinpath(\"raw_video_data\").rglob('_iblrig_*.avi'))\n # first compress everything (the rationale is not to delete anything if there is a crash)\n for file_in in rig_avi_files:\n _logger.info(f\" compressing {file_in}\")\n file_out = file_in.with_suffix('.mp4')\n status, fout = compress(file_in=file_in, file_out=file_out,\n command=command, remove_original=False)\n output_files.append(fout)\n # then remove everything\n for file_in in rig_avi_files:\n file_in.unlink()\n return output_files", "def apply(self, sample):\n\n buf = io.BytesIO()\n imageio.imwrite(buf, sample, format='jpg', quality=self.quality)\n return imageio.imread(buf.getvalue(), format='jpg')", "def forward(self, x):\n x = self.Encoder(x)\n x = self.Decoder(x)\n return x", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def compress(filename,threshold,ratio,makeup,attack,release,wout=True,plot=False):\n start=time.time()\n if ratio < 1.0:\n print('Ratio must be > 1.0 for compression to occur! You are expanding.')\n if ratio==1.0:\n print('Signal is unaffected.')\n n, data, data_dB,sr,ch=inputwav(filename)\n #Array for the compressed data in dB\n dataC=data_dB.copy()\n #attack and release time constant\n a=np.exp(-np.log10(9)/(44100*attack*1.0E-3))\n re=np.exp(-np.log10(9)/(44100*release*1.0E-3))\n #apply compression\n print('Compressing...')\n for k in range(ch):\n for i in range (n):\n if dataC[i,k]>threshold:\n dataC[i,k]=threshold+(dataC[i,k]-threshold)/(ratio)\n #gain and smooth gain initialization\n gain=np.zeros(n)\n sgain=np.zeros(n)\n #calculate gain\n gain=np.subtract(dataC,data_dB)\n sgain=gain.copy()\n #smoothen gain\n print('Smoothing...')\n for k in range(ch):\n for i in range (1,n):\n if sgain[i-1,k]>=sgain[i,k]:\n sgain[i,k]=a*sgain[i-1,k]+(1-a)*sgain[i,k]\n if sgain[i-1,k]<sgain[i,k]:\n sgain[i,k]=re*sgain[i-1,k]+(1-re)*sgain[i,k] \n #Array for the smooth compressed data with makeup gain applied\n dataCs=np.zeros(n)\n dataCs=data_dB+sgain+makeup\n #Convert our dB data back to bits\n dataCs_bit=10.0**((dataCs)/20.0)\n #sign the bits appropriately:\n for k in range (ch):\n for i in range (n):\n if data[i,k]<0.0:\n dataCs_bit[i,k]=-1.0*dataCs_bit[i,k]\n #Plot the data:\n if plot==True:\n print('Plotting...')\n t=np.linspace(0,n/(1.0*sr),n)\n py.close()\n fig, (ax1, ax2) = py.subplots(nrows=2) \n ax2.plot(t,gain,'k-',linewidth=0.1,label='Gain Reduction')\n ax2.plot(t,sgain,'r-',linewidth=1, label='Gain Reduction Smooth')\n ax1.plot(t,data,'k-',linewidth=1,label=filename)\n ax1.plot(t,dataCs_bit,'m-',linewidth=0.1,\n label=filename+' compressed')\n ax1.axhline(10**(threshold/20.0),linestyle='-',\n color='cyan',linewidth=1)\n ax1.axhline(-10**(threshold/20.0),linestyle='-',\n color='cyan',linewidth=1)\n ax1.legend()\n ax2.legend()\n ax2.set_xlabel('Time (s)')\n ax2.set_ylabel('Gain Reduction (dB)')\n ax1.set_ylabel('Amplitude (Rel. 
Bit)')\n ax1.set_xlabel('Time (s)')\n #write data to 16 bit file\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_compressed.wav',dataCs_bit,\n sr,'PCM_16')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('Done!')\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.') \n return dataCs,dataCs_bit", "def encode(self, x):\n mu = []\n logvar = []\n for i in range(self.n_views):\n mu_, logvar_ = self.encoders[i](x[i])\n mu.append(mu_)\n logvar.append(logvar_)\n mu = torch.stack(mu)\n logvar = torch.stack(logvar)\n mu_out, logvar_out = self.join_z(mu, logvar)\n qz_x = hydra.utils.instantiate( \n self.cfg.encoder.default.enc_dist, loc=mu_out, scale=logvar_out.exp().pow(0.5)\n )\n return [qz_x]", "def __call__(self, x):\n batch_shape = x.shape[:-2]\n signal_shape = x.shape[-2:]\n x = x.reshape((-1, 1) + signal_shape)\n if not self.pre_pad:\n x = self.padding_module(x)\n\n # Note: PyTorch is not effective to pad signals of size N-1 with N\n # elements, thus we had to add this fix.\n if self.pad_size[0] == self.input_size[0]:\n x = torch.cat([x[:, :, 1, :].unsqueeze(2), x, x[:, :, x.shape[2] - 2, :].unsqueeze(2)], 2)\n if self.pad_size[2] == self.input_size[1]:\n x = torch.cat([x[:, :, :, 1].unsqueeze(3), x, x[:, :, :, x.shape[3] - 2].unsqueeze(3)], 3)\n\n output = x.new_zeros(x.shape + (2,))\n output[..., 0] = x\n output = output.reshape(batch_shape + output.shape[-3:])\n return output" ]
[ "0.62431544", "0.61411947", "0.59244233", "0.5863686", "0.547034", "0.5356348", "0.53366745", "0.52343965", "0.5205393", "0.52026373", "0.520205", "0.5157827", "0.515539", "0.5153465", "0.51358205", "0.51312935", "0.5118787", "0.5098527", "0.5098527", "0.5097953", "0.50705427", "0.50661093", "0.5061761", "0.5039137", "0.50367975", "0.5008318", "0.5008318", "0.5002226", "0.49399963", "0.4917739" ]
0.6377516
0
Apply video compression to video input of shape (frames, height, width, channel).
def compress_video(x: np.ndarray, video_format: str, constant_rate_factor: int, dir_: str = ""): import ffmpeg video_path = os.path.join(dir_, f"tmp_video.{video_format}") _, height, width, _ = x.shape # numpy to local video file process = ( ffmpeg.input("pipe:", format="rawvideo", pix_fmt="rgb24", s=f"{width}x{height}") .output(video_path, pix_fmt="yuv420p", vcodec="libx264", crf=constant_rate_factor) .overwrite_output() .run_async(pipe_stdin=True, quiet=True) ) process.stdin.write(x.flatten().astype(np.uint8).tobytes()) process.stdin.close() process.wait() # local video file to numpy stdout, _ = ( ffmpeg.input(video_path) .output("pipe:", format="rawvideo", pix_fmt="rgb24") .run(capture_stdout=True, quiet=True) ) return np.frombuffer(stdout, np.uint8).reshape(x.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n\n def compress_video(x: np.ndarray, video_format: str, constant_rate_factor: int, dir_: str = \"\"):\n \"\"\"\n Apply video compression to video input of shape (frames, height, width, channel).\n \"\"\"\n import ffmpeg\n\n video_path = os.path.join(dir_, f\"tmp_video.{video_format}\")\n _, height, width, _ = x.shape\n\n # numpy to local video file\n process = (\n ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\", s=f\"{width}x{height}\")\n .output(video_path, pix_fmt=\"yuv420p\", vcodec=\"libx264\", crf=constant_rate_factor)\n .overwrite_output()\n .run_async(pipe_stdin=True, quiet=True)\n )\n process.stdin.write(x.flatten().astype(np.uint8).tobytes())\n process.stdin.close()\n process.wait()\n\n # local video file to numpy\n stdout, _ = (\n ffmpeg.input(video_path)\n .output(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\")\n .run(capture_stdout=True, quiet=True)\n )\n return np.frombuffer(stdout, np.uint8).reshape(x.shape)\n\n if x.ndim != 5:\n raise ValueError(\"Video compression can only be applied to spatio-temporal data.\")\n\n if self.channels_first:\n x = np.transpose(x, (0, 2, 3, 4, 1))\n\n # apply video compression per video item\n x_compressed = x.copy()\n with TemporaryDirectory(dir=config.ART_DATA_PATH) as tmp_dir:\n for i, x_i in enumerate(tqdm(x, desc=\"Video compression\", disable=not self.verbose)):\n x_compressed[i] = compress_video(x_i, self.video_format, self.constant_rate_factor, dir_=tmp_dir)\n\n if self.channels_first:\n x_compressed = np.transpose(x_compressed, (0, 4, 1, 2, 3))\n\n return x_compressed, y", "def video_encoding(self):\n self.output_file = outputs_filenames(self.input, self.output_file)\n\n if self.resume and (self.temp / 'done.json').exists():\n set_logging(self.logging, self.temp)\n else:\n setup(self.temp, self.resume)\n set_logging(self.logging, self.temp)\n print(self.queue)\n framenums = split_routine(self.input, self.scenes, self.split_method, self.temp, self.min_scene_len, self.queue, self.threshold)\n\n if self.extra_split:\n framenums = extra_splits(input, framenums, self.extra_split)\n\n segment(self.input, self.temp, framenums)\n extract_audio(input, self.temp, self.audio_params)\n\n chunk = get_video_queue(self.temp, self.resume)\n\n # Make encode queue\n commands, self.video_params = compose_encoding_queue(chunk, self.temp, self.encoder, self.video_params, self.ffmpeg_pipe, self.passes)\n log(f'Encoding Queue Composed\\n'\n f'Encoder: {self.encoder.upper()} Queue Size: {len(commands)} Passes: {self.passes}\\n'\n f'Params: {self.video_params}\\n\\n')\n\n self.workers = determine_resources(self.encoder, self.workers)\n\n self.encoding_loop(commands)\n\n try:\n concatenate_video(self.temp, self.output_file, keep=self.keep)\n\n except Exception as e:\n _, _, exc_tb = sys.exc_info()\n print(f'Concatenation failed, FFmpeg error\\nAt line: {exc_tb.tb_lineno}\\nError:{str(e)}')\n log(f'Concatenation failed, aborting, error: {e}\\n')\n terminate()\n\n if self.vmaf:\n plot_vmaf(self.input, self.output_file, model=self.vmaf_path)", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = 
VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def compress_video(\n original_video: Union[str, os.PathLike],\n original_video_name: Union[str, os.PathLike],\n outdir: Union[str, os.PathLike],\n ctx: click.Context) -> None:\n try:\n import ffmpeg\n except (ModuleNotFoundError, ImportError):\n ctx.fail('Missing ffmpeg! Install it via \"pip install ffmpeg-python\"')\n\n print('Compressing the video...')\n resized_video_name = os.path.join(outdir, f'{original_video_name}-compressed.mp4')\n ffmpeg.input(original_video).output(resized_video_name).run(capture_stdout=True, capture_stderr=True)\n print('Success!')", "def make_video(data,\n xdim, ydim, sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, xdim], dtype=np.uint8)\n frame_image[image_write_rows, image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')", "def resizeVideo(n, format, vpath, cpath):\r\n start_time = time.time()\r\n t = time.process_time()\r\n vidcap = cv2.VideoCapture(vpath)\r\n success, image = vidcap.read()\r\n cv2.namedWindow('image')\r\n cv2.imshow('image', image)\r\n cv2.waitKey(1)\r\n count = 0\r\n\r\n CODE = 'XVID'\r\n # default save to avi\r\n\r\n CODE1 = 'XVID'\r\n format1 = '.avi'\r\n CODE2 = 'WMV1' # OR WMV2\r\n format2 = '.wmv'\r\n CODE3 = 'FLV1'\r\n format3 = '.flv'\r\n CODE4 = 'MPEG'\r\n format4 = '.mp4'\r\n\r\n if (format == format1):\r\n CODE = CODE1\r\n if (format == format2):\r\n CODE = CODE2\r\n if (format == format3):\r\n CODE = CODE3\r\n if (format == format4):\r\n CODE = CODE4\r\n if format == '':\r\n CODE = CODE1\r\n format = '.avi'\r\n print(\"default save the resized video to .avi\")\r\n\r\n # fourcc used for saving videos\r\n fourcc = cv2.VideoWriter_fourcc(*CODE)\r\n # video saved to the same path as the capatured frame\r\n out = cv2.VideoWriter((str(cpath) + 'ResizedVideo%d' % n + format), fourcc, vidcap.get(5), (480, 480))\r\n infotxt = open(cpath + 'Resize Info' + 
'.txt', 'w')\r\n infotxt.write(vpath + '\\n')\r\n print(\"Resizing...\")\r\n\r\n while success:\r\n if success:\r\n resize = cv2.resize(image, (480, 480), interpolation=cv2.INTER_LINEAR)\r\n # frame name save as Frame%5d.jpg\r\n cv2.imwrite((str(cpath) + \"Frame%05d.jpg\" % count), resize)\r\n\r\n # write resized frame to saved video\r\n out.write(resize)\r\n\r\n cv2.imshow('image', resize)\r\n\r\n # print converage rate of the frame\r\n end_time = time.time()\r\n executeTime = end_time - start_time\r\n converageRate = executeTime / (count + 1)\r\n infotxt.write('converage rate is: %f' % converageRate + 'f/s' + '\\n')\r\n\r\n cv2.waitKey(1)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n # check whether capture finished\r\n\r\n success, image = vidcap.read()\r\n count += 1\r\n infotxt.write('Resizing Completed')\r\n print(\"Resizing Completed\")\r\n\r\n end_time = time.time()\r\n executeTime = end_time - start_time\r\n infotxt.close()\r\n printInfo.printInfo(executeTime, vidcap, cpath)\r\n\r\n cv2.destroyAllWindows()\r\n return executeTime", "def run(input_video_file, output_video_file):\n print(\"Debut de la transformation du format de la video\")\n #récupération de la vidéo\n video = cv2.VideoCapture(input_video_file)\n #fps de la vidéo\n fps = video.get(cv2.CAP_PROP_FPS)\n #largeur des images de la vidéo\n width_video = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n #hauteur des images de la vidéo\n height_video = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n #nombre d'images dans la vidéo\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n #durée de la vidéo\n duration = frame_count/fps\n #nouvelle durée de la vidéo (on arrondi)\n new_duration = math.floor(duration)\n #nouveau fps de la vidéo\n new_fps = float(round(fps))\n #appliquer le nouveau fps\n video.set(cv2.CAP_PROP_FPS,new_fps)\n #appliquer la nouvelle durée\n print(new_duration)\n print(new_fps)\n print(new_duration*new_fps)\n new_frame_count = new_duration*new_fps\n video.set(cv2.CAP_PROP_FRAME_COUNT,new_duration*new_fps)\n #déffinition du format de la vidéo en sortie\n video_out = cv2.VideoWriter(output_video_file,0x7634706d,new_fps,(width_video,height_video),True)\n \n count = 0\n #ouverture de la vidéo\n while(video.isOpened()):\n #lecture image par image\n ret, frame = video.read()\n if ret==True:\n\n #ecriture de l'image dans la vidéo en sortie\n video_out.write(frame)\n count = count + 1\n \n if (count > (new_frame_count-1)):\n # Libérer la vidéo\n video.release()\n break\n else:\n break\n\n print(\"fin de la transformation\")\n #fermer les vidéos\n video.release()\n video_out.release()", "def run_video(self, video_path):\n file, ext = os.path.splitext(video_path)\n video_name = file.split('/')[-1]\n out_filename = video_name + '_out' + '.avi'\n\n cap = cv2.VideoCapture(video_path)\n wi = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n he = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(wi, he)\n\n vwriter = cv2.VideoWriter(out_filename, cv2.VideoWriter_fourcc(*'MJPG'), 10, (wi, he))\n counter = 0\n fac = 2\n start = time.time()\n while True:\n ret, image = cap.read()\n\n if ret:\n counter += 1\n\n ## resize image\n\n height, width, channels = image.shape\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n resized_image = cv2.resize(image, target_size, interpolation=cv2.INTER_AREA)\n output = resized_image.copy()\n\n ## get segmentation map\n batch_seg_map = self.sess.run(\n 
self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n ## visualize\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n\n ## overlay on image\n alpha = 0.7\n cv2.addWeighted(seg_image, alpha, output, 1 - alpha, 0, output)\n\n output = cv2.resize(output, (wi, he), interpolation=cv2.INTER_AREA)\n # outimg = 'image_' + str(counter) + '.jpg'\n # cv2.imwrite(os.path.join(os.getcwd(), 'test_out', outimg),output)\n vwriter.write(output)\n else:\n break\n\n end = time.time()\n print(\"Frames and Time Taken: \", counter, end - start)\n cap.release()\n vwriter.release()", "def create_video():\n print(\"Generating output video\")\n frame_array = []\n files = [f for f in os.listdir(MODIFIED_FRAMES_DIR) if isfile(join(MODIFIED_FRAMES_DIR, f))]\n #for sorting the file names properly\n # files.sort(key = lambda x: x[3:-4])\n files = sorted(files,key=lambda x: int(os.path.splitext(x)[0]))\n for i in range(len(files)):\n filename= MODIFIED_FRAMES_DIR + files[i]\n # print(filename)\n #reading each files\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width,height)\n \n #inserting the frames into an image array\n frame_array.append(img)\n \n out = cv2.VideoWriter(OUTPUT_FILE,cv2.VideoWriter_fourcc(*'DIVX'), FRAME_RATE, size)\n for i in range(len(frame_array)):\n # writing to a image array\n out.write(frame_array[i])\n out.release()\n print(\"Output video generated successfully...\")\n\n # img_array = []\n # for filename in glob.glob(MODIFIED_FRAMES_DIR+'/*.jpg'):\n # img = cv2.imread(filename)\n # height, width, layers = img.shape\n # size = (width,height)\n # img_array.append(img)\n\n # height, width, layers = img_array[0].shape\n # size = (width,height)\n # out = cv2.VideoWriter('output.mov',cv2.VideoWriter_fourcc(*'DIVX'), 15, size) \n # for i in range(len(img_array)):\n # out.write(img_array[i])\n # out.release()", "def frames_to_video(frames, path, scale=2.0):\n\n width, height = frames.shape[-2:]\n width = int(width * scale)\n height = int(height * scale)\n fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n fps = 30\n out = cv2.VideoWriter(path, fourcc, fps, (width, height))\n for frame in frames:\n frame = cv2.merge([frame, frame, frame])\n if scale != 1:\n frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_LINEAR)\n out.write(frame.astype(np.uint8))\n\n out.release()", "def iblrig_video_compression(session_path, command):\n output_files = list(session_path.joinpath(\"raw_video_data\").rglob('_iblrig_*.mp4'))\n rig_avi_files = list(session_path.joinpath(\"raw_video_data\").rglob('_iblrig_*.avi'))\n # first compress everything (the rationale is not to delete anything if there is a crash)\n for file_in in rig_avi_files:\n _logger.info(f\" compressing {file_in}\")\n file_out = file_in.with_suffix('.mp4')\n status, fout = compress(file_in=file_in, file_out=file_out,\n command=command, remove_original=False)\n output_files.append(fout)\n # then remove everything\n for file_in in rig_avi_files:\n file_in.unlink()\n return output_files", "def convert_video_path_and_save(video_path, output_path=\"output.mp4\", temp_folder = \"./temp\",\n frame_frequency=24, image_reducer=100, fontSize=10, spacing=1.1, maxsize=None, chars=\" .*:+%S0#@\",\n logs=False, processes=4, progress_tracker=None):\n\n if logs:\n start_time = time.time()\n print (\"Converting video...\")\n \n # set up a capture temporarily so we can grab some basic info about it\n capture = cv2.VideoCapture(video_path)\n if not 
capture.isOpened():\n print (\"Could not read video. Please enter a valid video file!\")\n exit(0)\n\n fps = capture.get(cv2.CAP_PROP_FPS)\n bitrate = int(capture.get(cv2.CAP_PROP_BITRATE))\n total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n frames_included = int(total_frames / frame_frequency)\n # total_frames / fps gives us our video duration.\n video_duration = total_frames / fps\n # frames included / video duration gives new fps\n new_fps = (total_frames / frame_frequency) / video_duration\n\n capture.release()\n\n # First, we grab all the frames we need and store them in a temp folder\n # After that, we convert all the image frames in the temp folder, and save them back in the temp folder\n # Then, we write them to video and save to disk\n # To utilize mutli processing, we separate grabbing frames and converting the frames into batches\n\n while os.path.isdir(temp_folder):\n temp_folder += \"_\"\n temp_folder += \"/\"\n os.mkdir(temp_folder)\n\n # initial setup\n # we divide our work into batches\n batches = processes\n frames_per_batch = int(total_frames / batches / frame_frequency)\n if progress_tracker is None:\n progress_tracker = Value(\"f\", 0, lock=True)\n # progress: saved frames + converted frames + written frames\n progress_step = 100 / (frames_included * 3)\n\n # grab the frames, and write to separate batch folders\n save_frames_processes = []\n for batch in range(batches):\n starting_frame = batch * frames_per_batch * frame_frequency\n batch_folder = temp_folder + str(batch) + \"/\"\n os.mkdir(batch_folder)\n args = (\n starting_frame,\n starting_frame + frames_per_batch * frame_frequency,\n video_path,\n batch_folder,\n frame_frequency,\n logs,\n progress_tracker,\n progress_step\n )\n p = Process(target=_save_frames, args=args)\n p.daemon = True\n p.start()\n save_frames_processes.append(p)\n for p in save_frames_processes:\n p.join()\n\n # convert all the frames in each batch folder\n convert_processes = []\n for batch in range(batches):\n batch_folder = temp_folder + str(batch) + \"/\"\n args = (\n batch_folder,\n frames_per_batch,\n image_reducer,\n fontSize, spacing, maxsize, chars,\n logs, progress_tracker, progress_step\n )\n p = Process(target=_convert_batch, args=args)\n p.daemon = True\n p.start()\n convert_processes.append(p)\n for p in convert_processes:\n p.join()\n\n # if no extension was assigned, automatically assign .mp4\n output_name, output_ext = os.path.splitext(output_path)\n if output_ext == \"\":\n output_ext = \".mp4\"\n # if final output path was specified, then modify it (append _Copy to it)\n final_output_path = output_name + output_ext\n while os.path.isfile(final_output_path):\n if logs : print (final_output_path, \"already exists!\")\n final_output_path = os.path.splitext(final_output_path)[0] + \"_Copy\" + output_ext\n\n # video settings\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video_out = imageio.get_writer(final_output_path, fps=new_fps, quality=None, bitrate=(bitrate * 1024 * 2.5))\n size = None\n\n # write images to new video\n for batch in range(1, batches + 1):\n batch_folder = temp_folder + str(batch - 1) + \"/\"\n for i in range(1, frames_per_batch + 1):\n img = cv2.imread(batch_folder + str(i) + \".jpg\", 2)\n if size is None:\n height, width = img.shape\n size = (width, height)\n video_out.append_data(img)\n with progress_tracker.get_lock():\n progress_tracker.value += progress_step\n if logs : print (\"Progress: %.4f%%\" % progress_tracker.value, end=\"\\r\")\n video_out.close()\n shutil.rmtree(temp_folder)\n\n # 
when we are done, there might be some rounding errors when converting some stuff to integers, thus it doesn't appear to be done\n # So we just simply set it to 100\n with progress_tracker.get_lock():\n progress_tracker.value = 100\n\n if logs:\n print (\"=\" * 30)\n print (\"SUMMARY:\")\n print (\"-\" * 20)\n print (\"Progress: %.4f%%\" % progress_tracker.value)\n print (\"Total frames found:\", str(total_frames))\n print (\"Frames included and converted:\", str(frames_per_batch * batches))\n print (\"Original FPS:\", str(fps))\n print(\"New FPS:\", str(new_fps))\n print (\"Resolution:\", str(size))\n print (\"Saved to\", final_output_path)\n print (\"Time took: %.4f secs\" % (time.time() - start_time))", "def process_video(input_file, output_file):\n with open('all-features-rbf-svm.p', 'rb') as svm_fd:\n clf = pickle.load(svm_fd)\n with open('all-features-scaler.p', 'rb') as scaler_fd:\n hog_scaler = pickle.load(scaler_fd)\n hog_parameters = HogParameters(orientations=18, pixels_per_cell=8, cells_per_block=2)\n clip = VideoFileClip(input_file)\n test_clip = clip.fl_image(\n lambda frame: process_frame(frame, clf=clf, norm_scaler=hog_scaler, hog_parameters=hog_parameters, spatial_size=(16, 16), hist_bins=32))\n test_clip.write_videofile(output_file, audio=False)", "def write_video(frames, filename, fps=20):\n \n # On Mac systems, copy ffmeg binaries to your PATH (http://ffmpegmac.net/)\n \n if platform.system() == 'Windows':\n err_str = 'Don\\'t know how to write a movie for %s platform' % platform.system()\n raise NotImplementedError(err_str)\n\n \n if len(frames.shape) == 4:\n pix_fmt = 'rgb24'\n else:\n pix_fmt = 'gray'\n \n # normalize\n max_pix_val = np.percentile(frames, 99.9)\n if frames.dtype in (np.bool, bool):\n frames = frames.astype(np.uint8)\n frames -= frames.min()\n frames[frames>max_pix_val] = max_pix_val\n if max_pix_val > 0:\n frames *= 255. 
/ max_pix_val\n frames = frames.astype(np.uint8)\n \n # figure out which av program is installed\n program_name = ''\n try:\n subprocess.check_call(['avconv', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'avconv'\n except OSError:\n try:\n subprocess.check_call(['ffmpeg', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'ffmpeg'\n except OSError:\n pass\n if not program_name:\n raise OSError('Can\\'t find avconv or ffmpeg')\n \n # prepare pipe to av converter program\n size_str = '%ix%i' % (frames.shape[1], frames.shape[2])\n cmd = [program_name,\n '-y', # (optional) overwrite output file if it exists\n '-f', 'rawvideo',\n '-vcodec','rawvideo',\n '-s', size_str, # size of one frame\n '-pix_fmt', pix_fmt,\n '-r', str(fps), # frames per second\n '-i', '-', # input comes from a pipe\n '-an', # no audio\n '-qscale', '1',\n '-vcodec','mjpeg',\n filename]\n \n pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=DEVNULL, stderr=subprocess.STDOUT)\n \n # write frames \n for frame in frames:\n frame = np.fliplr(frame)\n pipe.stdin.write(frame.tostring())\n pipe.stdin.close()\n pipe.wait()", "def saveFramesToVideo(frames, videoPath): \n fourcc = cv2.VideoWriter_fourcc('a','v','c','1')\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n [height,width]=frames[0].shape[0:2]\n writer = cv2.VideoWriter(videoPath, fourcc, 30, (width, height), 1)\n for i in range(frames.shape[0]):\n frameBGR = yiq2bgrUint(frames[i])\n writer.write(frameBGR)\n writer.release()", "def process_video(self, input_path, output_path, debug=False):\n clip = VideoFileClip(input_path)\n if debug:\n test_clip = clip.fl_image(self.process_image_debug)\n else:\n test_clip = clip.fl_image(self.process_image)\n test_clip.write_videofile(output_path)", "def adorn_video(self, iterator):\n return iterator \\\n | select(lambda f: f + (\n video_to_npy(f[1],\n # note weird thing here, width doesn't work they appear to be inverted\n height=self.video_size,\n squarecrop=self.squarecrop,\n fps=self.framerate,\n maxlength=self.max_length,\n # save a npy replacement\n outfile=self.get_numpy_filename(f[1]),\n use_cache=self.use_cache\n ),))", "def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))", "def generate_video(image_folder, video_name, video_frames_path):\n \n try:\n os.stat(video_frames_path)\n except:\n os.makedirs(video_frames_path)\n \n images = [img for img in os.listdir(image_folder)\n if img.endswith(\".jpg\") or\n img.endswith(\".jpeg\") or\n img.endswith(\"png\") or\n img.endswith(\"tif\")]\n\n images.sort()\n\n print(images)\n\n frame = cv2.imread(os.path.join(image_folder, images[0]))\n\n height, width, layers = frame.shape\n\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n video = cv2.VideoWriter(video_frames_path + '/' + video_name, fourcc, 1, (width, height))\n\n # Appending the images to the video one by one\n video_frame = np.zeros((height, width, 3), np.uint8)\n for image in images:\n img = cv2.imread(os.path.join(image_folder, image), cv2.IMREAD_UNCHANGED)\n 
video_frame = overlay_transparent(video_frame, img)\n cv2.imwrite(os.path.join(video_frames_path, image), video_frame)\n video.write(video_frame)\n\n # Deallocating memories taken for window creation\n cv2.destroyAllWindows()\n video.release() # releasing the video generated", "def encode (self, frames, fps, destinationPath = None, preset = None):\n # generate a file name hash by source frames names fps and preset.\n hc = hash (\"video\", \"h264\", \"mp4\", fps, preset)\n for frame in frames:\n hc = hash (hc, str (pathlib.Path (frame).resolve ()))\n\n # check if file is already in cache\n cachePath = pathlib.Path (cache.persistentPath (hc, self.extension ())).resolve ()\n if cachePath.exists ():\n # return cached file or create copy\n if destinationPath == None:\n return str (cachePath)\n else:\n try:\n copyfile (cachePath, pathlib.Path (destinationPath))\n except:\n return None\n return str (destinationPath)\n\n # video doesn't exist, create it...\n\n # Encode via parent encoder (get avi file path)\n preEncoded = AviH264.encode (frames, fps, None, preset)\n\n # create temp working directory\n tempDir = cache.temporary ()\n os.makedirs (tempDir)\n\n # symlink video into temporary directory\n os.symlink (preEncoded, tempDir + os.path.sep + 'input.avi')\n\n # process inside temporary directory\n lastDir = os.path.abspath (os.curdir)\n os.chdir (tempDir)\n\n # TODO:\n silent = True\n\n # unpack h264 stream\n unpackCommand = [_MP4H264Encoder._getMP4BoxRunnable (), \"-aviraw\", \"video\", 'input.avi']\n result = subprocess.run (unpackCommand, capture_output=silent)\n if result.returncode != 0:\n if silent:\n print (result.stderr)\n print (result.stdout)\n try:\n os.chdir (lastDir)\n rmtree (tempDir)\n except:\n pass\n return None\n\n # temporary output file\n cacheFileTemp = \"output.mp4\"\n\n # pack mp4 file\n packCommand = [_MP4H264Encoder._getMP4BoxRunnable (), \"-add\", \"input_video.h264\", cacheFileTemp]\n result = subprocess.run (packCommand, capture_output=silent)\n if result.returncode != 0:\n if silent:\n print (result.stderr)\n print (result.stdout)\n try:\n os.chdir (lastDir)\n rmtree (tempDir)\n except:\n pass\n return None\n\n # copy to cache\n cacheFile = cache.persistentPath (hc, self.extension (), True)\n os.rename (cacheFileTemp, cacheFile)\n\n # leave & remove temporary directory\n try:\n os.chdir (lastDir)\n rmtree (tempDir)\n except:\n pass\n\n # need to copy to output file?\n if destinationPath == None:\n return str (cacheFile)\n else:\n try:\n copyfile (cacheFile, pathlib.Path (destinationPath))\n except:\n return None\n return str (destinationPath)", "def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # 
Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... 
but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()", "def convert(processed_dir: str, video_file: str):\n\n video_name = osp.splitext(osp.basename(video_file))[0]\n out_dir = processed_dir + video_name\n\n # create img dir\n if not osp.exists(processed_dir):\n os.mkdir(processed_dir)\n\n # Create dir for video file if not existent\n # this is where we save our images\n if not osp.exists(out_dir):\n os.mkdir(out_dir)\n\n if osp.exists(out_dir):\n os.mkdir(out_dir + \"/kermit/\")\n os.mkdir(out_dir + \"/not_kermit/\")\n\n # open video file for processing\n cap = cv.VideoCapture(video_file)\n frame_rate = cap.get(5) # frame rate\n\n sec = 0\n total_count = (60*25)+50 # just an approximation\n pbar = tqdm.tqdm(total=total_count, leave=False)\n\n count = 0\n while (cap.isOpened()):\n frame_id = cap.get(1) # current frame number\n frame_exists, curr_frame = cap.read()\n\n if not frame_exists:\n break\n else:\n if (frame_id % math.floor(frame_rate) == 0):\n # output is : video_file/<video_file>_frameNr.jpg\n cv.imwrite(osp.join(out_dir, '{}_{}.jpg'.format(video_name,count)), curr_frame)\n count = count + 1\n pbar.update(1)\n\n pbar.close()\n # release resources\n cap.release()", "def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path=\"video.mp4\"):\n if isinstance(input_files, list):\n from PIL import Image # pylint: disable=C0415\n\n with Image.open(input_files[0]) as img:\n width, height = img.size\n tmp_dir = \"tmp_ffmpeg_dir\"\n os.mkdir(tmp_dir)\n if width % 2 != 0:\n print(f\"Width ({width}) not divisible by 2\")\n width -= 1\n if height % 2 != 0:\n print(f\"Height ({width}) not divisible by 2\")\n height -= 1\n for i, inp in enumerate(input_files):\n shutil.copy(inp, os.path.join(tmp_dir, f\"{i:06d}.png\"))\n inputs = f\"{tmp_dir}/%06d.png\"\n command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n for i in range(len(input_files)):\n os.remove(os.path.join(tmp_dir, f\"{i:06d}.png\"))\n os.rmdir(tmp_dir)\n elif isinstance(input_files, str):\n assert width != 0 and height != 0\n command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n else:\n assert (\n False\n ), f'input_files should be list (of files) or str (of file template, e.g., \"%04d.png\") instead of {type(input_files)}'", "def stream_frames(video_capture):", "def encode_videos(self):\n\t\t\n\t\tself.face_detector = MTCNN()\n\t\tencoder = MyVGGFace(self.vgg_l, self.vgg_v)\n\t\t\n\t\tfolder = self.folders['raw_video_folder']\n\t\t\n\t\tfor (dirpath, _, filenames) in os.walk(folder):\n\t\t\tif platform == 'linux' or platform == 'linux2' or platform == 'darwin':\n\t\t\t\t# linux and OSX\n\t\t\t\tsplit_path = dirpath.split('/')\n\t\t\telse:\n\t\t\t\t# windows\n\t\t\t\tsplit_path = dirpath.split('\\\\')\n\t\t\tif filenames:\n\t\t\t\tif self.options.verbose:\n\t\t\t\t\tprint(f'Extracting features from {dirpath}')\n\t\t\t\tfor file in progressbar.progressbar(filenames):\n\t\t\t\t\tencode_path = 
(f'{self.feature_folder}/{split_path[-2]}', f'{file[:14]}.pic')\n\t\t\t\t\tcoord_path = (f'{self.folders[\"facial_data\"]}', f'{file[:14]}.pic')\n\t\t\t\t\tif file.endswith('.mp4') and not os.path.exists(f'{encode_path[0]}/{encode_path[1]}'):\n\t\t\t\t\t\tfaces, coords = self.video_faces(f'{dirpath}/{file}', f'{coord_path[0]}/{coord_path[1]}')\n\t\t\t\t\t\tencoding = encoder.vggface_encoding(faces)\n\t\t\t\t\t\tsave_to_file(coord_path[0], coord_path[1], coords)\n\t\t\t\t\t\tsave_to_file(encode_path[0], encode_path[1], encoding.reshape(encoding.shape[0], -1))\n\t\t\t\t\t\tdel faces, encoding\n\t\t\t\t\t\tgc.collect()", "def save_video(video, video_path, dim=None, framerate=3):\n vid_data = []\n writer = skvideo.io.FFmpegWriter(video_path, inputdict={'-framerate':str(framerate)})\n for frame in video:\n if not dim is None:\n frame = scipy.misc.imresize(frame, dim)\n writer.writeFrame(frame)\n writer.close()", "def process_video(filename, args, cfg, net):\n # Split video into frames\n images = split_video(filename)\n # Set output dir\n output_dir = args.output\n # Add brackets and extension to filename\n output_path = create_video_output_path(output_dir, cfg)\n # Get height and width of 1st image\n height, width, _ = check_img_size(images[0]).shape\n # Create VideoWriter object\n video = cv2.VideoWriter(output_path, \n cv2.VideoWriter_fourcc(*'FMP4'), \n cfg['video']['fps'], \n (width, height))\n for image in images:\n # Process frames\n img_steps = process_image(image, cfg, net)\n # Check for --show-detections flag\n output_img = check_if_adding_bboxes(args, img_steps) \n # Write to video\n video.write(output_img) \n # Release video writer object\n video.release()", "def tagVideo(modelpath, videopath, outputPath=None): \n model = get_model_instance_segmentation(3)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # model.load_state_dict(torch.load(modelpath, map_location=device), strict=False)\n model.load_state_dict(torch.load(modelpath, map_location=device))\n model = model.to(device)\n model.eval()\n\n \n data_transform = transforms.Compose([\n ToPILImage(),\n transforms.ToTensor(), \n ])\n\n\n if outputPath:\n writer = FFmpegWriter(str(outputPath))\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.namedWindow('main', cv2.WINDOW_NORMAL)\n labels = ['No mask', 'Mask']\n labelColor = [(10, 0, 255), (10, 255, 0)]\n img_count = 0\n outputDir = os.path.dirname(os.path.realpath(outputPath))\n frame_count = 0\n boundingBoxes = []\n for frame in vreader(str(videopath)):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n print('Frame:', frame_count)\n\n if frame_count%30==0:\n frameTensor = data_transform(frame)\n frameTensor = torch.unsqueeze(frameTensor, 0).to(device)\n output = model(frameTensor)\n boundingBoxes = plot_image_new(frame, frameTensor[0], output[0]) \n \n if len(boundingBoxes)>0:\n for bb in boundingBoxes:\n cv2.rectangle(frame,\n (bb[0], bb[1]),\n (bb[2], bb[3]),\n (54, 66, 227),\n thickness=2)\n\n cv2.imshow('main', frame)\n if outputPath:\n writer.writeFrame(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n if outputPath:\n writer.close()\n cv2.destroyAllWindows()", "def process_video(lane, fname, output):\n\tclip = VideoFileClip(fname)\n\toutput_name = output\n\toutput_clip = clip.fl_image(lane.pipeline)\n\toutput_clip.write_videofile(output_name, audio=False)\n\tprint ('Video processed successfully')" ]
[ "0.7005404", "0.65108484", "0.65095425", "0.65095425", "0.6496657", "0.62425435", "0.617896", "0.61784387", "0.6174512", "0.6041225", "0.6038537", "0.60358", "0.6024578", "0.6021074", "0.60200936", "0.6014781", "0.59937036", "0.5989535", "0.59697646", "0.5966458", "0.59648705", "0.5929175", "0.5897293", "0.5885972", "0.5885563", "0.5849206", "0.58489215", "0.5829221", "0.5828971", "0.5820073" ]
0.67326015
1
Retrieve lessons from the lesson pages.
def get_lessons(lesson_id):
    url = '{0}?cat={1}'.format(BASE_URL, lesson_id)
    page = requests.get(url, verify=False)
    soup = BeautifulSoup(page.content)
    output = []
    for item in soup.find(id='playlist').findAll('dd'):
        video_id = item.find('a')['href'].split('=')[-1]
        title = item.find('a').text
        output.append({
            'title': title,
            'lesson_id': lesson_id,
            'video_id': video_id})
    return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lessons(self, course: str):\n\n lesson_link: Any = self.courses[course][\"link\"]\n lesson_data = self._parse_lesson(lesson_link)\n # self.courses[course][\"lessons\"] = lesson_data\n self.lessons = lesson_data", "def get_lessons(course_id, lesson=None):\n lesson_list = []\n if lesson is None:\n lesson_list = Lesson.objects.filter(\n id__in=get_root_lesson_ids(course_id))\n else:\n lesson_list = lesson.get_children()\n result = []\n for lesson_item in lesson_list:\n result.append({\n 'id': lesson_item.id,\n 'title': lesson_item.title,\n 'needs_feedback': needs_feedback(lesson_item, course_id),\n 'mandatory': lesson_item.mandatory,\n })\n return result", "def get_all_lessons(module) -> list:\n from core.models import DetailPage, TopicPage\n\n return [\n lesson\n for lesson in DetailPage.objects.live().specific().descendant_of(module)\n if isinstance(lesson.get_parent().specific, TopicPage)\n ]", "async def get_lessons(\n self,\n last_sync: datetime = None,\n deleted=False,\n date_from=None,\n date_to=None,\n **kwargs,\n ) -> Union[AsyncIterator[Lesson], List[int]]:\n return Lesson.get(self._api, last_sync, deleted, date_from, date_to, **kwargs)", "def test_get_skills_multiple_lessons(self):\n skill_graph = SkillGraph.load()\n\n skill_1 = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))\n unit = self.course.add_unit()\n unit.title = 'Test Unit'\n lesson1 = self.course.add_lesson(unit)\n lesson1.title = 'Test Lesson 1'\n lesson2 = self.course.add_lesson(unit)\n lesson2.title = 'Test Lesson 2'\n self.course.save()\n lesson1.properties[SKILLS_KEY] = [skill_1.id]\n lesson2.properties[SKILLS_KEY] = [skill_1.id]\n self.course.save()\n\n actions.login(ADMIN_EMAIL)\n response = transforms.loads(self.get(self.URL).body)\n self.assertEqual(200, response['status'])\n\n skills = transforms.loads(response['payload'])['skills']\n self.assertEqual(1, len(skills))\n # All lessons listed\n self.assertEqual(2, len(skills[0]['lessons']))", "def _create_lessons(self):\n self.unit = self.course.add_unit()\n self.unit.title = 'Test Unit'\n self.lesson1 = self.course.add_lesson(self.unit)\n self.lesson1.title = 'Test Lesson 1'\n self.lesson2 = self.course.add_lesson(self.unit)\n self.lesson2.title = 'Test Lesson 2'\n self.lesson3 = self.course.add_lesson(self.unit)\n self.lesson3.title = 'Test Lesson 3'\n self.unit2 = self.course.add_unit()\n self.unit.title = 'Test Unit 2'\n self.lesson4 = self.course.add_lesson(self.unit2)\n self.lesson4.title = 'Test Lesson 4'", "def _parse_lesson(self, lesson_link: str):\n\n lesson_page: Soup = self._get_soup(lesson_link)\n lesson_content: NavigableString = lesson_page.find(\"div\", \n {\"class\": \"moduleContent\"})\n course_divs: ResultSet = lesson_content.find_all(\"div\")\n\n data = {}\n module = \"\"\n for div in course_divs:\n if div[\"class\"][0] == \"courseModule\":\n module = div.p.get_text().split(\":\")[-1].strip()\n data[module] = []\n elif div[\"class\"][0] == \"courseLesson\":\n lesson_title = div.find(\"span\", {\"class\": \"courseLessonTitle\"})\n data[module].append(lesson_title.get_text().strip())\n \n return data", "def get_course_all_slugs(self):\n\n unit_lessons_counter = 0\n # Unit Page -> Subunit Header + Subunit Block -> Lesson Block -> Lesson Title\n for course_unit_url, course_unit_slug in zip(\n self.course_unit_urls, self.course_unit_slugs\n ):\n\n unit_lessons_counter = 0\n # -> Unit Page\n try:\n course_unit_page = BeautifulSoup(\n requests.get(ROOT_URL + course_unit_url).text, \"lxml\"\n )\n except requests.ConnectionError as e:\n 
print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n sys.exit(1)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n sys.exit(1)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n sys.exit(1)\n except requests.exceptions.RequestException as err:\n print(\"OOps: Something Else\", err)\n sys.exit(1)\n\n subunit_couter = 0\n\n # -> Subunit Header -> Subunit Block\n for course_subunit_title, course_subunit_body in zip(\n course_unit_page.find_all(attrs=COURSE_SUBUNIT_TITLE_ATTRS),\n course_unit_page.find_all(\n COURSE_SUBUNIT_BODY[\"tag\"], class_=COURSE_SUBUNIT_BODY[\"class\"]\n ),\n ):\n\n logging.debug(\"course_subunit_title:{}\".format(course_subunit_title))\n lesson_counter = 0\n # -> Lesson Block\n for course_lesson_body in course_subunit_body.find_all(\n COURSE_LESSON_BODY[\"tag\"],\n {\n \"class\": [\n COURSE_LESSON_BODY[\"class_i\"],\n COURSE_LESSON_BODY[\"class_ii\"],\n ]\n },\n ):\n course_lesson_span = course_lesson_body.find_all(\n COURSE_LESSON_SPAN[\"tag\"], class_=COURSE_LESSON_SPAN[\"class\"]\n )\n course_lesson_aria_label = course_lesson_span[0][\n COURSE_LESSON_LABEL\n ]\n logging.debug(\n \"course_lesson_aria_label:{}\".format(course_lesson_aria_label)\n )\n # -> Lesson Title\n # Check whether lesson block is a video\n if course_lesson_aria_label == \"Video\":\n lesson_title = course_lesson_body.find(\n COURSE_LESSON_TITLE[\"tag\"],\n class_=COURSE_LESSON_TITLE[\"class\"],\n )\n\n logging.debug(\n \"course_lesson_title:{}\".format(lesson_title.text)\n )\n self.lesson_titles.append(lesson_title.text)\n self.course_all_slugs.append(\n self.output_rel_path\n + course_unit_slug\n + \"/\"\n + str(subunit_couter)\n + \"_\"\n + course_subunit_title.text.replace(\" \", \"_\")\n + \"/\"\n + str(lesson_counter)\n + \"_\"\n + lesson_title.text.replace(\" \", \"_\")\n )\n\n lesson_counter += 1\n unit_lessons_counter += lesson_counter\n subunit_couter += 1\n self.unit_slugs_counter[course_unit_url] = unit_lessons_counter\n logging.info(\"Course - All slugs generated\")", "def showLesson(self):\n lesson = \"\"\n lesson += self.__title + \"\\n\\n\"\n lesson += self.__description + \"\\n\"\n lesson += self.__lesson_text + \"\\n\"\n lesson += self.getLink()\n return lesson", "def lesson(lesson, page, solution=None):\n\n lesson_url, subpage_url, static_url = relative_url_functions(request.path, None, lesson)\n\n page = lesson.pages[page]\n\n content = page_content(lesson, page, solution=solution, lesson_url=lesson_url,\n subpage_url=subpage_url,\n static_url=static_url)\n\n content = content[\"content\"]\n allowed_elements_parser.reset_and_feed(content)\n\n kwargs = {}\n if solution is not None:\n kwargs[\"solution_number\"] = int(solution)\n\n return render_template(\n \"lesson.html\",\n content=content,\n page=page,\n lesson=lesson,\n edit_info=get_edit_info(page.edit_path),\n title=page.title,\n **kwargs\n )", "def common_context(request):\n c = {\n 'lessons': get_lesson_numbers(),\n }\n return c", "def get_overview_pages(self):\n self.load_website()\n maxNumber = 1\n for pageIndex in self.soup.find_all('div', {'class':'paginate bg-muted'}):\n for link in pageIndex.find_all('a'):\n # try to convert string to number; if error it's not a number\n try:\n number = int(link.text)\n if number > maxNumber:\n maxNumber = number \n except ValueError:\n pass\n print('Screening complete: %d pages found - accessing first %s pages' % (maxNumber, 
self.maxPages))\n self.pages = [np.arange(1, maxNumber, 1)]", "def get_news(url, n_pages=1):\n news = []\n while n_pages:\n print(\"Collecting data from page: {}\".format(url))\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n news_list = extract_news(soup)\n next_page = extract_next_page(soup)\n url = \"https://news.ycombinator.com/\" + next_page\n news.extend(news_list)\n n_pages -= 1\n return news", "def get(self, id):\n\n lesson = Lesson.get_by_id(int(id))\n if lesson:\n t = jinja_env.get_template(\"lesson.html\")\n response = t.render(lesson=lesson)\n else:\n error = \"there is no lesson with id %s\" % id\n t = jinja_env.get_template(\"404.html\")\n response = t.render(error=error)\n\n self.response.out.write(response)", "def get_news(url, n_pages=1):\n news = []\n while n_pages:\n print(\"Collecting data from page: {}\".format(url))\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n news_list = extract_news(soup)\n print(news_list)\n next_page = extract_next_page(soup)\n url = 'https://news.ycombinator.com/' + next_page\n news.extend(news_list)\n n_pages -= 1\n return news", "def get_new_studies(self):\n\n msg = \"Retrieving clinical-trials RSS feed under URL '{}'.\"\n msg_fmt = msg.format(self.url_rss)\n self.logger.info(msg_fmt)\n\n entries = feedparser.parse(self.url_rss).get(\"entries\", [])\n\n # Iterate over the entries in the RSS feed.\n for entry in entries:\n # Retreive the study ID.\n nct_id = entry[\"id\"]\n # Aseemble the URL of the study.\n entry_url = self.url_study_template.format(nct_id=nct_id)\n\n msg = \"Retrieving clinical-trials study '{}' under URL '{}'.\"\n msg_fmt = msg.format(nct_id, entry_url)\n self.logger.info(msg_fmt)\n\n # Retrieve the clinical trial XML string.\n response = requests.get(url=entry_url)\n\n if response.ok:\n yield response.content", "def index(request):\n\n page_number = request.GET.get('page', 1)\n wikipages = utils.get_view_paginator(WikiPage, page_number, count=10,\n ordering='url')\n\n return render(request, 'index.html', {'title': 'Index',\n 'wikipages': wikipages})", "def get_course_page(self):\n\n print(\"Course URL: {}\".format(self.course_url))\n try:\n self.course_page = BeautifulSoup(requests.get(self.course_url).text, \"lxml\")\n except requests.ConnectionError as e:\n print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n sys.exit(1)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n sys.exit(1)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n sys.exit(1)\n except requests.exceptions.RequestException as err:\n print(\"Oops: Something Else\", err)\n sys.exit(1)", "def get_first_lesson(module):\n try:\n return get_all_lessons(module)[0]\n except IndexError:\n return None", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def _paginate(self) -> Iterable[List[str]]:\n req = self.html\n videos_lens = self._extractor(req)\n yield videos_lens # yielding doesn't mean that is the end\n\n # The above only returns 100 or fewer links\n # as Youtube loads 100 videos at a time\n # Simulating 
a browser request for the load more link\n load_more_url = self._find_load_more_url(req)\n\n while load_more_url: # there is an url found\n req = get(load_more_url)\n load_more = json.loads(req)\n try:\n html = load_more[\"content_html\"]\n except KeyError:\n return # if there is no content_html there is no chanch to find_load_more_url\n videos_lens = self._extractor(html)\n yield videos_lens\n\n load_more_url = self._find_load_more_url(\n load_more[\"load_more_widget_html\"],\n )\n\n return", "def overview():\n # TODO: fix ajax https://groups.google.com/d/msg/web2py/YyVilc2ywdg/ZLtN3Gg3Ft0J\n # TODO: fix ?plain link in results\n from plugin_introspect import get_task_code\n lesson = request.args[0] # controller with lesson contents\n # lesson = request.vars.lesson_controller # controller with lesson contents\n fun_names = exposed_functions_names( controller=lesson )\n exposed_functions = generate_exposed_functions_info( controller=lesson )\n examples_codes = [ get_task_code(code=exposed_functions[f]['code'], task_key=lesson+'/'+f, decorate=True) for f in fun_names ]\n results_urls = [ URL(lesson, f, vars=dict(plain=1)) for f in fun_names ]\n return response.render('tutor.html', dict(lesson=lesson, fun_names=fun_names, examples_codes=examples_codes, results_urls=results_urls) )", "def get_news(url, n_pages=1):\r\n news = []\r\n while n_pages:\r\n print(\"Collecting data from page: {}\".format(url))\r\n\r\n delay = 2\r\n max_retries = 5\r\n backoff_factor = 0.3\r\n for tryes in range(max_retries):\r\n try:\r\n response = requests.get(url)\r\n except requests.exceptions.RequestException:\r\n if tryes == max_retries - 1:\r\n raise\r\n else:\r\n break\r\n time.sleep(delay)\r\n delay = backoff_factor * (2 ** tryes)\r\n\r\n soup = BeautifulSoup(response.text, \"html.parser\")\r\n news_list = extract_news(soup)\r\n next_page = extract_next_page(soup)\r\n url = \"https://news.ycombinator.com/\" + next_page\r\n news.extend(news_list)\r\n n_pages -= 1\r\n return news", "def environmental_science_news():\n\n return general_scraper(['http://mesva.univaq.it/?q=avvisi/cl-clm/52671'])", "def techniques(self):\n return self._get_child_page_of_type(LearningTechniquesPage)", "def get_course_page_urls(self,soup):\n\t\tcourse_links =[]\n\t\troot_url = 'http://onlinelearning.cornell.edu'\n\t\tfor link in soup.select('span.field-content a[href]'):\n\t\t\tnew_url = root_url + link['href']\n\t\t\tcourse_links.append(new_url)\n\t\t\tcourse_links.append(' \\n')\n\t\t\n\t\tself.new_list.append(course_links)\n\t\treturn course_links", "def get(self, request):\n activities = (\n activitystreams.streams[\"local\"]\n .get_activity_stream(request.user)\n .filter(\n Q(comment__isnull=False)\n | Q(review__isnull=False)\n | Q(quotation__isnull=False)\n | Q(mention_books__isnull=False)\n )\n )\n\n large_activities = Paginator(\n activities.filter(mention_books__isnull=True)\n .exclude(content=None, quotation__quote=None)\n .exclude(content=\"\"),\n 6,\n )\n small_activities = Paginator(\n activities.filter(\n Q(mention_books__isnull=False) | Q(content=None) | Q(content=\"\")\n ),\n 4,\n )\n\n page = request.GET.get(\"page\")\n data = {\n \"large_activities\": large_activities.get_page(page),\n \"small_activities\": small_activities.get_page(page),\n }\n return TemplateResponse(request, \"discover/discover.html\", data)", "def get_lesson_url(self, node, state, request, **kwargs):\n course = state.get_data_attr('course')\n unitStatus = state.get_data_attr('unitStatus')\n ul = unitStatus.get_lesson()\n return 
ul.get_study_url(course.pk)", "def lesson_static_generator():\n for collection in model.collections.values():\n for lesson in collection.lessons.values():\n static = Path(lesson.path / \"static\").resolve()\n\n if not static.exists():\n continue\n\n yield from lesson_static_generator_dir(lesson.slug, static, static)", "def get(self):\n\n if not CourseOutlineRights.can_view(self):\n transforms.send_json_response(self, 401, 'Access denied.', {})\n return\n\n key = self.request.get('key')\n course = courses.Course(self)\n lesson = course.find_lesson_by_id(None, key)\n assert lesson\n\n fs = self.app_context.fs\n path = fs.impl.physical_to_logical(course.get_activity_filename(\n lesson.unit_id, lesson.lesson_id))\n if lesson.has_activity and fs.isfile(path):\n activity = fs.get(path)\n else:\n activity = ''\n\n payload_dict = {\n 'key': key,\n 'title': lesson.title,\n 'unit_id': lesson.unit_id,\n 'objectives': lesson.objectives,\n 'video': lesson.video,\n 'notes': lesson.notes,\n 'activity_title': lesson.activity_title,\n 'activity_listed': lesson.activity_listed,\n 'activity': activity,\n 'is_draft': not lesson.now_available\n }\n\n message = ['Success.']\n if self.request.get('is_newly_created'):\n message.append('New lesson has been created and saved.')\n\n transforms.send_json_response(\n self, 200, '\\n'.join(message),\n payload_dict=payload_dict,\n xsrf_token=XsrfTokenManager.create_xsrf_token('lesson-edit'))" ]
[ "0.7031197", "0.66613", "0.6472741", "0.6313995", "0.6297631", "0.5964357", "0.581034", "0.5612484", "0.55869335", "0.55612785", "0.54270446", "0.53654546", "0.5331647", "0.53290176", "0.53130364", "0.5286712", "0.52614826", "0.52531", "0.5216531", "0.5206971", "0.5196762", "0.51748294", "0.5168214", "0.5139042", "0.5133499", "0.5132674", "0.5129931", "0.5129632", "0.512176", "0.51154065" ]
0.6672646
1
Read kalman stats from file or from event info.json files. If ``from_info`` is True, read all individual event info files instead of the data file. This is also tried if the kalman stats file does not exist, which allows regenerating the kalman stats file.
def read_kalman_stats(opt, from_info=False) -> Table:
    path = PERIGEES_INDEX_TABLE_PATH(opt.data_dir)
    if path.exists() and not from_info:
        LOGGER.info(f"Reading kalman perigee data from {path}")
        kalman_stats = Table.read(path)
    else:
        rows = []
        # Look for files like 2019/Jan-12/info.json
        for info_file in PERIGEES_DIR_PATH(opt.data_dir).glob("????/??????/info.json"):
            if re.search(r"\d{4}/\w{3}-\d{2}/info\.json", info_file.as_posix()):
                LOGGER.info(f"Reading kalman perigee data from {info_file}")
                info = json.loads(info_file.read_text())
                rows.append(info)
        LOGGER.info(f"No kalman perigee stats data found at {path}")
        LOGGER.info(f"Creating new table from {len(rows)} info files")
        kalman_stats = Table(rows=rows)
        if rows:
            kalman_stats.sort("perigee", reverse=True)
    return kalman_stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_raw_data(dir, matlab=False):\n\n\tcurrent_dir = os.getcwd() \n\t\n\tos.chdir(dir)\n\t\n\tfile_names = []\n\tdata = {}\n\t\n\t\n\t## For text files\n\tif not matlab:\n\t\tfiles = glob.glob('*.txt')\n\t\t\n\t\tassert len(files) > 0, 'No *.txt files found!'\n\n\t\tif len(glob.glob('*.mat')) > 0:\n\t\t\tprint('WARNING: matlab files also found in directory: \\t%s'%dir)\n\t\t\n\t\tfor f in files:\n\t\t\tf_name = f.lower()\n\t\t\n\t\t\tif f_name.find('mark') > -1:\n\t\t\t\tdata['markers'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\t\t\n\t\t\telif f_name.find('spike') > -1:\n\t\t\t\tdata['spikes'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\t\t\n\t\t\telif f_name.find('shape') > -1:\n\t\t\t\tdata['shape'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\n\n\t## For matlab files\n\t# These matlab files have more useful data than is extracted here.\n\telif matlab:\n\t\tfiles = glob.glob('*.mat')\n\t\t\n\t\tassert len(files) > 0, 'No matlab files found!'\n\t\t\n\t\tif len(glob.glob('*.txt')) > 0:\n\t\t\tprint('WARNING: text files also found in directory: \\t%s' %dir)\n\n\t\tfor f in files:\n\t\t\tf_name = f.lower()\n\t\t\t\n\t\t\t\n\t\t\tif f_name.find('mark') > -1:\n\t\t\t\t\n\t\t\t\tmark_file = h5py.File(f) # Loads hfd5 file\n\t\t\t\tmark_key = mark_file.keys()[0] # Gets name of relevant file for extract\n\t\t\t\t\n\t\t\t\t# Extract times of the markers\n\t\t\t\tdata['markers'] = np.array(mark_file['%s/times' %mark_key])\n\t\t\t\tdata['markers'] = np.reshape(data['markers'], -1) # turn to 1D array, as first axis redundant\n\t\t\t\t\n\t\t\t\t# Extract the numerical codes of the markers, which are listed one-to-one\n\t\t\t\t# with the times extracted above. Useful for an integrity check.\n\t\t\t\t# Zero index necessary as marker codes has three empty columns\n\t\t\t\tdata['marker_codes'] = np.array(mark_file['%s/codes' %mark_key][0])\n\t\t\t\tdata['marker_codes'] = np.reshape(data['marker_codes'], -1) # turn to 1D array, as first axis redundant\n\t\t\t\tfile_names.append(f)\n\n\t\t\telif f_name.find('spike') > -1:\n\n\t\t\t\tspike_file = h5py.File(f) # Loads hfd5 file\n\t\t\t\tspike_key = spike_file.keys()[0] # Gets name of relevant file for extract\n\t\t\t\t\n\t\t\t\t# Extract times of the spikes\n\t\t\t\tdata['spikes'] = np.array(spike_file['%s/times' %spike_key])\n\t\t\t\tdata['spikes'] = np.reshape(data['spikes'], -1) # turn to 1D array, as first axis redundant\n\n\n\t\t\t\t#Extract trace for each spike. 
First Dim-trace, second-spikes.\n\t\t\t\tspike_traces = np.array(spike_file['%s/values' %spike_key])\n\t\t\t\t\n\t\t\t\t# Calculate Average shape (for all templates, which are coded in '/codes')\n\t\t\t\tavg_spike_trace = np.mean(spike_traces, axis=1)\n\t\t\t\tsem_avg_spike_trace = stats.sem(spike_traces, axis=1, ddof=1)\n\t\t\t\t\n\t\t\t\tdata['shape'] = avg_spike_trace\n\t\t\t\tdata['shape_SEM'] = sem_avg_spike_trace\n\t\t\t\tfile_names.append(f) \n\t\t\t\t\n\t\t\t\t\t\t\n\tos.chdir(current_dir)\n\n\t\t\t\n\tif len(data.keys()) != len(files):\n\t\tmesg = 'Not all of your file names are recognised; they may not have been imported appropriately.'\n\t\tmesg2 = 'File names must contain the key words \"mark\", \"spike\" and/or \"shape.\"'\n\t\tprint(mesg)\n\t\tprint(mesg2)\n\t\tprint('\\nFollowing files loaded successfully:\\n')\n\t\tfor i in file_names: print(i)\n\t\treturn data\n\n\t\n\telif len(data.keys()) == len(files):\n\t\tprint('All files imported and assigned')\n\t\tprint('\\nFollowing files loaded successfully:\\n')\n\t\tfor i in file_names: print(i)\n\t\treturn data", "def load_data(self, from_raw: bool = True, sample: float = 1, date: str = None) -> None:\n if from_raw is False:\n self.data = self.read_from_pickle('{}/data_{}.pickle'.format(self.raw_output_dir, date))\n self.ipo_dates = self.read_from_pickle('{}/ipo_{}.pickle'.format(self.raw_output_dir, date))\n self.issues_data = self.read_from_pickle('{}/issues_{}.pickle'.format(self.raw_output_dir, date))\n else:\n sample_size = round(len(self.paths_to_securities_files) * sample)\n self.logger.debug('data_loader… number of secs in sample: {}'.format(sample_size))\n self.data = {}\n self.paths_to_securities_files = random.sample(self.paths_to_securities_files, sample_size)\n for i_file, sec_file_path in enumerate(self.paths_to_securities_files):\n self.logger.info('start: {}'.format(sec_file_path))\n # read info sheet from excel file\n h = pd.read_excel('file:' + sec_file_path, sheetname='Info', header=None)\n\n isin_cell = h.iloc[17, 4]\n industry_cell = h.iloc[20, 4]\n ipo_cell = h.iloc[1, 1]\n if pd.isnull(isin_cell) or pd.isnull(ipo_cell) or pd.isnull(industry_cell):\n self.logger.info('without isin/sector/ipo: {}'.format(sec_file_path))\n continue\n isin = isin_cell.lower()\n try:\n self.ipo_dates[isin] = datetime.datetime.strptime(ipo_cell, \"%Y-%m-%d\").date()\n except ValueError:\n self.logger.info('invalid ipo date: {}'.format(sec_file_path))\n continue\n\n # shareholders (look for skarb panstwa)\n shareholders = pd.read_excel('file:' + sec_file_path, sheetname='Shareholders', header=None)\n is_owned_by_gov = bool(shareholders.iloc[:, 0].isin(['Skarb Państwa']).any())\n\n # ISSUES\n issues = pd.read_excel('file:' + sec_file_path, sheetname='Issues', header=None)\n # select data about issues\n issues_to_save = issues.iloc[1:, [5, 1, 8, 9, 7]]\n issues_to_save.columns = ['number_of_shares', 'date_wza', 'date_reg', 'date_gielda', 'date_knf']\n issues_to_save.loc[:, 'number_of_shares'] = pd.to_numeric(issues_to_save.loc[:, 'number_of_shares'])\n for i in ['date_wza', 'date_reg', 'date_gielda', 'date_knf']:\n try:\n issues_to_save.loc[issues_to_save.loc[:, i].notnull(), i] = \\\n issues_to_save.loc[issues_to_save.loc[:, i].notnull(), i].map(\n lambda m: datetime.datetime.strptime(m, \"%Y-%m-%d\").date())\n except (TypeError, ValueError):\n for iii, x in issues_to_save.loc[:, i].iteritems():\n try:\n issues_to_save.loc[iii, i] = datetime.datetime.strptime(x, \"%Y-%m-%d\").date()\n except (TypeError, ValueError):\n 
issues_to_save.loc[iii, i] = np.nan\n self.issues_data[isin] = issues_to_save\n\n self.logger.debug(f'adding to ipo_dates: {isin} {self.ipo_dates[isin]}')\n\n # read YC sheet from excel file\n h = pd.read_excel('file:' + sec_file_path, sheetname='YC', header=None, index_col=0)\n if len(h.columns) <= 3:\n # in tab YC there is no columns, so I assume that company has no consolidated\n # financial statements and I skip to the next company\n self.logger.warning('file without valid dates in statements: {} {}'.format(sec_file_path, isin))\n del self.ipo_dates[isin]\n continue\n h = h.loc[~h.index.duplicated(keep='first'), :]\n h.loc['industry', :] = industry_cell\n h.loc['isin', :] = isin\n if is_owned_by_gov is True:\n h.loc['gov_owned', :] = 1\n else:\n h.loc['gov_owned', :] = 0\n\n self.data[isin] = h\n self.logger.info(f'secs with valid ipo/isin/industry: {len(self.data)}')\n self.save_to_pickle(self.data, 'data')\n self.save_to_pickle(self.ipo_dates, 'ipo')\n self.save_to_pickle(self.issues_data, 'issues')", "def extract_features_file(self, audio, static=True, plots=False, fmt=\"npy\", kaldi_file=\"\"):\n if static:\n return self.extract_static_features(audio, plots, fmt)\n\n else:\n return self.extract_dynamic_features(audio, fmt, kaldi_file)", "def read_audio(audio_paths, speaker_dict, tool, config, normalize, is_training,\n save_path=None, save_format='numpy',\n global_mean_male=None, global_mean_female=None,\n global_std_male=None, global_std_female=None,\n dtype=np.float32):\n if not is_training:\n if global_mean_male is None or global_mean_female is None:\n raise ValueError('Set mean & std computed in the training set.')\n if normalize not in ['global', 'speaker', 'utterance', 'no']:\n raise ValueError(\n 'normalize must be \"utterance\" or \"speaker\" or \"global\" or \"no\".')\n if tool not in ['htk', 'python_speech_features', 'librosa']:\n raise TypeError(\n 'tool must be \"htk\" or \"python_speech_features\"' +\n ' or \"librosa\".')\n\n audio_path_list_male, audio_path_list_female = [], []\n total_frame_num_male, total_frame_num_female = 0, 0\n total_frame_num_dict = {}\n speaker_mean_dict = {}\n\n # NOTE: 講演ごとに異なるspeakerとみなす\n\n # Loop 1: Computing global mean and statistics\n if is_training and normalize != 'no':\n print('=====> Reading audio files...')\n for i, audio_path in enumerate(tqdm(audio_paths)):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio file into utterances\n _, input_utt_sum, speaker_mean, _, total_frame_num_speaker = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n if i == 0:\n # Initialize global statistics\n feature_dim = input_utt_sum.shape[0]\n global_mean_male = np.zeros((feature_dim,), dtype=dtype)\n global_mean_female = np.zeros(\n (feature_dim,), dtype=dtype)\n global_std_male = np.zeros((feature_dim,), dtype=dtype)\n global_std_female = np.zeros((feature_dim,), dtype=dtype)\n\n # For computing global mean\n if speaker[3] == 'M':\n audio_path_list_male.append(audio_path)\n global_mean_male += input_utt_sum\n total_frame_num_male += total_frame_num_speaker\n elif speaker[3] == 'F':\n audio_path_list_female.append(audio_path)\n global_mean_female += input_utt_sum\n total_frame_num_female += total_frame_num_speaker\n else:\n raise ValueError\n\n # For computing speaker stddev\n if normalize == 'speaker':\n speaker_mean_dict[speaker] = speaker_mean\n total_frame_num_dict[speaker] = total_frame_num_speaker\n # NOTE: speaker mean is already computed\n\n 
print('=====> Computing global mean & stddev...')\n # Compute global mean per gender\n global_mean_male /= total_frame_num_male\n global_mean_female /= total_frame_num_female\n\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, _, _, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n # For computing global stddev\n if speaker[3] == 'M':\n for input_utt in input_data_dict_speaker.values():\n global_std_male += np.sum(\n np.abs(input_utt - global_mean_male) ** 2, axis=0)\n elif speaker[3] == 'F':\n for input_utt in input_data_dict_speaker.values():\n global_std_female += np.sum(\n np.abs(input_utt - global_mean_female) ** 2, axis=0)\n else:\n raise ValueError\n\n # Compute global stddev per gender\n global_std_male = np.sqrt(\n global_std_male / (total_frame_num_male - 1))\n global_std_female = np.sqrt(\n global_std_female / (total_frame_num_female - 1))\n\n if save_path is not None:\n # Save global mean & std per gender\n np.save(join(save_path, 'global_mean_male.npy'),\n global_mean_male)\n np.save(join(save_path, 'global_mean_female.npy'),\n global_mean_female)\n np.save(join(save_path, 'global_std_male.npy'),\n global_std_male)\n np.save(join(save_path, 'global_std_female.npy'),\n global_std_female)\n\n # Loop 2: Normalization and Saving\n print('=====> Normalization...')\n frame_num_dict = {}\n sampPeriod, parmKind = None, None\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n if normalize == 'speaker' and is_training:\n speaker_mean = speaker_mean_dict[speaker]\n else:\n speaker_mean = None\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, speaker_mean, speaker_std, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=is_training,\n sil_duration=0,\n tool=tool,\n config=config,\n mean=speaker_mean) # for compute speaker sttdev\n # NOTE: input_data_dict_speaker have been not normalized yet\n\n for utt_index, input_utt in input_data_dict_speaker.items():\n\n if normalize == 'no':\n pass\n elif normalize == 'global' or not is_training:\n # Normalize by mean & std over the training set per gender\n if speaker[3] == 'M':\n input_utt -= global_mean_male\n input_utt /= global_std_male\n elif speaker[3] == 'F':\n input_utt -= global_mean_female\n input_utt /= global_std_female\n else:\n raise ValueError\n elif normalize == 'speaker':\n # Normalize by mean & std per speaker\n input_utt = (input_utt - speaker_mean) / speaker_std\n elif normalize == 'utterance':\n # Normalize by mean & std per utterance\n utt_mean = np.mean(input_utt, axis=0, dtype=dtype)\n utt_std = np.std(input_utt, axis=0, dtype=dtype)\n input_utt = (input_utt - utt_mean) / utt_std\n else:\n raise ValueError\n\n frame_num_dict[speaker + '_' + utt_index] = input_utt.shape[0]\n\n if save_path is not None:\n # Save input features\n if save_format == 'numpy':\n input_data_save_path = mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.npy')\n np.save(input_data_save_path, input_utt)\n elif save_format == 'htk':\n if sampPeriod is None:\n _, sampPeriod, parmKind = read(audio_path)\n write(input_utt,\n htk_path=mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.htk'),\n sampPeriod=sampPeriod,\n parmKind=parmKind)\n else:\n raise ValueError('save_format is numpy or htk.')\n\n if save_path is not None:\n # Save the frame number dictionary\n 
with open(join(save_path, 'frame_num.pickle'), 'wb') as f:\n pickle.dump(frame_num_dict, f)\n\n return (global_mean_male, global_mean_female,\n global_std_male, global_std_female, frame_num_dict)", "def read_kalman_config(path=None):\n if path is None:\n path = os.path.dirname(os.path.abspath(__file__))\n with open(path + \"/config.yml\", \"r\") as yml:\n conf = yaml.full_load(yml)\n kalman_config = conf['kalman']\n\n return kalman_config", "def test_readfile_fits(self):\n fitsname = os.path.join(self.datadir, 'monol_testA.evt')\n command = \"{0}\".format(fitsname)\n\n hen.io.main(command.split())", "def load_data_from_dir(self, dir_name, method):\n\n # TODO: deal with the different format\n # TODO: decide if we should add those nwb to the ones already opened (if that's the case)\n # or erase the ones present and replace them by the new one.\n # probably best to have 2 options on the menu open new, and something like add data\n file_names = []\n # look for filenames in the first directory, if we don't break, it will go through all directories\n for (dirpath, dirnames, local_filenames) in os.walk(dir_name):\n file_names.extend(local_filenames)\n break\n for file_name in file_names:\n if file_name.endswith(\".nwb\"):\n io = NWBHDF5IO(os.path.join(dir_name, file_name), 'r')\n nwb_file = io.read()\n self.data_dict[nwb_file.identifier] = nwb_file\n self.nwb_path_list[nwb_file.identifier] = os.path.join(dir_name, file_name)\n self.to_add_labels.append(nwb_file.identifier)\n self.labels = self.labels + self.to_add_labels\n # checking there is at least one data file loaded\n if len(self.data_dict) > 0:\n if method == 'clear':\n self.musketeers_widget.session_widget.populate(self.labels, method)\n self.load_group_from_config()\n else:\n self.musketeers_widget.session_widget.populate(self.to_add_labels, method)\n self.sortMenu.setEnabled(True)\n self.groupMenu.setEnabled(True)\n # then we save the last location opened in the yaml file in config\n self.save_last_data_location(dir_name=dir_name)", "def read_audio(audio_paths, speaker_dict, tool, config, normalize, is_training,\n save_path=None,\n train_global_mean_male=None, train_global_mean_female=None,\n train_global_std_male=None, train_global_std_female=None,\n dtype=np.float64):\n if not is_training:\n if train_global_mean_male is None or train_global_mean_female is None:\n raise ValueError('Set mean & std computed in the training set.')\n if normalize not in ['global', 'speaker', 'utterance']:\n raise ValueError('normalize is \"utterance\" or \"speaker\" or \"global\".')\n\n audio_path_list_male, audio_path_list_female = [], []\n total_frame_num_male, total_frame_num_female = 0, 0\n total_frame_num_dict = {}\n speaker_mean_dict = {}\n\n # NOTE: speaker norm は講演ごとの正規化とする\n # 講演間の話者関係がわからないから\n\n # Loop 1: Computing global mean and statistics\n if is_training:\n print('===> Reading audio files...')\n for i, audio_path in enumerate(tqdm(audio_paths)):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio file into utterances\n _, input_data_utt_sum, speaker_mean, _, total_frame_num_speaker = segment_htk(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n if i == 0:\n # Initialize global statistics\n feature_dim = input_data_utt_sum.shape[0]\n train_global_mean_male = np.zeros((feature_dim,), dtype=dtype)\n train_global_mean_female = np.zeros(\n (feature_dim,), dtype=dtype)\n train_global_std_male = np.zeros((feature_dim,), dtype=dtype)\n train_global_std_female 
= np.zeros((feature_dim,), dtype=dtype)\n\n # For computing global mean\n if speaker[3] == 'M':\n audio_path_list_male.append(audio_path)\n train_global_mean_male += input_data_utt_sum\n total_frame_num_male += total_frame_num_speaker\n elif speaker[3] == 'F':\n audio_path_list_female.append(audio_path)\n train_global_mean_female += input_data_utt_sum\n total_frame_num_female += total_frame_num_speaker\n else:\n raise ValueError\n\n # For computing speaker stddev\n if normalize == 'speaker':\n speaker_mean_dict[speaker] = speaker_mean\n total_frame_num_dict[speaker] = total_frame_num_speaker\n # NOTE: すでに話者平均は計算できている\n\n print('===> Computing global mean & stddev...')\n # Compute global mean per gender\n train_global_mean_male /= total_frame_num_male\n train_global_mean_female /= total_frame_num_female\n\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, _, _, _ = segment_htk(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n # For computing global stddev\n if speaker[3] == 'M':\n for input_data_utt in input_data_dict_speaker.values():\n train_global_std_male += np.sum(\n np.abs(input_data_utt - train_global_mean_male) ** 2, axis=0)\n elif speaker[3] == 'F':\n for input_data_utt in input_data_dict_speaker.values():\n train_global_std_female += np.sum(\n np.abs(input_data_utt - train_global_mean_female) ** 2, axis=0)\n else:\n raise ValueError\n\n # Compute global stddev per gender\n train_global_std_male = np.sqrt(\n train_global_std_male / (total_frame_num_male - 1))\n train_global_std_female = np.sqrt(\n train_global_std_female / (total_frame_num_female - 1))\n\n if save_path is not None:\n # Save global mean & std per gender\n np.save(join(save_path, 'train_global_mean_male.npy'),\n train_global_mean_male)\n np.save(join(save_path, 'train_global_mean_female.npy'),\n train_global_mean_female)\n np.save(join(save_path, 'train_global_std_male.npy'),\n train_global_std_male)\n np.save(join(save_path, 'train_global_std_female.npy'),\n train_global_std_female)\n\n # Loop 2: Normalization and Saving\n print('===> Normalization...')\n frame_num_dict = {}\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n if normalize == 'speaker' and is_training:\n speaker_mean = speaker_mean_dict[speaker]\n else:\n speaker_mean = None\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, speaker_mean, speaker_std, _ = segment_htk(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=is_training,\n sil_duration=0,\n tool=tool,\n config=config,\n mean=speaker_mean) # for compute speaker sttdev\n # NOTE: input_data_dict_speaker have been not normalized yet\n\n for utt_index, input_data_utt in input_data_dict_speaker.items():\n\n if normalize == 'utterance' and is_training:\n # Normalize by mean & std per utterance\n utt_mean = np.mean(input_data_utt, axis=0, dtype=dtype)\n utt_std = np.std(input_data_utt, axis=0, dtype=dtype)\n input_data_utt = (input_data_utt - utt_mean) / utt_std\n\n elif normalize == 'speaker' and is_training:\n # Normalize by mean & std per speaker\n input_data_utt = (input_data_utt - speaker_mean) / speaker_std\n\n else:\n # Normalize by mean & std over the training set per gender\n if speaker[3] == 'M':\n input_data_utt -= train_global_mean_male\n input_data_utt /= train_global_std_male\n elif speaker[3] == 'F':\n input_data_utt -= 
train_global_mean_female\n input_data_utt /= train_global_std_female\n else:\n raise ValueError\n\n if save_path is not None:\n # Save input features\n input_data_save_path = mkdir_join(\n save_path, speaker + '_' + utt_index + '.npy')\n np.save(input_data_save_path, input_data_utt)\n frame_num_dict[speaker + '_' +\n utt_index] = input_data_utt.shape[0]\n\n if save_path is not None:\n # Save the frame number dictionary\n with open(join(save_path, 'frame_num.pickle'), 'wb') as f:\n pickle.dump(frame_num_dict, f)\n\n return (train_global_mean_male, train_global_mean_female,\n train_global_std_male, train_global_std_female)", "def main():\n dir_path='.'\n meas_file='magic_measurements.txt'\n samp_file=\"er_samples.txt\"\n out_file='magic_measurements.txt'\n if '-h' in sys.argv:\n print(main.__doc__)\n sys.exit()\n if '-WD' in sys.argv:\n ind = sys.argv.index('-WD')\n dir_path=sys.argv[ind+1]\n if '-f' in sys.argv:\n ind = sys.argv.index('-f')\n meas_file=sys.argv[ind+1]\n if '-fsa' in sys.argv:\n ind = sys.argv.index('-fsa')\n samp_file=sys.argv[ind+1]\n if '-F' in sys.argv:\n ind = sys.argv.index('-F')\n out_file=sys.argv[ind+1]\n # read in measurements file\n meas_file=dir_path+'/'+meas_file\n out_file=dir_path+'/'+out_file\n samp_file=dir_path+'/'+samp_file\n data,file_type=pmag.magic_read(meas_file)\n samps,file_type=pmag.magic_read(samp_file)\n MeasRecs=[]\n sampnames,sflag=[],0\n for rec in data:\n for samp in samps:\n if samp['er_sample_name'].lower()==rec['er_sample_name'].lower():\n if samp['er_sample_name'] not in sampnames:sampnames.append(samp['er_sample_name'].lower())\n rec['er_site_name']=samp['er_site_name']\n rec['er_location_name']=samp['er_location_name']\n MeasRecs.append(rec)\n break\n if rec['er_sample_name'].lower() not in sampnames:\n sampnames.append(rec['er_sample_name'].lower())\n sflag=1\n SampRec={}\n for key in list(samps[0].keys()):SampRec[key]=\"\"\n SampRec['er_sample_name']=rec['er_sample_name']\n SampRec['er_citation_names']=\"This study\"\n SampRec['er_site_name']='MISSING'\n SampRec['er_location_name']='MISSING'\n SampRec['sample_desription']='recorded added by update_measurements - edit as needed'\n samps.append(SampRec)\n print(rec['er_sample_name'],' missing from er_samples.txt file - edit orient.txt file and re-import')\n rec['er_site_name']='MISSING'\n rec['er_location_name']='MISSING'\n MeasRecs.append(rec)\n pmag.magic_write(out_file,MeasRecs,'magic_measurements')\n print(\"updated measurements file stored in \", out_file)\n if sflag==1:\n pmag.magic_write(samp_file,samps,'er_samples')\n print(\"updated sample file stored in \", samp_file)", "def readAudioData(self, shouldProcess):\n if shouldProcess:\n return gatherData(self.playlists) \n else:\n return pd.read_pickle(\"data/audioDF.pkl\")", "def read_statistics(self):\n self.psdata=[]\n self.powerspectra=[]\n self.ds=[]\n self.dsigmasq=[]\n self.dsigma=[]\n self.bsdata=[]\n self.eqbispectra=[]\n self.fNLeq=[]\n\n for sub in range(self.Nsubs):\n self.psdata.append(np.load(self.datadir+self.filebase+\"_\"+str(sub)+\".npy\"))\n self.powerspectra.append(np.trim_zeros(self.psdata[-1][0][1:]))\n self.bsdata.append(np.load(self.datadir+self.fbbispec+\"_\"+str(sub)+\".npy\"))\n self.eqbispectra.append(self.bsdata[-1][0][1:len(self.powerspectra[-1])])\n\n self.ds.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[0])\n self.dsigmasq.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[1])\n self.dsigma = np.array([np.sqrt(dsq) for dsq in self.dsigmasq])\n\n self.klist=np.arange(1, 
len(self.powerspectra[-1]))*(2.*np.pi/self.Lsub)\n # subtract the mean ds\n self.ds = self.ds - np.mean(self.ds)\n self.fNLeq=np.mean(self.eqbispectra, axis=0)\n self.fNLeqsubs=np.mean(self.eqbispectra, axis=1)\n self.fNLeqds=[]\n for i in range(len(self.eqbispectra)):\n self.fNLeqds.append(np.array([self.ds[i]*self.eqbispectra[i][j] for j in range(45)]))", "def _AnatInfo(self, info, path):\n if info['data_filetype'] == 'ge_data':\n return ERROR\n outdir = '%s/%s' % (self.procdir, self.tmplt['anat']['outdir'])\n info['InversionTime'] = self.hdr['native_header']['InversionTime']\n\n if info['psdname'] == 'efgre3d' or info['psdname'] == 'bravo':\n# Structural scans are 3d inversion-recovery.\n if self.hdr['native_header']['InversionTime'] < 1.:\n# Only inversion recovery used for anatomy. Must be calibration.\n return None\n elif self.hdr['zsize'] > 1.25:\n# Only one slab acquired. Assume thick slices.\n name = 'T1Low_%d' % self.n_t1low\n self.n_t1low += 1\n else:\n if self.n_t1high == 0:\n name = 'T1High'\n else:\n name = 'T1High_%d' % self.n_t1high\n self.n_t1high += 1\n else:\n psdname = info['psdname']\n name = self.imgtype.get(psdname, info['psdname'])\n if self.ntype.has_key(psdname):\n self.ntype[psdname] += 1\n name = '%s_%0d' % (name, self.ntype[psdname])\n else:\n self.ntype[psdname] = 1\n info['norm_src'] = False\n info['outdir'] = outdir\n info['filetype'] = self.tmplt['anat']['format']\n info['imgfile'] = '%s/%s' % (info['outdir'], name)\n\n self.entry_map['anat'].append(self.current_entry)\n return OK", "def read_data(self):\n if not self.header['data included']:\n pass\n elif self.header['file type'] in (21, 26):\n self._isotope_data()\n if os.path.exists(self.filename + '_txt'):\n self._isotope_txt_data()\n elif self.header['file type'] == 22:\n # line scan types, no ImageHeader\n warnings.warn('No data read for line scan, fix')\n pass\n elif self.header['file type'] in (31, 35):\n self._beamstability_data()\n else:\n self._image_data()", "def read_data(args):\n\n print(\"Start read_data\")\n t_tot = 0 # sum of times for the all dataset\n date_dirs = os.listdir(args.path_data_base)\n for n_iter, date_dir in enumerate(date_dirs):\n # get access to each sequence\n path1 = os.path.join(args.path_data_base, date_dir)\n if not os.path.isdir(path1):\n continue\n date_dirs2 = os.listdir(path1)\n\n for date_dir2 in date_dirs2:\n path2 = os.path.join(path1, date_dir2)\n if not os.path.isdir(path2):\n continue\n # read data\n oxts_files = sorted(glob.glob(os.path.join(path2, 'oxts', 'data', '*.txt')))\n oxts = KITTIDataset.load_oxts_packets_and_poses(oxts_files)\n\n \"\"\" Note on difference between ground truth and oxts solution:\n - orientation is the same\n - north and east axis are inverted\n - position are closed to but different\n => oxts solution is not loaded\n \"\"\"\n\n print(\"\\n Sequence name : \" + date_dir2)\n if len(oxts) < KITTIDataset.min_seq_dim: #  sequence shorter than 30 s are rejected\n cprint(\"Dataset is too short ({:.2f} s)\".format(len(oxts) / 100), 'yellow')\n continue\n lat_oxts = np.zeros(len(oxts))\n lon_oxts = np.zeros(len(oxts))\n alt_oxts = np.zeros(len(oxts))\n roll_oxts = np.zeros(len(oxts))\n pitch_oxts = np.zeros(len(oxts))\n yaw_oxts = np.zeros(len(oxts))\n roll_gt = np.zeros(len(oxts))\n pitch_gt = np.zeros(len(oxts))\n yaw_gt = np.zeros(len(oxts))\n t = KITTIDataset.load_timestamps(path2)\n acc = np.zeros((len(oxts), 3))\n acc_bis = np.zeros((len(oxts), 3))\n gyro = np.zeros((len(oxts), 3))\n gyro_bis = np.zeros((len(oxts), 3))\n p_gt = 
np.zeros((len(oxts), 3))\n v_gt = np.zeros((len(oxts), 3))\n v_rob_gt = np.zeros((len(oxts), 3))\n\n k_max = len(oxts)\n for k in range(k_max):\n oxts_k = oxts[k]\n t[k] = 3600 * t[k].hour + 60 * t[k].minute + t[k].second + t[\n k].microsecond / 1e6\n lat_oxts[k] = oxts_k[0].lat\n lon_oxts[k] = oxts_k[0].lon\n alt_oxts[k] = oxts_k[0].alt\n acc[k, 0] = oxts_k[0].af\n acc[k, 1] = oxts_k[0].al\n acc[k, 2] = oxts_k[0].au\n acc_bis[k, 0] = oxts_k[0].ax\n acc_bis[k, 1] = oxts_k[0].ay\n acc_bis[k, 2] = oxts_k[0].az\n gyro[k, 0] = oxts_k[0].wf\n gyro[k, 1] = oxts_k[0].wl\n gyro[k, 2] = oxts_k[0].wu\n gyro_bis[k, 0] = oxts_k[0].wx\n gyro_bis[k, 1] = oxts_k[0].wy\n gyro_bis[k, 2] = oxts_k[0].wz\n roll_oxts[k] = oxts_k[0].roll\n pitch_oxts[k] = oxts_k[0].pitch\n yaw_oxts[k] = oxts_k[0].yaw\n v_gt[k, 0] = oxts_k[0].ve\n v_gt[k, 1] = oxts_k[0].vn\n v_gt[k, 2] = oxts_k[0].vu\n v_rob_gt[k, 0] = oxts_k[0].vf\n v_rob_gt[k, 1] = oxts_k[0].vl\n v_rob_gt[k, 2] = oxts_k[0].vu\n p_gt[k] = oxts_k[1][:3, 3]\n Rot_gt_k = oxts_k[1][:3, :3]\n roll_gt[k], pitch_gt[k], yaw_gt[k] = IEKF.to_rpy(Rot_gt_k)\n\n t0 = t[0]\n t = np.array(t) - t[0]\n # some data can have gps out\n if np.max(t[:-1] - t[1:]) > 0.1:\n cprint(date_dir2 + \" has time problem\", 'yellow')\n ang_gt = np.zeros((roll_gt.shape[0], 3))\n ang_gt[:, 0] = roll_gt\n ang_gt[:, 1] = pitch_gt\n ang_gt[:, 2] = yaw_gt\n\n p_oxts = lla2ned(lat_oxts, lon_oxts, alt_oxts, lat_oxts[0], lon_oxts[0],\n alt_oxts[0], latlon_unit='deg', alt_unit='m', model='wgs84')\n p_oxts[:, [0, 1]] = p_oxts[:, [1, 0]] # see note\n\n # take correct imu measurements\n u = np.concatenate((gyro_bis, acc_bis), -1)\n # convert from numpy\n t = torch.from_numpy(t)\n p_gt = torch.from_numpy(p_gt)\n v_gt = torch.from_numpy(v_gt)\n ang_gt = torch.from_numpy(ang_gt)\n u = torch.from_numpy(u)\n\n # convert to float\n t = t.float()\n u = u.float()\n p_gt = p_gt.float()\n ang_gt = ang_gt.float()\n v_gt = v_gt.float()\n\n mondict = {\n 't': t, 'p_gt': p_gt, 'ang_gt': ang_gt, 'v_gt': v_gt,\n 'u': u, 'name': date_dir2, 't0': t0\n }\n\n t_tot += t[-1] - t[0]\n KITTIDataset.dump(mondict, args.path_data_save, date_dir2)\n print(\"\\n Total dataset duration : {:.2f} s\".format(t_tot))", "def get_data(self, injparam=None, trueordering=None,\n systematic=None, direction=None):\n data_sets = OrderedDict()\n minimiser_info = OrderedDict()\n if injparam is not None:\n content = nsort(os.listdir(self.scandir))\n elif trueordering is not None:\n content = nsort(os.listdir(self.systdir))\n else:\n content = nsort(os.listdir(self.logdir))\n for basename in content:\n if injparam is not None:\n m = self.labels[injparam].subdir_re.match(basename)\n wanted_labels = self.labels[injparam]\n elif trueordering is not None:\n if direction is not None:\n m = self.labels[trueordering][systematic][\n direction].subdir_re.match(basename)\n wanted_labels = self.labels[trueordering][\n systematic][direction]\n else:\n m = self.labels[trueordering][\n systematic].subdir_re.match(basename)\n wanted_labels = self.labels[trueordering][systematic]\n else:\n m = self.labels.subdir_re.match(basename)\n wanted_labels = self.labels\n if m is None or 'pckl' in basename:\n continue\n\n if self.fluctuate_data:\n data_ind = int(m.groupdict()['data_ind'])\n dset_label = data_ind\n else:\n dset_label = wanted_labels.data_prefix\n if not wanted_labels.data_name in [None, '']:\n dset_label += '_' + wanted_labels.data_name\n if not wanted_labels.data_suffix in [None, '']:\n dset_label += '_' + wanted_labels.data_suffix\n\n lvl2_fits = 
OrderedDict()\n lvl2_fits['h0_fit_to_data'] = None\n lvl2_fits['h1_fit_to_data'] = None\n minim_info = OrderedDict()\n minim_info['h0_fit_to_data'] = None\n minim_info['h1_fit_to_data'] = None\n\n if injparam is not None:\n subdir = os.path.join(self.scandir, basename)\n elif trueordering is not None:\n subdir = os.path.join(self.systdir, basename)\n else:\n subdir = os.path.join(self.logdir, basename)\n\n # Account for failed jobs. Get the set of file numbers that\n # exist for all h0 an h1 combinations\n self.get_set_file_nums(\n filedir=subdir,\n injparam=injparam,\n trueordering=trueordering,\n systematic=systematic,\n direction=direction\n )\n fnum = None\n \n for fnum, fname in enumerate(nsort(os.listdir(subdir))):\n fpath = os.path.join(subdir, fname)\n for x in ['0', '1']:\n k = 'h{x}_fit_to_data'.format(x=x)\n if fname == wanted_labels.dict[k]:\n lvl2_fits[k] = self.extract_fit(fpath, 'metric_val')\n break\n # Also extract fiducial fits if needed\n if 'toy' in dset_label:\n ftest = ('hypo_%s_fit_to_%s'\n %(wanted_labels.dict['h{x}_name'.format(x=x)],\n dset_label))\n elif dset_label == 'data':\n ftest = ('hypo_%s_fit_to_data'\n %(wanted_labels.dict['h{x}_name'.format(x=x)]))\n if ftest in fname:\n k = 'h{x}_fit_to_{y}'.format(x=x, y=dset_label)\n lvl2_fits[k] = self.extract_fit(\n fpath,\n ['metric_val', 'params']\n )\n break\n k = 'h{x}_fit_to_{y}'.format(x=x, y=dset_label)\n for y in ['0', '1']:\n k = 'h{x}_fit_to_h{y}_fid'.format(x=x, y=y)\n r = wanted_labels.dict[k + '_re']\n m = r.match(fname)\n if m is None:\n continue\n if self.fluctuate_fid:\n fid_label = int(m.groupdict()['fid_ind'])\n else:\n fid_label = wanted_labels.fid\n if k not in lvl2_fits:\n lvl2_fits[k] = OrderedDict()\n minim_info[k] = OrderedDict()\n if fid_label in self.set_file_nums:\n lvl2_fits[k][fid_label] = self.extract_fit(\n fpath,\n ['metric', 'metric_val', 'params']\n )\n minim_info[k][fid_label] = self.extract_fit(\n fpath,\n ['minimizer_metadata', 'minimizer_time']\n )\n break\n\n if fnum is None:\n raise ValueError('No files?')\n\n data_sets[dset_label] = lvl2_fits\n minimiser_info[dset_label] = minim_info\n data_sets[dset_label]['params'] = self.extract_fit(\n fpath,\n ['params']\n )['params']\n\n if injparam is not None:\n self.data_sets[injparam] = data_sets\n self.minimiser_info[injparam] = minimiser_info\n elif trueordering is not None:\n if direction is not None:\n self.data_sets[trueordering][systematic][direction] = data_sets\n else:\n self.data_sets[trueordering][systematic]= data_sets\n else:\n self.data_sets = data_sets\n self.minimiser_info = minimiser_info", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the 
number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def stat_file(self, path, info):\n return {}", "def load_info_file(info):\n with open(info, \"r\") as info_file:\n info = json.load(info_file)\n input_height = info['input_height']\n input_width = info['input_width']\n input_layer = info['input_layer']\n output_layer = info['output_layer']\n labels = info['labels']\n return input_height, input_width, input_layer, output_layer, labels", "def from_dir(cls, path, for_stsp=False):\n if not for_stsp:\n times, fluxes, errors, quarters = [np.loadtxt(os.path.join(path, '{0}.txt'.format(attr)))\n for attr in ['times_jd', 'fluxes', 'errors', 'quarters']]\n else:\n quarters = None\n times, fluxes, errors = np.loadtxt(path, unpack=True)\n\n if os.sep in path:\n name = path.split(os.sep)[-1]\n else:\n name = path\n\n if name.endswith('.txt'):\n name = name[:-4]\n\n return cls(times, fluxes, errors, quarters=quarters, name=name)", "def load_data(self):\n logging.debug('Loading data from file ({})...'.format(self.file_name))\n parsed_data = list()\n with open(self.file_name) as file_data:\n for line in file_data.readlines():\n temp = dict()\n if 'JD' in line:\n continue\n line = line.split()\n temp['ts'], temp['mag'], temp['dif'] = float(line[0][:14]), float(line[1]), float(line[2])\n temp['f_mag'] = self.kalman_filter(temp['mag'])\n temp['dt'] = self.jd_to_datetime(temp['ts'])\n temp['dt_cor'] = self.jd_to_datetime(temp['ts'] - TIME_CRT)\n parsed_data.append(temp)\n logging.debug(' {} records loaded.'.format(len(parsed_data)))\n logging.debug(parsed_data[0])\n self.data_stream = parsed_data", "def dl_fighters(self, reload=False, from_archive=False):\n if self.fightersInfo and not reload and not from_archive:\n print(\"fighters are in memory\")\n self.dump_archive(self.fn_fighters)\n self.size_fighters = len(self.fightersInfo)\n elif os.path.isfile(self.fn_fighters) and not reload:\n self.load_archive(self.fn_fighters)\n self.size_fighters = len(self.fightersInfo)\n else:\n self.dl_fights()\n self._fighters_getter()\n self._remove_duplicates(info=True)\n self.dump_archive(self.fn_fighters)\n self._finalysing()\n self.size_fighters = len(self.fightersInfo)", "def _get_data(\n self,\n vis_hdu,\n antenna_nums,\n antenna_names,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n time_range,\n polarizations,\n blt_inds,\n read_metadata,\n keep_all_metadata,\n run_check,\n check_extra,\n run_check_acceptability,\n strict_uvw_antpos_check,\n ):\n # figure out what data to read in\n blt_inds, freq_inds, pol_inds, history_update_string = self._select_preprocess(\n antenna_nums,\n antenna_names,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n time_range,\n polarizations,\n blt_inds,\n )\n\n if blt_inds is not None:\n blt_frac = len(blt_inds) / float(self.Nblts)\n else:\n blt_frac = 1\n\n if freq_inds is not None:\n freq_frac = len(freq_inds) / float(self.Nfreqs)\n else:\n freq_frac = 1\n\n if pol_inds is not None:\n pol_frac = len(pol_inds) / float(self.Npols)\n else:\n pol_frac = 
1\n\n min_frac = np.min([blt_frac, freq_frac, pol_frac])\n\n if min_frac == 1:\n # no select, read in all the data\n if vis_hdu.header[\"NAXIS\"] == 7:\n raw_data_array = vis_hdu.data.data[:, 0, 0, :, :, :, :]\n assert self.Nspws == raw_data_array.shape[1]\n\n else:\n # in many uvfits files the spw axis is left out,\n # here we put it back in so the dimensionality stays the same\n raw_data_array = vis_hdu.data.data[:, 0, 0, :, :, :]\n raw_data_array = raw_data_array[:, np.newaxis, :, :]\n else:\n # do select operations on everything except data_array, flag_array\n # and nsample_array\n self._select_metadata(\n blt_inds, freq_inds, pol_inds, history_update_string, keep_all_metadata\n )\n\n # just read in the right portions of the data and flag arrays\n if blt_frac == min_frac:\n if vis_hdu.header[\"NAXIS\"] == 7:\n raw_data_array = vis_hdu.data.data[blt_inds, :, :, :, :, :, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]\n assert self.Nspws == raw_data_array.shape[1]\n else:\n # in many uvfits files the spw axis is left out,\n # here we put it back in so the dimensionality stays the same\n raw_data_array = vis_hdu.data.data[blt_inds, :, :, :, :, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :]\n raw_data_array = raw_data_array[:, np.newaxis, :, :, :]\n if freq_frac < 1:\n raw_data_array = raw_data_array[:, :, freq_inds, :, :]\n if pol_frac < 1:\n raw_data_array = raw_data_array[:, :, :, pol_inds, :]\n elif freq_frac == min_frac:\n if vis_hdu.header[\"NAXIS\"] == 7:\n raw_data_array = vis_hdu.data.data[:, :, :, :, freq_inds, :, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]\n assert self.Nspws == raw_data_array.shape[1]\n else:\n # in many uvfits files the spw axis is left out,\n # here we put it back in so the dimensionality stays the same\n raw_data_array = vis_hdu.data.data[:, :, :, freq_inds, :, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :]\n raw_data_array = raw_data_array[:, np.newaxis, :, :, :]\n\n if blt_frac < 1:\n raw_data_array = raw_data_array[blt_inds, :, :, :, :]\n if pol_frac < 1:\n raw_data_array = raw_data_array[:, :, :, pol_inds, :]\n else:\n if vis_hdu.header[\"NAXIS\"] == 7:\n raw_data_array = vis_hdu.data.data[:, :, :, :, :, pol_inds, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]\n assert self.Nspws == raw_data_array.shape[1]\n else:\n # in many uvfits files the spw axis is left out,\n # here we put it back in so the dimensionality stays the same\n raw_data_array = vis_hdu.data.data[:, :, :, :, pol_inds, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :]\n raw_data_array = raw_data_array[:, np.newaxis, :, :, :]\n\n if blt_frac < 1:\n raw_data_array = raw_data_array[blt_inds, :, :, :, :]\n if freq_frac < 1:\n raw_data_array = raw_data_array[:, :, freq_inds, :, :]\n\n assert len(raw_data_array.shape) == 5\n # FITS uvw direction convention is opposite ours and Miriad's.\n # So conjugate the visibilities and flip the uvws:\n self.data_array = (\n raw_data_array[:, :, :, :, 0] - 1j * raw_data_array[:, :, :, :, 1]\n )\n self.flag_array = raw_data_array[:, :, :, :, 2] <= 0\n self.nsample_array = np.abs(raw_data_array[:, :, :, :, 2])\n\n # check if object has all required UVParameters set\n if run_check:\n self.check(\n check_extra=check_extra,\n run_check_acceptability=run_check_acceptability,\n strict_uvw_antpos_check=strict_uvw_antpos_check,\n )", "def read_json(self):\n # read in all json files in the input_path, that match the\n # algorithm_name and are not outputs\n for f in os.listdir(self.input_path):\n 
if(os.path.splitext(f)[1] == \".json\") and (os.path.basename(f).startswith(self.algorithm_name)) and (not os.path.basename(f).startswith(\"_\")):\n self.__input_jsons += [json.load(open(self.input_path + f))]", "def load_stat(input):\n with open(input['json'], 'r', encoding=input['encoding']) as f:\n return json.load(f)", "def show_scale_file_info(input_info_filename):\n with open(input_info_filename) as f:\n info = json.load(f)\n show_scales_info(info)", "def read(self, file_info, **kwargs):\n\n # We need to import at least the standard fields\n user_fields = kwargs.pop(\"fields\", {})\n fields = self.standard_fields | set(user_fields)\n\n # We catch the user mapping here, since we do not want to deal with\n # user-defined names in the further processing. Instead, we use our own\n # mapping\n user_mapping = kwargs.pop(\"mapping\", None)\n\n # Load the dataset from the file:\n dataset = super().read(\n file_info, fields=fields, mapping=self.mapping, **kwargs\n )\n\n dataset[\"time\"] = self._get_time_field(dataset, file_info)\n\n # Remove fields that we do not need any longer (expect the user asked\n # for them explicitly)\n dataset = dataset.drop_vars(\n {\"UTC_start\", \"Profile_time\"} - set(user_fields),\n )\n\n if user_mapping is not None:\n dataset = dataset.rename(user_mapping)\n\n return dataset", "def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"你好小顺\", \"小顺小顺\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n 
self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")", "def read_data(self):\n\n try:\n self.data_instance.read_data(self.directory + self.fName)\n except FileNotFoundError as file_error:\n print(\n \"# The file {} belonging to {} do not exist.\".format(\n file_error.filename, self.fName))", "def read_aeronet_data_main(station_name, month, year, plot_results):\n # Load AERONET file of month-year\n station = gs.Station(station_name)\n\n monthdays = (date(year, month + 1, 1) - date(year, month, 1)).days\n start_day = datetime(year, month, 1, 0, 0)\n end_day = datetime(year, month, monthdays, 0, 0)\n wavelengths = [355, 532, 1064]\n\n base_name = f\"{start_day.strftime('%Y%m%d')}_{end_day.strftime('%Y%m%d')}_{station.aeronet_name}\"\n file_name = os.path.join(station.aeronet_folder, base_name, base_name + '.lev20')\n # TODO : add automatic download of `.lev20' file from AERONET in case a file is missing.\n aeronet_data = pd.read_csv(file_name, skiprows=6).dropna()\n\n # Parse data and rename columns for easier extrapolation of AOD values\n df_dt = pd.to_datetime(aeronet_data['Date(dd:mm:yyyy)'] + aeronet_data['Time(hh:mm:ss)'], format=\"%d:%m:%Y%H:%M:%S\")\n columns = ['AOD_1640nm', 'AOD_1020nm', 'AOD_675nm', 'AOD_500nm', 'AOD_380nm', 'AOD_340nm']\n df_AOD_ANGSTROM = aeronet_data[columns].copy(deep=True)\n df_AOD_ANGSTROM.index = df_dt\n for col in sorted(columns):\n col_new = int(col.split('_')[1].replace('nm', ''))\n df_AOD_ANGSTROM.rename(columns={col: col_new}, inplace=True)\n\n cols = df_AOD_ANGSTROM.columns.values.tolist()\n cols.extend(wavelengths)\n df_AOD_ANGSTROM = df_AOD_ANGSTROM.reindex(cols, axis='columns').sort_index(axis=1)\n\n # Calculate AOD for missing wavelengths as $355,532,1064$\n # by interpolation values from the nearest existing measured wavelengths.\n cols = df_AOD_ANGSTROM.columns.values.tolist()\n for wavelength in wavelengths:\n col_ind = df_AOD_ANGSTROM.columns.get_loc(wavelength)\n ratio = (cols[col_ind + 1] - cols[col_ind]) / (cols[col_ind + 1] - cols[col_ind - 1])\n df_AOD_ANGSTROM[wavelength] = df_AOD_ANGSTROM.iloc[:, col_ind - 1] * \\\n ratio + (1 - ratio) * \\\n df_AOD_ANGSTROM.iloc[:, col_ind + 1]\n\n # Create dataset of AOD per wavelength\n ds_chans = []\n for wavelength in wavelengths:\n aeronet_ds_chan = xr.Dataset(\n data_vars={'aod': ('Time', df_AOD_ANGSTROM[wavelength]),\n 
'lambda_nm': ('Wavelength', [wavelength])\n },\n coords={'Time': df_AOD_ANGSTROM.index.tolist(),\n 'Wavelength': [wavelength]\n })\n ds_chans.append(aeronet_ds_chan)\n ds_aod = xr.concat(ds_chans, dim='Wavelength')\n\n ds_aod.aod.attrs['long_name'] = r'$\\tau$'\n ds_aod = ds_aod.aod.where(ds_aod >= 0, drop=True)\n ds_aod.attrs = {'info': 'Aerosol Optical Depth - generated from AERONET - level 2.0',\n 'location': station.name, 'source_file': file_name,\n 'start_time': start_day.strftime(\"%Y-%d-%m\"), 'end_time': end_day.strftime(\"%Y-%d-%m\")}\n\n # Calculate Angstrom Exponent\n couples = [(355, 532), (355, 1064), (532, 1064)]\n angstrom_daily = []\n for lambda_1, lambda_2 in couples:\n angstrom_couple = xr.apply_ufunc(lambda x, y: misc_lidar.angstrom(ds_aod.sel(Wavelength=x).aod,\n ds_aod.sel(Wavelength=y).aod, x, y), lambda_1, lambda_2,\n keep_attrs=True).rename('angstrom')\n angstrom_ds_chan = xr.Dataset(\n data_vars={'angstrom': ('Time', angstrom_couple.values),\n 'lambda_nm': ('Wavelengths', [f\"{lambda_1}-{lambda_2}\"])\n },\n coords={'Time': df_AOD_ANGSTROM.index.tolist(),\n 'Wavelengths': [f\"{lambda_1}-{lambda_2}\"]\n })\n\n angstrom_daily.append(angstrom_ds_chan)\n ds_ang = xr.concat(angstrom_daily, dim='Wavelengths')\n ds_ang.angstrom.attrs['long_name'] = r'$\\AA$'\n ds_ang.attrs = {'info': 'Angstrom Exponent - generated from AERONET AOD',\n 'location': station.name, 'source_file': file_name,\n 'start_time': start_day.strftime(\"%Y-%d-%m\"), 'end_time': end_day.strftime(\"%Y-%d-%m\")}\n\n # Show AOD and Angstrom Exponent for a period\n if plot_results:\n t_slice = slice(start_day, start_day + timedelta(days=30) - timedelta(seconds=30))\n\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 8))\n ax = axes.ravel()\n for wavelength in wavelengths:\n aod_mean = ds_aod.aod.sel(Wavelength=wavelength, Time=t_slice).mean().item()\n aod_std = ds_aod.aod.sel(Wavelength=wavelength, Time=t_slice).std().item()\n textstr = ' '.join((\n r'$\\mu=%.2f$, ' % (aod_mean,),\n r'$\\sigma=%.2f$' % (aod_std,)))\n ds_aod.aod.sel(Wavelength=wavelength, Time=t_slice).plot(label=fr\"{wavelength}, \" + textstr, ax=ax[0])\n ax[0].set_title(ds_aod.attrs['info'])\n ax[0].legend()\n ax[0].set_ylabel(r'$\\tau$')\n\n for lambda_1, lambda_2 in couples:\n angstrom_mean = ds_ang.angstrom.sel(Wavelengths=f\"{lambda_1}-{lambda_2}\", Time=t_slice).mean().item()\n angstrom_std = ds_ang.angstrom.sel(Wavelengths=f\"{lambda_1}-{lambda_2}\", Time=t_slice).std().item()\n textstr = ' '.join((\n r'$\\mu=%.2f$, ' % (angstrom_mean,),\n r'$\\sigma=%.2f$' % (angstrom_std,)))\n ds_ang.angstrom.sel(Wavelengths=f\"{lambda_1}-{lambda_2}\", Time=t_slice).plot(x='Time',\n label=fr\"$ \\AA \\, {lambda_1},{lambda_2}$, \" + textstr\n , ax=ax[1])\n ax[1].legend()\n ax[1].set_title('Angstrom Exponent')\n plt.tight_layout()\n plt.show()\n\n # Angstrom Exponent distribution of a month\n couple_0 = f\"{355}-{532}\"\n couple_1 = f\"{532}-{1064}\"\n\n x = ds_ang.angstrom.sel(Time=t_slice, Wavelengths=couple_0).values\n y = ds_ang.angstrom.sel(Time=t_slice, Wavelengths=couple_1).values\n\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.scatter(x=x, y=y)\n ax.set_ylabel(couple_0)\n ax.set_xlabel(couple_1)\n ax.set_title(f\"Angstrom Exponent distribution {t_slice.start.strftime('%Y-%m')}\")\n plt.tight_layout()\n plt.show()\n\n # Save AOD and Angstrom Exponent datasets\n nc_base_name = f\"{start_day.strftime('%Y%m%d')}_{end_day.strftime('%Y%m%d')}_{station.name}\"\n\n xr_utils.save_dataset(ds_aod, folder_name=station.aeronet_folder, 
nc_name=nc_base_name+\"_aod.nc\")\n xr_utils.save_dataset(ds_ang, folder_name=station.aeronet_folder, nc_name=nc_base_name+\"_ang.nc\")", "def load_data(path_stats, path_rules):\n with open(path_stats) as json_file:\n material_probs = json.load(json_file)\n with open(path_rules) as json_file:\n convertion_rules = json.load(json_file)\n\n return material_probs, convertion_rules" ]
[ "0.4899632", "0.48992625", "0.47160295", "0.437661", "0.4311666", "0.4288543", "0.42800748", "0.42469117", "0.4233831", "0.42296544", "0.42119592", "0.41696903", "0.41607457", "0.41453016", "0.4135022", "0.41290072", "0.41133738", "0.40929687", "0.40890434", "0.40732569", "0.4068756", "0.4055263", "0.40515023", "0.40111968", "0.4008026", "0.4004511", "0.39962807", "0.39648202", "0.39560688", "0.394853" ]
0.75553703
0
Get the perigee events within start/stop. This selects perigees within start/stop and then finds the span of ERs (obsid > 38000) within +/- 12 hours of perigee.
def get_evts_perigee( start: CxoTime, stop: CxoTime, stats_prev: Table ) -> List["EventPerigee"]: LOGGER.info(f"Getting perigee events between {start} and {stop}") # event_types = ["EEF1000", "EPERIGEE", "XEF1000"] cmds_perigee = get_cmds( start=start, stop=stop, type="ORBPOINT", event_type="EPERIGEE" ) # Find contiguous intervals of ERs (obsid > 38000) states = get_states(start - 3 * u.day, stop + 3 * u.day, state_keys=["obsid"]) states["obsid"] = np.where(states["obsid"] > 38000, 1, 0) states = reduce_states(states, state_keys=["obsid"], merge_identical=True) dirnames_prev = stats_prev["dirname"] if len(stats_prev) > 0 else [] events = [] for cmd in cmds_perigee: t_perigee = cmd["time"] ok = (states["tstart"] <= t_perigee) & (t_perigee < states["tstop"]) n_ok = np.count_nonzero(ok) if n_ok == 0: LOGGER.warning( "WARNING: No ER observations found covering perigee at" f" {CxoTime(t_perigee).date}" ) continue elif n_ok > 1: raise ValueError( "Found multiple states covering perigee at" f" {CxoTime(t_perigee).date} (this really should not happen, this must" " be a bug" ) t_rad_entry = max(states["tstart"][ok][0], t_perigee - 20000) t_rad_exit = min(states["tstop"][ok][0], t_perigee + 20000) event = EventPerigee( rad_entry=t_rad_entry, perigee=cmd["date"], rad_exit=t_rad_exit, ) if event.dirname in dirnames_prev: # If the event is already in the previous kalman stats table then # move on silently. continue if event.tlm is not None: events.append(event) else: LOGGER.info(f"No TLM found for perigee event at {event.perigee}, skipping") continue LOGGER.info(f"Found {len(events)} new perigee event(s)") return events
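A minimal, hypothetical usage sketch for the function above (not part of the source record): it assumes the kadi/cxotime environment the snippet draws on, that stats_prev is the previous kalman-stats table keyed by a "dirname" column (as the dirname check in the function implies), and that the event attributes mirror the constructor keywords. Dates and names below are placeholders, not values from the dataset.

from astropy.table import Table
from cxotime import CxoTime

# Hypothetical previous-stats table with no processed perigee dirnames yet,
# so the function treats every perigee in the window as new.
stats_prev = Table(names=["dirname"], dtype=["U32"])

# Illustrative one-month window using full year:doy date strings.
start = CxoTime("2023:001:00:00:00.000")
stop = CxoTime("2023:032:00:00:00.000")

events = get_evts_perigee(start, stop, stats_prev)
for event in events:
    # Attribute names assumed from the constructor kwargs and dirname check above.
    print(event.perigee, event.dirname)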
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def events_between(self, starting_measure, starting_offset, ending_measure, ending_offset):\n output_events = []\n for i in range(starting_measure - 1, ending_measure - 1 + 1):\n for event in self.event_groups[i].events:\n if i == starting_measure - 1:\n if i == 0 and event.offset >= starting_offset:\n output_events.append(event)\n elif i != 0 and event.offset > starting_offset:\n output_events.append(event)\n elif i == ending_measure - 1:\n if event.offset < ending_offset and ending_offset != 0:\n output_events.append(event)\n else:\n output_events.append(event)\n return output_events", "def get_timeline_events(self, req, start, stop, filters):", "def event_start(site_id, end):\n\n query = \"SELECT ts, ts_updated FROM \"\n query += \" (SELECT * FROM public_alerts \"\n query += \" WHERE site_id = %s \" %site_id\n query += \" AND (ts_updated <= '%s' \" %end\n query += \" OR (ts_updated >= '%s' \" %end\n query += \" AND ts <= '%s')) \" %end\n query += \" ) AS pub \"\n query += \"INNER JOIN \"\n query += \" (SELECT * FROM public_alert_symbols \"\n query += \" WHERE alert_type = 'event') AS sym \"\n query += \"USING (pub_sym_id) \"\n query += \"ORDER BY ts DESC LIMIT 3\"\n \n # previous positive alert\n prev_pub_alerts = db.df_read(query, connection='website')\n\n if len(prev_pub_alerts) == 1:\n start_monitor = pd.to_datetime(prev_pub_alerts['ts'].values[0])\n # two previous positive alert\n elif len(prev_pub_alerts) == 2:\n # one event with two previous positive alert\n if pd.to_datetime(prev_pub_alerts['ts'].values[0]) - \\\n pd.to_datetime(prev_pub_alerts['ts_updated'].values[1]) <= \\\n timedelta(hours=0.5):\n start_monitor = pd.to_datetime(prev_pub_alerts['ts'].values[1])\n else:\n start_monitor = pd.to_datetime(prev_pub_alerts['ts'].values[0])\n # three previous positive alert\n else:\n if pd.to_datetime(prev_pub_alerts['ts'].values[0]) - \\\n pd.to_datetime(prev_pub_alerts['ts_updated'].values[1]) <= \\\n timedelta(hours=0.5):\n # one event with three previous positive alert\n if pd.to_datetime(prev_pub_alerts['ts'].values[1]) - \\\n pd.to_datetime(prev_pub_alerts['ts_updated'].values[2]) \\\n <= timedelta(hours=0.5):\n start_monitor = pd.to_datetime(prev_pub_alerts['timestamp']\\\n .values[2])\n # one event with two previous positive alert\n else:\n start_monitor = pd.to_datetime(prev_pub_alerts['ts'].values[1])\n else:\n start_monitor = pd.to_datetime(prev_pub_alerts['ts'].values[0])\n\n return start_monitor", "def _findEvidExonRange(self, transAnnot, evidTrans):\n # Walk from start to find start and end to find end, as multiple might overlap,\n # however don't go past bounds of the annotation, in case evidence and annotation\n # are interleaved. 
This would be simper without the extend mode.\n return (self._findEvidExonRangeStart(transAnnot, evidTrans),\n self._findEvidExonRangeEnd(transAnnot, evidTrans))", "def _locate_events(self, start_time, end_time):\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n # Adjust pre- and post-pad to take into account cosine taper\n t_length = self.pre_pad + 4*self.marginal_window + self.post_pad\n self.pre_pad += np.ceil(t_length * 0.06)\n self.post_pad += np.ceil(t_length * 0.06)\n\n trig_events = self.output.read_triggered_events(start_time, end_time)\n n_evts = len(trig_events)\n\n for i, trig_event in trig_events.iterrows():\n event_uid = trig_event[\"EventID\"]\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tEVENT - {} of {} - {}\\n\"\n msg += \"=\" * 120 + \"\\n\\n\"\n msg += \"\\tDetermining event location...\\n\"\n msg = msg.format(i + 1, n_evts, event_uid)\n self.output.log(msg, self.log)\n\n w_beg = trig_event[\"CoaTime\"] - 2*self.marginal_window \\\n - self.pre_pad\n w_end = trig_event[\"CoaTime\"] + 2*self.marginal_window \\\n + self.post_pad\n\n timer = util.Stopwatch()\n self.output.log(\"\\tReading waveform data...\", self.log)\n try:\n self._read_event_waveform_data(trig_event, w_beg, w_end)\n except util.ArchiveEmptyException:\n msg = \"\\tNo files found in archive for this time period\"\n self.output.log(msg, self.log)\n continue\n except util.DataGapException:\n msg = \"\\tAll available data for this time period contains gaps\"\n msg += \"\\n\\tOR data not available at start/end of time period\\n\"\n self.output.log(msg, self.log)\n continue\n self.output.log(timer(), self.log)\n\n timer = util.Stopwatch()\n self.output.log(\"\\tComputing 4D coalescence grid...\", self.log)\n\n daten, max_coa, max_coa_norm, loc, map_4d = self._compute(\n w_beg, w_end,\n self.data.signal,\n self.data.availability)\n coord = self.lut.xyz2coord(np.array(loc).astype(int))\n event_coa_data = pd.DataFrame(np.array((daten, max_coa,\n coord[:, 0],\n coord[:, 1],\n coord[:, 2])).transpose(),\n columns=[\"DT\", \"COA\", \"X\", \"Y\", \"Z\"])\n event_coa_data[\"DT\"] = event_coa_data[\"DT\"].apply(UTCDateTime)\n event_coa_data_dtmax = \\\n event_coa_data[\"DT\"].iloc[event_coa_data[\"COA\"].astype(\"float\").idxmax()]\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n\n if (event_coa_data_dtmax >= trig_event[\"CoaTime\"]\n - self.marginal_window) \\\n and (event_coa_data_dtmax <= trig_event[\"CoaTime\"]\n + self.marginal_window):\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n else:\n msg = \"\\n\\tEvent {} is outside marginal window.\\n\"\n msg += \"\\tDefine more realistic error - the marginal window\"\n msg += \" should be an estimate of the origin time uncertainty,\"\n msg += \"\\n\\tdetermined by the expected spatial uncertainty and\"\n msg += \"the seismic velocity in the region of the earthquake\\n\"\n msg += \"\\n\" + \"=\" * 120 + \"\\n\"\n msg = msg.format(event_uid)\n self.output.log(msg, self.log)\n continue\n\n event_mw_data = event_coa_data\n event_mw_data = event_mw_data[(event_mw_data[\"DT\"] >= w_beg_mw) &\n (event_mw_data[\"DT\"] <= w_end_mw)]\n map_4d = map_4d[:, :, :,\n event_mw_data.index[0]:event_mw_data.index[-1]]\n event_mw_data = event_mw_data.reset_index(drop=True)\n event_max_coa = 
event_mw_data.iloc[event_mw_data[\"COA\"].astype(\"float\").idxmax()]\n\n # Update event UID; make out_str\n event_uid = str(event_max_coa.values[0])\n for char_ in [\"-\", \":\", \".\", \" \", \"Z\", \"T\"]:\n event_uid = event_uid.replace(char_, \"\")\n out_str = \"{}_{}\".format(self.output.name, event_uid)\n self.output.log(timer(), self.log)\n\n # Make phase picks\n timer = util.Stopwatch()\n self.output.log(\"\\tMaking phase picks...\", self.log)\n phase_picks = self._phase_picker(event_max_coa)\n self.output.write_picks(phase_picks[\"Pick\"], event_uid)\n self.output.log(timer(), self.log)\n\n # Determining earthquake location error\n timer = util.Stopwatch()\n self.output.log(\"\\tDetermining earthquake location and uncertainty...\", self.log)\n loc_spline, loc_gau, loc_gau_err, loc_cov, \\\n loc_cov_err = self._calculate_location(map_4d)\n self.output.log(timer(), self.log)\n\n # Make event dictionary with all final event location data\n event = pd.DataFrame([[event_max_coa.values[0],\n event_max_coa.values[1],\n loc_spline[0], loc_spline[1], loc_spline[2],\n loc_gau[0], loc_gau[1], loc_gau[2],\n loc_gau_err[0], loc_gau_err[1],\n loc_gau_err[2],\n loc_cov[0], loc_cov[1], loc_cov[2],\n loc_cov_err[0], loc_cov_err[1],\n loc_cov_err[2]]],\n columns=self.EVENT_FILE_COLS)\n\n self.output.write_event(event, event_uid)\n\n self._optional_locate_outputs(event_mw_data, event, out_str,\n phase_picks, event_uid, map_4d)\n\n self.output.log(\"=\" * 120 + \"\\n\", self.log)\n\n del map_4d, event_coa_data, event_mw_data, event_max_coa, \\\n phase_picks\n self.coa_map = None", "def findguidingstop(starttime, event_list):\n for r in event_list:\n if r[0]==6 and r[1]+datetime.timedelta(seconds=0)>starttime: return r[1]\n return None", "def _select_events_outside_pie(sources, events, pointing_position, fov_radius):\n sources = _add_column_and_sort_table(sources, pointing_position)\n radius = Angle(sources[\"Radius\"])[0]\n phi = Angle(sources[\"phi\"])[0]\n separation = Angle(sources[\"separation\"])[0]\n if separation > fov_radius:\n return np.arange(len(events.table))\n else:\n phi_min = phi - np.arctan(radius / separation)\n phi_max = phi + np.arctan(radius / separation)\n phi_events = pointing_position.position_angle(events.radec)\n if phi_max.degree > 360:\n phi_max = phi_max - Angle(360, \"deg\")\n idx = np.where((phi_events > phi_max) & (phi_events < phi_min))\n else:\n idx = np.where((phi_events > phi_max) | (phi_events < phi_min))\n\n return idx[0]", "def get_events(start, end, filters=None):\n\n from frappe.desk.calendar import get_event_conditions\n conditions = get_event_conditions('Booking Request', filters)\n\n data = frappe.db.sql(\"\"\"\n SELECT\n `tabBooking Request`.name, `tabBooking Request`.eta_date,\n `tabBooking Request`.etd_date, `tabBooking Request`.status, `tabBooking Request Status`.color\n FROM\n `tabBooking Request`\n LEFT JOIN `tabBooking Request Status` ON `tabBooking Request`.status = `tabBooking Request Status`.name\n WHERE\n (`tabBooking Request`.docstatus < 2)\"\"\", as_dict=True)\n\n return data", "def get_timeoff_whosout(self):\n response = requests.get(\n self._base_url + \"time_off/whos_out/?end=\" + str(date.today()),\n auth=(self._api_key, 'pass'),\n headers={'Accept': 'application/json'})\n if response.status_code != 200:\n response.raise_for_status()\n leaves_json = json.loads(response.text)\n return {x['employeeId']: Leave(self._get_date_from_string(x['start']),\n self._get_date_from_string(x['end']))\n for x in leaves_json if 'employeeId' in x}", "def 
_get_hours_pro_entry(time_entries):\n events = []\n for event in time_entries:\n start_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.start_at.hour,\n event.start_at.minute,\n event.start_at.second,\n )\n end_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.finish_at.hour,\n event.finish_at.minute,\n event.finish_at.second,\n )\n\n timediff = end_time - start_time\n events.append(\n {\n \"worked_hours\": round(timediff.total_seconds() / 3600, DECIMALS_HOUR),\n \"event\": event,\n }\n )\n return events", "def get_events_for_specific_hours(start,end):\n\tresults = session.query(\"event_name\",\"date\",\"start_time\",\"end_time\").\\\n\tfrom_statement(\"select event_name,date,start_time,end_time from event where date=curdate() and \\\n\t\tstart_time >= :starttime and end_time <= :endtime\").\\\n\tparams(starttime = start, endtime = end).all()\n\tif(len(results) > 0):\n\t\tret_dict = {}\n\t\tevents = []\n\n\t\tfor event_tuple in results:\n\t\t\ttemp = {}\n\t\t\ttemp['event_name'] = event_tuple[0]\n\t\t\ttemp['start_date'] = str(event_tuple[1])\n\t\t\ttemp['start_time'] = str(event_tuple[2])\n\t\t\ttemp['end_time'] = str(event_tuple[3])\n\t\t\tevents.append(temp)\n\n\t\tret_dict['events'] = events\n\t\treturn jsonify(ret_dict)\n\telse:\n\t\treturn \"{'events':'no results returned'}\"", "def getTimeSegments(segments,bounds,radius,starttime,endtime,magrange,catalog,contributor):\n stime = starttime\n etime = endtime\n \n dt = etime - stime\n dtseconds = dt.days*86400 + dt.seconds\n #segment 1\n newstime = stime\n newetime = stime + timedelta(seconds=dtseconds/2)\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n #segment 2\n newstime = newetime\n newetime = etime\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,\n starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,\n contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n\n return segments", "def get_hour_offsets(self):\n starttime = self.parameters['startlocaltime']\n stoptime = self.parameters['stoplocaltime']\n timediff = (stoptime - starttime)\n logger.debug(\"Start time: {} | Stop time: {}\".format(starttime, stoptime))\n if timediff > config.TIME_THRESHOLD:\n logger.debug(\"Time delta is {}. This is significantly larger than anticipated\".format(timediff))\n else:\n logger.debug(\"Time delta is {}. Using start time as the global time\".format(timediff))\n\n \"\"\"\n timediff = (stoptime - starttime).total_seconds()\n logger.debug(\"Start time: {} | Stop time: {}\".format(starttime, stoptime))\n #TODO: How do we want to handle large images with huge time differences?\n if timediff > config.TIME_THRESHOLD:\n logger.debug(\"Time delta is {}. This is significantly larger than anticipated\".format(timediff))\n starttime = starttime\n else:\n logger.debug(\"Time delta is {}. 
Using start time as the global time\".format(timediff))\n \"\"\"\n #Given the image start time, find the nearest index and set to the middle,\n # then find the adjacent two nodes in both directions to get allow a\n # cubic interpolation.\n #image_time = starttime.hour + starttime.minute / 60.0\n # This grabs the hour that is nearest, but hour is circular\n image_time = starttime\n if abs(image_time - 24) < abs(image_time - 23.5):\n image_time -= 24\n mididx, midhour = utils.getnearest(self.times, image_time)\n logger.debug(\"Time is {}. The nearest lookup node is {}\".format(image_time, mididx))\n minidx = mididx - 2\n maxidx = mididx + 2\n\n hourslice = np.arange(minidx, maxidx + 1, dtype=np.int8)\n\n hourslice[hourslice < 0] += 18\n\n if hourslice[-1] >= len(self.times):\n #The hour slice needs to be shifted over the time break\n hourslice[hourslice >= len(self.times)] -= len(self.times)\n logger.debug(\"Using indices {} and start time of {}.\".format(hourslice, image_time))\n return hourslice, image_time", "def list_events(self, start_ts, end_ts, min_susp=None, max_susp=None, simplify=True):\n show_time(start_ts, \"start_ts\")\n show_time(end_ts, \"end_ts\")\n query = query_in_range(start_ts, end_ts, min_susp=min_susp, max_susp=max_susp)\n if simplify:\n query[\"_source\"] = [\"id\", \"event_type\", \"view_ts\", \"last_modified_ts\", \"summary\"] # we only want timestamps\n index_pattern = self.esconn.get_index_name(event_type=self.event_type)\n logging.info(index_pattern)\n\n for e in self.esconn.search_generator(index=index_pattern, query=query, raw_json=simplify):\n if simplify:\n # we will have e as raw json, and then simply it\n inf = e[\"summary\"][\"inference_result\"]\n try:\n event = {\n \"event_id\": e[\"id\"],\n \"event_type\": e[\"event_type\"],\n \"event_time\": e[\"view_ts\"],\n \"property_tags\": e[\"summary\"][\"tags\"],\n \"inference_tags\": [i[\"inference_id\"] for i in inf[\"inferences\"]],\n \"suspicion_level\": inf[\"primary_inference\"][\"suspicion_level\"],\n \"confidence\": inf[\"primary_inference\"][\"confidence\"],\n \"url\": f\"https://dev.hicube.caida.org/feeds/hijacks/events/{e['event_type']}/{e['id']}\"\n }\n # print(event)\n yield event\n except TypeError as error:\n print(json.dumps(e, indent=4))\n raise error\n else:\n yield e", "def get_events(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n\r\n result = []\r\n for tag in self.interests:\r\n query = \"\"\"\r\n SELECT DISTINCT E.eid, E1.ename, E1.description,\r\n E.category, E1.start_date, E1.end_date, E1.num_cap,\r\n E1.num_attending, L.lname, L.address_1, E.tag, L.lat, L.lon\r\n FROM {}.EventTags AS E, {}.UserTags AS U, {}.Events as E1, {}.Locations as L\r\n WHERE E.tag = '{}' AND\r\n E1.eid = E.eid AND\r\n E1.lid = L.lid AND\r\n E1.start_date > {}\r\n ORDER by E1.start_date\r\n \"\"\".format(\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n tag,\r\n str(datetime.date.today())\r\n )\r\n\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n result.extend([i for i in data])\r\n\r\n database.close()\r\n\r\n return result", "def get_events(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n\r\n query = \"\"\"\r\n SELECT DISTINCT E.eid, E1.ename, E1.description,\r\n E.category, E1.start_date, E1.end_date, E1.num_cap,\r\n E1.num_attending, L.lname, L.address_1, E.tag, L.lat, L.lon\r\n FROM {}.EventTags AS E, {}.UserTags AS U, {}.Events as E1, {}.Locations as L\r\n WHERE U.username='{}' AND\r\n E.tag = U.tag AND\r\n E1.eid = E.eid 
AND\r\n E1.lid = L.lid AND\r\n E1.start_date >= {}\r\n ORDER by E1.start_date\r\n \"\"\".format(\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n self.user.username,\r\n str(datetime.date.today())\r\n )\r\n\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n\r\n return [i for i in data]", "def events_info(request):\n \n global input\n \n if request == 'event-based':\n client_neries = Client_neries()\n \n events = client_neries.getEvents(min_datetime=input['min_date'], \\\n max_datetime=input['max_date'], min_magnitude=input['min_mag'], \\\n max_magnitude=input['max_mag'], min_latitude=input['evlatmin'], \\\n max_latitude=input['evlatmax'], min_longitude=input['evlonmin'], \\\n max_longitude=input['evlonmax'], min_depth = input['min_depth'], \\\n max_depth=input['max_depth'], max_results=input['max_result'])\n \n for i in range(0, len(events)):\n events[i]['t1'] = events[i]['datetime'] - input['preset']\n events[i]['t2'] = events[i]['datetime'] + input['offset']\n \n elif request == 'continuous':\n m_date = UTCDateTime(input['min_date'])\n M_date = UTCDateTime(input['max_date'])\n \n t_cont = M_date - m_date\n \n events = []\n \n if t_cont > input['interval']:\n num_div = int(t_cont/input['interval'])\n t_res = t_cont - num_div*input['interval']\n \n for i in range(0, num_div):\n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + i*input['interval'], \\\n 't1': m_date + i*input['interval'],\\\n 't2': m_date + (i+1)*input['interval'] + 60.0,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n \n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i+1), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + (i+1)*input['interval'], \\\n 't1': m_date + (i+1)*input['interval'],\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n else:\n events.append({'author': 'NAN', 'event_id': 'continuous0', \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date, \\\n 't1': m_date,\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n\n return events", "def findAvailableTimes(self, nowDay, nowHour, nowMinute, workStart, workEnd, events, timeEst):\n global format\n format = Format()\n global timeSlot\n timeSlot = TimeSlot(timeEst)\n global availableTimes\n availableTimes = []\n print(self.current)\n try:\n if len(events) > 1:\n for i in range(len(events) - 1):\n\n event1 = events[i]\n event2 = events[i + 1]\n e1, e2 = format.formatEvent(event1, event2)\n self.compareEvents(e1, e2, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n lastEvent = events[len(events) - 1]\n secondToLast = events[len(events) - 2]\n self.compareLastEvent(lastEvent, secondToLast, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n elif len(events) == 1:\n lastEvent = events[0]\n nowTime = [self.current[:11] + str(int(self.current[11:13]) - 1) + self.current[13:], self.current]\n nowTime = format.eventFormatDictionary(nowTime, 'now')\n\n self.compareLastEvent(lastEvent, nowTime, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n self.addEmptyDays(events, workStart, workEnd, timeEst)\n availableTimes.sort()\n return availableTimes\n except:\n global msg\n msg = \"There isn't 
enough time. Try again\"\n return redirect('/error')", "def get_sleep_data(start, end):\n cnx, cursor = connect_db()\n query = \"\"\"select \"from\" as _if, \"to\" as _it, delta\n from types, intervals\n where intervals.type = types.guid and\n types.name = 'S1:Sleep' and\n _it > {0} and\n _it < {1}\n order by _it\"\"\".format(start, end)\n cursor.execute(query)\n sleep_entries = cursor.fetchall()\n cnx.close()\n return sleep_entries", "def __get_interval (self, event_df, mode = 's-s',**target):\r\n if len(event_df) == 0:\r\n pass\r\n\r\n else:\r\n s_event = target.get('start')\r\n e_event = target.get('end')\r\n \r\n timeMakeup = lambda s, e, event_df: (s, e) if len(s) == len(e) else (s, np.append(e, event_df['Time'].values[-1])) if len(s) > len(e) else (np.append(event_df['Time'].values[0], s),e)\r\n \r\n start = event_df[(event_df['Events']== s_event) ]['Time']\r\n end = event_df[(event_df['Events']== e_event) ]['Time']\r\n if mode == 's-s': #interval start to start\r\n interval_list = start[1:].values - start[:-1].values\r\n if mode == 'e-e': #interval end to end\r\n interval_list = end[1:].values - end[:-1].values\r\n if mode == 'e-s': #duration\r\n start_array, end_array = timeMakeup (start.values, end.values, event_df)\r\n interval_list = end_array - start_array\r\n if mode == 's-e': #Interal event interal (IEI)\r\n start_array, end_array = timeMakeup (start.values, end.values, event_df)\r\n interval_list = start_array[1:] - end_array[:-1]\r\n \r\n return interval_list", "def collect_events(helper, ew):\n\n opt_start_time_start = helper.get_arg('start_time_start')\n opt_endpoints = helper.get_arg('endpoints')\n opt_interval = int(helper.get_arg('interval'))\n opt_live = False\n\n proxy = helper.get_proxy()\n if proxy:\n proxy_auth = \"{}:{}\".format(\n proxy['proxy_username'], proxy['proxy_password'])\n proxies = {\n \"https\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port']),\n \"http\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port'])\n }\n else:\n proxies = None\n\n helper.log_debug(\n \"[-] webex password_type: {}\".format(helper.get_global_setting(\"password_type\")))\n\n params = {\"opt_username\": helper.get_global_setting(\"username\"),\n \"opt_password\": helper.get_global_setting(\"password\"),\n \"opt_site_name\": helper.get_global_setting(\"site_name\"),\n \"limit\": 500,\n \"timezone\": \"20\",\n # \"password_type\": authentication_type[\"Password Authentication\"],\n # \"password_type\": authentication_type[\"OAuth\"],\n \"password_type\": authentication_type[helper.get_global_setting(\"password_type\")],\n \"client_id\": helper.get_global_setting(\"client_id\"),\n \"client_secret\": helper.get_global_setting(\"client_secret\"),\n \"refresh_token\": helper.get_global_setting(\"refresh_token\"),\n \"proxies\": proxies}\n\n # Historical Data\n helper.log_debug(\"Historical Data\")\n for opt_endpoint in opt_endpoints:\n helper.log_debug(\"[-] \\t At {}\".format(opt_endpoint))\n\n # endtime is midnight of GMT - 3days\n enddt = datetime.utcnow().date() - timedelta(3)\n end_time = datetime.combine(\n enddt, datetime.max.time()).strftime('%m/%d/%Y %H:%M:%S')\n\n # create checkpoint key for offest and timestamp\n timestamp_key = \"timestamp_{}_{}_processing\".format(\n helper.get_input_stanza_names(), opt_endpoint)\n\n start_time = helper.get_check_point(timestamp_key)\n if start_time is None:\n # if 
it's the 1st time, get the start_time from UI, and then save it in checkpoint\n start_time = opt_start_time_start\n helper.save_check_point(timestamp_key, start_time)\n else:\n # shift the start_time by 1 second\n start_time = (datetime.strptime(start_time, '%m/%d/%Y %H:%M:%S') +\n timedelta(seconds=1)).strftime('%m/%d/%Y %H:%M:%S')\n\n helper.log_debug(\"Start time: {}\".format(start_time))\n helper.log_debug(\"End time: {}\".format(end_time))\n\n # Update Parameters\n params.update({\"mode\": \"historical\"})\n params.update({\"opt_endpoint\": opt_endpoint})\n params.update({\"start_time\": start_time})\n params.update({\"end_time\": end_time})\n params.update({\"timestamp_key\": timestamp_key})\n\n records = params['limit']\n offset = 1\n while (records == params['limit']):\n helper.log_debug(\"current_offset: {}\".format(offset))\n params['offset'] = offset\n records = fetch_webex_logs(ew, helper, params)\n helper.log_debug(\"\\t Offet:{}\\tLimit: {}\\tRecords Returned: {}\".format(\n offset, params['limit'], records))\n if records:\n offset += records", "def SearchMaxElongation(body, startTime):\n if body == Body.Mercury:\n s1 = 50.0\n s2 = 85.0\n elif body == Body.Venus:\n s1 = 40.0\n s2 = 50.0\n else:\n raise InvalidBodyError()\n syn = _SynodicPeriod(body)\n iter = 1\n while iter <= 2:\n plon = EclipticLongitude(body, startTime)\n elon = EclipticLongitude(Body.Earth, startTime)\n rlon = _LongitudeOffset(plon - elon) # clamp to (-180, +180]\n\n # The slope function is not well-behaved when rlon is near 0 degrees or 180 degrees\n # because there is a cusp there that causes a discontinuity in the derivative.\n # So we need to guard against searching near such times.\n if rlon >= -s1 and rlon < +s1:\n # Seek to the window [+s1, +s2].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = +s1.\n rlon_lo = +s1\n # Search forward for the time t2 when rel lon = +s2.\n rlon_hi = +s2\n elif rlon > +s2 or rlon < -s2:\n # Seek to the next search window at [-s2, -s1].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = -s2.\n rlon_lo = -s2\n # Search forward for the time t2 when rel lon = -s1.\n rlon_hi = -s1\n elif rlon >= 0.0:\n # rlon must be in the middle of the window [+s1, +s2].\n # Search BACKWARD for the time t1 when rel lon = +s1.\n adjust_days = -syn / 4.0\n rlon_lo = +s1\n rlon_hi = +s2\n # Search forward from t1 to find t2 such that rel lon = +s2.\n else:\n # rlon must be in the middle of the window [-s2, -s1].\n # Search BACKWARD for the time t1 when rel lon = -s2.\n adjust_days = -syn / 4.0\n rlon_lo = -s2\n # Search forward from t1 to find t2 such that rel lon = -s1.\n rlon_hi = -s1\n\n t_start = startTime.AddDays(adjust_days)\n t1 = SearchRelativeLongitude(body, rlon_lo, t_start)\n if t1 is None:\n return None\n\n t2 = SearchRelativeLongitude(body, rlon_hi, t1)\n if t2 is None:\n return None\n\n # Now we have a time range [t1,t2] that brackets a maximum elongation event.\n # Confirm the bracketing.\n m1 = _neg_elong_slope(body, t1)\n if m1 >= 0.0:\n raise InternalError() # there is a bug in the bracketing algorithm!\n\n m2 = _neg_elong_slope(body, t2)\n if m2 <= 0.0:\n raise InternalError() # there is a bug in the bracketing algorithm!\n\n # Use the generic search algorithm to home in on where the slope crosses from negative to positive.\n tx = Search(_neg_elong_slope, body, t1, t2, 10.0)\n if tx is None:\n return None\n\n if tx.tt >= startTime.tt:\n return Elongation(body, tx)\n\n # This event is in the past (earlier than startTime).\n # We need 
to search forward from t2 to find the next possible window.\n # We never need to search more than twice.\n startTime = t2.AddDays(1.0)\n iter += 1", "def filterEvents(intervals_dates,list_infected,distance):\n d=distance\n list_gpsevents=[]\n for z in range(len(intervals_dates)-1):\n print(\"Interval: \",intervals_dates[z], \"y\", intervals_dates[z+1])\n infected,uninfected=getTrazaTimestamp(intervals_dates[z],intervals_dates[z+1],GPSrecords,list_infected)\n events_gps = nearest_neighbor(infected, uninfected, d)\n events_gps = events_gps.drop(['geometry','closest_stop_geom'], axis=1)\n print(len(events_gps))\n if(len(events_gps)!=0):\n list_gpsevents.append(events_gps.reset_index(drop=True))\n else:\n events_gps=pd.DataFrame()\n list_gpsevents.append(events_gps)\n #GPSevents=pd.concat(list_gpsevents).reset_index(drop=True)\n #return GPSevents\n return list_gpsevents", "def getInterpretedSpectraForAllEvents(self, particleName=\"pion\", pTs=np.linspace(0,2.5,10), where=\"\", orderBy=\"event_id\", verbose=False):\n # processing\n dNdyData = self.getSpectraDataForAllEvents(particleName=particleName, where=where, orderBy=orderBy)\n dNdyintepBlock = []\n if verbose: print(\"Looping over {} events... (please be patient)\".format(dNdyData.shape[0]))\n for iev in range(dNdyData.shape[0]):\n dNdyintep = exp(np.interp(pTs, dNdyData[iev,:,0], log(dNdyData[iev,:,1])))\n dNdyintepBlock.append(dNdyintep)\n if verbose: print(\"Done. Thanks for waiting.\")\n return np.asarray(dNdyintepBlock)", "def many_events(start_time,end_time,subevent_bools):\r\n \r\n #running through for each event\r\n for j in range(len(start_time)):\r\n \r\n #start, end, and subevent bool for this event\r\n st = start_time[j]\r\n et = end_time[j]\r\n subevent = bool(subevent_bools[j])\r\n \r\n #checking if start time is actually available\r\n if str(st) != 'nan':\r\n try:\r\n st = parse(st)\r\n yes_st = True\r\n except ValueError:\r\n yes_st = False\r\n else:\r\n yes_st = False\r\n \r\n #checking if end time is actually available\r\n if str(et) != 'nan':\r\n try:\r\n et = parse(et)\r\n yes_et = True\r\n except ValueError:\r\n yes_et = False\r\n else:\r\n yes_et = False\r\n \r\n #if both start and end times are available, running the code\r\n if yes_st and yes_et:\r\n #event must be after Nov. 2010 because currently no capability for\r\n #instruments in use before then - change this if you have that\r\n #capability\r\n if st > datetime(2010,9,1):\r\n try:\r\n print('got start and end times! 
running database extraction') \r\n database_extraction(st,et,instrument_chosen,subevent)\r\n except:\r\n continue\r\n else:\r\n print('cannot run for events before November 2010 because do not have '\r\n 'access to instruments before then')", "def events(self, start=0, limit=15, etype=None):\r\n params = base.get_params(('start', 'limit', 'etype'), locals())\r\n url = '{0}/events/'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def events(self, start=0, limit=15, etype=None):\r\n params = base.get_params(('start', 'limit', 'etype'), locals())\r\n url = '{0}/events/'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def episode_triggers(rate, volume, uptime, start_mjd, end_mjd):\n nyears = (end_mjd - start_mjd)/365.2422\n n = num_events(rate, volume, uptime, nyears)\n events = sample_sphere(n, truncate=True)\n events['mjd'] = np.random.uniform(start_mjd, end_mjd, n)\n events['event_id'] = np.arange(n)\n events = events[['mjd', 'ra', 'decl']]\n events.sort_values('mjd', inplace=True)\n events['event_id'] = np.arange(n)\n return events", "def set_penalty_start_eventnums(self):\n for period in self.Periods:\n period.PenaltyStartEventNums = {self.HomeTeamId: None, self.VisitorTeamId: None}\n if period.Number <= 4:\n fouls_to_give = {self.HomeTeamId: 4, self.VisitorTeamId: 4}\n else:\n # in overtime periods teams start with 3 fouls to give\n fouls_to_give = {self.HomeTeamId: 3, self.VisitorTeamId: 3}\n\n for event in period.Events:\n if event.is_foul_that_counts_toward_penalty():\n foul_team = event.team_id\n event_time = event.seconds_remaining\n if event_time <= 120 and fouls_to_give[foul_team] > 1:\n # only 1 foul to give in final 2 minutes regardless of how many fouls committed up until then\n fouls_to_give[foul_team] = 1\n if fouls_to_give[foul_team] > 0:\n fouls_to_give[foul_team] -= 1\n if fouls_to_give[foul_team] == 0:\n # team entered penalty on this foul\n if 'Shooting' in event.get_foul_type():\n # shooting foul - start tracking at final ft so we don't count FTs as penalty\n final_fts_at_time_of_foul = [\n pbp_event for pbp_event in period.Events\n if pbp_event.seconds_remaining == event_time and\n pbp_event.team_id != foul_team and\n (pbp_event.is_ft_1_of_1() or pbp_event.is_ft_2_of_2() or pbp_event.is_ft_3_of_3())\n ]\n if len(final_fts_at_time_of_foul) == 0:\n # Example of when this happens: lane violation\n # just use last event that occured at time of foul\n events_at_time_of_foul = [\n pbp_event for pbp_event in period.Events\n if pbp_event.seconds_remaining == event_time\n ]\n start_event = events_at_time_of_foul[-1].order\n elif final_fts_at_time_of_foul[-1].is_missed_ft():\n # if FT is missed need to see if it was oreb or dreb\n rebounds_after_ft = [\n pbp_event for pbp_event in period.Events\n if pbp_event.order > event.order and\n pbp_event.is_rebound()\n ]\n # use first rebound after missed FT as bonus start event\n start_event = rebounds_after_ft[0].order\n else:\n # use last FT as bonus start event\n start_event = final_fts_at_time_of_foul[-1].order\n else:\n # non shooting foul - start tracking bonus at this event\n start_event = event.order\n offense_team = utils.swap_team_id_for_game(foul_team, [self.HomeTeamId, self.VisitorTeamId])\n period.PenaltyStartEventNums[offense_team] = start_event", "def getAttendance(self):\n probes = []\n allParticles = self.pid_lookup.items()\n allParticles.sort(key=lambda x: abs(x[1]))\n for aParticle, 
pid in allParticles:\n numberOfEvents = self.db.selectFromTable(\"multiplicities\", \"count()\", \"pid = %d\" % pid)[0][0]\n probes.append((aParticle, numberOfEvents))\n return probes" ]
[ "0.6134211", "0.5756863", "0.55772746", "0.55682397", "0.5507305", "0.5474169", "0.5452615", "0.54349256", "0.54202586", "0.54048604", "0.54045105", "0.53980404", "0.5391256", "0.53887755", "0.53694963", "0.5359015", "0.5358162", "0.53519845", "0.5336736", "0.53093964", "0.5300176", "0.5298539", "0.52616334", "0.52497596", "0.5245376", "0.52424294", "0.52424294", "0.52337676", "0.5206745", "0.51971203" ]
0.7126352
0
generates layouts for every turn reachable in tic tac toe and stores unique ones as well as noting the branching paths between them
def generate_boards(): print "Generating data, please hold on..." # a list for turns, each which is a list of boards, which are unique layouts # a completely blank layout is always the start of the game, counting for turn 0 game = [[Board(' ' * 9, 1)]] # there are at most 9 turns in a game of tic tac toe for turnNum in range(1, 10): # list of layouts for the current turn turn = [] upperLayouts = game[-1] if turnNum % 2 == 1: player = 'X' else: player = 'O' # every turns' unique layouts are numbered to seperate them more easily pattern = 1 # goes through every layout from the previous turn for ul in upperLayouts: # game does not continue after a winning move, and using a won board is only possible after turn 5 if turnNum <= 5 or not ul.check_win()[0]: # 9 positions on every board for pos in range(9): if ul[pos] == ' ': newLayout = Board(ul[0:pos] + player + ul[pos+1:]) # if it is a unique layout unique = True # goes through every existing layout for this turn for item in turn: if newLayout.matches(item): unique = False # the upper layout leads to an existing layout ul.paths.append(item.pattern) break if unique: turn.append(Board(newLayout, pattern)) # the current upper layout leads to the new layout ul.paths.append(pattern) pattern += 1 else: # adds a zero for paths because a played character is taking up that space ul.paths.append(0) game.append(turn) return game
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_double_layouts(self):\n\n # create a new game, dict for layouts and key pairs list\n game = Game(self.sourcefile)\n layout_dict = {}\n key_pairs = []\n \n # save layout per move\n for i in range(len(self.moves_set)):\n game.move(self.moves_set[i])\n game.board.create_layout()\n layout = game.board.layout\n\n # if layout is already in dict, save keys as key pair\n for key in layout_dict:\n if layout.tobytes() == layout_dict[key].tobytes():\n key_pairs.append([key, i])\n\n layout_dict[i] = layout\n \n return key_pairs", "def get_static_board_layout(things, width, height):\n obj_map = convert_to_dict(things)\n matrix = []\n for yloc in xrange(1, height-1):\n row = []\n for xloc in xrange(1, width-1):\n if obj_map.has_key((xloc, yloc)):\n row.append(obj_map[(xloc, yloc)])\n else:\n row.append('.')\n matrix.insert(0, row)\n return matrix", "def generate_board():\n b = open(_BOARD_FILE, \"r\").readlines()\n for line in b:\n raw = line.strip().split(\" \")\n _board_graph[str_to_pos(raw[0])] = Space(\n (raw[1] == \"R\"),\n TYPE_MAP[raw[1]],\n {str_to_pos(str_pos) for str_pos in raw[2:]})", "def loads_pathways(self, turn):\n black_coords, white_coords = self.parser()\n counter = 0\n path_dict, poss_dict, check_dict, long_dict = {BLACK : [], WHITE : []}, {BLACK : [], WHITE : []}, {BLACK : [], WHITE : []}, {BLACK : [], WHITE : []}\n \n for i in self.board:\n if i != self.empty:\n if i.colour == WHITE:\n path, poss, checked_path, long_path = i.available_moves(self.board, white_coords[counter], WHITE, self.coords[self.board.index(i)])\n counter += 1\n if path != None and path != []:\n path_dict[WHITE] += path\n if poss != None and poss != []:\n poss_dict[WHITE] += poss\n if checked_path != []:\n check_dict[WHITE] += (checked_path)\n if long_path != []:\n long_dict[WHITE] += long_path\n\n counter = 0\n\n for i in self.board:\n if i != self.empty:\n if i.colour == BLACK:\n path, poss, checked_path, long_path = i.available_moves(self.board, black_coords[counter], BLACK, self.coords[self.board.index(i)])\n counter += 1\n if path != None and path != []:\n path_dict[BLACK] += path\n if poss != None and poss != []:\n poss_dict[BLACK] += poss\n if checked_path != []:\n check_dict[BLACK] += (checked_path)\n if long_path != []:\n long_dict[BLACK] += long_path \n \n self.path_dict = path_dict\n self.poss_dict = poss_dict\n self.check_dict = check_dict\n self.long_dict = long_dict", "def get_all_possible_moves():\r\n \"\"\"\r\n Creates the labels for the universal chess interface into an array and returns them\r\n \"\"\"\r\n labels_array = []\r\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\r\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\r\n promoted_to = ['q', 'r', 'b', 'n']\r\n\r\n for l1 in range(8):\r\n for n1 in range(8):\r\n destinations = [(t, n1) for t in range(8)] + \\\r\n [(l1, t) for t in range(8)] + \\\r\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\r\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\r\n [(l1 + a, n1 + b) for (a, b) in\r\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]\r\n for (l2, n2) in destinations:\r\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):\r\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\r\n labels_array.append(move)\r\n for l1 in range(8):\r\n l = letters[l1]\r\n for p in promoted_to:\r\n labels_array.append(l + '2' + l + '1' + p)\r\n labels_array.append(l + '7' + l + '8' + p)\r\n if l1 > 0:\r\n l_l = letters[l1 - 1]\r\n labels_array.append(l + '2' + l_l + '1' + p)\r\n 
labels_array.append(l + '7' + l_l + '8' + p)\r\n if l1 < 7:\r\n l_r = letters[l1 + 1]\r\n labels_array.append(l + '2' + l_r + '1' + p)\r\n labels_array.append(l + '7' + l_r + '8' + p)\r\n return labels_array", "def _generate_layout(self):\n\n pass", "def get_candidate_layouts(neighbors_by_tile, shape):\n candidate_layouts = []\n for tile_id, tile in neighbors_by_tile.items():\n candidates_for_root = generate_candidates_for_root(shape, neighbors_by_tile)\n candidate_layouts.extend(candidates_for_root)\n return candidate_layouts", "def _check_inner_dirs(self, i_row, i_col, adj_opp_cells):\n opp_player = \"B\" if self._turn == \"W\" else \"W\"\n \n if self._board[i_row-1][i_col] == opp_player: #north, tile to be placed will enter from the south\n adj_opp_cells.append((i_row-1, i_col, \"s\")) \n if self._board[i_row-1][i_col+1] == opp_player: #northeast, tile to be placed will enter from the sw\n adj_opp_cells.append((i_row-1, i_col+1, \"sw\"))\n if self._board[i_row][i_col+1] == opp_player: #east, tile to be placed will enter from the west\n adj_opp_cells.append((i_row, i_col+1, \"w\"))\n if self._board[i_row+1][i_col+1] == opp_player: #southeast, tile to be placed will enter from the nw\n adj_opp_cells.append((i_row+1, i_col+1, \"nw\"))\n if self._board[i_row+1][i_col] == opp_player: #south, tile to be placed will enter from the north\n adj_opp_cells.append((i_row+1, i_col, \"n\"))\n if self._board[i_row+1][i_col-1] == opp_player: #southwest, tile to be placed will enter from the ne\n adj_opp_cells.append((i_row+1, i_col-1, \"ne\"))\n if self._board[i_row][i_col-1] == opp_player: #west, tile to be placed will enter from the east.\n adj_opp_cells.append((i_row, i_col-1, \"e\"))\n if self._board[i_row-1][i_col-1] == opp_player: #northwest, tile to be placed will enter from the se.\n adj_opp_cells.append((i_row-1, i_col-1, \"se\"))", "def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)", "def __init__(self, initial_board, team_size, company_info):\n self.graph = defaultdict(list)\n self.num_rows = len(initial_board)\n self.num_cols = len(initial_board[0])\n\n\n for i in range(self.num_rows):\n for j in range(self.num_cols):\n tile = initial_board[i][j]\n if tile.get_booth() != None:\n self.graph[(i,j)] = [2**31]*4\n else:\n tileup = self.checktile(i-1,j,self.num_rows,self.num_cols,initial_board)\n tiledown = self.checktile(i+1,j,self.num_rows,self.num_cols,initial_board)\n tileright = self.checktile(i,j+1,self.num_rows,self.num_cols,initial_board)\n tileleft = self.checktile(i,j-1,self.num_rows,self.num_cols,initial_board)\n self.graph[(i,j)] = [tileup,tiledown,\n tileleft,tileright]\n\n\n self.lines = defaultdict(list)\n for i in range(self.num_rows):\n for j in range(self.num_cols):\n tile = initial_board[i][j]\n if tile.is_end_of_line():\n self.lines[tile.get_line()] = [(i, j)]\n\n for company in self.lines:\n (x, y) = self.lines[company][0]\n end = (x, y)\n while self.in_bounds((x-1, y)) and initial_board[x-1][y].get_line() == company:\n end = (x-1, y)\n x -= 1\n while self.in_bounds((x+1, y)) and initial_board[x+1][y].get_line() == company:\n end = (x+1, y)\n x += 1\n while self.in_bounds((x, y-1)) and initial_board[x][y-1].get_line() == company:\n end = (x, y-1)\n y -= 1\n while self.in_bounds((x, y+1)) and initial_board[x][y+1].get_line() == company:\n end = (x, y+1)\n y += 1\n self.lines[company].append(end)\n\n\n\n self.board = initial_board\n self.team_size = team_size\n self.team_name = \"Player 2\"# Add your team name here!\n\n\n 
compAsList = [ [k,v] for k, v in company_info.items() ]\n self.companyList = compAsList\n self.num_companies = len(compAsList)\n\n self.company_info = company_info\n self.booths = dict()\n for i in range(self.num_rows):\n for j in range(self.num_cols):\n tile = initial_board[i][j]\n if tile.get_booth() != None:\n self.booths[tile.get_booth()] = (i,j)\n\n self.targets = [None,None,None,None]", "def import_track_layout(self):\n self.blocks_green_arr = []\n self.blocks_red_arr = []\n self.switches_green_arr = []\n self.switches_red_arr = []\n\n # Initialize red blocks\n for i in range(76):\n self.blocks_red_arr.append(Track())\n # Initialize green blocks\n for i in range(150):\n self.blocks_green_arr.append(Track())\n\n # Create Green Switches\n sw1 = Switch(1, 12)\n sw2 = Switch(30, 150)\n sw3 = Switch(-1, 59)\n sw4 = Switch(-1, 61)\n sw5 = Switch(76, 101)\n sw6 = Switch(86, 100)\n self.switches_green_arr = [sw1, sw2, sw3, sw4, sw5, sw6]\n\n # Create Red Switches\n rsw1 = Switch(-1, 8)\n rsw2 = Switch(1, 15)\n rsw3 = Switch(28, 76)\n rsw4 = Switch(32, 72)\n rsw5 = Switch(39, 71)\n rsw6 = Switch(43, 67)\n rsw7 = Switch(53, 66)\n self.switches_red_arr = [rsw1, rsw2, rsw3, rsw4, rsw5, rsw6, rsw7]", "def init_board(self, size):\n # One entry for every node, if diamond all will be filled with pieces, if triange half of matrix including \n # diagonal from top left to bottom right will be filled\n self.board = [[False for i in range(size)] for j in range(size)] \n\n # One entry for every node pair (i, j), where cM(i, j) = direction enum if there is a connection from i to j. \n # (i, i) does not have a connection\n self.connection_matrix = [[False for i in range(size*size)] for j in range(size*size)]\n if self.shape == ShapeType.DIAMOND:\n for node_i in range(size*size):\n top_boundry = node_i < size # Check if node is on top of board\n left_boundry = node_i % size == 0 # Check if node is in leftmost column in board\n right_boundry = (node_i + 1) % size == 0 # Check if node is in rightmost column in board\n bottom_boundry = node_i > size*size-1-size # Check if node is in bottommost coulmn in board\n \n # See docs/Diamond_Connection_Matrix.png for visualization\n if not top_boundry:\n self.connection_matrix[node_i][node_i-size] = DirectionType.UP_RIGHT\n if not top_boundry and not right_boundry:\n self.connection_matrix[node_i][node_i-size+1] = DirectionType.RIGHT\n if not right_boundry:\n self.connection_matrix[node_i][node_i+1] = DirectionType.DOWN_RIGHT\n if not bottom_boundry:\n self.connection_matrix[node_i][node_i+size] = DirectionType.DOWN_LEFT\n if not bottom_boundry and not left_boundry:\n self.connection_matrix[node_i][node_i+size-1] = DirectionType.LEFT\n if not left_boundry:\n self.connection_matrix[node_i][node_i-1] = DirectionType.UP_LEFT\n \n elif self.shape == ShapeType.TRIANGLE:\n for node_i in range(size*size):\n # check if node_i is in the empty triangle. 
\n # No proof for this but some sketching suggested the formula, and the formula worked with empirical testing\n # for many different sizes\n # == gives on diagonal to the right of main diagonal through matrix, greater gives the numbers on the rest of the row\n # basic intuition: size-node_i//size-1 gives how many of the nodes on a row in the board matrix are empty, \n # and the rest checks if the node_i is in such an area\n triangle_check = node_i%size >= size - (size - node_i//size - 1) \n if triangle_check: # If it is in the empty side there should be no connections so skip ahead\n continue\n\n top_boundry = node_i < size # Checks if node is on top of board\n left_boundry = node_i % size == 0 # Check if node is in leftmost column in board\n right_boundry = (node_i + 1) % size == 0 # Check if node is in rightmost column in board\n bottom_boundry = node_i > size*size-1-size # Check if node is in bottommost coulmn in board\n diagonal_boundry = node_i%(size+1) == 0 # Check if node is on diagonal in board\n\n # See docs/Triangle_Connection_Matrix.png for visualization\n if not top_boundry and not diagonal_boundry:\n self.connection_matrix[node_i][node_i-size] = DirectionType.UP_RIGHT\n if not right_boundry and not diagonal_boundry:\n self.connection_matrix[node_i][node_i+1] = DirectionType.RIGHT\n if not right_boundry and not bottom_boundry:\n self.connection_matrix[node_i][node_i+size+1] = DirectionType.DOWN_RIGHT\n if not bottom_boundry:\n self.connection_matrix[node_i][node_i+size] = DirectionType.DOWN_LEFT\n if not left_boundry:\n self.connection_matrix[node_i][node_i-1] = DirectionType.LEFT\n if not left_boundry and not top_boundry:\n self.connection_matrix[node_i][node_i-size-1] = DirectionType.UP_LEFT", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 
1\n return new_boards", "def playout(self, state):\n node = self.root\n isTerminal = False\n depth = 0\n\n while not isTerminal and depth < self.playout_depth:\n #A = len(node.children) # num_children\n A = len(node.children[0]) + len(node.children[1]) \n if A < self.num_initActions:\n #if len(node.children[0]) < self.num_initActions:\n node, init_action_xy, init_spin = self.initChildren(node, state, depth)\n _, ShotVec = CreateShot(_ShotPos(init_action_xy[0], init_action_xy[1], init_spin))\n success, ResShot = Simulation(state, ShotVec, Config.RAND, -1)\n isTerminal = (state.ShotNum==0)\n \n depth += 1\n break\n \n n_a = [c.n_visits for c in node.children[0].values()] + [c.n_visits for c in node.children[1].values()]\n # progressive widening\n # if chilren node has been visited much times then expand\n #if np.sqrt(sum(n_a)) >= A:\n if sum(n_a) >= 10 * A: \n # expand\n node, expanded_action_xy, expanded_spin = self.expand(node)\n _, ShotVec = CreateShot(_ShotPos(expanded_action_xy[0],expanded_action_xy[1], expanded_spin))\n success, ResShot = Simulation(state, ShotVec, Config.RAND, -1)\n isTerminal = (state.ShotNum==0) # one end game\n \n depth += 1\n break\n\n # select\n node, selected_action_xy, selected_spin = self.ucb_select(node)\n _, ShotVec = CreateShot(_ShotPos(selected_action_xy[0], selected_action_xy[1], selected_spin))\n success, ResShot = Simulation(state, ShotVec, Config.RAND, -1)\n isTerminal = (state.ShotNum==0) # one end game\n \n depth += 1\n\n if isTerminal:\n break\n\n if not isTerminal and depth < self.playout_depth:\n # save the rollout_state for speed.\n #if node.rollout_state is None:\n state = self.rollOut(node, state, depth)\n #node.rollout_state = state\n #else:\n # state = node.rollout_state\n \n self.update(node, state)", "def _possible_grids(self, num_windows):\n if num_windows < 2:\n end = 2\n else:\n end = num_windows // 2 + 1\n for rows in range(1, end):\n cols = int(math.ceil(num_windows / rows))\n yield (rows, cols, ROWCOL)\n if rows != cols:\n # also want the reverse test\n yield (cols, rows, COLROW)", "def switch(n, m):\r\n out = []\r\n\r\n # Convert to stupid letters for 1 and 2\r\n name_letters = {1: \"S\", 2: \"D\"}\r\n name_n = name_letters[n] if n in name_letters else str(n)\r\n name_m = name_letters[m] if m in name_letters else str(m)\r\n\r\n # Number of pins on the right is n*m, plus one per pole for spacing,\r\n # minus the final spacing (n starts at 1), rounded up to nearest odd\r\n # number so that half the height is on the 100mil grid.\r\n n_pins_right = n * m + n - 1\r\n if n_pins_right % 2 == 0:\r\n n_pins_right += 1\r\n height = 100 * (n_pins_right - 1)\r\n hheight = height // 2\r\n\r\n # Ref goes at the top, 100 above the top pin, unless only one throw\r\n # in which case we also need to clear the switch graphic\r\n refheight = hheight + 100\r\n if m == 1:\r\n refheight += 50\r\n\r\n # Value/name goes below, unless m is even, in which case the bottom spacer\r\n # isn't there so needs to be ignored\r\n valheight = -(hheight + 100)\r\n if n % 2 == 1 and m % 2 == 0:\r\n valheight += 100\r\n\r\n # Output component header\r\n name = \"SWITCH_{}P{}T\".format(name_n, name_m)\r\n out.append(\"#\\n# {}\\n#\".format(name))\r\n out.append('DEF {} SW 0 1 Y N 1 F N'.format(name))\r\n out.append('F0 \"SW\" 0 {} 50 H V C CNN'.format(refheight))\r\n out.append('F1 \"{}\" 0 {} 50 H V C CNN'.format(name, valheight))\r\n out.append('F2 \"\" 0 0 50 H I C CNN')\r\n out.append('F3 \"\" 0 0 50 H I C CNN')\r\n out.append('DRAW')\r\n\r\n # Output drawing\r\n 
pole_top = hheight\r\n for pole in range(n):\r\n # Draw pole\r\n pole_num = pole*(m+1) + 2\r\n pole_y = pole_top - (100 * (m - 1))//2\r\n if m % 2 == 0:\r\n pole_y -= 50\r\n out.append('X \"~\" {} -100 {} 40 R 50 50 1 1 P'\r\n .format(pole_num, pole_y))\r\n out.append('C -50 {} 10 1 1 0 N'.format(pole_y))\r\n out.append('P 2 1 1 0 -50 {} 50 {} N'\r\n .format(pole_y + 10, pole_y + 90))\r\n\r\n for throw in range(m):\r\n # Draw throws\r\n throw_num = pole_num + throw - 1\r\n throw_y = pole_top - 100 * throw\r\n if throw > 0:\r\n throw_num += 1\r\n out.append('X \"~\" {} 100 {} 40 L 50 50 1 1 P'\r\n .format(throw_num, throw_y))\r\n out.append('C 50 {} 10 1 1 0 N'.format(throw_y))\r\n\r\n # Move down for next pole\r\n pole_top -= 100 * (m + 1)\r\n\r\n # Draw connecting dashed line\r\n if n > 1:\r\n pole_y = hheight - (100 * (m - 1))//2 + 50\r\n if m % 2 == 0:\r\n pole_y -= 50\r\n for _ in range(5*(m+1)*(n-1)):\r\n out.append('P 2 1 1 0 0 {} 0 {} N'\r\n .format(pole_y, pole_y - 5))\r\n pole_y -= 20\r\n\r\n # Done\r\n out.append('ENDDRAW\\nENDDEF\\n')\r\n\r\n return out", "def _build_board(y_size, x_size, game_board):\n\n for y_coordinate in range(1, y_size + 1):\n for x_coordinate in range(1, x_size + 1):\n game_board[(x_coordinate, y_coordinate)] = {0: {}, 1: {}, 2: {}}", "def dfs(i):\n if i == n:\n # just gone through a valid scenario\n # add current board to the result set\n output.append(list(board))\n\n row_idx = i\n for col_idx in range(n):\n if (col_idx not in cols and \n row_idx + col_idx not in sums and \n row_idx - col_idx not in diffs):\n print('tender %d' %(row_idx - col_idx))\n cols.add(col_idx)\n sums.add(row_idx + col_idx)\n diffs.add(row_idx - col_idx)\n\n # this might be the next valid board. Not confirmed yet.\n board.append(convert(col_idx, n))\n\n # recurse to next row\n dfs(i + 1)\n\n # if current scenario with col_idx is not valid,\n # then backtrack\n board.pop()\n cols.remove(col_idx)\n sums.remove(row_idx + col_idx)\n diffs.remove(row_idx - col_idx)", "def layout(self):\n layout = self.venue.layout\n for blocked in self.blocked_seats.all():\n s = blocked.seat\n layout[s.section.pk][s.row][s.column] = \"B\"\n\n for guest in self.guests.all():\n s = guest.seat\n layout[s.section.pk][s.row][s.column] = \"G\"\n\n return layout", "def __generate_goal_board(self):\n element = 1\n array = []\n\n for row in range(self._n):\n row_to_append = []\n for col in range(self._n):\n row_to_append.append(element)\n element += 1\n array.append(row_to_append)\n\n array[self._n - 1][self._n - 1] = 0\n self._solved_board = Board(array=array, space=[self._n - 1, self._n - 1])", "def knights_bt(dimensions):\n\n height, width = dimensions\n # Exit cases for recurive call\n # Odd boards\n if height == 5 and width == 5:\n return _5x5, (5,5)\n elif height == 7 and width == 7:\n return _7x7, (7,7)\n elif height == 9 and width == 9:\n return _9x9, (9,9)\n # Even boards\n elif height == 6:\n if width == 6:\n return _6x6, (6,6)\n elif width == 8:\n return _6x8, (6,8)\n elif height == 8:\n if width == 6:\n return _8x6, (8,6)\n elif width == 8:\n return _8x8, (8,8)\n elif width == 10:\n return _8x10, (8,10)\n elif height == 10:\n if width == 8:\n return _10x8, (10,8)\n elif width == 10:\n return _10x10, (10,10)\n elif width == 12:\n return _10x12, (10,12)\n elif height == 12:\n if width == 10:\n return _12x10, (12,10)\n\n\n # Determine if the quadrants must be odd\n isOdd = (width >= 10 and width % 2 == 0 and width % 4 != 0)\n if isOdd:\n print(\"odd board\")\n\n # Find the position to cut the board 
into quadrants.\n row_cut, column_cut = find_cuts(width, height, isOdd)\n\n # Divide the board at the cut points and recurse until we have a fixed solution.\n ul, ul_dim = knights_bt((row_cut, column_cut))\n ur, ur_dim = knights_bt((row_cut, width-column_cut))\n bl, bl_dim = knights_bt((height-row_cut, column_cut))\n br, br_dim = knights_bt((height-row_cut, width-column_cut))\n\n # Rotate the quadrants\n if isOdd:\n ul = rotate_flip(route=ul, b_size=ul_dim[0])\n ur = rotate_counter_clockwise(route=ur, b_size=ur_dim[0])\n bl = rotate_clockwise(route=bl, b_size=bl_dim[0])\n # br already has the corner hole in the correct position\n elif width == height and width >= 12 and width % 4 == 0:\n ur = rotate_clockwise(route=ur, b_size=bl_dim[0])\n br = rotate_flip(route=br, b_size=br_dim[0])\n bl = rotate_counter_clockwise(route=bl, b_size=bl_dim[0])\n\n\n # Merge the quadrants together.\n board = merge(route_to_board(ul, ul_dim),\n route_to_board(ur, ur_dim),\n route_to_board(bl, bl_dim),\n route_to_board(br, br_dim),\n isOdd)\n\n return board_to_route(board), (height, width)", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def show_possibles(self):\n for row in range(self.board_size):\n for col in range(self.board_size):\n poss = list(self.possibles[row][col])\n if poss:\n teil = qbwrdd.Tile(poss, self.board.scene)\n teil.cell = \"poss\"\n cell = row * self.board_size + col\n pos_x, pos_y = self.board.cells[cell].x(), self.board.cells[cell].y()\n if col % 3 > 0:\n pos_x += 2\n self.poss_tiles[row][col] = teil\n teil.draw_tile_at(pos_x, pos_y)", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n 
####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def processLayoutText(self, layoutText):## This function is being called from the __init__ function of the Layout class.\n maxY = self.height - 1 # So that maxY was reduced by -1. This is critical as the loop below reduces maxY by y. The reduction is necessary as self.height was determined by len(layoutText) which counts all the elements but the for loop for the 'y' variable' uses range(self.height) which counts from '0'. 
maxY means that we are countin through the 2-dimensional array columns from the back to the front.\n for y in range(self.height): ## Why are we counting through the text elements in reverse? Because the __str__function of the Grid class returned the out variable which contains the GRID in reverse.\n for x in range(self.width): ### PLEASE NOTE! The need for reversing this is that we WANT TO LOOK AT THIS IN A COORDINATE FORMAT WITH (0,0) representing the bottom left corner.\n layoutChar = layoutText[maxY - y][x] #Passes the layout character ('%' '.' or 'o' --> see above or layout file) to the layoutChar variable. This is done in a 'flipped mannor from the input format to the (x,y) convention.\n ## Based on the 2D array the (visualized in file Grid_text) the layoutChar variable assumes the following values:\n self.processLayoutChar(x, y, layoutChar) # layoutChar is based on the variable layout Text: [%.%OG%,.%OG%.%,%%%%%%,%....%] with each position to be submited one-by-one based on the nested for loops. This maps the 2-dimentional Grid created in the __init__function and changes the boolean values to suit the layout of the board. See example in 'processLayoutChar' function below.\n self.agentPositions.sort()\n #print(self.agentPositions)\n self.agentPositions = [ ( i == 0, pos) for i, pos in self.agentPositions] #This basically creates a list of tuples containing the coordinates of the agents.\n #print(self.agentPositions)\n #print(self.capsules)\n #print(self.numGhosts)", "def set_pieces(self):\n\n for i in range(len(self._game_board)):\n\n # Row 1\n if i == 0:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"black\", \"BCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"black\", \" BH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"black\", \" BE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"black\", \" BA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"black\", \" BG \")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 3\n if i == 2:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"black\", \"BCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 4\n if i == 3:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"black\", \"BSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 7\n if i == 6:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"red\", \"RSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 8\n if i == 7:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"red\", \"RCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 10\n if i == 9:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"red\", \"RCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"red\", \" RH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"red\", \" RE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 
or ii == 5:\n self._game_board[i][ii] = Advisor(\"red\", \" RA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"red\", \" RG \")\n self._game_board[i][ii].update_location([i, ii])", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_random_layout(h)\n self.set_local_layout(h)\n self.set_global_layout(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def generate_all_locations(grid, shape):", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_random_layout(h)\n self.set_sliding_window_layout(h)\n self.set_global_layout_itc(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout" ]
[ "0.62333226", "0.575861", "0.57528615", "0.57119054", "0.569873", "0.56291664", "0.5619137", "0.5589775", "0.5571963", "0.55691195", "0.5565758", "0.55309206", "0.5498591", "0.54836804", "0.54823405", "0.54684675", "0.54012", "0.53977394", "0.53662795", "0.5365347", "0.5360632", "0.535341", "0.53495854", "0.5329498", "0.5320641", "0.52955353", "0.5272699", "0.5250554", "0.52492785", "0.5222381" ]
0.74448156
0
Print a comparison of two time series (original and computed)
def print_comparison(name, dates, times, orig_data, comp_data): # Output comparison of data print(' ORIGINAL COMPUTED') print(f' DATE TIME {name.upper():>9} {name.upper():>9} DIFFERENCE') print('------- ------ --------- --------- ----------') zip_data = zip(dates, times, orig_data, comp_data) for date, time, orig, comp in zip_data: diff = orig - comp print(f'{date} {time:>6} {orig:9.6f} {comp:9.6f} {diff:10.6f}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_comparison(name, dates, times, original_data, computed_data):\n \n # Output comparison of data\n print(' ORIGINAL COMPUTED')\n print(f' DATE TIME {name.upper():>9} {name.upper():>9} DIFFERENCE')\n print('------- ------ --------- --------- ----------')\n zip_data = zip(dates, times, original_data, computed_data)\n for date, time, orig, comp in zip_data:\n diff = orig - comp\n print(f'{date} {time:>6} {orig:9.6f} {comp:9.6f} {diff:10.6f}')", "def display_diff(dta1, dta2, all_data=False):\n if not isinstance(dta1, Dta) or not isinstance(dta2, Dta):\n raise TypeError(\"objects to be compared must be Dta\")\n \n typlist_converters = {\n 'Dta115': {\n 'Dta117': lambda i: i if i <= 244 else 65530 + (251 - i)\n }\n }\n \n different = False\n \n # Python class types <-> dta version\n # ----------------------------------\n dta1_type, dta2_type = dta1.__class__.__name__, dta2.__class__.__name__\n if not dta1_type == dta2_type:\n different = True\n print(\" class types differ:\")\n print(\" {} vs {}\".format(dta1_type, dta2_type))\n \n # data set descriptors\n # --------------------\n if not dta1._ds_format == dta2._ds_format:\n different = True\n print(\" formats differ:\")\n print(\" {} vs {}\".format(dta1._ds_format, dta2._ds_format))\n \n if not dta1._data_label == dta2._data_label:\n different = True\n print(\" data labels differ:\")\n print(\" {} vs {}\".format(dta1._data_label, dta2._data_label))\n \n # time stamp\n # ----------\n stamp1 = dta1._time_stamp.split()\n stamp2 = dta2._time_stamp.split()\n stamp1[0] = int(stamp1[0]) #day\n stamp2[0] = int(stamp2[0])\n stamp1[2] = int(stamp1[2]) #year\n stamp2[2] = int(stamp2[2])\n stamp1 = stamp1[:-1] + [int(x) for x in stamp1[-1].split(':')] # hr & min\n stamp2 = stamp2[:-1] + [int(x) for x in stamp2[-1].split(':')]\n if not stamp1 == stamp2:\n different = True\n print(\" time stamps differ:\")\n print(\" {} vs {}\".format(dta1._time_stamp, dta2._time_stamp))\n \n # number of variables and observations\n # ------------------------------------\n if not dta1._nvar == dta2._nvar:\n different = True\n print(\" # of vars differs:\")\n print(\" {} vs {}\".format(dta1._nvar, dta2._nvar))\n print(\" > comparison now limited to vars 0 .. min(nvar1, nvar2)\")\n \n if not dta1._nobs == dta2._nobs:\n different = True\n print(\" # of obs differs:\")\n print(\" {} vs {}\".format(dta1._nobs, dta2._nobs))\n print(\" > comparison now limited to obs 0 .. min(nobs1, nobs2)\")\n \n nvar = min(dta1._nvar, dta2._nvar)\n nobs = min(dta1._nobs, dta2._nobs)\n \n # descriptors\n # -----------\n \n # typlist\n # If dta versions are the same, can make direct comparison. 
If versions\n # are different, a direct comparison doesn't mean much if data types\n # are encoded differently, so convert one before comparing.\n if dta1_type == dta2_type:\n diff = [i for i in range(nvar) if dta1._typlist[i] != dta2._typlist[i]]\n else:\n s = sorted(((dta1_type, dta1), (dta2_type, dta2)))\n (older_type, older_dta), (newer_type, newer_dta) = s\n converter = typlist_converters[older_type][newer_type]\n diff = [i for i in range(nvar) \n if converter(older_dta._typlist[i]) != newer_dta._typlist[i]]\n if diff != []:\n different = True\n print(\" Stata data types differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # varlist\n diff = [i for i in range(nvar) if dta1._varlist[i] != dta2._varlist[i]]\n if diff != []:\n different = True\n print(\" variable names differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # srtlist\n diff = [i for i in range(nvar) if dta1._srtlist[i] != dta2._srtlist[i]]\n if diff != []:\n different = True\n print(\" sort lists differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # fmtlist\n diff = [i for i in range(nvar) if dta1._fmtlist[i] != dta2._fmtlist[i]]\n if diff != []:\n different = True\n print(\" display formats differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # lbllist\n diff = [i for i in range(nvar) if dta1._lbllist[i] != dta2._lbllist[i]]\n if diff != []:\n different = True\n msg = \" attached value labels differ in {} places\".format(len(diff))\n print(msg)\n print(\" first difference in position {}\".format(diff[0]))\n \n # vlblist\n diff = [i for i in range(nvar) if dta1._vlblist[i] != dta2._vlblist[i]]\n if diff != []:\n different = True\n print(\" variable labels differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # characteristics\n # ---------------\n keys1 = set(dta1._chrdict.keys())\n keys2 = set(dta2._chrdict.keys())\n diff = keys1 - keys2\n if diff != set():\n different = True\n print(\" charataristic keys in #1 but not in #2:\")\n print(\" \", str(diff))\n \n diff = keys2 - keys1\n if diff != set():\n different = True\n print(\" charataristic keys in #2 but not in #1:\")\n print(\" \", str(diff))\n \n diff = [k for k in keys1.intersection(keys2) \n if dta1._chrdict[k] != dta2._chrdict[k]]\n if diff != []:\n different = True\n print(\" charataristic keys with different value:\")\n print(\" \", str(diff))\n \n # defined value labels\n # --------------------\n keys1 = set(dta1._vallabs.keys())\n keys2 = set(dta2._vallabs.keys())\n diff = keys1 - keys2\n if diff != set():\n different = True\n print(\" value labels defined in #1 but not in #2:\")\n print(\" \", str(diff))\n \n diff = keys2 - keys1\n if diff != set():\n different = True\n print(\" value labels defined in #2 but not in #1:\")\n print(\" \", str(diff))\n \n diff = [k for k in keys1.intersection(keys2)\n if dta1._vallabs[k] != dta2._vallabs[k]]\n if diff != []:\n different = True\n print(\" value labels with same name but different mapping:\")\n print(\" \", str(diff))\n \n # data values\n # -----------\n if all_data:\n diff = sum([0] + [1 for i in range(nobs) for j in range(nvar)\n if dta1._varvals[i][j] != dta2._varvals[i][j]])\n if diff != 0:\n different = True\n print(\" data values differ in \" + str(diff) + \" places\")\n else:\n for i in range(nobs):\n for j in range(nvar):\n if 
dta1._varvals[i][j] != dta2._varvals[i][j]:\n different = True\n print(\"\".join(\n (\" data values differ\\n \",\n \"first difference in position {},{}\".format(i,j))))\n break\n else:\n continue # executed if the loop ended normally (no break)\n break # executed if 'continue' was skipped (break)\n # trick from http://stackoverflow.com/questions/653509 \n # to exit from nested for loops\n\n if not different:\n print(\" no difference found\")", "def compare_series(series_a, series_b):\n return {\n 'rmse': ((series_a - series_b) ** 2).mean() ** 0.5,\n 'mbe': (series_b - series_a).mean(),\n 'mae': abs(series_b - series_a).mean(),\n 'rsqr': stats.linregress(series_a, series_b).rvalue ** 2,\n }", "def print_diff_summary(self, other, **kwargs):\r\n\r\n def diff_dict(a_time, b_time_):\r\n r = {}\r\n b_time = copy.copy(b_time_)\r\n for a, ta in a_time.items():\r\n r.setdefault(a, 0)\r\n tb = b_time.pop(a, 0)\r\n r[a] += ta - tb\r\n\r\n #they are missing in a\r\n for a, t in b_time.items():\r\n r.setdefault(a, 0)\r\n r[a] += t\r\n return r\r\n\r\n compile_time = self.compile_time - other.compile_time\r\n fct_call_time = diff_dict(self.fct_call_time, other.fct_call_time)\r\n fct_call = diff_dict(self.fct_call, other.fct_call)\r\n apply_time = diff_dict(self.apply_time, other.apply_time)\r\n apply_cimpl = self.apply_cimpl and other.apply_cimpl\r\n message = self.message\r\n variable_shape = diff_dict(self.variable_shape, other.variable_shape)\r\n self_linker_time = sum([ps.linker_time for ps\r\n in self.profile_stats.values()])\r\n other_linker_time = sum([ps.linker_time for ps\r\n in other.profile_stats.values()])\r\n self_optimizer_time = sum([ps.optimizer_time for ps\r\n in self.profile_stats.values()])\r\n other_optimizer_time = sum([ps.optimizer_time for ps\r\n in other.profile_stats.values()])\r\n\r\n other_time = {'linker_time': self_linker_time - other_linker_time,\r\n 'optimizer_time': self_optimizer_time -\r\n other_optimizer_time}\r\n self.print_summary_(\"print_diff_summary\", compile_time,\r\n fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n print_apply=False, other_time=other_time,\r\n **kwargs)", "def print_diff_summary(self, other, **kwargs):\n\n def diff_dict(a_time, b_time_):\n r = {}\n b_time = copy.copy(b_time_)\n for a, ta in iteritems(a_time):\n r.setdefault(a, 0)\n tb = b_time.pop(a, 0)\n r[a] += ta - tb\n\n # they are missing in a\n for a, t in iteritems(b_time):\n r.setdefault(a, 0)\n r[a] += t\n return r\n\n compile_time = self.compile_time - other.compile_time\n fct_call_time = diff_dict(self.fct_call_time, other.fct_call_time)\n fct_call = diff_dict(self.fct_call, other.fct_call)\n apply_time = diff_dict(self.apply_time, other.apply_time)\n apply_cimpl = self.apply_cimpl and other.apply_cimpl\n message = self.message\n variable_shape = diff_dict(self.variable_shape, other.variable_shape)\n self_linker_time = sum([ps.linker_time for ps\n in self.profile_stats.values()])\n other_linker_time = sum([ps.linker_time for ps\n in other.profile_stats.values()])\n self_optimizer_time = sum([ps.optimizer_time for ps\n in self.profile_stats.values()])\n other_optimizer_time = sum([ps.optimizer_time for ps\n in other.profile_stats.values()])\n\n other_time = {'linker_time': self_linker_time - other_linker_time,\n 'optimizer_time': self_optimizer_time -\n other_optimizer_time}\n self.print_summary_(\"print_diff_summary\", compile_time,\n fct_call_time, fct_call,\n apply_time, apply_cimpl, message, variable_shape,\n print_apply=False, 
other_time=other_time,\n **kwargs)", "def print_diff_summary(self, other, n_apply_to_print=15,\r\n n_ops_to_print=20):\r\n\r\n def diff_dict(a_time, b_time_):\r\n r = {}\r\n b_time = copy.copy(b_time_)\r\n for a, ta in a_time.items():\r\n r.setdefault(a, 0)\r\n tb = b_time.pop(a, 0)\r\n r[a] += ta - tb\r\n\r\n #they are missing in a\r\n for a, t in b_time.items():\r\n r.setdefault(a, 0)\r\n r[a] += t\r\n return r\r\n\r\n compile_time = self.compile_time - other.compile_time\r\n fct_call_time = diff_dict(self.fct_call_time, other.fct_call_time)\r\n fct_call = diff_dict(self.fct_call, other.fct_call)\r\n apply_time = diff_dict(self.apply_time, other.apply_time)\r\n op_cimpl = self.op_cimpl and other.op_cimpl\r\n message = self.message\r\n outputs_size = diff_dict(self.outputs_size, other.outputs_size)\r\n\r\n self.print_summary_(\r\n \"print_diff_summary\", compile_time, fct_call_time, fct_call,\r\n apply_time, op_cimpl, message, outputs_size,\r\n n_apply_to_print=n_apply_to_print,\r\n n_ops_to_print=n_ops_to_print, print_apply=False)", "def compare_displacements(ds1,ds2):\n # Obteniendo los datos para BP\n t1 = ds1['t']\n t1 = t1[:n_im-1]\n t1 = mplt.dates.date2num(t1)\n d1 = ds1['d_t']\n # Obteniendo los datos para RMA\n t2 = ds2['t']\n t2 = t2[:n_im-1]\n t2 = mplt.dates.date2num(t2)\n d2 = ds2['d_t']\n\n # Graficando las 2 curvas juntas\n formatter = DateFormatter(\"%d/%m - %H:%M\")\n for i in range(len(d1)):\n # Hallando el valor promedio final x zona\n mean_bp = d1[i].mean()\n mean_rma = d2[i].mean()\n print(\"Valor promedio BP_zona\"+str(i)+\": \",mean_bp)\n print(\"Valor promedio RMA_zona\"+str(i)+\": \",mean_rma)\n print(\"\")\n # Graficando\n direction = 'desplazamientosPromedios_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'_zona'+str(i)\n\n fig, ax= plt.subplots(figsize=(10,7))\n ax.plot_date(t1,d1[i],'b',marker='',markerfacecolor='b',markeredgecolor='b',label='Back Projection')\n ax.plot_date(t2,d2[i],'r',marker='',markerfacecolor='r',markeredgecolor='r',label='RMA')\n ax.set(xlabel='Tiempo',ylabel='Desplazamiento(mm)',title=\"Desplazamientos promedios\\n(Zona \"+str(i)+')')\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=20)\n #ax.set_xlim([R.min(),R.max()])\n ax.set_ylim([-c*1000*4/(4*fc),c*1000*4/(4*fc)])\n ax.grid(linestyle='dashed')\n ax.legend()\n plt.show()\n fig.savefig(os.getcwd()+\"/Results/Desplazamientos/\"+direction,orientation='landscape')\n\n return 'Ok'", "def test_ndiff(self):\n print \"\\n\"\n for d in ndiff(a, b): print d", "def compare(self, other_sequence):\n response = []\n if self.name != other_sequence.name:\n response.append('DIFF: Sequence names: %s' % self.name)\n response.append('and %s' % other_sequence.name)\n if self.increment_by != other_sequence.increment_by:\n response.append('DIFF: Increment interval')\n response.append('is %d,' % self.increment_by)\n response.append('for %s' % other_sequence.name)\n response.append('it is %d' % other_sequence.increment_by)\n if self.min_value != other_sequence.min_value:\n response.append('DIFF: Min value is %d' % self.min_value)\n response.append(' for %s' % other_sequence.name)\n response.append('it is %d' % other_sequence.min_value)\n if self.max_value != other_sequence.max_value:\n response.append('DIFF: Max value is %d' % self.max_value)\n response.append(', for %s ' % other_sequence.name)\n response.append('it is %d' % other_sequence.max_value)\n # The only attribute we don't check is currval, becuase it will be \n # different in 999 cases out of a 1000\n return response", "def 
comparison_with_another_mdv(self, mdv):\n for fragment in self.mdv:\n for number in self.mdv[fragment]:\n my =self.mdv[fragment][number]['ratio']\n you = mdv.mdv[fragment][number]['ratio']\n print(\"{0:8.8s} {1:1d} {2:6.5f} {3:6.5f} {4:-6.5f} {5:6.5f} {6:5.5s}\".format(fragment, number, my ,you, my-you,self.mdv[fragment][number]['std'] ,self.mdv[fragment][number]['use']))", "def compare(self):\n samples = self.data[-2:]\n if len(samples) != 2:\n return\n\n timestamp_a, data_a = samples[0]\n timestamp_b, data_b = samples[1]\n LOG.debug(\"%s comparing sample from %s to %s\", self, timestamp_a, timestamp_b)\n changes = dict_compare(data_a, data_b)\n for key in changes:\n OUTPUT.info(\"%s:%s: %s -> %s\", self, key, get_value(data_a, key), get_value(data_b, key))", "def bench_report(t1, t2):\n print \"\\n\\n Time taken: {0}\".format(t2 - t1)", "def PrintDiffs(message, lhs, rhs):\n dif = set(lhs).difference(rhs)\n if dif:\n print message, ', '.join(dif)", "def print_time_difference(initial_time, final_time):\n time_spent = pd.to_timedelta(int(final_time - initial_time), unit='s')\n print('Time spent: {}.'.format(time_spent))", "def compare_spectrum(spectrum0, spectrum1):\n title0 = spectrum0.get_title() \n title1 = spectrum1.get_title() \n if(title0 < title1): \n return -1\n elif(title0 > title1): \n return 1\n else:\n return 0", "def plot_diff(self, ax, d0, d1):\n i1 = 0\n for i0 in range(d0.shape[0]):\n try:\n while d0[i0,0] > d1[i1,0]:\n print((d0[i0,0], d1[i1,0]))\n i1 += 1\n except IndexError:\n break\n if d0[i0,0] == d1[i1,0]:\n break\n assert d0[i0,0] == d1[i1,0]\n i0s = i0\n d0s = 0\n d1s = 0\n dt = []\n dd = []\n for i0 in range(i0s, d0.shape[0]):\n d0s += d0[i0,1]\n try:\n d1s += d1[i1,1]\n assert d0[i0,0] == d1[i1,0]\n except IndexError:\n pass\n dt.append(d0[i0,0])\n dd.append(d0s - d1s)\n i1 += 1\n while i1 < d1.shape[0]:\n d1s += d1[i1,1]\n dt.append(d0[i0,0])\n dd.append(d0s - d1s)\n i1 += 1\n dd = np.array(dd)\n dd -= dd[-1]\n i = 0\n if self.start is not None:\n while dt[i] < self.start:\n i += 1\n ax.plot_date(dt[i:], dd[i:], fmt='-', color='yellow')", "def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length arrays.'\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n = a.shape[dimension]\r\n df = float(n-1)\r\n d = (a-b).astype('d')\r\n\r\n denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)\r\n zerodivproblem = N.equal(denom,0)\r\n denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place\r\n t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def compareTwoReco(reference, new, histos, debug=1):\n\n # Tracks with index False are the ones that have been matched to the reference track collection\n new_valid = [True for i in 
new]\n\n # Tracks with index False are the ones that have been matched to the comparison track collection\n original_valid = [True for i in reference]\n print \" \".join(\"%10s\" % k for k in variables)\n debug_verbose = checkDebug(debug, 'Verbose')\n debug_ordinary = checkDebug(debug, 'Ordinary')\n debug_recovery = checkDebug(debug, 'Recovery')\n debug_lost = checkDebug(debug, 'Lost')\n debug_fake = checkDebug(debug, 'Fake')\n\n for original_index, original in enumerate(reference):\n # Fill in cumulative plots for the reference sample first\n histos['reference_hits_vs_algo'].Fill(original.algo, original.hits)\n histos['reference_hits_vs_orialgo'].Fill(original.orialgo, original.hits)\n histos['reference_hits_vs_pt'].Fill(original.pt, original.hits)\n histos['den'].Fill(original.pt)\n histos['den_eta'].Fill(original.eta)\n histos['den_phi'].Fill(original.phi)\n histos['den_hits'].Fill(original.hits)\n histos['den_algo'].Fill(original.algo)\n histos['den_orialgo'].Fill(original.orialgo)\n\n # Now start to look for a matching track in the comparison track collection\n window_depth = 400 # elements to span to look for best candidate\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = -1, 100, 100\n if debug_verbose:\n print original\n for i,j in enumerate(new):\n if new_valid[i] == True:\n if debug_verbose:\n print \" \", i, j\n if window_depth == 0:\n break\n dr_squared, dPt_over_pt = match(original, j)\n if dr_squared < bestDeltaRMatch*bestDeltaRMatch and dPt_over_pt < DELTA_PT_OVER_PT_CUT:\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = i, dr_squared, dPt_over_pt\n if debug_verbose:\n print \" \", window_depth, iBest, bestDeltaRMatch, dr_squared, bestDeltaPt_over_PtMatch, dPt_over_pt\n if bestDeltaRMatch <= 0.0001 or bestDeltaPt_over_PtMatch == 0.0001:\n break\n window_depth -= 1\n if iBest != -1 and bestDeltaRMatch < DELTA_R_CUT:\n # These are the tracks in the reference track collection\n # that have been matched to a track in the comparison\n # track collection\n new_valid[iBest] = False\n original_valid[original_index] = False\n assert original.run == new[iBest].run, \"run mismatch\"\n assert original.ls == new[iBest].ls, \"ls mismatch\"\n assert original.event == new[iBest].event, \"event mismatch\"\n if debug_ordinary:\n print original\n print new[iBest]\n print iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch, '\\n'\n histos['num'].Fill(original.pt)\n histos['num_eta'].Fill(original.eta)\n histos['num_phi'].Fill(original.phi)\n histos['num_hits'].Fill(original.hits)\n histos['num_algo'].Fill(original.algo)\n histos['num_orialgo'].Fill(original.orialgo)\n histos['fake_num'].Fill(new[iBest].pt)\n histos['fake_num_eta'].Fill(new[iBest].eta)\n histos['fake_num_phi'].Fill(new[iBest].phi)\n histos['fake_num_hits'].Fill(new[iBest].hits)\n histos['fake_num_algo'].Fill(new[iBest].algo)\n histos['fake_num_orialgo'].Fill(new[iBest].orialgo)\n histos['comparison_algo_vs_reference_algo'].Fill(original.algo, new[iBest].algo)\n histos['comparison_orialgo_vs_reference_orialgo'].Fill(original.orialgo, new[iBest].orialgo)\n histos['comparison_hits_vs_reference_hits'].Fill(original.hits, new[iBest].hits)\n\n # Let's try a recovery loop with somewhat lesser stringent cuts\n for original_index, original in enumerate(reference):\n if original_valid[original_index]:\n # Now start to look for a matching track in the comparison track collection\n window_depth = 300 # elements to span to look for best candidate\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = -1, 100, 100\n if debug_verbose:\n 
print \"Recovery \", original\n for i,j in enumerate(new):\n if new_valid[i] == True:\n if debug_verbose:\n print \"Recovery \", i, j\n if window_depth == 0:\n break\n dr_squared, dPt_over_pt = match(original, j)\n if dr_squared < bestDeltaRMatch*bestDeltaRMatch and dPt_over_pt < DELTA_PT_OVER_PT_CUT*6:\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = i, dr_squared, dPt_over_pt\n if debug_verbose:\n print \"Recovery \", window_depth, iBest, bestDeltaRMatch, dr_squared, bestDeltaPt_over_PtMatch, dPt_over_pt\n if bestDeltaRMatch <= 0.0001 or bestDeltaPt_over_PtMatch == 0.0001:\n break\n window_depth -= 1\n if iBest != -1 and bestDeltaRMatch < DELTA_R_CUT*10: # inflate cut on DeltaR to recover some good-medium matching\n # These are the tracks in the reference track collection\n # that have been matched to a track in the comparison\n # track collection\n new_valid[iBest] = False\n original_valid[original_index] = False\n if debug_recovery:\n print \"Recovery \", original\n print \"Recovery \", new[iBest]\n print \"Recovery \", iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch\n histos['num'].Fill(original.pt)\n histos['num_eta'].Fill(original.eta)\n histos['num_phi'].Fill(original.phi)\n histos['num_hits'].Fill(original.hits)\n histos['num_algo'].Fill(original.algo)\n histos['num_orialgo'].Fill(original.orialgo)\n histos['fake_num'].Fill(new[iBest].pt)\n histos['fake_num_eta'].Fill(new[iBest].eta)\n histos['fake_num_hits'].Fill(new[iBest].hits)\n histos['fake_num_algo'].Fill(new[iBest].algo)\n histos['fake_num_orialgo'].Fill(new[iBest].orialgo)\n histos['comparison_algo_vs_reference_algo'].Fill(original.algo, new[iBest].algo)\n histos['comparison_orialgo_vs_reference_orialgo'].Fill(original.orialgo, new[iBest].orialgo)\n histos['comparison_hits_vs_reference_hits'].Fill(original.hits, new[iBest].hits)\n\n\n # These are the tracks in the reference track collection\n # that have *not* been associated to any track in the\n # comparison collection == > LOST TRACKS\n reference_not_assigned = [j for i,j in enumerate(reference) if original_valid[i]]\n reference_not_assigned.sort(key=lambda tr: tr.algo)\n if debug_lost:\n print \"**** Lost tracks **** %d\" % len(reference_not_assigned)\n for j in reference_not_assigned:\n histos['lost_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['lost_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['lost_hits_vs_pt'].Fill(j.pt, j.hits)\n histos['lost_eta'].Fill(j.eta)\n if debug:\n print j\n if debug_lost:\n print \"**** End of Lost tracks ****\"\n\n # Fake Tracks\n for i, j in enumerate(new):\n # Fill in the cumulative plots related to tracks in the comparison track collection\n histos['comparison_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['comparison_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['comparison_hits_vs_pt'].Fill(j.pt, j.hits)\n histos['fake_den'].Fill(j.pt)\n histos['fake_den_eta'].Fill(j.eta)\n histos['fake_den_phi'].Fill(j.phi)\n histos['fake_den_hits'].Fill(j.hits)\n histos['fake_den_algo'].Fill(j.algo)\n histos['fake_den_orialgo'].Fill(j.orialgo)\n\n # These are the tracks in the comparison track collection\n # that have *not* been associated to any track in the\n # reference collection ==> FAKE TRACKS\n new_not_assigned = [j for i,j in enumerate(new) if new_valid[i]]\n new_not_assigned.sort(key=lambda tr: tr.algo)\n if debug_fake:\n print \"**** Fake tracks **** %d\" % len(new_not_assigned)\n for j in new_not_assigned:\n histos['fake_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['fake_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n 
histos['fake_hits_vs_pt'].Fill(j.pt, j.hits)\n if debug:\n print j\n if debug_fake:\n print \"**** End of Fake tracks ****\"", "def compare_graphs(self):\n\t\tpass", "def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length lists in ttest_rel.'\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = var(a)\r\n v2 = var(b)\r\n n = len(a)\r\n cov = 0\r\n for i in range(len(a)):\r\n cov = cov + (a[i]-x1) * (b[i]-x2)\r\n df = n-1\r\n cov = cov / float(df)\r\n sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))\r\n t = (x1-x2)/sd\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,min(a),max(a),\r\n name2,n,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t, prob", "def compare(mem1, mem2, name1, name2, output=sys.stdout, show_equal=True, granularity=1):\n\n addresses1, stream1 = make_stream(mem1, granularity=granularity)\n addresses2, stream2 = make_stream(mem2, granularity=granularity)\n\n s = difflib.SequenceMatcher(lambda x: x is None, stream1, stream2, autojunk=False)\n #~ sys.stderr.write('similarity [0...1]: {:.2f}\\n'.format(s.ratio())) # XXX if verbose\n equal = True\n for opcode, i1, i2, j1, j2 in s.get_opcodes():\n #~ print \"=== %6s a[%d:%d] b[%d:%d]\" % (opcode, i1, i2, j1, j2)\n if opcode == 'equal':\n if addresses1[i1] != addresses2[j1]:\n equal = False\n if show_equal:\n hexdump(' ', addresses1[i1:i2], addresses2[j1:j2], stream1[i1:i2], output)\n else:\n # XXX search for address jumps in the blocks just like hexdump does\n output.write('= {:08x} {:08x}: {} bytes identical{}\\n'.format(\n addresses1[i1],\n addresses2[j1],\n (i2 - i1) * granularity,\n ' at different addresses' if addresses1[i1] != addresses2[j1] else ''))\n elif opcode == 'insert':\n hexdump('+', None, addresses2[j1:j2], stream2[j1:j2], output)\n equal = False\n elif opcode == 'replace':\n #~ output.write('\\n')\n hexdump('<', addresses1[i1:i2], None, stream1[i1:i2], output)\n #~ sys.stdout.write('--- is replaced with\\n')\n hexdump('>', None, addresses2[j1:j2], stream2[j1:j2], output)\n #~ output.write('\\n')\n equal = False\n elif opcode == 'delete':\n hexdump('-', addresses1[i1:i2], None, stream1[i1:i2], output)\n equal = False\n\n if equal:\n output.write(\"files are identical\\n\")\n return True\n else:\n return False", "def _compare_results(y_pred, y_pred_sampled, y_true):\n scores_og = _compute_scores(y_pred, y_true)\n scores_samp = _compute_scores(y_pred_sampled, y_true)\n\n # Aggreggate both results\n result_comp = pd.concat({\"Og\": scores_og, \"samp\": scores_samp}, axis = 1)\n\n return result_comp", "def plotResultsComparison(monthlyData1, monthlyData2, indices, arg):\n \n energyType = arg[0] \n \n dummyRange = np.asarray(range(len(indices['E_tot1'])))\n \n fig = plt.figure(figsize=(16, 8))\n \n# plt.suptitle('Heating Demand (COP=' + str(usedEfficiencies['H_COP']) + ')')\n if energyType == 'PV':\n multiplier = -1\n else:\n multiplier = 1\n \n ax1 = plt.subplot(2,1,1)\n \n plt.plot(multiplier*monthlyData1[energyType][indices['E_tot1'], dummyRange], label = 'Results1', color='b')\n plt.plot(multiplier*monthlyData2[energyType][indices['E_tot2'], dummyRange], label = 'Results2', color='g')\n \n plt.ylabel('Energy [kWh]')\n plt.legend()\n \n majorLocator = MultipleLocator(24)\n majorFormatter = FormatStrFormatter('%d')\n minorLocator = MultipleLocator(24)\n minorFormatter = FormatStrFormatter('%d')\n\n 
ax1.xaxis.set_major_locator(majorLocator)\n ax1.xaxis.set_major_formatter(majorFormatter)\n ax1.xaxis.set_minor_locator(minorLocator)\n# ax1.xaxis.set_minor_formatter(minorFormatter)\n plt.grid(True, which='both')\n \n ax2 = plt.subplot(2,1,2, sharex=ax1)\n \n plt.plot(multiplier*monthlyData1[energyType][indices['E_tot1'], dummyRange]-multiplier*monthlyData2[energyType][indices['E_tot2'], dummyRange], label = '1-2', color='b')\n\n plt.ylabel('Energy Difference [kWh]')\n plt.legend()\n\n ax2.xaxis.set_major_locator(majorLocator)\n ax2.xaxis.set_major_formatter(majorFormatter)\n ax2.xaxis.set_minor_locator(minorLocator)\n# ax2.xaxis.set_minor_formatter(minorFormatter)\n plt.grid(True, which='both')\n \n return fig", "def display_analysis_result(column1: pd.Series, column2: pd.Series, name1: str, name2: str):\n\n print(\"Correlation between '%s' and '%s':\" % (name1, name2))\n print(\"Covariance: \" + str(calculate_covariance(column1, column2)))\n print(\"Correlation coefficient: \" + str(calculate_correlation_coefficient(column1, column2)))\n print(\"Significance of coefficient: \" + str(calculate_significance_of_coefficient(column1, column2)))\n print()\n\n draw_scatter_plot(column1, column2, name1, name2)\n draw_residual_plot(column1, column2, name1, name2)", "def _assert_series_equal_both(a, b, **kwargs):\n tm.assert_series_equal(a, b, **kwargs)\n tm.assert_series_equal(b, a, **kwargs)", "def _t_test(_sample_a, _sample_b):\n res = stats.ttest_ind(_sample_a, _sample_b, axis=0, equal_var=equal_var, nan_policy='propagate')\n print('Independent t-test\\nt-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)", "def test_TimeSeries_repr():", "def _compare(self, x,y, pr=False):\n batched = self.ex.batched(x, y)\n looped = self.ex.looped(x, y)\n #print(f'batched value {batched}')\n #print(f'looped value {looped}')\n \n self.assertTrue(\n torch.equal(batched, looped)\n )", "def format_comparison(objs):\n def formatter(comp):\n if not isinstance(comp, tuple):\n return str(comp)\n output = []\n return \"\\n\".join([comp.type] + [\" \"+errmessage for errmessage in output])\n\n results = map(formatter,objs)\n return \"\\n\".join(results)\n \n #obj1,obj2 = comp\n\n\n ### Sections\n #for i,s1,s2 in diffs:\n # if s1 and s2:\n # output.append(f\"Section {i} does not match:\")\n # result = compare_sections(s1,s2)\n # output.extend(almethods.linepadder(result))\n # else:\n # if s1:\n # output.append(f\"Door 2 missing Section {i}\")\n # else:\n # output.append(f\"Door 1 missing Section {i}\")", "def plot_step_details(df1, df2, rxn, detail):\n a1, a2 = df1.align(df2, join='outer', axis=0)\n x_list = [row[detail] for row in a1[rxn]]\n y_list = [row[detail] for row in a2[rxn]]\n comparison_plot(x_list, y_list, f'{rxn} {detail}')" ]
[ "0.7352779", "0.648902", "0.62641907", "0.62638307", "0.6202008", "0.6156125", "0.6137342", "0.61093706", "0.5890883", "0.5816313", "0.5804589", "0.5792591", "0.5783776", "0.57681495", "0.5733674", "0.57219535", "0.57205397", "0.566985", "0.5667738", "0.56619906", "0.5653714", "0.56395626", "0.5603362", "0.56012475", "0.5579427", "0.55675864", "0.5565546", "0.555861", "0.55581707", "0.55404323" ]
0.71664226
1
Chooses a random id of length 'length'. There are 62^'length' possible randids. For length '12', that's 10K+ years of making a guess 10 times a second to find a single random page, with a billion pages.
def randid(length=12): import random return ''.join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for x in range(length))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_random_id(length=MAX_ID_LENGTH - 1):\n arb_id = random.choice(LEAD_ID_CHARACTERS)\n for i in range(length - 1):\n arb_id += random.choice(CHARACTERS)\n return arb_id", "def generate_id(length: int = 8):\n return \"\".join(random.choices(string.ascii_uppercase, k=length))", "def _create_id(length=40):\n\n numbers = map(str, range(10))\n letters = string.ascii_lowercase\n options = [*letters[:letters.index('f') + 1], *numbers]\n\n return ''.join(random.choice(options) for _ in range(length))", "def random_number(length=6):\n return randint(10**(length-1), (10**(length)-1))", "def string_id(length=8):\n return ''.join(random.choice(string.ascii_letters +\n string.digits)\n for _ in range(length))", "def _generate_random_string(length: int):\n\tall_chars = string.ascii_letters + string.digits\n\treturn ''.join(random.choices(all_chars, k=length))", "def generate_random_string(length = 30):\n\n m_available_chars = ascii_letters + digits\n\n return \"\".join(choice(m_available_chars) for _ in xrange(length))", "def generateHash(*length):\n from string import letters, digits\n from random import choice\n pool, size = letters + digits, length or 10\n hash = ''.join([choice(pool) for i in range(size)])\n return hash.lower()", "def id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for x in range(size))", "def id_generator(size=7, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))", "def id_generator(size=15, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for x in range(size))", "def make_random(length, is_binary=False):\n limit = 255 if is_binary else 126\n return \"\".join([chr(choice(range(32, limit))) for _ in range(length)]) + \"\\n\"", "def id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))", "def random_string(length):\n # this conservatively gets 8*length bits and then returns 6*length of\n # them. 
Grabbing (6/8)*length bits could lose some entropy off the ends.\n return urlsafe_b64encode(os.urandom(length))[:length]", "def generate(length):\n return base64.encodestring(OpenSSL.rand.bytes(256))[:length]", "def gen_randomkey(length):\n chars = string.letters + string.digits + string.punctuation\n return ''.join([choice(chars) for _ in xrange(int(str(length)))])", "def generate_random_string(length):\r\n return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))", "def generation_randomid(size, integer=False):\n digits = range(10)\n id = []\n for i in range(size):\n id.append(choice(digits))\n res = ''.join(map(str, id))\n if not integer:\n return res\n else:\n return int(res)", "def generate_randomkey(length):\n chars = string.letters + string.digits\n return ''.join([choice(chars) for i in range(length)])", "def rndstr(length):\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))", "def generate_specified_length_unique_ids(id_length):\n\ts = set()\n\twhile len(s) < id_length:\n\t\t# Note: uuid4 generates a random UUID (Universally Unique IDentifier)\n\t\t#\tThere is a *very minor* chance of collisions b/c of random generation, but very low likelihood \n\t\ts.add(str(uuid.uuid4())) \n\treturn list(s)", "def get_random_string(length: int) -> str:\n return \"\".join(random.choices(string.ascii_letters + string.digits, k=length))", "def make_random_string(\n self,\n length: int = 8\n ) -> str:\n return ''.join(choice(self.lowercase + self.uppercase + self.digits) for _ in range(length))", "def generate_room_id():\r\n id_length = 6\r\n while True:\r\n id_tmp = ''.join(random.SystemRandom().choice(\r\n string.ascii_uppercase) for _ in range(id_length))\r\n conflict = id_tmp in rooms\r\n if not conflict:\r\n return id_tmp", "def create_random_string(length):\n return ''.join(random.choice(\"ACDEFGHJKMNPQRTWXYZ\")\n for _ in range(length)\n )", "def generate_random_string(length: int) -> str:\n charset = string.ascii_letters + string.digits\n return \"\".join(random.choice(charset) for _ in range(length))", "def idGenerator(size=16, chars=string.digits + string.ascii_letters + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))", "def generate_prime_candidate(length):\n mask = (1 << length) - 1\n offs = 1.4142135623731 * (1 << (length-1))\n p = 0\n while p < offs:\n # generate big integer from random bytes\n p = int.from_bytes(os.urandom((length+7)//8), byteorder='little')\n # apply a mask to limit to length bits\n p &= mask\n # apply a mask to set LSB to 1\n p |= 1\n return p", "def get_random_str(length=16):\n if length is None or not isinstance(length, int) or length > 1000 or length <= 0:\n length = 16\n\n alph = list(\"1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\")\n res_str = \"\"\n for idx in range(length):\n res_str += random.choice(alph)\n return res_str", "def random_str(length):\n digits = ''.join([str(num) for num in list(range(10))])\n res = ''.join(random.choice(ascii_lowercase + digits)\n for i in range(length))\n return res" ]
[ "0.74631286", "0.74071676", "0.7272849", "0.70104885", "0.67147696", "0.6712778", "0.66736937", "0.66303253", "0.6618916", "0.6553483", "0.65376747", "0.6533648", "0.65256494", "0.65190023", "0.65097946", "0.6489149", "0.64810395", "0.64748263", "0.6467832", "0.64407367", "0.64308685", "0.6413127", "0.63661915", "0.6364913", "0.6363649", "0.6323182", "0.63057077", "0.62754697", "0.6259769", "0.6256882" ]
0.7521966
0
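As an illustrative aside, not part of the dataset row above: the randid document can be exercised directly. The snippet below is a minimal sketch assuming Python 3, with the function restated from the document (the alphabet pulled into a variable for readability); the id shown in the comment is only an example, since each call is random.

import random

def randid(length=12):
    # 62-character alphabet: 26 lowercase + 26 uppercase letters + 10 digits
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    return ''.join(random.choice(alphabet) for _ in range(length))

print(randid())   # e.g. 'a3F9kQz27LmB' -- a different 12-character id on every call
print(62 ** 12)   # about 3.2e21 possible 12-character ids, consistent with the query's estimate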
(bed file) > (dictionary) Goes through a bed file and generates a dictionary (bed record) with the chromosome name as the key and, as the value, a list of strings each containing the start and end values.
def go_thru(bed_file): bedrecord = dict() #Create dictionary with information from this bed file #print 'Hi I just made a dictionary in which to parse the contents of your bed file' beddata = open(bed_file,'r') #print 'OK, so now I am opening your file called', bed_file thereadline= beddata.readline() #print thereadline if "track" in thereadline : #if len(peak.split())>3 for peak in beddata: if peak.split()[0] in bedrecord: #if record is not already present in dictionary #print 'Another peak on it is located at', peak.split()[0] bedrecord[peak.split()[0]].append("-".join(peak.split()[1:3])) else : #bedrecord[peak.split()[0]]= str(peak.split()[1],'-',peak.split()[2]) bedrecord[peak.split()[0]]= ["-".join(peak.split()[1:3])] #if it IS already present, just add onto existing record #print "New chromosome in your file - it is ", peak.split()[0] else: print 'This is not a bed file dude!' return bedrecord
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocessBed(fname):\n res = {}\n iter = parseBed(fname)\n for i in iter:\n res.setdefault(i.chr,[])\n res[i.chr].append(i)\n for k in res.keys():\n res[k].sort()\n return res", "def parseBed(fname):\n \n handle=open(fname,'r')\n for line in handle:\n if line.startswith(\"#\"):\n continue\n if line.startswith(\"track\") or line.startswith(\"browser\"):\n continue\n vals=line.rstrip().split(\"\\t\")\n chr = vals[0]\n start = int(vals[1])\n end = int(vals[2])\n if len(vals)>=3:\n strand = vals[5]\n score = float(vals[4])\n name = vals[3]\n res = Interval(chr,start,end)\n if len(vals)>3:\n res.strand = strand\n res.score = score\n res.name = name\n res = Interval(chr,start,end,strand=strand,score=score,name=name)\n if len(vals)>6:\n res = SplicedInterval(res.chr,res.start,res.end,res.strand,score=res.score,name=res.name,exonLengths=vals[10],exonOffsets=vals[11])\n #res=dict(zip(bed_fields,vals))\n #res['start'],res['end'],res['score'] = int(res['start']),int(res['end']),int(res['score'])\n yield res", "def bed_chrom_map(args):\n infile = '/dev/stdin' if args.bed == '-' else args.bed\n chrom_map = get_chrom_map(mode=args.chrom_mode, maps=args.chrom_maps)\n with open(infile) as fp:\n if args.header:\n line = next(fp)\n print (line, end='')\n\n for line in fp:\n if line.startswith('#'):\n print (line, end='')\n else:\n rec = line.rstrip('\\n\\r').split('\\t')\n chrom = rec[0]\n chrom = chrom_map.get(chrom, chrom)\n print (chrom, *rec[1:], sep='\\t')", "def generate_bed_dict(line, bed_header):\n out_dict = dict((key, value) for key, value in izip(bed_header, line))\n return(out_dict)", "def load_data() -> list:\n # trans_dict is used for changing the given names into standardized names.\n trans_dict = {\"chr1\": \"1\", \"chr2\": \"2\", \"chr3\": \"3\", \"chr4\": \"4\", \"chr5\": \"5\", \"chr6\": \"6\", \"chr7\": \"7\",\n \"chr8\": \"8\", \"chr9\": \"9\", \"chr10\": \"10\", \"chr11\": \"11\", \"chr12\": \"12\", \"chr13\": \"13\", \"chr14\": \"14\",\n \"chr15\": \"15\", \"chr16\": \"16\", \"chr17\": \"17\", \"chr18\": \"18\", \"chr19\": \"19\", \"chrx\": \"x\", \"chry\": \"y\"}\n # This try statement catches user error.\n try:\n with open(sys.argv[1]) as bed_file, open(sys.argv[2]) as fasta_file:\n fasta_records = []\n # Opens the bed file and splits into lists\n bed_file = list(csv.reader(bed_file, delimiter='\\t'))\n # Changes the names of the chromosomes in bed file, does some light rearranging and formatting.\n bed_file = [[trans_dict[record[0].lower()], record[1], record[3][0:record[3].index(\n '\\'')]] for record in bed_file]\n # Sorts the desired indices by chromosome, then by index in the chromosome.\n bed_file = sorted(bed_file, key=itemgetter(1))\n bed_file = sorted(bed_file, key=itemgetter(0))\n # This stores the desired indexes for each chromosome.\n indexable_bed_records = {'1': [], '2': [], '3': [], '4': [], '5': [], '6': [], '7': [], '8': [], '9': [],\n '10': [], '11': [], '12': [], '13': [], '14': [], '15': [], '16': [], '17': [],\n '18': [], '19': [], 'x': [], 'y': []}\n # Put each desired index into it's appropriate chromosome list.\n for record in bed_file:\n indexable_bed_records[record[0]].append([record[2], record[1]])\n # Loops over fasta records in the supplied fasta file\n for fasta_record in fasta_iter(fasta_file):\n # grabs the chromosome id\n chrom_id = fasta_record[\"header\"][:fasta_record[\"header\"].index(' ')].lower()\n # Some chromosomes are not desired, skip them.\n if chrom_id not in indexable_bed_records.keys():\n continue\n # Grabs the indexes we want 
to extract from the chromosome.\n indexes = indexable_bed_records[chrom_id]\n # Grabs each index+/-10 from the sequence\n for index in indexes:\n fasta_records.append([index[0], fasta_record[\"seq\"][int(index[1]) - 10:int(index[1]) + 10]])\n # Returns a list of lists of format [5'/3',splice site sequence]\n return fasta_records\n # Catches user error.\n except (FileNotFoundError, IndexError) as e:\n if type(e) is IndexError:\n sys.stderr.write(\"Usage: {} bed_file fasta_file\\n\\tbed_file: The appropriate bed file. \\n\\t\"\n \"fasta_file: The appropriate fasta file.\\n\".format(os.path.basename(__file__)))\n elif type(e) is FileNotFoundError:\n sys.stderr.write(\"One of the specified files was not found.\\n\")\n sys.exit(1)", "def convert_dict_to_bed(dicti, chr_list,file_name):\n filename= file_name+'.bed'\n myFile = open(filename,'w')\n myFile.write('track name=Inter'+sys.argv[1]+'-'+sys.argv[2]+'\\n')\n #print ' I just made a file to put your results in called'+ filename\n for chr in chr_list: \n #print 'the chr is', chr\n #print \"chr no.\",chr,\" : \",len(dicti[chr]),\" put enhancers\"\n for item in dicti[chr]:\n #print 'the item is ', item\n line=item.split('-')\n line.insert(0,chr)\n #print line\n myFile.write('\\t'.join(line)+'\\n')\n myFile.close()", "def dict_file_to_regions(in_file: Union[str, os.PathLike]\n ) -> Generator[BedRegion, None, None]:\n with open(in_file, \"rt\") as in_file_h:\n for line in in_file_h:\n fields = line.strip().split()\n if fields[0] != \"@SQ\":\n continue\n\n contig: Optional[str] = None\n length: Optional[int] = None\n for field in fields:\n if field.startswith(\"LN\"):\n length = int(field[3:])\n elif field.startswith(\"SN\"):\n contig = field[3:]\n if contig and length:\n yield BedRegion(contig, 0, length)", "def read_chr(fpath):\n\t# init dict and indices\n\tchrom_dicts={}\n\tstart=0\n\tindex=0\n\n\t# iterate through chromosome scores \n\tfor line in fileinput.input(fpath):\n\t\tx=line.split()\n\t\t\n\t\t# if chromosome skips some region, then normalize the previous window (<100 bp) and init new window \t\n\t\tif len(x)==4:\n\t\t\tif start in chrom_dicts:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\tstart=int(x[2].split(\"=\")[1])\n\t\t\tchrom_dicts[start]=0\n\t\t\tindex=0\n\n\t\t# if not a black region, then make news windows every 100 locations\n\t\tif len(x)==1:\n\t\t\tchrom_dicts[start]+=float(x[0])\n\t\t\tif index==100:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\t\tindex=0\n\t\t\t\tstart+=100\n\t\t\t\tchrom_dicts[start]=0\n\t\t\tindex+=1\n\t\n\t# track chromosomes that have been binned\n\tprint(\"%s %d\" % (fpath,len(chrom_dicts)))\n\treturn(chrom_dicts)", "def get_bed_annots(bed_path, ref_contigs, quiet=False):\n annots = defaultdict(list)\n with open(bed_path) as f:\n for line in f:\n line = line.strip().split(\"\\t\")\n # Note: BED coordinates are 0-indexed, right-open.\n chrom, start, end, name, strand = line[0], int(line[1]), int(line[2]), line[3], line[5]\n gene_id = line[12] if len(line) >= 13 else \"\"\n desc = line[13] if len(line) >= 14 else \"\"\n ref_contig = ref_contigs[chrom]\n gene_seq = Seq(str(ref_contig.seq)[start:end], generic_dna)\n if strand == '-':\n gene_seq = gene_seq.reverse_complement()\n gene_seq_record = SeqRecord(gene_seq, id=gene_id, name=name, description=desc)\n \n coding_blocks = []\n if (len(line) >= 12 and line[9].isdigit() and re.match(COMMA_DELIM_INTEGERS, line[10])\n and re.match(COMMA_DELIM_INTEGERS, line[11])):\n # We have full blockCount, blockSizes, and blockStarts annotations\n block_starts = map(int, 
re.split(r'\\s*,\\s*', line[11]))\n thick_start = int(line[6]) if line[6].isdigit() else start\n thick_end = int(line[7]) if line[7].isdigit() else end\n for i, block_size in enumerate(re.split(r'\\s*,\\s*', line[10])[0:int(line[9])]):\n if i >= len(block_starts): break\n block_start = block_starts[i] + start\n block_end = block_start + int(block_size)\n if block_end <= thick_start: next\n if block_start > thick_end: next\n block_start = max(thick_start, block_start)\n block_end = min(thick_end, block_end)\n coding_blocks.append((block_start, block_end))\n elif len(line) >= 8 and line[6].isdigit() and line[7].isdigit():\n # Only thickStart and thickEnd are specified. In this case, there is one coding block.\n coding_blocks.append((int(line[6]), int(line[7])))\n else:\n coding_blocks.append((start, end))\n \n annot = Annot(start, end, strand == '-', gene_seq_record, coding_blocks)\n annots[contig_to_vcf_chrom(chrom)].append(annot)\n return annots", "def create_bed_records(chromosome, phasing0, phasing1, positions, annotation_string):\n\tassert len(phasing0) == len(phasing1) == len(positions)\n\tswitch_encoding0 = switch_encoding(phasing0)\n\tswitch_encoding1 = switch_encoding(phasing1)\n\tfor i, (sw0, sw1) in enumerate(zip(switch_encoding0, switch_encoding1)):\n\t\tif sw0 != sw1:\n\t\t\tyield (chromosome, positions[i]+1, positions[i+1]+1, annotation_string)", "def get_cds_start_end_locations_genbank_file(filename):\n # Loop over the features\n genes = defaultdict(list)\n cds = 0\n for seq_record in SeqIO.parse(filename, \"genbank\"):\n print(f'Dealing with GenBank record {seq_record.id}')\n for seq_feature in seq_record.features:\n if seq_feature.type == \"CDS\" and 'protein_id' in seq_feature.qualifiers:\n cds += 1\n prot_id = seq_feature.qualifiers['protein_id'][0]\n start, end = int(seq_feature.location.start), int(seq_feature.location.end)\n genes[prot_id] = genes.get(prot_id, []) + [start, end]\n print(f'There are {cds} CDS and {len(genes)} genes annoted for this genbank record')\n return genes", "def getFastaFromBed(inputBed, inputGenomeFasta, outputGeneFasta, chrToUse=None):\n gd = {}\n for (title, seq) in FastaIterator(inputGenomeFasta):\n #rd[title] = seq\n print title\n if chrToUse and title == chrToUse:\n gd[chrToUse] = seq\n #print \" used\"\n break\n\n out = open(outputGeneFasta, \"w\")\n for line in open(inputBed):\n if line.startswith(\"track\"):\n continue\n\n pieces = line.split()\n chr = pieces[0]\n start = int(pieces[1])\n sizes = [int(x) for x in pieces[10].split(\",\")[:-1]]\n starts = [int(x) for x in pieces[11].split(\",\")[:-1]]\n seq = \"\"\n for i in range(len(starts)):\n seq = seq + gd[chr][start+starts[i]:start+starts[i]+sizes[i]]\n out.write(\">%s\\n\" % (pieces[3]))\n for i in range(0, len(seq), 60):\n out.write(seq[i:i+60])\n out.write(\"\\n\")", "def extract_sequence(chrom,start,end,fasta_file):\n # extract the sequence from this region with pybedtools\n my_peak = '\\t'.join([chrom,str(start),str(end)])\n bedtool_peak = pybedtools.BedTool(my_peak, from_string=True)\n fasta = pybedtools.example_filename(fasta_file)\n a = a.sequence(fi=fasta)\n #print(open(a.seqfn).read()) ", "def read_snp_map(bed_file):\n snp_map = {}\n with open(bed_file) as fh:\n for line in fh:\n chrom, pos, _, sample_id, _ = line.split('\\t')\n snp_map[sample_id] = '{0} {1}'.format(chrom, pos)\n\n return snp_map", "def bed_to_glitr(in_bed, out_starts):\n with open(in_bed) as infile:\n with open(out_starts, 'w') as outfile:\n for chrom, start, stop, strand in readBedLines(infile):\n 
outfile.write('\\t'.join([chrom, str(start), strand]) + '\\n')", "def BEDreader(fname):\n\n bed_score = dict() \n bfh = open(fname)\n for line in bfh:\n line = line.strip('\\n\\r').split('\\t')\n assert len(line) == 5, '\\t'.join(line)\n bed_score[float(line[3])] = 1\n bfh.close()\n return bed_score.keys()", "def getGFFStartEnd(file, len_param):\n dicS = {}\n dicE = {}\n direct = {}\n for line in open(file):\n itemList = line[:-1].split('\\t')\n start = int(itemList[3])-len_param\n if start <0:\n start = 0\n end = int(itemList[4])+len_param\n #id = getsubString(itemList[8][4:],';') # ToDo: need to check for other species\n id = itemList[8][itemList[8].find('=')+1:itemList[8].find(';')]\n dicS[id]= start\n dicE[id]= end\n direct[id] = itemList[6]\n return dicS,dicE,direct", "def read_cDNA_file_to_dict(filename):\n \n #initialize dictionary\n cDNA_dictionary = {}\n\n #open file\n with open(cDNA_file) as f:\n \n #loop through file line by line\n for line in f:\n\n #remove newline\n line = line.rstrip()\n \n #get gene name\n if line.startswith(\">\"):#If the line starts with the character \">\" then,\n gene_name = line.split(\"|\")[1]#I separate the line by the character \"|\" and assign index 1 to gene_name\n \n #read in sequence in uppercase\n if not line.startswith(\">\"):#If the line does not start with the character \">\" then,\n line = line.upper()#I make all of the characters within the line uppercase\n\n #put name and sequence in dictionary\n cDNA_dictionary[gene_name] = line#I assign the gene_name as the key and the line (sequence) as the value\n\n #return dictionary \n return cDNA_dictionary", "def get_chroms(chromfile):\n chroms = {}\n with open(chromfile) as c:\n for line in c:\n try:\n chrom, length = line.strip().split()\n chroms[chrom] = length\n except ValueError:\n chroms[line.strip()] = 1\n return chroms", "def bed_file_to_regions(in_file: Union[str, os.PathLike]\n ) -> Generator[BedRegion, None, None]:\n with open(in_file, \"rt\") as in_file_h:\n for line in in_file_h:\n fields = line.strip().split()\n # Skip browser and track fields and other invalid lines.\n if fields[0] in [\"browser\", \"track\"] or len(fields) < 3:\n continue\n # Take the first 3 columns of each line to create a new BedRegion\n yield BedRegion(fields[0], int(fields[1]), int(fields[2]))", "def readMappedData(options,phase):\n whole_mapped_data={}\n mapped_data_per_size_per_register={}\n alignment_filename=options.output_directory+\"/\"+options.input_filename+\"_bowtie1.bwt\"\n fhr=open(alignment_filename,\"r\")\n for line in fhr:\n try:\n read_id, strand, chromosome, coordinate, sequence, quality, mapped_times = line.strip().split()\n except ValueError:\n print(line)\n continue\n try:\n coordinate=int(coordinate)\n mapped_times=int(mapped_times)+1\n length=len(sequence)\n except ValueError:\n print(line)\n continue\n if strand==\"-\":\n coordinate+=2\n if chromosome not in whole_mapped_data:\n whole_mapped_data[chromosome]={}\n if coordinate not in whole_mapped_data[chromosome]: \n whole_mapped_data[chromosome][coordinate]=0\n whole_mapped_data[chromosome][coordinate]+=1\n \n if phase!=length:\n continue\n if chromosome not in mapped_data_per_size_per_register:\n mapped_data_per_size_per_register[chromosome]={}\n register=coordinate % length\n if register not in mapped_data_per_size_per_register[chromosome]:\n mapped_data_per_size_per_register[chromosome][register]={}\n if coordinate not in mapped_data_per_size_per_register[chromosome][register]:\n 
mapped_data_per_size_per_register[chromosome][register][coordinate]=0\n mapped_data_per_size_per_register[chromosome][register][coordinate]+=1\n if mapped_data_per_size_per_register[chromosome][register][coordinate]>2:\n print(\"Trouble with alignments\",length,chromosome,register,coordinate)\n \n return whole_mapped_data,mapped_data_per_size_per_register", "def GenomeReader(GenomeFile):\n GenomeScaffolds = {}\n key = []\n with open(GenomeFile, 'r') as f:\n for line in f:\n line = line.strip()\n if line.startswith(\">\"):\n NamedSeq = line.replace('>', '')\n key.append(NamedSeq)\n GenomeScaffolds[NamedSeq] = \"\"\n else:\n GenomeScaffolds[NamedSeq] += line\n return GenomeScaffolds # Returns a Dictionary object", "def bed_file(file_handle):\n for line in file_handle:\n chrom, start, end, pop, _, _, fold, l10p, _ = line.rstrip().split('\\t')\n yield Peak(chrom, start, end, pop, fold, l10p)", "def fetchRefSeqByChrom(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res.setdefault(i.chr,{})\n res[i.chr].setdefault(i.strand,[])\n res[i.chr][i.strand].append(i)\n return res", "def gather_strand_by_geneID_dict(genome_gtf):\n strand_by_geneID_dict = {}\n with open(genome_gtf) as f: \n for line in f: \n current_line = line.split('\\t')\n if current_line[2] == \"CDS\":\n current_orf = current_line[8].split(';')[2].split()[1].strip('\\\"')\n current_strand = current_line[6]\n strand_by_geneID_dict[current_orf] = current_strand\n return strand_by_geneID_dict", "def _readAndCombine(inputBed, withinBp):\n junct = {}\n\n # collapse a \n count = 0\n for line in open(inputBed):\n count += 1\n #if count % 100000==0: \n # print count \n if line.startswith(\"track\"):\n #out.write(line.strip()) \n #out.write(\" useScore=1\\n\") \n continue\n\n [chr, start, stop, name, score, strand, thStart, thStop, rgb, blockCount, blockSizes, blockStarts] = line.split(\"\\t\")\n score = float(score)\n if not junct.has_key(chr):\n junct[chr] = {}\n\n if int(blockCount) != 2:\n #print \"Illegal line does not have 2 blocks\" \n #print line \n continue\n\n start = int(start)\n stop = int(stop)\n [size1, size2] = [int(x) for x in blockSizes.split(\",\")[:2]]\n [start1, start2] = [int(x) for x in blockStarts.split(\",\")[:2]]\n leftEdge = start + size1\n rightEdge = start + start2 # start2 is relative to chr start \n intronLength = rightEdge - leftEdge\n\n toCombine = []\n for (other) in junct[chr].keys():\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, otherLength) = other\n if otherLength != intronLength:\n continue\n\n if otherMaxLeft < (leftEdge-withinBp) or otherMinLeft > (leftEdge+withinBp):\n continue\n\n if otherMaxRight < (rightEdge-withinBp) or otherMinRight > (rightEdge+withinBp):\n continue\n\n toCombine.append(other)\n\n allLines = [ (score, line, leftEdge, rightEdge) ]\n minLeft = maxLeft = leftEdge\n minRight = maxRight = rightEdge\n for (other) in toCombine:\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, intronLength) = other\n minLeft = min(minLeft, otherMinLeft)\n maxLeft = max(maxLeft, otherMaxLeft)\n minRight = min(minRight, otherMinRight)\n maxRight = max(maxRight, otherMaxRight)\n\n allLines.extend(junct[chr][other])\n del junct[chr][other]\n\n junct[chr][ (minLeft, maxLeft, minRight, maxRight, intronLength) ] = allLines\n\n return junct", "def read_and_Kent_index(filename):\n chr_dict = defaultdict(lambda : defaultdict(list))\n debug = 0\n with open(filename, 'rU') as fh:\n # 
Skip comment lines\n # :TODO Fix this and make more general\n fh.next()\n fh.next()\n for line in fh:\n p_line = line[:-1].split(\"\\t\")\n try:\n start = int(p_line[1])\n end = int(p_line[2])\n kent_bin = binFromRangeStandard(start, end)\n except ValueError:\n # Case for VCF files\n start = int(p_line[1]) - 1\n end = int(p_line[1])\n kent_bin = binFromRangeStandard(start, end)\n chr_dict[p_line[0]][kent_bin].append(GTab(start, end))\n return(chr_dict)", "def read_phenotype_bed(phenotype_bed):\n if phenotype_bed.lower().endswith(('.bed.gz', '.bed')):\n phenotype_df = pd.read_csv(phenotype_bed, sep='\\t', index_col=3, dtype={'#chr':str, '#Chr':str})\n elif phenotype_bed.lower().endswith('.bed.parquet'):\n phenotype_df = pd.read_parquet(phenotype_bed)\n phenotype_df.set_index(phenotype_df.columns[3], inplace=True)\n else:\n raise ValueError('Unsupported file type.')\n phenotype_df.rename(columns={i:i.lower().replace('#chr','chr') for i in phenotype_df.columns[:3]}, inplace=True)\n\n phenotype_df['start'] += 1 # change to 1-based\n pos_df = phenotype_df[['chr', 'start', 'end']]\n phenotype_df.drop(['chr', 'start', 'end'], axis=1, inplace=True)\n\n # make sure BED file is properly sorted\n assert pos_df.equals(\n pos_df.groupby('chr', sort=False, group_keys=False).apply(lambda x: x.sort_values(['start', 'end']))\n ), \"Positions in BED file must be sorted.\"\n\n if (pos_df['start'] == pos_df['end']).all():\n pos_df = pos_df[['chr', 'end']].rename(columns={'end':'pos'})\n\n return phenotype_df, pos_df", "def bed(args):\n from jcvi.formats.bed import sort\n\n p = OptionParser(bed.__doc__)\n p.add_option(\n \"--blockonly\",\n default=False,\n action=\"store_true\",\n help=\"Only print out large blocks, not fragments\",\n )\n p.add_option(\n \"--point\",\n default=False,\n action=\"store_true\",\n help=\"Print accesssion as single point instead of interval\",\n )\n p.add_option(\"--scale\", type=\"float\", help=\"Scale the OM distance by factor\")\n p.add_option(\n \"--switch\",\n default=False,\n action=\"store_true\",\n help=\"Switch reference and aligned map elements\",\n )\n p.add_option(\n \"--nosort\",\n default=False,\n action=\"store_true\",\n help=\"Do not sort bed\",\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (xmlfile,) = args\n bedfile = xmlfile.rsplit(\".\", 1)[0] + \".bed\"\n\n om = OpticalMap(xmlfile)\n om.write_bed(\n bedfile,\n point=opts.point,\n scale=opts.scale,\n blockonly=opts.blockonly,\n switch=opts.switch,\n )\n\n if not opts.nosort:\n sort([bedfile, \"--inplace\"])", "def getCoveragePerBpVariable(wigFileName):\n\n d = {}\n\n for line in open(wigFileName):\n if line.startswith(\"track\") or line.startswith(\"browser\"):\n continue\n\n [chr, start, stop, level] = line.split()\n chr = chr.split(\"|\")[1].replace(\"MAL\", \"chr\")\n level = int(level)\n start = int(start)\n stop = int(stop)\n\n if not d.has_key(chr):\n d[chr] = {}\n\n for i in range(start, stop+1):\n d[chr][i] = level\n\n return d" ]
[ "0.6995929", "0.69823384", "0.6857566", "0.6807039", "0.67754483", "0.64668125", "0.63240325", "0.63110536", "0.6243366", "0.6156544", "0.6155014", "0.61454624", "0.6142284", "0.6125358", "0.6071565", "0.60690844", "0.6011632", "0.6009527", "0.600553", "0.5945978", "0.59370327", "0.59221035", "0.5881157", "0.58596414", "0.5846852", "0.5818227", "0.58091646", "0.57864004", "0.5761739", "0.5706308" ]
0.7402694
0
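As with the previous row, a brief illustrative usage of the go_thru document above, not part of the dataset itself: a minimal sketch assuming a small BED file named 'peaks.bed' (a hypothetical name) whose first line contains a 'track ...' header, which the document's code requires before it will parse any peaks.

bedrecord = go_thru('peaks.bed')   # e.g. {'chr1': ['1000-1550', ...], 'chr2': [...], ...}
for chrom, intervals in bedrecord.items():
    for interval in intervals:
        # each value is a 'start-end' string built by "-".join(peak.split()[1:3])
        start, end = interval.split('-')
        print(chrom, start, end)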
Take the chromosomes (the keys of the dictionary generated in go_thru()) and export them as a list. Then compare the lists of chromosomes to make sure they are present in both bed files. Export the list of keys present in both files, getting rid of chromosomes absent from one of the files.
def compare_chr(track1, track2): intersect = set(track1.keys()).intersection(set(track2.keys())) chr_list = list(intersect) #print 'comparing chromosome lists' return chr_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vennDiagram(bed1File, bed2File, only1Output=None, only2Output=None, bothOutput=None):\n\n bed1 = readJunctionsFromBed(bed1File, True)\n bed2 = readJunctionsFromBed(bed2File, True)\n\n count1 = 0\n count2 = 0\n countBoth = 0\n\n out1 = None\n if only1Output:\n out1 = open(only1Output, \"w\")\n out2 = None\n if only2Output:\n out2 = open(only2Output, \"w\")\n both = None\n if bothOutput:\n both = open(bothOutput, \"w\")\n\n for chr, chrJunct in bed1.iteritems():\n for (start,stop) in chrJunct:\n if bed2.has_key(chr):\n if bed2[chr].has_key( (start, stop) ):\n if both:\n for line in bed1[chr][(start,stop)]:\n both.write(line)\n both.write(\"\\n\")\n del bed2[chr][(start,stop)]\n countBoth += 1\n else:\n count1 += 1\n if out1:\n line = bed1[chr][(start,stop)][0]\n pieces = line.split()\n bedVals = [chr, start-10, stop+10, pieces[3], pieces[4], pieces[5], start-10, stop+10, pieces[8], pieces[9],\n \"10,10\", \"0,%s\"%(stop-start+10)]\n out1.write(\"\\t\".join(str(x) for x in bedVals))\n out1.write(\"\\n\")\n #for line in bed1[chr][(start, stop)]:\n # out1.write(line)\n # out1.write(\"\\n\")\n else:\n count1 += 1\n if out1:\n line = bed1[chr][(start,stop)][0]\n pieces = line.split()\n bedVals = [chr, start-10, stop+10, pieces[3], pieces[4], pieces[5], start-10, stop+10, pieces[8], \"2\",\n \"10,10\", \"0,%s\"%(stop-start+10)]\n out1.write(\"\\t\".join(str(x) for x in bedVals))\n out1.write(\"\\n\")\n #for line in bed1[chr][(start, stop)]:\n # out1.write(line)\n # out1.write(\"\\n\")\n\n #print\n #print\n #print\n\n count2 = sum( len(chrJunct) for chrJunct in bed2.values())\n if out2:\n for chr, chrJunct in bed2.iteritems():\n for (start,stop) in chrJunct:\n line = bed2[chr][(start,stop)][0]\n pieces = line.split()\n bedVals = [chr, start-10, stop+10, pieces[3], pieces[4], pieces[5], start-10, stop+10, pieces[8], \"2\",\n \"10,10\", \"0,%s\"%(stop-start+10)]\n out2.write(\"\\t\".join(str(x) for x in bedVals))\n out2.write(\"\\n\")\n #for line in bed2[chr][(start, stop)]:\n # out2.write(line)\n # out2.write(\"\\n\")\n\n print \"There were %s in both, %s in the first one and %s in the second one\" % (countBoth, count1, count2)", "def get_desired_gene(j, k, l):\n unwanted_gene_list = j\n full_gene_file = open(k)\n desired_genes = open(l, 'wt')\n counter = 0\n\n for gene in full_gene_file:\n with open(unwanted_gene_list) as j:\n if gene not in j.read():\n desired_genes.write(gene)\n else:\n counter += 1\n\n\n print(\"Filtered sequences: \" + str(counter))\n full_gene_file.close()\n desired_genes.close()", "def makekaryo(sp1chrom, sp2chrom, fai1, fai2):\n# import ipdb; ipdb.set_trace()\n fai1_name = fai1.split(\".\")[0]\n fai2_name = fai2.split(\".\")[0]\n fai_pair = \"{}-{}\".format(fai1_name, fai2_name)\n for fai in [fai1, fai2]:\n karyodict = {}\n with open(fai, 'r') as fai_l:\n for line in fai_l:\n x = line.strip().split()\n chrom = x[0]\n size = x[1]\n karyodict[chrom] = size\n if fai is fai1:\n fname = \"circos.{}.{}.karyotype.txt\".format(fai1_name, fai_pair)\n withchrom(fname, sp1chrom, karyodict)\n elif fai is fai2:\n fname = \"circos.{}.{}.karyotype.txt\".format(fai2_name, fai_pair)\n withchrom(fname, sp2chrom, karyodict)\n return(None)", "def get_my_mutations(quality_cutoff, coverage_cutoff):\n\n # my_mutations = {}\n # with open('/home/perry/Projects/loh/working/murim.exome.aa_chg.vars') as f:\n # for line in f:\n # my_mutations[line.strip()] = True\n # return my_mutations\n\n bed_file = 'data/nimblegen/2.1M_Human_Exome_Annotation/2.1M_Human_Exome.bed'\n bed_chr2st2end, 
bed_chr2posLs = bed_tools.load_bed(bed_file, \n 'NimbleGen Tiled Regions')\n # NimbleGen Tiled Regions\n # Target Regions\n\n use_data_dir = '/home/perry/Projects/loh/data/all_non_ref_hg18/'\n all_somatic = {}\n all_inherited = {}\n cancer_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n for exome in global_settings.exome_types:\n data_file = use_data_dir + exome\n inherited, somatic, murim = mutations.get_mutations(data_file, normal_qualities,\n cancer_qualities, quality_cutoff,\n False, coverage_cutoff)\n # only use the bed_tools NimbleGen\n # restriction for hg18 data\n for s in somatic['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_somatic[s] = True\n for i in inherited['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_inherited[i] = True\n return (set(all_somatic.keys()) & set(get_murim_covered(quality_cutoff)), set(all_inherited.keys()) & set(get_murim_covered(quality_cutoff)))", "def dumpchroms(release, output_path):\n global account, chnames\n if not os.path.isdir(output_path):\n raise ValueError(\"Create the output dir first\")\n \n runlog_path = os.path.join(output_path, 'dump_genome.log')\n LOGGER.log_file_path = runlog_path\n \n human = Genome(Species='human', Release=release, account=account)\n for i,n in enumerate(chnames, start=1):\n nx = \"Chr%02d_%s\" % (i, n) + \".fa\"\n print \"dumping chromosome file\", nx\n print \" fetching ...\"\n sys.stdout.flush()\n ch = human.getRegion(CoordName=n)\n ch.Seq.Name = \"Chr\" + n\n print \" dumping ...\"\n sys.stdout.flush()\n\n # output to temporary file name ... then rename once output completes\n # .. 
safer when working with large files.\n xoutpath = os.path.join(output_path, \"z-\"+nx)\n outpath = os.path.join(output_path, nx)\n with open(xoutpath, \"w\") as dst:\n dst.write(ch.Seq.toFasta()+\"\\n\")\n os.rename(xoutpath, outpath)\n LOGGER.output_file(outpath)\n \n print \"Done.\"\n sys.stdout.flush()\n return", "def Overtopping(self):\n\n #sort files\n leftOverTop = list()\n RightOverTop = list()\n # get names of files that has _left or _right at its end\n All1DFiles = os.listdir(self.OneDResultPath)\n for i in range(len(All1DFiles)) :\n if All1DFiles[i].endswith(self.leftOvertopping_Suffix):\n leftOverTop.append(All1DFiles[i])\n if All1DFiles[i].endswith(self.RightOvertopping_Suffix):\n RightOverTop.append(All1DFiles[i])\n\n # two dictionaries for overtopping left and right\n OverToppingSubsLeft = dict()\n OverToppingSubsRight = dict()\n # the _left and _right files has all the overtopping discharge\n # but sometimes the sum of all the overtopping is less than a threshold specified\n # and then the 2D algorithm does not run so these cross sections you will not find\n # any inundation beside it in the maps but you will find it in the _left or _right maps\n\n # for each sub-basin that has overtopping from the left dike\n for i in range(len(leftOverTop)):\n\n try:\n # open the file (if there is no column sthe file is empty)\n data = pd.read_csv(self.OneDResultPath + leftOverTop[i],header =None,delimiter = r'\\s+')\n # add the sub basin to the overtopping dictionary of sub-basins\n OverToppingSubsLeft[leftOverTop[i][:-len(self.leftOvertopping_Suffix)]] = dict()\n except:\n continue\n # get the XS that overtopping happened from\n XSs = list(set(data.loc[:,2]))\n # for each XS get the days\n for j in range(len(XSs)):\n OverToppingSubsLeft[leftOverTop[i][:-len(self.leftOvertopping_Suffix)]][XSs[j]] = list(set(data[0][data[2] == XSs[j]].tolist()))\n\n for i in range(len(RightOverTop)):\n\n try:\n # open the file\n data = pd.read_csv(self.OneDResultPath + RightOverTop[i],header =None,delimiter = r'\\s+')\n # add the sub basin to the overtopping dictionary of sub-basins\n OverToppingSubsRight[RightOverTop[i][:-len(self.RightOvertopping_Suffix)]] = dict()\n except :\n continue\n # get the XS that overtopping happened from\n XSs = list(set(data.loc[:,2]))\n # for each XS get the days\n for j in range(len(XSs)):\n OverToppingSubsRight[RightOverTop[i][:-len(self.RightOvertopping_Suffix)]][XSs[j]] = list(set(data[0][data[2] == XSs[j]].tolist()))\n\n self.OverToppingSubsLeft = OverToppingSubsLeft\n self.OverToppingSubsRight = OverToppingSubsRight", "def seed_and_extend(read, k, h, index, genome):\n\n list_mapping_read = [] # List containing the positions tested to map the read on the genome\n #(will be used to not try to align a read twice at the same position)\n\n # Variables which will be returned\n position_mapping = len(genome) # Optimal position of mapping for the read\n nb_mismatch = int(h) + 1 # Number of mismatch in this mapping\n list_mismatch = [] # List of mismatch positions on the genome\n\n for kmer_index in range(len(read)-int(k)+1):\n kmer = read[kmer_index:kmer_index + int(k)]\n # For each kmer, tries to find the optimal position of mapping\n # for the read with this kmer as seed.\n position_mapping_kmer = len(genome)\n nb_mismatch_kmer = int(h) + 1\n list_mismatch_kmer = []\n\n list_occurences = sorted(index.get_occurences(kmer))\n\n if not list_occurences:\n continue\n\n for occurences in list_occurences:\n\n nb_mismatch_occu = 0 # For each occurence of the kmer,\n # count the 
number of mismatch during alignment\n\n list_mismatch_occu = [] # List of mismatch seen during alignment\n # of read with this occurence of the kmer\n\n index_char_genome = occurences - kmer_index # Index where to map in the genome\n index_char_read = 0 # Index of the character to compare\n\n if index_char_genome in list_mapping_read: # If position already tested,\n #do not test it a second time.\n continue\n else:\n list_mapping_read.append(index_char_genome) # Add this position to the list\n # so it won't be tested a second time for this read\n\n while nb_mismatch_occu <= int(h) \\\n and index_char_read < len(read) \\\n and index_char_genome < len(genome):\n if genome[index_char_genome] != read[index_char_read]:\n nb_mismatch_occu += 1\n list_mismatch_occu.append(index_char_genome)\n\n index_char_genome += 1\n index_char_read += 1\n\n\n # If the mapping of the read with this occurence of the read\n # is better than the previous one (less mismatch) : optimal values for kmer stored\n if nb_mismatch_occu < nb_mismatch_kmer:\n nb_mismatch_kmer = nb_mismatch_occu\n list_mismatch_kmer = list_mismatch_occu\n position_mapping_kmer = occurences - kmer_index\n\n # If the best mapping found for this kmer is better than the mapping\n # found with the previous kmer : optimal values for read stored\n if nb_mismatch_kmer < nb_mismatch \\\n or nb_mismatch_kmer == nb_mismatch \\\n and position_mapping_kmer < position_mapping:\n nb_mismatch = nb_mismatch_kmer\n list_mismatch = list_mismatch_kmer\n position_mapping = position_mapping_kmer\n\n return position_mapping, nb_mismatch, list_mismatch", "def generate_jaccard0_isoseq_bed(self):\n all = set(self.isoseqid2exonlen.keys())\n notwant = set(self.isoseqid2besttransidB.keys())\n want = all - notwant\n want_lines = []\n with open(\"../data/pacbio/\" + self.name + \".B.j0.bed\", 'w') as f:\n for line in self.linesPacBioBed:\n (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\")\n if name in want:\n f.write(line)", "def main():\n args = get_args()\n fasta = args.fasta\n kmer= args.overlap\n\n if kmer <= 1:\n die('-k \"{}\" must be a positive integer'.format(kmer))\n\n if not os.path.isfile(fasta):\n die('\"{}\" is not a file'.format(fasta))\n\n kstart={}\n kend={}\n with open(fasta, 'r') as f:\n for record in SeqIO.parse(f, \"fasta\"):\n kstart[record.id]=(find_kmers(record.seq, kmer)[0])\n kend[record.id]=(find_kmers(record.seq, kmer)[-1])\n \n for endk, endv in kend.items():\n for startk, startv in kstart.items():\n if endv in startv:\n if endk is not startk:\n print(endk, startk)", "def getReadOnGeneFile(rnameList, len_param):\n log.info(\"Select reads that are on genes\")\n for ch in rnameList:\n tcount = 0\n \n geneS = {}#gene start\n geneE = {}#gene end\n g_direct = {}#gene direction\n readS = {}#read start\n readE = {}#read End\n readDic = {}#readDic[id] = read\n sortGeneId = {}\n sortReadId = {}\n genefile = os.path.join(working_dir, 'removeOverlap.'+ch+'.gff')\n readfile = os.path.join(working_dir, 'MappedRead.'+ch+'.sam')\n rgfile = os.path.join(working_dir, 'ReadOnGeneList.'+ch+'.tab')\n log.info(\"Generate \" + rgfile)\n f=open(rgfile, \"w\") \n \n geneS, geneE, g_direct = getGFFStartEnd(genefile, len_param)\n sortGeneId = sortId(geneS)\n \n readS, readE,readDic = getSAMStartEnd(readfile)\n sortReadId = sortId(readS)\n ys = 0\n \n for x in range(len(sortGeneId)):\n \n gID = sortGeneId[x]#gene id\n gs = geneS.get(gID)#gene start\n ge = 
geneE.get(gID)#gene end\n gd = g_direct.get(gID)\n glineList = []\n sameG = False\n \n for y in range(ys,len(sortReadId)):\n rID = sortReadId[y]\n rs = readS.get(rID)\n re = readE.get(rID)\n if rs >= gs:\n if re <= ge:\n f.write(gID)\n f.write('\\t')\n f.write(str(gs))\n f.write('\\t')\n f.write(str(ge))\n f.write('\\t')\n f.write(gd)\n f.write('\\t')\n f.write(rID)\n f.write('\\t')\n f.write(str(rs))\n f.write('\\t')\n f.write(str(re))\n f.write('\\t')\n f.write(readDic.get(rID))\n elif re > ge:\n ys = y\n break\n elif rs > ge:\n ys = y\n break\n f.close()", "def _readAndCombine(inputBed, withinBp):\n junct = {}\n\n # collapse a \n count = 0\n for line in open(inputBed):\n count += 1\n #if count % 100000==0: \n # print count \n if line.startswith(\"track\"):\n #out.write(line.strip()) \n #out.write(\" useScore=1\\n\") \n continue\n\n [chr, start, stop, name, score, strand, thStart, thStop, rgb, blockCount, blockSizes, blockStarts] = line.split(\"\\t\")\n score = float(score)\n if not junct.has_key(chr):\n junct[chr] = {}\n\n if int(blockCount) != 2:\n #print \"Illegal line does not have 2 blocks\" \n #print line \n continue\n\n start = int(start)\n stop = int(stop)\n [size1, size2] = [int(x) for x in blockSizes.split(\",\")[:2]]\n [start1, start2] = [int(x) for x in blockStarts.split(\",\")[:2]]\n leftEdge = start + size1\n rightEdge = start + start2 # start2 is relative to chr start \n intronLength = rightEdge - leftEdge\n\n toCombine = []\n for (other) in junct[chr].keys():\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, otherLength) = other\n if otherLength != intronLength:\n continue\n\n if otherMaxLeft < (leftEdge-withinBp) or otherMinLeft > (leftEdge+withinBp):\n continue\n\n if otherMaxRight < (rightEdge-withinBp) or otherMinRight > (rightEdge+withinBp):\n continue\n\n toCombine.append(other)\n\n allLines = [ (score, line, leftEdge, rightEdge) ]\n minLeft = maxLeft = leftEdge\n minRight = maxRight = rightEdge\n for (other) in toCombine:\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, intronLength) = other\n minLeft = min(minLeft, otherMinLeft)\n maxLeft = max(maxLeft, otherMaxLeft)\n minRight = min(minRight, otherMinRight)\n maxRight = max(maxRight, otherMaxRight)\n\n allLines.extend(junct[chr][other])\n del junct[chr][other]\n\n junct[chr][ (minLeft, maxLeft, minRight, maxRight, intronLength) ] = allLines\n\n return junct", "def split_decode_file():\n # split files by chromosome\n header = []\n current_chrom = 'chr1'\n # file_template = decode_folder + '/{}.deCODE_2019.GRCh38.txt'\n file_template = decode_folder + '/{}.deCODE_2019_hg19.txt'\n decode_file = decode_folder + '/aau1043_DataS3_hg19_liftOver.bed'\n w = open(file_template.format(current_chrom), 'a')\n print('NOTE: appending to map files, not overwriting. 
may cause duplicates')\n with open(decode_file, 'r') as f:\n for line in f:\n # save the header info\n if line.startswith('#'):\n header.append(line)\n # save the column labels\n elif line.startswith('Chr'):\n header.append('# ' + line)\n # write header to first file now\n w.write(''.join(header))\n # the remaining lines are data\n else:\n # get the chromosome for the current line\n ch = line.split()[0]\n # if the chromosome matches the open file, write to it\n if ch == current_chrom:\n w.write(line)\n # if a new chromosome arises, switch to a new writefile\n else:\n w.close()\n current_chrom = ch\n w = open(file_template.format(current_chrom), 'a')\n # write header to file\n w.write(''.join(header))\n w.write(line)\n\n # close the last open file\n w.close()", "def parse_proteome(fasta_file,kmer_size=12,out_base=\"kmers\",seq_per_file=50000,num_to_write=1000000):\n\n all_kmers = {}\n seq_name = None\n current_sequence = []\n\n # Parse fasta file, splitting into kmers as we go\n with open(fasta_file) as infile:\n for l in infile:\n\n if l.startswith(\">\"):\n if seq_name is not None:\n\n sequence = \"\".join(current_sequence)\n kmer_list = create_kmers(sequence,kmer_size)\n\n for k in kmer_list:\n try:\n all_kmers[k].append(seq_name)\n except KeyError:\n all_kmers[k] = [seq_name]\n\n current_sequence = []\n seq_name = l[1:].strip()\n else:\n if seq_name is None or l.strip() == \"\":\n continue\n current_sequence.append(l.strip())\n\n if seq_name is not None:\n\n sequence = \"\".join(current_sequence)\n kmer_list = create_kmers(sequence,kmer_size)\n\n for k in kmer_list:\n try:\n all_kmers[k].append(seq_name)\n except KeyError:\n all_kmers[k] = [seq_name]\n\n # Sort kmers\n to_sort = [(len(all_kmers[k]),k) for k in all_kmers.keys()]\n to_sort.sort(reverse=True)\n\n # kmers \n kmers = [k[1] for k in to_sort]\n\n if len(kmers) > num_to_write:\n kmers = kmers[:num_to_write]\n else:\n\n # If there are more single kmers than the total we want to get, grab a\n # random selection of them.\n single_kmers = [k[1] for k in to_sort if k[0] == 1]\n if num_to_write - len(kmers) > 0:\n to_grab = num_to_write - len(kmers)\n random.shuffle(single_kmers)\n kmers.extend(single_kmers[:to_grab])\n\n out = []\n counter = 0\n for k in kmers:\n\n # make sure kmer has only amino acids in it\n score = sum([1 for l in k if l not in \"ACDEFGHIKLMNPQRSTVWY\"])\n if score > 0:\n continue\n\n ids = \",\".join(all_kmers[k])\n out.append(\"{} {:5d} {}\\n\".format(k,len(all_kmers[k]),ids))\n\n if counter != 0 and counter % seq_per_file == 0:\n\n out_file = \"{}_{}.kmers\".format(out_base,counter)\n print(counter,len(kmers))\n sys.stdout.flush()\n\n f = open(out_file,'w')\n f.write(\"\".join(out))\n f.close()\n\n out = []\n\n counter += 1\n\n\n out_file = \"{}_{}.kmers\".format(out_base,counter)\n\n f = open(out_file,'w')\n f.write(\"\".join(out))\n f.close()", "def load_data() -> list:\n # trans_dict is used for changing the given names into standardized names.\n trans_dict = {\"chr1\": \"1\", \"chr2\": \"2\", \"chr3\": \"3\", \"chr4\": \"4\", \"chr5\": \"5\", \"chr6\": \"6\", \"chr7\": \"7\",\n \"chr8\": \"8\", \"chr9\": \"9\", \"chr10\": \"10\", \"chr11\": \"11\", \"chr12\": \"12\", \"chr13\": \"13\", \"chr14\": \"14\",\n \"chr15\": \"15\", \"chr16\": \"16\", \"chr17\": \"17\", \"chr18\": \"18\", \"chr19\": \"19\", \"chrx\": \"x\", \"chry\": \"y\"}\n # This try statement catches user error.\n try:\n with open(sys.argv[1]) as bed_file, open(sys.argv[2]) as fasta_file:\n fasta_records = []\n # Opens the bed file and splits 
into lists\n bed_file = list(csv.reader(bed_file, delimiter='\\t'))\n # Changes the names of the chromosomes in bed file, does some light rearranging and formatting.\n bed_file = [[trans_dict[record[0].lower()], record[1], record[3][0:record[3].index(\n '\\'')]] for record in bed_file]\n # Sorts the desired indices by chromosome, then by index in the chromosome.\n bed_file = sorted(bed_file, key=itemgetter(1))\n bed_file = sorted(bed_file, key=itemgetter(0))\n # This stores the desired indexes for each chromosome.\n indexable_bed_records = {'1': [], '2': [], '3': [], '4': [], '5': [], '6': [], '7': [], '8': [], '9': [],\n '10': [], '11': [], '12': [], '13': [], '14': [], '15': [], '16': [], '17': [],\n '18': [], '19': [], 'x': [], 'y': []}\n # Put each desired index into it's appropriate chromosome list.\n for record in bed_file:\n indexable_bed_records[record[0]].append([record[2], record[1]])\n # Loops over fasta records in the supplied fasta file\n for fasta_record in fasta_iter(fasta_file):\n # grabs the chromosome id\n chrom_id = fasta_record[\"header\"][:fasta_record[\"header\"].index(' ')].lower()\n # Some chromosomes are not desired, skip them.\n if chrom_id not in indexable_bed_records.keys():\n continue\n # Grabs the indexes we want to extract from the chromosome.\n indexes = indexable_bed_records[chrom_id]\n # Grabs each index+/-10 from the sequence\n for index in indexes:\n fasta_records.append([index[0], fasta_record[\"seq\"][int(index[1]) - 10:int(index[1]) + 10]])\n # Returns a list of lists of format [5'/3',splice site sequence]\n return fasta_records\n # Catches user error.\n except (FileNotFoundError, IndexError) as e:\n if type(e) is IndexError:\n sys.stderr.write(\"Usage: {} bed_file fasta_file\\n\\tbed_file: The appropriate bed file. 
\\n\\t\"\n \"fasta_file: The appropriate fasta file.\\n\".format(os.path.basename(__file__)))\n elif type(e) is FileNotFoundError:\n sys.stderr.write(\"One of the specified files was not found.\\n\")\n sys.exit(1)", "def main():\n # checking the directory\n cwd = os.getcwd()\n print(f'The working directory: {cwd}')\n # counting time \n start_time = time.process_time()\n # passing args\n arg = parse_arguments()\n sub_dir = arg.sub_dir\n dir_out = arg.dir_out\n file_amb = 'csv_to_clean'\n names_ambigous = defaultdict(str)\n with open(file_amb, 'r') as fh:\n for line in fh:\n name = line.strip().split('/')[2]\n names_ambigous[name] = names_ambigous.get(name, '')\n names_ambigous[name] += line.strip()\n print(f'number files: {len(names_ambigous)}')\n # checking if the output directory exist\n # if not make it\n f_pwd = os.path.join('Results', 'kmer_counts')\n # get the genus names\n cnt = 0\n for name, filename in names_ambigous.items():\n cleaned = get_csv_clean(filename)\n full_path = os.path.join(f_pwd, name)\n if os.path.exists(full_path):\n print(f'The path {full_path} exist')\n pass\n else:\n os.makedirs(full_path)\n csv_name = f'{full_path}/{name}_k2_8_chr.csv'\n print(f'Checking the full path {csv_name}')\n with open(csv_name, 'w') as fout:\n for km, cn in cleaned.items():\n fout.write(f'{km},{cn}\\n')\n cnt += 1\n # get final time of the script\n end = time.process_time()\n total_time = end - start_time\n print(f'The script takes {total_time} to finish!')\n print(f'Where read and manipulated {cnt} files')\n print('Done!')", "def mainloop(sortedbedfile, par = {}):\n \n chrold = ''\n cluster = []\n plus = []\n wigfile = open(par['OUTFILE'], 'w')\n #outfilename=os.path.join(os.path.split(sortedbedfile)[0],os.path.split(sortedbedfile)[1].split('.bed')[0]+'.wig')\n #wigfile = open(outfilename, 'w')\n #wigfile = open(par['OUTFILE'], 'w')\n #wigfile=open(par['INFILE'].split('.')[0]+'_wd.wig','w')\n tag_thr = int(par['TAG_THR'])\n \n for line in open(sortedbedfile).xreadlines():\n if not line[:3] == 'chr':\n continue\n line = line.strip().split()\n if not chrlength[par['SPENAME']].has_key(line[0]):\n continue\n \n if chrold == '':\n chrold = line[0]\n print >>sys.stderr, 'Reading for', chrold, '......', time.asctime()\n if chrlength[par['SPENAME']][line[0]] % 10 == 0:\n cluster = [0] * (chrlength[par['SPENAME']][line[0]] / 10) # Tag number (10 bp space)\n plus = [0] * (chrlength[par['SPENAME']][line[0]] / 10) # Tag number in plus strand (10 bp space)\n else:\n cluster = [0] * (chrlength[par['SPENAME']][line[0]] / 10 + 1)\n plus = [0] * (chrlength[par['SPENAME']][line[0]] / 10 + 1)\n \n elif line[0] != chrold:\n print >>sys.stderr, 'Changing for', chrold, '......', time.asctime()\n package = {}\n package = change2postion(chrold, cluster, tag_thr) # package['position_file'], package['new_file']\n# change2postion_saveas_wig(wigfile,chrold, cluster)\n ##################################\n # test\n ##################################\n #print >>sys.stderr, 'writing for', chrold, '......', time.asctime()\n #filetmp = open(chrold + '.new', 'w')\n #for c in package['new_file']:\n # print >>filetmp, c\n #filetmp.close()\n #filepos = open(chrold + '.pos', 'w')\n #for p in package['position_file']:\n # print >>filepos, p\n #filepos.close()\n #peakregions = []\n #for m in open('/home/liulab/yzhang/ChIP_seq/num_pos/nucleosome_cluster_75/Nucleosome_chr1.cluster_10dec_wavelet.bed').xreadlines():\n # if m[:3] == 'chr':\n # peakregions.append(m.strip())\n ##################################\n \n 
##################################\n # to be finished\n ##################################\n if par['WANT_DENOISE'] == 'yes':\n print >>sys.stderr, 'Denoising for', chrold, '......', time.asctime()\n denoised = denoiseChIPSeq(package['NEW_FILE'], package['POSITION_FILE'], par)\n else:\n denoised = package['NEW_FILE']\n \n print>>sys.stderr, 'Saving as a wig file', chrold, '......', time.asctime()\n change2wig(wigfile,package['POSITION_FILE'],denoised)\n\n package = {}\n peakregions = []\n peakregions_filtered = []\n ##################################\n chrold = line[0]\n print >>sys.stderr, 'Reading for', chrold, '......', time.asctime()\n \n if chrlength[par['SPENAME']][line[0]] % 10 == 0:\n cluster = [0] * (chrlength[par['SPENAME']][line[0]] / 10) # Tag number (10 bp space)\n plus = [0] * (chrlength[par['SPENAME']][line[0]] / 10) # Tag number in plus strand (10 bp space)\n else:\n cluster = [0] * (chrlength[par['SPENAME']][line[0]] / 10 + 1)\n plus = [0] * (chrlength[par['SPENAME']][line[0]] / 10 + 1)\n \n try:\n if line[5] == '+': # Tag in plus strand\n b = int(line[1]) + int(par['SHIFT'])\n e = b + int(par['EXTENSION'])\n if (max(b, 1) - 1) / 10 == 0:\n beginpos = max(b, 1)\n else:\n beginpos = (max(b, 1) / 10 + 1) * 10 + 1\n for k in xrange(beginpos, min(e, chrlength[par['SPENAME']][line[0]]), 10):\n cluster[(k - 1) / 10] += 1\n plus[(k - 1) / 10] += 1\n \n elif line[5] == '-': # Tag in minus strand\n e = int(line[2]) - int(par['SHIFT'])\n b = e - int(par['EXTENSION'])\n if (max(b, 1) - 1) / 10 == 0:\n beginpos = max(b, 1)\n else:\n beginpos = (max(b, 1) / 10 + 1) * 10 + 1\n for k in xrange(beginpos, min(e, chrlength[par['SPENAME']][line[0]]), 10):\n cluster[(k - 1) / 10] += 1\n else:\n continue\n except:\n print >> sys.stderr, 'Tag position file error: ', sys.exc_info()[0], sys.exc_info()[1]\n sys.exit()\n \n print >>sys.stderr, 'changing for', chrold, '......', time.asctime()\n package = change2postion(chrold, cluster, tag_thr) # package['position_file'], package['new_file']\n# change2postion_saveas_wig(wigfile,chrold, cluster)\n ##################################\n # test\n ##################################\n #print >>sys.stderr, 'writing for', chrold, '......', time.asctime()\n #filetmp = open(chrold + '.new', 'w')\n #for c in package['new_file']:\n # print >>filetmp, c\n #filetmp.close()\n #filepos = open(chrold + '.pos', 'w')\n #for p in package['position_file']:\n # print >>filepos, p\n #filepos.close()\n ##################################\n \n ##################################\n # to be finished\n ##################################\n if par['WANT_DENOISE'] == 'yes':\n print >>sys.stderr, 'Denoising for', chrold, '......', time.asctime()\n denoised = denoiseChIPSeq(package['NEW_FILE'], package['POSITION_FILE'], par)\n else:\n denoised = package['NEW_FILE']\n\n print>>sys.stderr, 'Saving as a wig file', chrold, '......', time.asctime()\n change2wig(wigfile,package['POSITION_FILE'],denoised)\n\n package = {}\n peakregions = []\n peakregions_filtered = []\n ##################################\n cluster = []\n plus = []\n wigfile.close()", "def get_2away_pairs(local_index_to_kmer, k):\n\n #These are the base cases for the recursion. If k==1, the kmers obviously can't differ in exactly two bases, so return an empty list. 
if k==2, return every pair of indices where the kmers at those indices differ at exactly two bases.\n if k == 1:\n return []\n if k == 2:\n return [(i, j) for (i,j) in combinations(local_index_to_kmer, 2) if local_index_to_kmer[i][0] != local_index_to_kmer[j][0] and local_index_to_kmer[i][1] != local_index_to_kmer[j][1]]\n\n #Get the two halves of the kmer\n k_L = k//2\n k_R = k-k_L\n\n #initialize dictionaries in which the key is the hash of half of the kmer, and the value is a list of indices of the kmers with that same hash\n kmer_L_hashes = defaultdict(list)\n kmer_R_hashes = defaultdict(list)\n\n #initialize pairs, which will be returned by get_1away_pairs\n pairs = []\n\n #initialize dictionaries containing the left halves and the right halves (since we will have to check cases where the left half differs by 1 and the right half differs by 1)\n local_index_to_kmer_L = {}\n local_index_to_kmer_R = {}\n\n #for each kmer, calculate its left hash and right hash, then add its index to the corresponding entries of the dictionary\n for i, kmer in local_index_to_kmer.items():\n kmer_L = kmer[:k_L]\n kmer_R = kmer[k_L:]\n local_index_to_kmer_L[i] = kmer_L\n local_index_to_kmer_R[i] = kmer_R\n kmer_L_hashes[kmer_to_int(kmer_L)] += [i]\n kmer_R_hashes[kmer_to_int(kmer_R)] += [i]\n\n #for each left hash in which there are multiple kmers with that left hash, find the list of pairs in which the right half differs by 2. (aka, if left half matches, recurse on right half).\n for kmer_L_hash_indices in kmer_L_hashes.values(): #same in first half\n if len(kmer_L_hash_indices) > 1:\n pairs += get_2away_pairs({kmer_L_hash_index:local_index_to_kmer[kmer_L_hash_index][k_L:] for kmer_L_hash_index in kmer_L_hash_indices}, k_R) #differ by 2 in right half\n\n #for each right hash in which there are multiple kmers with that right hash, find the list of pairs in which the left half differs by 2. 
(aka, if right half matches, recurse on left half).\n for kmer_R_hash_indices in kmer_R_hashes.values(): #same in second half\n if len(kmer_R_hash_indices) > 1:\n pairs += get_2away_pairs({kmer_R_hash_index:local_index_to_kmer[kmer_R_hash_index][:k_L] for kmer_R_hash_index in kmer_R_hash_indices}, k_L) #differ by 2 in left half\n\n #Find matching pairs where the left half is one away, and the right half is one away\n possible_pairs_L = set(get_1away_pairs(local_index_to_kmer_L,k_L))\n possible_pairs_R = set(get_1away_pairs(local_index_to_kmer_R,k_R))\n pairs += list(possible_pairs_L.intersection(possible_pairs_R))\n return(pairs)", "def mergePed(bnlist=[],faff=[],ofaff=[],newbasename='newped',fo=0):\r\n lcdmap = getLCD(bnlist) # list of chr,offset,rs for all snp common to all files\r\n print 'got %d lcd snps-%s' % (len(lcdmap),lcdmap[:5])\r\n cfped = []\r\n coped = []\r\n cfgeno = []\r\n cogeno = []\r\n allrsa = {}\r\n ignorers = {}\r\n for i,basename in enumerate(bnlist):\r\n fped,oped,fgeno,ogeno,trsadict = subsetPed(basename,lcdmap,faff[i],ofaff[i])\r\n print '%s gave %d fgeno' % (basename,len(fgeno))\r\n for rs in trsadict.keys():\r\n tk = trsadict[rs].keys()\r\n if len(tk) > 2:\r\n print 'for %s, rs %s has alleles %s' % (basename, rs, trsadict[rs])\r\n if not allrsa.get(rs,None):\r\n allrsa[rs] = {}\r\n for a in tk:\r\n if not allrsa[rs].get(a,None):\r\n allrsa[rs][a] = trsadict[rs][a]\r\n else:\r\n allrsa[rs][a] += trsadict[rs][a]\r\n tk = allrsa[rs].keys()\r\n if len(tk) > 2 and not ignorers.get(rs,None): # new\r\n #print 'After merge basename %s, rs %s has alleles %s' % (basename, rs,allrsa[rs])\r\n ignorers[rs] = rs\r\n cfped += fped\r\n coped += oped\r\n cfgeno += fgeno\r\n cogeno += ogeno\r\n print 'after merge all have %d fgeno and %d ogeno' % (len(cfgeno),len(cogeno))\r\n # now have offspring and founder rows in lcdmap order\r\n # write map file\r\n print '### found %d markers > 2 alleles' % (len(ignorers.keys()))\r\n keepmarkers = [x for x in range(len(lcdmap)) if not ignorers.get(lcdmap[x][2],None)]\r\n newmap = ['\\t'.join((lcdmap[x][0],lcdmap[x][2],'0','%d' % lcdmap[x][1])) for x in keepmarkers] # chrom,offset,rs\r\n f = file('%s.map' % newbasename,'w')\r\n f.write('%s\\n' % '\\n'.join(newmap))\r\n f.close()\r\n for i,geno in enumerate(cfgeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno[i] = g # replace\r\n print 'cfgeno converted'\r\n if not fo: # not founders only - note arrays are not lists!\r\n cfped += copy.copy(coped) #\r\n del coped\r\n for i,geno in enumerate(cogeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno.append(g) # extend founders\r\n del cogeno\r\n print 'after if not fo now have %d cfgeno' % (len(cfgeno))\r\n f = file('%s.ped' % newbasename,'w')\r\n for n,ped in enumerate(cfped):\r\n l = ' '.join(ped + list(cfgeno[n]))\r\n if n % 100 == 0 and n > 0:\r\n print 'writing line %d' % n\r\n f.write(l)\r\n f.write('\\n')\r\n f.close()\r\n print 'wrote %d map rows and %d ped rows to %s' % (len(newmap),len(cfped),newbasename)", "def change2postion_saveas_wig(wigfile,chrname, cluster, tag_thr=2):\n package = {}\n \n cutoff = 1000 / 10\n oceanbegin = 0 # ocean: tag num <= 2\n oceanflag = 1\n \n num = []\n for k in xrange(len(cluster)):\n num.append(cluster[k])\n \n #put a header for 
each chromosome\n print >>wigfile,\"track type=wiggle_0\\nvariableStep chrom=%s span=%d\" %(chrname,10)\n \n for k in xrange(len(num) - 1):\n if num[k] > tag_thr:\n if oceanflag == 1:\n oceanflag = 0\n if (k - oceanbegin) >= cutoff:\n oceanflag = 0\n for m in xrange(oceanbegin, k):\n num[m] = -1\n \n elif num[k] <= tag_thr and oceanflag == 0:\n oceanbegin = k\n oceanflag = 1\n if oceanflag == 1:\n for m in xrange(oceanbegin, len(num)):\n num[m] = -1\n\n linenum = 0\n islandflag = 0\n islandbegin = 0\n islandline = 0\n for k in xrange(len(num) - 1):\n if islandflag == 0 and num[k] > -1:\n islandflag = 1\n linenum += 1\n islandbegin = k + 1\n islandline = linenum\n print >>wigfile, \"%d\\t%d\" %(islandbegin*10-9,num[k])\n \n elif islandflag == 1 and num[k] > -1:\n linenum += 1\n print >>wigfile, \"%d\\t%d\" %(k*10+1,num[k])\n elif islandflag == 1 and num[k] == -1:\n islandflag = 0\n\n if islandflag == 1:\n linenum += 1\n print >>wigfile, \"%d\\t%d\" %(len(num)*10-9,num[len(num)-1])\n \n num = []", "def produce_protein_interaction_dict (inPath, outPath): \n PPIs = pd.read_table(inPath, sep=\"\\t\")\n proteins = set(PPIs[[\"Protein_1\", \"Protein_2\"]].values.flatten())\n proteinPartners = {}\n for protein in proteins:\n partners = set(PPIs.loc[(PPIs[[\"Protein_1\", \"Protein_2\"]]==protein).any(1),\n [\"Protein_1\", \"Protein_2\"]].values.flatten()) - {protein}\n if sum((PPIs[[\"Protein_1\", \"Protein_2\"]]==protein).all(1)) > 0:\n partners.add(protein)\n proteinPartners[protein] = partners\n with open(outPath, 'wb') as fOut:\n pickle.dump(proteinPartners, fOut)", "def subsetPed(basename=\"\",lcdmap = [],faff='1', ofaff='2'):\r\n mf = file('%s.map' % basename,'r').readlines()\r\n lmap = [x.strip().split() for x in mf]\r\n rscols = {} # lookup marker table\r\n colrs = [] # lookup rs from column\r\n for i,m in enumerate(lmap): # get columns to keep in the order we want them\r\n rscols[m[1]] = i # keep track of where each rs is in this map\r\n colrs.append(m[1]) # and keep the list of rs for tracking alleles\r\n wewant = [rscols[x[2]] for x in lcdmap] # columns we want to keep\r\n print '#Subsetped faff=%s ofaff=%s keeping %d (%s) of potential lcd %d for %s' % \\\r\n (faff,ofaff,len(wewant),wewant[:20],len(lcdmap),basename)\r\n pf = file('%s.ped' % basename,'r')\r\n ogeno = [] # offspring new lines\r\n fgeno = [] # founders\r\n oped = [] # for pedigrees\r\n fped = []\r\n rsadict = {} # keep a count of alleles - seems to be a problem\r\n for i,l in enumerate(pf):\r\n if (i+1) % 500 == 0:\r\n print '%s at line %d' % (basename,i+1)\r\n ll = l.strip().split()\r\n ped = ll[:6]\r\n founder = (ll[2] == '0' and ll[3] == '0') \r\n aff = faff\r\n if not founder:\r\n aff = ofaff\r\n ped[5] = aff # adjust as needed\r\n if founder:\r\n fped.append(ped)\r\n else:\r\n oped.append(ped)\r\n gt = ll[6:]\r\n geno = []\r\n for snp in wewant: # columns in order\r\n thisrs = colrs[snp]\r\n base = snp*2\r\n g1 = gt[base]\r\n g2 = gt[base+1]\r\n geno.append(g1)\r\n geno.append(g2)\r\n if not rsadict.get(thisrs,None):\r\n rsadict[thisrs] = {}\r\n if g1 <> '0':\r\n if not rsadict[thisrs].get(g1,None):\r\n rsadict[thisrs][g1] = 1\r\n else:\r\n rsadict[thisrs][g1] += 1 \r\n if g2 <> '0':\r\n if not rsadict[thisrs].get(g2,None):\r\n rsadict[thisrs][g2] = 1\r\n else:\r\n rsadict[thisrs][g2] += 1\r\n keepgt = array.array('c',geno)\r\n if founder:\r\n fgeno.append(keepgt)\r\n else:\r\n ogeno.append(keepgt)\r\n print '#Subsetped %s %d fgeno %d ogeno' % (basename,len(fgeno),len(ogeno))\r\n return fped,oped,fgeno,ogeno,rsadict", 
"def go_thru(bed_file):\n bedrecord = dict() #Create dictionary with information from this bed file\n #print 'Hi I just made a dictionary in which to parse the contents of your bed file'\n beddata = open(bed_file,'r')\n #print 'OK, so now I am opening your file called', bed_file\n thereadline= beddata.readline()\n #print thereadline\n if \"track\" in thereadline : \n #if len(peak.split())>3\n for peak in beddata: \n if peak.split()[0] in bedrecord: #if record is not already present in dictionary\n #print 'Another peak on it is located at', peak.split()[0] \n bedrecord[peak.split()[0]].append(\"-\".join(peak.split()[1:3])) \n else : \n #bedrecord[peak.split()[0]]= str(peak.split()[1],'-',peak.split()[2])\n bedrecord[peak.split()[0]]= [\"-\".join(peak.split()[1:3])] #if it IS already present, just add onto existing record\n #print \"New chromosome in your file - it is \", peak.split()[0] \n else:\n print 'This is not a bed file dude!'\n return bedrecord", "def print_candidates(k1=17, k2=20, filename=None):\n Id2 = []\n for l in range(k1,k2):\n Id2.extend(read_identities('../full_identities/2_' + str(l)))\n Id3 = []\n for l in range(22,24):\n Id3.extend(read_identities('../vv_identities/3_' + str(l)))\n\n if filename is None:\n import sys\n output = sys.stdout\n else:\n output = open(filename, 'w')\n\n for u,v in Id2:\n killedf = '../candidates_full/killed_{}_{}'.format(t2s(u), t2s(v))\n killed = set(read_identities(killedf))\n for s,t in set(Id3).difference(killed):\n output.write(t2s(s) + ' ' + t2s(t) + ' ' + t2s(u) + ' ' + t2s(v) + '\\n')\n if filename is not None:\n output.close()", "def copy_top_k(cfg):\n top_k = get_top_k(cfg)\n dirs = get_top_k_dirs(cfg)\n for key in top_k:\n make_directory(dirs[key])\n k = 0\n for coord_id in top_k[key]:\n in_pdb = get_in_pdb_path(cfg, coord_id)\n #out_pdb = os.path.join(dirs[key], str(k).zfill(Z_FILL) + \"-\" + coord_id + \".pdb\")\n out_pdb = os.path.join(dirs[key], str(k).zfill(Z_FILL) + \".pdb\")\n print(\"Copying \" + in_pdb + \" to \" + out_pdb)\n k = k + 1\n copyfile(in_pdb, out_pdb)", "def lcs_hamming_only_matches_with_many_k(s1: str, s2: str, ks: List[int], length: int, matches_lst: List, query_name, target_name):\n file_name = './chromo_matches_' + str(length) + '.txt'\n f = open(file_name, mode='a')\n count = [0, 0, 0]\n for i in range(0, len(s1) - length + 1):\n for j in range(0, len(s2) - length + 1):\n sub1 = s1[i: i + length]\n sub2 = s2[j: j + length]\n result = hamming_distance(sub1, sub2)\n if result <= 0:\n # matches_lst.append([query_name, target_name, i, j, result, sub1, sub2])\n f.write(query_name + ',' + target_name + ',' + str(i) + ',' + str(j) + ',' + str(k)+ ',' + str(sub1) + ',' + sub2 + '\\n')\n count[0] += 1\n if result <= 1:\n f.write(query_name + ',' + target_name + ',' + str(i) + ',' + str(j) + ',' + str(k)+ ',' + str(sub1) + ',' + sub2 + '\\n')\n count[1] += 1\n if result <= 2:\n f.write(query_name + ',' + target_name + ',' + str(i) + ',' + str(j) + ',' + str(k)+ ',' + str(sub1) + ',' + sub2 + '\\n')\n count[2] += 1\n\n # print(\"total matches: \" + str(count))\n f.close()\n return count", "def output_loci_bed(hpo_data, final_loci, cyto_bed, outfile, ncase_dict, cnv='NS', \n block_prefix=None, joint_credsets=False, cs_val=0.95,\n cnv_cov=None, jac_cutoff=0.8):\n\n if block_prefix is None:\n if cnv == 'NS':\n region_id_prefix = 'merged_segment'\n else:\n region_id_prefix = 'merged_{}_segment'.format(cnv)\n else:\n region_id_prefix = 'merged_{}_segment'.format(block_prefix)\n\n cols = 'chr start_min end_max region_id cnv 
best_sig_level cytoband ' + \\\n 'pooled_control_freq pooled_case_freq ' + \\\n 'pooled_ln_or pooled_ln_or_ci_lower pooled_ln_or_ci_upper ' + \\\n 'min_ln_or max_ln_or n_hpos hpos n_constituent_assocs constituent_assocs ' + \\\n 'n_cred_intervals cred_interval_coords cred_intervals_size'\n outfile.write('#' + '\\t'.join(cols.split()) + '\\n')\n\n out_presort = {}\n\n for k, members in enumerate(final_loci.values()):\n # Get basic information for credsets in final cluster\n n_members = len(members)\n hpo_dict = {cs : cs.split('_')[0] for cs in members}\n hpos = sorted(list(set(hpo_dict.values())))\n n_hpos = len(hpos)\n credints_dict = {cs : hpo_data[hpo]['blocks'][cs]['credset_coords'] for cs, hpo in hpo_dict.items()}\n credints_bts = [hpo_data[hpo]['blocks'][cs]['credset_bt'] for cs, hpo in hpo_dict.items()]\n if n_members > 1:\n if joint_credsets:\n credints_bt = \\\n define_joint_credints(hpo_data, hpo_dict, cs_val, cnv_cov, \n jac_cutoff, ncase_dict, return_all=False)\n else:\n credints_bt = credints_bts[0].cat(*credints_bts[1:], postmerge=False).sort().merge()\n else:\n credints_bt = credints_bts[0].sort().merge()\n n_credints = len(credints_bt)\n credints_size = np.nansum([x.length for x in credints_bt])\n credints_coords = ['{}:{}-{}'.format(x.chrom, x.start, x.end) for x in credints_bt]\n \n # Get region-level basic information\n chrom = credints_bt[0].chrom\n start = str(np.nanmin(credints_bt.to_dataframe().start))\n end = str(np.nanmax(credints_bt.to_dataframe().end))\n cytoband = get_cytobands(credints_bt, cyto_bed)\n region_id = '_'.join([region_id_prefix, cytoband, str(k)])\n\n # Summarize HPO-specific information pooled across all windows from each\n # contributing credset (note: *not* all windows for all merged cred intervals)\n windows_dict = {}\n for bid, hpo in hpo_dict.items():\n windows_dict[hpo] = [w for w in hpo_data[hpo]['blocks'][bid]['credset_windows'] \\\n if w in hpo_data[hpo]['all_windows'].keys()]\n # Compute pooled control & case frequencies as mean weighted by np.sqrt(N_cases)\n control_freq_dict, case_freq_dict = {}, {}\n for hpo, windows in windows_dict.items():\n control_freq_dict[hpo] = \\\n np.nanmean([hpo_data[hpo]['all_windows'][w]['control_freq'] for w in windows])\n case_freq_dict[hpo] = \\\n np.nanmean([hpo_data[hpo]['all_windows'][w]['case_freq'] for w in windows])\n control_freq = np.nanmean(list(control_freq_dict.values()))\n case_weights = [np.sqrt(ncase_dict[hpo]) for hpo in case_freq_dict.keys()]\n case_freq = np.average(list(case_freq_dict.values()), weights=case_weights)\n # Compute pooled effect size as inverse-variance weighted average\n lnor_means, lnor_cis = {}, {}\n for hpo in hpos:\n wdat = hpo_data[hpo]['all_windows']\n hlnors = [wdat[w]['lnOR'] for w in windows_dict[hpo]]\n hvars = [ci2se((wdat[w]['lnOR_lower'], wdat[w]['lnOR_upper'])) ** 2 \\\n for w in windows_dict[hpo]]\n hlnor, hlnor_ci = iv_mean(hlnors, hvars)\n lnor_means[hpo] = hlnor\n lnor_cis[hpo] = sorted(hlnor_ci)\n min_lnor = np.nanmin(list(lnor_means.values()))\n max_lnor = np.nanmax(list(lnor_means.values()))\n lnor, lnor_ci = \\\n iv_mean(list(lnor_means.values()), \n [ci2se(tuple(ci)) ** 2 for ci in lnor_cis.values()])\n # Get best significance level from any window\n sig_levels = [hpo_data[hpo]['blocks'][bid].get('credset_max_sig') \\\n for bid, hpo in hpo_dict.items()]\n if 'genome_wide' in sig_levels:\n best_sig_level = 'genome_wide'\n elif 'FDR' in sig_levels:\n best_sig_level = 'FDR'\n else:\n best_sig_level = 'not_significant'\n\n # Prepare to write region stats 
to file\n out_front = '\\t'.join([chrom, start, end])\n out_back = '\\t'.join([cnv, best_sig_level, cytoband])\n outnums_fmt = '\\t{:.3E}\\t{:.3E}\\t{:.3}\\t{:.3}\\t{:.3}\\t{:.3}\\t{:.3}'\n out_back += outnums_fmt.format(control_freq, case_freq, lnor, lnor_ci[0], \n lnor_ci[1], min_lnor, max_lnor)\n out_back += '\\t' + '\\t'.join([str(n_hpos), ';'.join(hpos), \n str(n_members), ';'.join(sorted(members)),\n str(n_credints), ';'.join(credints_coords),\n str(credints_size)]) + '\\n'\n if best_sig_level != 'not_significant':\n out_presort[(int(chrom), int(start), int(end))] = [out_front, region_id, out_back]\n\n # Final renaming of blocks with cytoband nomenclature using rename_blocks()\n block_dict = {v[1] : pbt.BedTool(v[0], from_string=True) for v in out_presort.values()}\n rename_dict = rename_blocks(block_dict, cyto_bed, region_id_prefix)\n\n # Iterate over sorted blocks and write to file\n for i, key in enumerate(sorted(out_presort.keys())):\n outline = '\\t'.join([out_presort[key][0], \n rename_dict[out_presort[key][1]], \n out_presort[key][2]])\n outfile.write(outline)\n\n outfile.close()", "def main():\n # get commmand line args\n args = parse_arguments()\n \n adj_file = args.adj # open(\"UCSC_VIPER/pathways/extended_pathways_transcriptional.adj\", \"r\")\n \n # this set isn't actually used in the script, but I was curious...\n adj_gene_set = set() \n \n cutoff_number = args.cutoff_number\n #cutoff_percent = args.cutoff_percent\n \n expr_gene_file = args.expr_genes #open(\"stanford_batchK1-12.HUGO_only_genes.lst\", 'r')\n expr_genes = [line.strip() for line in expr_gene_file] \n \n # for each line, check that the regulator and other genes are in the\n # expression matrix gene set. if not, remove them, or remove the whole\n # line if the regulator isn't in the set or if too few genes remain\n for line in adj_file:\n \n line_list = line.strip().split()\n regulator_plus_gene_list = [x for x in line_list if x !=\"1.0\"]\n regulator = regulator_plus_gene_list[0]\n \n if regulator not in expr_genes:\n # remove the whole regulator + regulon\n print(\"Skipped a line (regulator not in expr genes): \", \n line_list[0], file=sys.stderr) \n continue\n \n gene_list = regulator_plus_gene_list[1:]\n list_size = len(gene_list)\n adj_gene_set.update(gene_list)\n \n how_many_to_remove= 0\n good_genes = []\n \n for gene in gene_list:\n if gene not in expr_genes:\n how_many_to_remove += 1\n else:\n good_genes.append(gene)\n \n #print(\"\\n\")\n #pdb.set_trace()\n #if (100-how_many_to_remove/list_size*100 < cutoff_percent) and (list_size-how_many_to_remove < cutoff_number):\n if (list_size-how_many_to_remove < cutoff_number):\n print(\"Skipped a line (too many removed): \", line_list[0], file=sys.stderr)\n \n else:\n # re-generate the new line of the .adj file with kept genes\n #genes_to_print = good_genes.insert(0, regulator)\n regulated_genes = \"\\t1.0\\t\".join(good_genes)\n print(regulator+\"\\t\"+regulated_genes+\"\\t1.0\")", "def get_3away_pairs(kmers):\n k = len(kmers[0])\n if k == 1 or k==2:\n return []\n if k == 3:\n return [pair for pair in combinations(kmers, 2) if pair[0][0] != pair[1][0] and pair[0][1] != pair[1][1] and pair[0][2] != pair[1][2]]\n k_L = k//2\n k_R = k-k_L\n kmer_L_hashes = defaultdict(list)\n kmer_R_hashes = defaultdict(list)\n pairs = []\n kmers_L = []\n kmers_R = []\n for i, kmer in enumerate(kmers):\n kmer_L = kmer[:k_L]\n kmer_R = kmer[k_L:]\n #print(kmer_L)\n #print(kmer_R)\n kmers_L.append(kmer_L)\n kmers_R.append(kmer_R)\n kmer_L_hashes[kmer_to_int(kmer_L)] += [i]\n 
kmer_R_hashes[kmer_to_int(kmer_R)] += [i]\n for kmer_L_hash in kmer_L_hashes.values(): #same in first half\n if len(kmer_L_hash) > 1:\n kmer_L = kmers[kmer_L_hash[0]][:k_L] #first half\n pairs += [tuple(kmer_L + kmer for kmer in pair) for pair in get_3away_pairs([kmers[i][k_L:] for i in kmer_L_hash])] #differ by 3 in second half\n for kmer_R_hash in kmer_R_hashes.values(): #same in second half\n if len(kmer_R_hash) > 1:\n kmer_R = kmers[kmer_R_hash[0]][k_L:] #second half\n #print(kmer_R)\n pairs += [tuple(kmer + kmer_R for kmer in pair) for pair in get_3away_pairs([kmers[i][:k_L] for i in kmer_R_hash])] #differ by 3 in first half\n possible_pairs = []\n possible_pairs_L = get_1away_pairs(kmers_L)\n possible_pairs_R = get_2away_pairs(kmers_R)\n #print(kmers_L)\n #print(kmers_R)\n #print(possible_pairs_L)\n #print(possible_pairs_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n possible_pairs = []\n possible_pairs_L = get_2away_pairs(kmers_L)\n possible_pairs_R = get_1away_pairs(kmers_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n return(pairs)", "def intervals2wig(iter,sampleName=\"\",outDir=os.getcwd(),scratchDir=os.getcwd()):\n seqs = {}\n count = 0\n print \"Preparing Dictionary of alignments\\nEach '.' is 10000 alignments\"\n for interval in iter:\n count = count+1\n if count % 10000 == 0:\n sys.stdout.write(\".\")\n if count % 100000 == 0:\n print \"\\n%d\" % (count)\n if not seqs.has_key(interval.chr):\n seqs[interval.chr]={'+':scratchDir+\"/\"+GenRandom(),'-':scratchDir+\"/\"+GenRandom()}\n FILE = open(seqs[interval.chr][interval.strand],'a')\n for i in range(interval.start,len(interval)+1):\n print >>FILE, \"%d\\t%d\" % (i,interval.readcount)\n print \"Done preparing dictionary, Begin sort and write\"\n chrKeys = seqs.keys()\n chrKeys.sort()\n for chr in chrKeys:\n print \"Printing \" + chr\n strands = seqs[chr].keys()\n for strand in strands:\n INPUT = open(seqs[chr][strand],'r')\n filename = outDir + \"/%s_%s_%s.wig\" % (sampleName,chr,strand)\n OUTPUT = open(filename,'w')\n OUTPUT.write(\"track type=wiggle_0 name='%s_%s_%s' description='Wiggle Track for read alignment of %s sample to %s'\\n\" % (sampleName,chr,strand,sampleName,chr))\n print strand\n positions = {}\n while True:\n line = INPUT.readline()\n if not line: break\n pos,obs = line.split(\"\\t\")\n pos,obs = int(pos),int(obs)\n try: positions[pos]=positions[pos]+obs\n except KeyError: positions[pos]=obs\n posKeys = positions.keys()\n posKeys.sort()\n for pos in posKeys:\n wigLine = \"%s\\t%d\\t%d\\t%d\" % (chr,int(pos),int(pos)+1,positions[pos])\n print >>OUTPUT, wigLine\n os.remove(seqs[chr][strand])\n return", "def prepare_bed_file(bed_file, output, ouf=False, save_rejected=None, only_chrom=None):\n new_lines = [] # keep updated lines\n rejected = [] # keep IDs of skipped transcripts + the reason why\n names = Counter() # we need to make sure that all names are unique\n allowed_re = re.compile(ALLOWED_CHARSET_RE).search\n broken_names = []\n\n f = open(bed_file, \"r\")\n for num, line in enumerate(f, 1):\n 
# parse bed file according to specification\n line_data = line.rstrip().split(\"\\t\")\n\n if len(line_data) != 12:\n f.close() # this is for sure an error\n # it is possible only if something except a bed12 was provided\n die(\n \"Error! Bed 12 file is required! Got a file with {len(line_data)} fields instead\"\n )\n\n chrom = line_data[0]\n if only_chrom and chrom != only_chrom:\n # TOGA allows to perform the analysis on a specific chromosome only\n # is so, we can skip all transcripts that located on other chromosomes\n continue\n chromStart = int(line_data[1])\n chromEnd = int(line_data[2])\n name = line_data[3] # gene_name usually\n corr_name = not bool(allowed_re(name))\n if corr_name is False:\n broken_names.append(name)\n # TODO: check weird characters in the transcript name\n # bed_score = int(line_data[4]) # never used\n # strand = line_data[5] # otherwise:\n # strand = True if line_data[5] == '+' else False\n thickStart = int(line_data[6])\n thickEnd = int(line_data[7])\n # itemRgb = line_data[8] # never used\n blockCount = int(line_data[9])\n blockSizes = [int(x) for x in line_data[10].split(\",\") if x != \"\"]\n blockStarts = [int(x) for x in line_data[11].split(\",\") if x != \"\"]\n blockEnds = [blockStarts[i] + blockSizes[i] for i in range(blockCount)]\n blockAbsStarts = [blockStarts[i] + chromStart for i in range(blockCount)]\n blockAbsEnds = [blockEnds[i] + chromStart for i in range(blockCount)]\n blockNewStarts, blockNewEnds = [], []\n names[name] += 1\n\n if thickStart > thickEnd:\n f.close() # according to bed12 specification this should never happen\n sys.stderr.write(f\"Problem occurred at line {num}, gene {name}\\n\")\n die(\"Error! Bed file is corrupted, thickEnd MUST be >= thickStart\")\n elif thickStart == thickEnd:\n # this means that this is a non-coding transcript\n # TOGA cannot process them: we can skip it\n rejected.append((name, \"No CDS\"))\n continue\n\n if thickStart < chromStart or thickEnd > chromEnd:\n # a very strange (but still possible) case\n f.close() # for sure an error with input data\n sys.stderr.write(f\"Problem occurred at line {num}, gene {name}\\n\")\n die(\"Error! 
Bed file is corrupted, thickRange is outside chromRange!\")\n\n # now select CDS only\n # we keep UTRs in the filtered file\n # however, we need CDS to check whether it's correct (% 3 == 0)\n for block_num in range(blockCount):\n blockStart = blockAbsStarts[block_num]\n blockEnd = blockAbsEnds[block_num]\n\n # skip the block if it is entirely UTR\n if blockEnd <= thickStart:\n continue\n elif blockStart >= thickEnd:\n continue\n\n # if we are here: this is not an entirely UTR exon\n # it might intersect the CDS border or to be in the CDS entirely\n # remove UTRs: block start must be >= CDS_start (thickStart)\n # block end must be <= CDS_end (thickEnd)\n blockNewStart = blockStart if blockStart >= thickStart else thickStart\n blockNewEnd = blockEnd if blockEnd <= thickEnd else thickEnd\n blockNewStarts.append(blockNewStart - thickStart)\n blockNewEnds.append(blockNewEnd - thickStart)\n\n if len(blockNewStarts) == 0:\n # even it thickStart != thickEnd this transcript still can be non-coding\n # but if there are no blocks in the CDS -> we can catch this\n rejected.append((name, \"No CDS\"))\n continue\n\n block_new_count = len(blockNewStarts)\n blockNewSizes = [\n blockNewEnds[i] - blockNewStarts[i] for i in range(block_new_count)\n ]\n\n if sum(blockNewSizes) % 3 != 0 and not ouf:\n # this is an out-of-frame (or incomplete transcript)\n # ideally CDS length should be divisible by 3\n # not ouf means that we like to keep such transcripts for some reason\n rejected.append((name, \"Out-of-frame gene\"))\n continue\n\n # we keep this transcript: add in to the list\n new_line = \"\\t\".join([str(x) for x in line_data])\n new_lines.append(new_line)\n f.close()\n\n # if not allowed characters in transcript names: list them\n if len(broken_names) > 0:\n eprint(\"Error! Some transcript names contain not allowed characters\")\n for t in broken_names:\n eprint(t)\n die(f\"Allowed characters are: {ALLOWED_CHARSET}\")\n # if there are non-unique transcript IDs: die\n # I kill it there, not earlier to show them altogether\n if any(v > 1 for v in names.values()):\n eprint(\"Error! There are non-uniq transcript IDs:\")\n duplicates = [k for k, v in names.items() if v > 1]\n for d in duplicates:\n eprint(d)\n die(\"Abort\")\n\n if len(new_lines) == 0:\n # no transcripts pass the filter: probably an input data mistake\n sys.exit(\n f\"Error! No reference annotation tracks left after filtering procedure! Abort\"\n )\n\n # write transcripts that passed the filter to the output file\n f = open(output, \"w\") if output != \"stdout\" else sys.stdout\n f.write(\"\\n\".join(new_lines) + \"\\n\")\n f.close() if output != \"stdout\" else None\n\n if save_rejected:\n # save transcripts that didn't pass the filter + reason why\n f = open(save_rejected, \"w\")\n for elem in rejected:\n f.write(f\"{elem[0]}\\t{elem[1]}\\n\")\n f.close()" ]
[ "0.59915525", "0.5780983", "0.5709366", "0.5594828", "0.54817486", "0.5479288", "0.54211676", "0.53936136", "0.53814256", "0.53754616", "0.5370748", "0.5358605", "0.53075075", "0.52847266", "0.5281747", "0.5231825", "0.5209457", "0.5146825", "0.5144458", "0.5140907", "0.5127445", "0.5119576", "0.507935", "0.50499684", "0.504356", "0.5041293", "0.50170356", "0.5014156", "0.4989259", "0.49628255" ]
0.59735173
1
(list)(list)>(list)(list) Takes two lists consisting of start and end positions in a bed file for each histone mark track, concatenates them into a single list, and then sorts that list first by the start/end label and then by position value.
def overlap(list1,list2): coord=[] for pos1 in list1: #print 'pos in list1 is', pos1 coord.append(('S',int(pos1.split('-')[0]), 'l1')) #print 'S is ', pos1.split('-')[0] coord.append(('E',int(pos1.split('-')[1]),'l1')) #print 'E is ', pos1.split('-')[1] #print coord for pos2 in list2: #print 'pos in list2 is', pos2 coord.append(('S',int(pos2.split('-')[0]),'l2')) #print 'S is ', pos2.split('-')[0] coord.append(('E', int(pos2.split('-')[1]),'l2')) #print 'E is ', pos2.split('-')[1] #print coord coord.sort(key = lambda x : x[0], reverse = True) #print 'coord after first sort \n', coord coord.sort(key = lambda x : x[1]) #print 'coord after 2nd sort by number \n', coord # PART 1: SEARCHES FOR OVERLAPS BETWEEN 2 HISTONE MARKS new_coord_list = [] #initialize new list to which to move all those that don't overlap #index = 0 #position in list spos=0 # start pos initialized ct=0 ovl=[] for pos in coord: new_coord_list.append(pos) #print pos, 'doesn\'t overlap' index = int(new_coord_list.index(pos)) if pos[0]=='S': ct+=1 if ct==2: spos=pos[1] if pos[0]=='E': ct-=1 if ct==1: if not spos==pos[1]: #print spos, '-', pos[1], 'overlap' ovl.append(('ovl', spos, pos[1])) # add to overlap vector the positions that overlap #print 'overlap found! :', [str(spos),str(pos[1]),'ovl'] #print 'removing ', new_coord_list[index] del new_coord_list[index] #print 'removing', new_coord_list[index-1] del new_coord_list[index-1] # new_coord_list.sort(key = lambda x : x[0], reverse = True) start=0 end = 0 two_hist_away_from_cent_of_peak = 0 two_hist_away_list = [] for nc_pos in new_coord_list: if nc_pos[0]=='S': if (start<=two_hist_away_from_cent_of_peak) and (two_hist_away_from_cent_of_peak !=0) and (end!=0): #if center_of_peak <= two_hist_away_from_cent_of_peak and (two_hist_away_from_cent_of_peak !=0): two_hist_away_list.append('-'.join([str(start),str(end), 'tha'])) start= nc_pos[1] if nc_pos[0]=='E': end = nc_pos[1] center_of_peak= (start+nc_pos[1])/2 two_hist_away_from_cent_of_peak = center_of_peak + 300 # print 'new_coord_list: ', new_coord_list return ovl, new_coord_list
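A minimal, self-contained sketch of the event-sweep idea used by the overlap document above, shown on hypothetical 'start-end' interval strings; find_overlaps and its sample inputs are illustrative only, and this simplified version just reports the overlapping regions rather than reproducing the function's full return values.

def find_overlaps(list1, list2):
    # Turn every interval into two events: a start ('S') and an end ('E').
    events = []
    for pos in list1 + list2:
        start, end = (int(x) for x in pos.split('-'))
        events.append((start, 'S'))
        events.append((end, 'E'))
    # Sort by position; at equal positions, start events come before end events.
    events.sort(key=lambda e: (e[0], e[1] != 'S'))
    overlaps = []
    depth = 0            # how many intervals are currently open
    overlap_start = None
    for pos, kind in events:
        if kind == 'S':
            depth += 1
            if depth == 2:           # a second interval opened: an overlap begins
                overlap_start = pos
        else:
            if depth == 2 and overlap_start != pos:
                overlaps.append((overlap_start, pos))
            depth -= 1
    return overlaps

print(find_overlaps(['100-200', '300-400'], ['150-250']))  # [(150, 200)]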
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_sort(input_list,start,end):\n if start < end:\n mid=(start+end)//2\n merge_sort(input_list,start,mid)\n merge_sort(input_list,mid+1,end)\n return merge(input_list,start,mid,end)", "def sort(data,start,end):\n if start < end:\n partition_index=partition(data,start,end)\n sort(data,start,partition_index-1)\n sort(data,partition_index+1,end)", "def sort(artist_list):\n if len(artist_list) > 1: # If the length of the list is greater than 1 run this\n middleIndex = len(artist_list) // 2 # Middle Index is halfway point of the list using int division\n leftList = artist_list[:middleIndex]\n rightList = artist_list[middleIndex:] # left and right list created by splitting list by that middle index\n sort(leftList)\n sort(\n rightList) # recursive call of the left and right list to further break down the list till it gets to\n # length 1 where it will no longer run the if statement\n indexMain = 0\n indexLeft = 0\n indexRight = 0 # creation of indexes for each list\n while indexLeft < len(leftList) and indexRight < len(\n rightList): # Runs through the left and right list at the same time while comparing them until one\n # of the lists reaches the end\n leftString = leftList[indexLeft].lower()\n leftString = leftString.replace(\" \", \"\")\n rightString = rightList[indexRight].lower()\n rightString = rightString.replace(\" \",\n \"\") # Right and Left string created by standarizing each string to all\n # lowercase and no spaces\n if leftString <= rightString:\n artist_list[indexMain] = leftList[indexLeft]\n indexLeft += 1 # If the leftString is alphabetically before rightString i.e. <= it will set the main\n # string at indexMain to the leftList at indexLeft\n else:\n artist_list[indexMain] = rightList[indexRight]\n indexRight += 1 # If the leftString is alphabetically after rightString i.e. 
<= it will set the main\n # string at indexMain to the rightList at indexRight\n indexMain += 1 # Index of main increased by 1 to go to next element\n while indexLeft < len(leftList):\n artist_list[indexMain] = leftList[indexLeft]\n indexLeft += 1\n indexMain += 1 # All remaining parts of the left list if left is greater than right at that time,\n # run through here to set remaining values of index main to leftList\n while indexRight < len(rightList):\n artist_list[indexMain] = rightList[indexRight]\n indexRight += 1\n indexMain += 1 # All remaining parts of the right list if right is greater than left at that time,\n # run through here to set remaining values of index main to rightList", "def sort_fasta_by_abundance(fasta_lines, fasta_out_f):\r\n seq_index = {}\r\n count = 0\r\n for seq_id, seq in parse_fasta(fasta_lines):\r\n count += 1\r\n try:\r\n seq_index[seq].append(seq_id)\r\n except KeyError:\r\n seq_index[seq] = [seq_id]\r\n\r\n seqs = []\r\n for k, v in seq_index.items():\r\n seqs.append((len(v), k, v))\r\n del seq_index[k]\r\n seqs.sort()\r\n for count, seq, seq_ids in seqs[::-1]:\r\n for seq_id in seq_ids:\r\n fasta_out_f.write('>%s\\n%s\\n' % (seq_id, seq))", "def merge_ranges():", "def sort(self):\n \n ct=[]\n rt=[]\n wr=[]\n # search for tags that aren't in the right position\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n if c.wa:\n if not self.wa:\n self.wa=[]\n self.wa.extend(c.wa)\n if c.ct:\n newcts=[ct_tag for ct_tag in c.ct if ct_tag.name!=c.name]\n map(self.contigs[i].ct.remove,newcts)\n ct.extend(newcts)\n for j in range(len(c.reads)):\n r = c.reads[j]\n if r.rt:\n newrts=[rt_tag for rt_tag in r.rt if rt_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].rt.remove,newrts)\n rt.extend(newrts)\n if r.wr:\n newwrs=[wr_tag for wr_tag in r.wr if wr_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].wr.remove,newwrs)\n wr.extend(newwrs)\n # now sort them into their proper place\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n for ct_tag in ct:\n if ct_tag.name==c.name:\n if self.contigs[i].ct is None:\n self.contigs[i].ct=[]\n self.contigs[i].ct.append(ct_tag)\n if rt or wr:\n for j in range(len(c.reads)):\n r = c.reads[j]\n for rt_tag in rt:\n if rt_tag.name==r.rd.name:\n if self.contigs[i].reads[j].rt is None:\n self.contigs[i].reads[j].rt=[]\n self.contigs[i].reads[j].rt.append(rt_tag)\n for wr_tag in wr:\n if wr_tag.name==r.rd.name:\n if self.contigs[i].reads[j].wr is None:\n self.contigs[i].reads[j].wr=[]\n self.contigs[i].reads[j].wr.append(wr_tag)", "def sort_segment_points(Aps, Bps):\n mid = []\n j = 0\n mid.append(Aps[0])\n for i in range(len(Aps)-1):\n dist = distance_tt_point(Aps[i], Aps[i+1])\n for m in range(j, len(Bps)):\n distm = distance_tt_point(Aps[i], Bps[m])\n if dist > distm:\n direction = dot(normalize(line(Aps[i].gen2arr(), Aps[i+1].gen2arr())), normalize(Bps[m].gen2arr()))\n if direction > 0:\n j = m + 1\n mid.append(Bps[m])\n break\n\n mid.append(Aps[i+1])\n for m in range(j, len(Bps)):\n mid.append(Bps[m])\n return mid", "def mergesort(start):\n if len(start)==1:\n return start\n if len(start) > 1:\n a,b = split(start)\n return sort(mergesort(a), mergesort(b))", "def sort_by_assignments(peaklist, order=None, commented_at_end=False):\n anchors = peaklist.anchors\n anchored = tuple(i for anchor in anchors for i in anchor)\n unanchored = set(range(peaklist.dims)) - set(anchored)\n default_order = anchored + tuple(sorted(unanchored))\n order = order if order is not None else default_order\n peaklist.sort(key=lambda peak: 
tuple(peak[i] for i in order))\n if commented_at_end:\n peaklist.sort(key=lambda peak: peak.commented)\n return peaklist", "def _arrange_genes(gene_data_list):\n gene_data_list = sorted(gene_data_list, key=lambda x: x['end'] - x['start'], reverse=True)\n\n display_levels = [intervaltree.IntervalTree(), ]\n\n for gene_data in gene_data_list:\n found_home = False\n level_idx = 0\n while not found_home:\n if level_idx >= len(display_levels):\n display_levels.append(intervaltree.IntervalTree())\n if display_levels[level_idx].overlaps(gene_data['start'], gene_data['end']):\n level_idx += 1\n else:\n display_levels[level_idx].addi(gene_data['start'], gene_data['end'], data=gene_data)\n found_home = True\n\n return [[gene_interval.data['ID'] for gene_interval in this_level] for this_level in display_levels]", "def merge_sorth_in_place(num_list, start_index, end_index):\n pass", "def merge(input_list,start,mid,end):\n first = mid-start+1\n second = end-mid\n left = [0]*first\n right = [0]*second\n for i in range(first):\n left[i]=input_list[start+i]\n for j in range(second):\n right[j]=input_list[mid+1+j]\n i=j=0\n k=start\n while i<first and j<second:\n if left[i]<right[j]:\n input_list[k]=left[i]\n i+=1\n else:\n input_list[k]=right[j]\n j+=1\n k+=1\n while i<first :\n input_list[k]=left[i]\n i+=1\n k+=1\n while j<second:\n input_list[k]=right[j]\n j+=1\n k+=1\n return input_list", "def insertionSort(list):", "def _sort_locations(self,locations):\n i = np.lexsort(np.transpose(locations*np.array((1,-1))))\n return locations[i]", "def merge_sort(collection, start=None, end=None):\n\n if start is None or end is None:\n start = 0\n end = len(collection) - 1\n\n if start < end:\n mid = start + (end - start) // 2\n\n merge_sort(collection, start, mid)\n merge_sort(collection, mid + 1, end)\n merge(collection, start, mid, end)", "def order_chromosomal_contigs(chr_blast_output):\n ordered_chr_contigs = []\n current_contig = \"null\"\n current_contig_direction = 0\n current_contig_hits = 0\n\n with open(chr_blast_output) as blast_matches:\n for hit in blast_matches:\n hit_data = hit.rstrip(\"\\n\").split(\"\\t\")\n core_gene_dir = int(hit_data[0].split(\"|\")[1])\n if float(hit_data[2]) >= 90.0:\n new_contig = hit_data[1]\n new_contig_direction = core_gene_dir*np.sign(int(hit_data[9])-int(hit_data[8]))\n \n if new_contig == current_contig and new_contig_direction == current_contig_direction:\n current_contig_hits += 1\n else: \n contig_tuple = (current_contig, current_contig_direction, current_contig_hits)\n ordered_chr_contigs.append(contig_tuple)\n current_contig = new_contig\n current_contig_direction = new_contig_direction\n current_contig_hits = 1\n\n contig_tuple = (current_contig, current_contig_direction, current_contig_hits)\n ordered_chr_contigs.append(contig_tuple)\n ordered_chr_contigs.pop(0)\n\n #If hits to a contig are not contiguous, keep only the longest run \n chr_contig_dict = {} #stores the longest run for each contig\n remove_list = [] #stores the shorter runs for deletion\n n = -1\n for entry in ordered_chr_contigs:\n n += 1\n contig = entry[0]\n hits = entry[2]\n if contig not in chr_contig_dict:\n chr_contig_dict[contig] = (n, entry)\n elif hits > chr_contig_dict[contig][1][2]:\n remove_list.append(chr_contig_dict[contig])\n chr_contig_dict[contig] = (n, entry)\n else:\n remove_list.append((n, entry))\n\n #The first contig will usually also be the last - both should be kept \n for item in remove_list:\n \n if int(item[0]) == 0 or int(item[0]) == len(ordered_chr_contigs)-1:\n 
remove_list.remove(item)\n \n remove_list.sort(reverse = True)\n for item in remove_list:\n position = item[0]\n ordered_chr_contigs.pop(position)\n \n return ordered_chr_contigs", "def sortByDate(inlist):\n\n seq = []\n for i, each in enumerate(inlist):\n # Lightly parse each flight (just reads the preamble)\n # Putting the last 3 returns of MISlightly into the _ junk var\n flight, _, _, _ = parseMISlightly(each)\n seq.append(flight.takeoff)\n\n # Sort by takeoff time (flight.takeoff is a datetime obj!)\n newseq = np.argsort(seq)\n\n return newseq", "def mergesort_from_pos(dataset, lat, lng):\n\tdist_from_x = calculateDistance(lat, lng)\n\tadd_dist_to_dataset(dataset, dist_from_x)\n\treturn mergesort(dataset, \"dist\")", "def populate_coordinate_list(start, end):\n # print(\"im am here:\" , coordinates, DNA_start)\n corod_list = []\n # DNA start is the gene start in the gff\n # coord is the up stream as defined by the region of interest.\n # is gene is (+) coding: DNA_start > coordinates\n if start > end: # + coding\n for number in range(end, start):\n # print(\"DNA start greater, should be +\", direction)\n # need to get rid of negative coodinates is there\n # are any \n if number < 1:\n continue\n corod_list.append(int(number))\n corod_list = corod_list[::-1]\n if start < end:\n for number in range(start, end):\n # need to get rid of negative coodinates is there\n # are any \n if number < 1:\n continue\n corod_list.append(int(number))\n # print(corod_list)\n # we return a reversed list. So we can go through the coorinates away\n # from the gene to see to see if it fals into a gene \n return corod_list", "def merge(list1: list, list2: list) -> list:\n output = []\n i, j = 0, 0\n while i < len(list1) and j < len(list2):\n if list1[i][1] <= list2[j][1]:\n output += [list1[i]]\n i += 1\n else:\n output += [list2[j]]\n j += 1\n return output + list1[i:] + list2[j:]", "def _sort_manber_myers(self, suffix_pos: List) -> List:\n bucket = self._create_bucket(suffix_pos)\n for _, v in sorted(bucket.items()):\n if self.debug: print(f\"_sort_manber_myers function: bucket value: {v}\") \n if len(v) > 1:\n # recursive call for next stage\n self.stage *= 2\n self._sort_manber_myers(v)\n else:\n # otherwise add starting position of suffix to result\n self.suffixes.append(v[0]) \n if self.debug: print(f\"_sort_manber_myers function: suffixes: {self.suffixes}\\n\") \n return self.suffixes", "def sort(self,xy):\n xy.sort()\n #print xy\n x0=xy[0][0] # x of first tuple\n listy=[] # list of list of y values for given x\n listx=[] # list of x values\n ll=[]\n for i in xy:\n if(i[0] == x0): # change of x\n ll.append(i[1])\n else:\n listy.append(ll)\n listx.append(x0)\n ll=[]\n ll.append(i[1])\n x0=i[0]\n listy.append(ll)\n listx.append(x0)\n return listx,listy", "def sort_subgroups(new_document_list):\n for page in new_document_list:\n if page[0]:\n page[0][0] = sorted(page[0][0], key=lambda g: g['bbox'][1])\n if page[1]:\n if page[1][0]:\n page[1][0] = sorted(page[1][0], key=lambda g: g['bbox'][1])\n if len(page[1])>1:\n if page[1][1]:\n page[1][1] = sorted(page[1][1], key=lambda g: g['bbox'][1])\n if page[2]:\n if page[2][0]:\n page[2][0] = sorted(page[2][0], key=lambda g: g['bbox'][1])\n if len(page[2])>1:\n if page[2][1]:\n page[2][1] = sorted(page[2][1], key=lambda g: g['bbox'][1])\n if len(page[2])>2:\n if page[2][2]:\n page[2][2] = sorted(page[2][2], key=lambda g: g['bbox'][1])\n return new_document_list", "def quick_sort(lst, first, last):\r\n if first < last:\r\n split_marker = split_list(lst, first, 
last)\r\n\r\n quick_sort(lst, split_marker + 1, last)\r\n quick_sort(lst, first, split_marker - 1)", "def merge_sort(self, lst):\r\n [sorted_lst, number_of_inversions] = self.sort_and_get_number_of_inversions(lst)\r\n \r\n return sorted_lst", "def stitchpeaklist(inpeak_list,mergethreshold):\n peak_list=[]\n prev_peak=['chr0',0,1]\n inpeak_list.sort()\n for curr_peak in inpeak_list:\n if curr_peak[0]==prev_peak[0] and prev_peak[2]+mergethreshold>=curr_peak[1]:\n curr_peak[1]=min(prev_peak[1],curr_peak[1])\n curr_peak[2]=max(prev_peak[2],curr_peak[2])\n else:\n if prev_peak!=['chr0',0,1]:\n peak_list.append(prev_peak)\n prev_peak=curr_peak[:]\n peak_list.append(prev_peak)\n return peak_list", "def merge_quick_sort(L):\n list1 = []\n list2 = []\n (evens, odds) = merge_sort.split(L)\n list1 += quick_sort.quick_sort(evens)\n list2 += quick_sort.quick_sort(odds)\n x = merge_sort.merge(list1,list2)\n return x", "def prep_input(buses):\n return sorted([(bus, offset) \n for offset, bus \n in enumerate(buses) \n if bus], reverse=True)", "def merge_sort(arr: list, start: int, end: int):\n if end == start:\n return [arr[start]]\n else:\n mid = start + (end - start)//2\n left = merge_sort(arr, start, mid)\n right = merge_sort(arr, mid+1, end)\n sorted_arr = merge(left, right)\n return sorted_arr", "def merge_sort(arr, start, end):\n if len(arr) <= 1:\n return\n if start >= end:\n return\n\n mid = int((start + end) / 2)\n merge_sort(arr, start, mid)\n merge_sort(arr, mid+1, end)\n merging(arr, start, mid, mid+1, end)" ]
[ "0.5854926", "0.5793867", "0.5785533", "0.572386", "0.56655353", "0.5634396", "0.56323624", "0.5561383", "0.5551479", "0.5536983", "0.5528434", "0.55122155", "0.54951143", "0.5471806", "0.54362905", "0.542897", "0.54168636", "0.5415553", "0.53996116", "0.53777754", "0.53548026", "0.53489655", "0.53484106", "0.53450567", "0.53442025", "0.53424966", "0.5339346", "0.53372204", "0.5300382", "0.5294634" ]
0.63203555
0
Filter files based on inclusion lists. Return a list of files which match any of the Unix shell-style wildcards provided, or return all the files if no filter is provided.
def IncludeFiles(filters, files):
  if not filters:
    return files
  match = set()
  for file_filter in filters:
    match |= set(fnmatch.filter(files, file_filter))
  return [name for name in files if name in match]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def infile_list(args):\n infiles = []\n for arg in args:\n infiles += glob.glob(arg)\n infiles = [pipes.quote(f) for f in infiles]\n return infiles", "def _filter_file_list(files, local_metadata, remote_metadata):\n def _is_tracked(filename, metadata):\n \"\"\"\n Is the filename tracked in the remote metadata dict.\n The file may be not even locally tracked yet\n \"\"\"\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is not None \\\n and current_local_sha == current_remote_sha\n\n def _is_inside_ignored_dir(filename):\n \"\"\" Is the filename inside any of the IGNORE_DIRS list \"\"\"\n ignore_dirs = ['./' + x for x in IGNORE_DIRS]\n return any([filename.startswith(x) for x in ignore_dirs])\n\n def _has_ignored_extension(filename):\n return any([ext in IGNORE_EXTENSIONS\n for ext in filename.split('.')[1:]])\n\n files = [f for f in files\n if not _is_inside_ignored_dir(f)\n and not _has_ignored_extension(f)\n and not _is_tracked(f, remote_metadata)]\n return files", "def filterImages(files, cfg):\r\n regex = \"\\.(\" + \"|\".join(cfg.image_formats) + \")$\"\r\n #filter(lambda s: re.match(regex, s), files)\r\n return [s for s in files if re.findall(regex, s)]", "def filterAudioFilesFromFilelist(filelist):\n audioFileList = []\n for audioFilter in filelist:\n audioRoot, audioExt = os.path.splitext(audioFilter)\n if audioExt in ['.wav', '.aiff', '.aif']:\n audioFileList.append(audioFilter)\n # end for loop\n return audioFileList", "def list_match(names: List[str], expression: str) -> List[str]:\n exprs_unix_shell_style = parse_exp2unix_shell_style(expression)\n filter_results = []\n for expr in exprs_unix_shell_style:\n filter_results.extend(fnmatch.filter(names, expr))\n return list(set(filter_results))", "def get_file_list(work_dir, match_flag='*.*'):\n matches = []\n for root, dir, files in os.walk(work_dir):\n for items in fnmatch.filter(files, match_flag):\n matches.append(os.path.realpath(os.path.join(root, items)))\n\n return matches", "def detectFiles(self, input):\n output = []\n if os.path.isfile(input):\n output.append(input)\n else:\n input = os.path.join(input, '*') if os.path.isdir(input) else input\n for file in glob.glob(input):\n output.append(file)\n return output", "def match_files(patterns, files):\n\tall_files = files if isinstance(files, collections.Container) else list(files)\n\treturn_files = set()\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tresult_files = pattern.match(all_files)\n\t\t\tif pattern.include:\n\t\t\t\treturn_files.update(result_files)\n\t\t\telse:\n\t\t\t\treturn_files.difference_update(result_files)\n\treturn return_files", "def searchfiles(directory, filenames, ext=None):\n if ext:\n filenames = [f'{file}{ext}' for file in filenames]\n return [\n file for file in Path(directory).glob('*')\n if file.name in filenames\n ]", "def filter_python_files(files):\n return [f for f in files if f.endswith('.py')]", "def files(pathspec):\n\treturn [f for f in glob.glob(pathspec)]", "def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames", "def regex_filter_list(file_list, pattern, output=True):\n r = 
re.compile(pattern)\n matches = filter(r.match, file_list)\n\n if (len(matches) > 0 and output is True):\n #print colors.BLUE + '\\033[1m' + \"matches:\" + '\\033[0m'\n for match in matches:\n print colors.BLUE + match + colors.ENDC\n\n return matches", "def build_file_list(location, filters):\n f = []\n for (dir_path, dir_name, file_names) in os.walk(location):\n for file in file_names:\n f.append(os.path.join(dir_path, file))\n obj_list = map(lambda file: os.path.join(location, file), f)\n\n if type(filters) == list:\n for filter in filters:\n obj_list = [i for i in obj_list if filter in i]\n else:\n obj_list = [i for i in obj_list if filters in i]\n\n return obj_list", "def buildListOfFiles(searchGlob):\n return [fpath for fpath in glob2.iglob(searchGlob) if os.path.isfile(fpath)]", "def filter_filenames(filenames, filters, inverse=False):\n out = []\n for filename in filenames:\n for filt in filters:\n if (filt not in filename) + (inverse) == 1:\n break\n else:\n out.append(filename)\n return out", "def ExcludeFiles(filters, files):\n if not filters:\n return files\n match = set()\n for file_filter in filters:\n excludes = set(fnmatch.filter(files, file_filter))\n match |= excludes\n return [name for name in files if name not in match]", "def get_all_filenames_from_dir(directory,suffex, filename_allowed_list = None):\n\n files_list = list()\n if filename_allowed_list == None:\n for item in glob.glob(directory+'*'+suffex): # Example /datasets/Stock_dataset/Stocks/*.txt\n files_list.append(item) \n else:\n filename_allowed_list = [v.lower() for v in filename_allowed_list] # To avoid case sensitve\n for item in glob.glob(directory+'*'+suffex):\n if item.split(\"/\")[-1].split('.')[0].lower() in filename_allowed_list: # Since linux is case sensitive, then so is this function, make sure the names match correctly\n files_list.append(item)\n if not len(files_list) == len(filename_allowed_list):\n print 'Some Stocks files are missing'\n return files_list", "def __getFileList(self, path, filterRe):\n path = os.path.abspath(path)\n files = []\n for dirname, _, names in os.walk(path):\n files.extend([os.path.join(dirname, f)\n for f in names\n if re.match(filterRe, f)]\n )\n return files", "def _filter_mrpack_files(file_list: List[MrpackFile], mrpack_install_options: MrpackInstallOptions) -> List[MrpackFile]:\n filtered_list: List[MrpackFile] = []\n for file in file_list:\n if \"env\" not in file:\n filtered_list.append(file)\n continue\n\n if file[\"env\"][\"client\"] == \"required\":\n filtered_list.append(file)\n if file[\"env\"][\"client\"] == \"optional\" and file[\"path\"] in mrpack_install_options.get(\"optionalFiles\", []):\n filtered_list.append(file)\n\n return filtered_list", "def getFileList(*args, filespec: AnyStr=\"\", folder: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def _search_files(self, path, path_glob):\n files = glob.glob(\"%s/%s\"% (path, path_glob))\n files_filt = []\n print \"Searching for matching files in %s/:\" % path\n for f in files:\n if re.search(self._info['pattern'], os.path.basename(f)) is not None:\n files_filt.append(f)\n if len(files_filt) == 0:\n print \"None found.\"\n return files_filt", "def filter_files(self, pattern, filter_fn=None):\n def filter_function(f):\n return re.search(pattern, f) != None\n if not filter_fn:\n filter_fn = filter_function\n return filter(filter_fn, self.files)", "def glob_fs(self):\n\n found_files = []\n for pattern in self.glob_patterns:\n found_files += [PathString(present_file)\n for present_file in glob.glob(pattern)]\n 
return found_files", "def recognize_files(list_of_filenames):\n reg_exp = define_regex()\n pattern = re.compile(reg_exp) \n matched = []\n for filename in list_of_filenames:\n match = pattern.match(filename)\n if match != None:\n matched.append(filename)\n return matched", "def files(self):\r\n files = []\r\n for path in self.paths:\r\n if os.path.isdir(path):\r\n files.extend(glob.glob(os.path.join(path, f'*{self.ext}')))\r\n else:\r\n files.extend(glob.glob(path))\r\n return list(set(self.get_pattern(fname) for fname in files))", "def _filter_patterns(self, filepath, pattern_prefix, exclude_pattern, include_pattern):\n isfilter = False\n if exclude_pattern:\n full_exclude_pattern = os.path.join(pattern_prefix, exclude_pattern)\n if fnmatch.fnmatch(filepath, full_exclude_pattern):\n isfilter = True\n if include_pattern:\n full_include_pattern = os.path.join(pattern_prefix, include_pattern)\n if fnmatch.fnmatch(filepath, full_include_pattern):\n isfilter = False\n return isfilter", "def sglob(files_pattern):\n return sorted(glob.glob(files_pattern))", "def _filter_files(file_dir: Union[str, Path], is_viya4: Optional[bool] = False) -> list:\n file_names = []\n file_names.extend(sorted(Path(file_dir).glob(\"*.json\")))\n if is_viya4:\n file_names.extend(sorted(Path(file_dir).glob(\"score_*.py\")))\n file_names.extend(sorted(Path(file_dir).glob(\"*.pickle\")))\n # Include H2O.ai MOJO files\n file_names.extend(sorted(Path(file_dir).glob(\"*.mojo\")))\n if file_names:\n return file_names\n else:\n raise FileNotFoundError(\n \"No valid model files were found in the provided file directory.\"\n )", "def glob1(self, dirname, pattern):\n names = self.listdir(dirname)\n if pattern[0] != '.':\n names = filter(lambda x: x[0] != '.',names)\n return fnmatch.filter(names, pattern)" ]
[ "0.6862338", "0.67467535", "0.6701581", "0.66219026", "0.65032023", "0.63339174", "0.63240343", "0.6266551", "0.62537444", "0.62388927", "0.620612", "0.62047184", "0.6192042", "0.61909294", "0.6186091", "0.6183439", "0.61790496", "0.61758393", "0.6166299", "0.6145202", "0.61318463", "0.61238384", "0.61227596", "0.6120521", "0.610067", "0.6098398", "0.6084366", "0.60680497", "0.6032067", "0.60174227" ]
0.7507073
0
Filter files based on exclusions lists Return a list of files which do not match any of the Unix shellstyle wildcards provided, or return all the files if no filter is provided.
def ExcludeFiles(filters, files): if not filters: return files match = set() for file_filter in filters: excludes = set(fnmatch.filter(files, file_filter)) match |= excludes return [name for name in files if name not in match]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_file_list(files, local_metadata, remote_metadata):\n def _is_tracked(filename, metadata):\n \"\"\"\n Is the filename tracked in the remote metadata dict.\n The file may be not even locally tracked yet\n \"\"\"\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is not None \\\n and current_local_sha == current_remote_sha\n\n def _is_inside_ignored_dir(filename):\n \"\"\" Is the filename inside any of the IGNORE_DIRS list \"\"\"\n ignore_dirs = ['./' + x for x in IGNORE_DIRS]\n return any([filename.startswith(x) for x in ignore_dirs])\n\n def _has_ignored_extension(filename):\n return any([ext in IGNORE_EXTENSIONS\n for ext in filename.split('.')[1:]])\n\n files = [f for f in files\n if not _is_inside_ignored_dir(f)\n and not _has_ignored_extension(f)\n and not _is_tracked(f, remote_metadata)]\n return files", "def exclude_filter(excl_filter, paths):\n misses = set()\n for p in paths:\n if re.search(excl_filter, p) is None:\n misses.add(p)\n\n return misses", "def filter_python_files(files):\n return [f for f in files if f.endswith('.py')]", "def negative_filtering(patterns: list, file_list):\n if len(patterns) == 0:\n return file_list\n prog = re.compile(patterns.pop())\n it = (i for i in file_list if not prog.search(i))\n return negative_filtering(patterns, it)", "def exclude_dirs(self, matches: Iterable[str]) -> List[str]:\n filters = [(\"ifmodule\", self.modules.keys()), (\"ifdefine\", self.variables)]\n\n valid_matches = []\n\n for match in matches:\n for filter_ in filters:\n if not self._pass_filter(match, filter_):\n break\n else:\n valid_matches.append(match)\n return valid_matches", "def filter_filenames(filenames, filters, inverse=False):\n out = []\n for filename in filenames:\n for filt in filters:\n if (filt not in filename) + (inverse) == 1:\n break\n else:\n out.append(filename)\n return out", "def filterImages(files, cfg):\r\n regex = \"\\.(\" + \"|\".join(cfg.image_formats) + \")$\"\r\n #filter(lambda s: re.match(regex, s), files)\r\n return [s for s in files if re.findall(regex, s)]", "def get_filtered_list_without_temporary_files(self, file_list=None):\n\t\ttemp_file_regex = re.compile(r'.*\\~\\$.*')\n\t\ttry:\n\t\t\ttemporary_files = list(filter(temp_file_regex.search, file_list))\n\t\t\tfiles_filtered = list(set(file_list) - set(temporary_files))\n\t\t\treturn files_filtered\n\t\texcept:\n\t\t\treturn file_list", "def filename_filter ( self, filename, _fnmatch=fnmatch.fnmatch ):\n return all (\n not _fnmatch ( filename, pat ) for pat in self.FILENAMES_IGNORE\n )", "def _filter_diff(diff, include_list, exclude_list=()):\n filtered = []\n for d in diff:\n if (d.status != 'D' and\n _match_regex_list(d.file, include_list) and\n not _match_regex_list(d.file, exclude_list)):\n # We've got a match!\n filtered.append(d)\n return filtered", "def IncludeFiles(filters, files):\n if not filters:\n return files\n match = set()\n for file_filter in filters:\n match |= set(fnmatch.filter(files, file_filter))\n return [name for name in files if name in match]", "def allow_patterns(*patterns):\n\n def _ignore_patterns(path, names):\n\n files_only = [\n name for name in names if not os.path.isdir(os.path.join(path, name))\n ]\n\n allowed_files = []\n for pattern in patterns:\n allowed_files.extend(fnmatch.filter(files_only, pattern))\n\n ignore_others = set(files_only) - set(allowed_files)\n return ignore_others\n\n return _ignore_patterns", "def 
filter_jars(jars, include_filters, exclude_filters):\n filtered = []\n # Apply include filters\n for j in jars:\n basename = os.path.basename(j)\n for f in include_filters:\n if f.match(basename):\n filtered += [j]\n break\n else:\n logging.debug(\"Ignoring JAR %s\", j)\n # Apply exclude filters\n exclude_filtered = []\n for j in filtered:\n basename = os.path.basename(j)\n for f in exclude_filters:\n if f.match(basename):\n logging.debug(\"Ignoring JAR %s\", j)\n break\n else:\n exclude_filtered += [j]\n\n return exclude_filtered", "def ignore_patterns(*patterns):\n def _ignore_patterns(path, names):\n ignored_names = []\n for pattern in patterns:\n ignored_names.extend(fnmatch.filter(names, pattern))\n return set(ignored_names)\n return _ignore_patterns", "def filter_missing_files(file_names, split_by_client=False, allow_missing_files=True):\n\n if not allow_missing_files:\n return file_names\n\n if split_by_client:\n # filter out missing files and empty clients\n existing_files = [\n [f for f in client_files if os.path.exists(f)] for client_files in file_names]\n existing_files = [\n client_files for client_files in existing_files if client_files]\n else:\n # filter out missing files\n existing_files = [f for f in file_names if os.path.exists(f)]\n return existing_files", "def filter_list(to_process_list):\n log_file_list = [file for file in to_process_list if \"tar\" not in file]\n tar_file_list = [file for file in to_process_list if \"tar\" in file]\n return log_file_list, tar_file_list", "def __ignore_files_with_suffix(self, files):\n if self.__config.suffix().strip():\n files = [file for file in files if self.__config.suffix() not in file]\n return files", "def gas_exclusions(self) -> List[ContractFunctionPath]:\n\n cli_value = self.pytest_config.getoption(\"--gas-exclude\")\n exclusions: List[ContractFunctionPath] = []\n if cli_value:\n items = cli_value.split(\",\")\n for item in items:\n exclusion = ContractFunctionPath.from_str(item)\n exclusions.append(exclusion)\n\n paths = _get_config_exclusions(self.ape_test_config.gas)\n exclusions.extend(paths)\n return exclusions", "def filter_paths(pathnames, patterns=None, ignore_patterns=None):\n result = []\n if patterns is None:\n patterns = ['*']\n if ignore_patterns is None:\n ignore_patterns = []\n for pathname in pathnames:\n if match_patterns(pathname, patterns) and not match_patterns(pathname,\n ignore_patterns):\n result.append(pathname)\n return result", "def filter_files(file_names, file_name_words):\n extensions = ('yaml', 'yml')\n _file_names = list()\n # Uncomment out this for debugging purposes\n # print(\"Filtering according to words {}\".format(file_name_words))\n for file_name in file_names:\n if file_name.endswith(extensions):\n if any(i in file_name for i in file_name_words):\n # Uncomment out this for listing the matching files\n # print(\"Filename {} is a match\".format(file_name))\n _file_names.append(file_name)\n else:\n return _file_names", "def filter_files(self, pattern, filter_fn=None):\n def filter_function(f):\n return re.search(pattern, f) != None\n if not filter_fn:\n filter_fn = filter_function\n return filter(filter_fn, self.files)", "def _get_file_paths(self, ignored_exts: Optional[Set[str]]) -> List[str]:\n dir_path = os.path.join(self._target_dir, '**')\n all_paths = glob.glob(dir_path, recursive=True)\n if ignored_exts is None:\n return [p for p in all_paths if os.path.isfile(p)]\n file_paths = [p for p in all_paths if self._extr_ext(p) not in ignored_exts]\n return [p for p in file_paths if 
os.path.isfile(p)]", "def get_sql_files(root=\".\", filterl=None, exclude=None):\r\n paths = Path(root).rglob(\"*.sql\")\r\n\r\n if root:\r\n if filterl:\r\n return [\r\n str(path)\r\n for path in paths\r\n if any(\r\n re.match(path.parts[-2], name, re.IGNORECASE) for name in filterl\r\n )\r\n ]\r\n if exclude:\r\n return [\r\n str(path)\r\n for path in paths\r\n if not any(\r\n re.match(path.parts[0], name, re.IGNORECASE) for name in exclude\r\n )\r\n ]\r\n return [str(path) for path in Path(root).rglob(\"*.sql\")]\r\n return [str(path) for path in paths]", "def list_match(names: List[str], expression: str) -> List[str]:\n exprs_unix_shell_style = parse_exp2unix_shell_style(expression)\n filter_results = []\n for expr in exprs_unix_shell_style:\n filter_results.extend(fnmatch.filter(names, expr))\n return list(set(filter_results))", "def get_files(self, components, excluded_components=None):\n if excluded_components is None:\n excluded_components = []\n \n # List of already processed or excluded\n files_set = set()\n excluded_files_set = set()\n files = []\n \n for c in excluded_components:\n excluded_files_set.add(self.get(c))\n\n for c in components:\n f = self.get(c)\n if f not in files_set and f not in excluded_files_set:\n files_set.add(f)\n files.append(f)\n return files", "def exclude(requestContext, seriesList, pattern):\n regex = re.compile(pattern)\n return [s for s in seriesList if not regex.search(s.name)]", "def get_unignored_file_paths(ignore_list=None, whitelist=None):\n unignored_files = []\n if ignore_list is None:\n ignore_list = []\n if whitelist is None:\n whitelist = []\n\n for root, dirs, files in os.walk(\".\"):\n floyd_logger.debug(\"Root:%s, Dirs:%s\", root, dirs)\n\n if ignore_path(unix_style_path(root), ignore_list, whitelist):\n # Reset dirs to avoid going further down this directory.\n # Then continue to the next iteration of os.walk, which causes\n # everything in this directory to be ignored.\n #\n # Note that whitelisted files that are within directories that are\n # ignored will not be whitelisted. 
This follows the expected\n # behavior established by .gitignore logic:\n # \"It is not possible to re-include a file if a parent directory of\n # that file is excluded.\"\n # https://git-scm.com/docs/gitignore#_pattern_format\n dirs[:] = []\n floyd_logger.debug(\"Ignoring directory : %s\", root)\n continue\n\n for file_name in files:\n file_path = unix_style_path(os.path.join(root, file_name))\n if ignore_path(file_path, ignore_list, whitelist):\n floyd_logger.debug(\"Ignoring file : %s\", file_name)\n continue\n\n unignored_files.append(os.path.join(root, file_name))\n\n return unignored_files", "def get_files_exclude(matches, dname):\n all_files=os.listdir(dname)\n save_list=[]\n imatch=0\n for match in matches:\n for fname in all_files:\n ### exclude dir once\n if imatch==0:\n if os.path.isdir(fname):\n save_list.append(fname)\n #print file\n continue\n if re.search(match, fname):\n save_list.append(fname)\n #print file\n imatch+=1 \n for fname in save_list:\n #print file\n all_files.remove(fname)\n return all_files", "def _filter_mrpack_files(file_list: List[MrpackFile], mrpack_install_options: MrpackInstallOptions) -> List[MrpackFile]:\n filtered_list: List[MrpackFile] = []\n for file in file_list:\n if \"env\" not in file:\n filtered_list.append(file)\n continue\n\n if file[\"env\"][\"client\"] == \"required\":\n filtered_list.append(file)\n if file[\"env\"][\"client\"] == \"optional\" and file[\"path\"] in mrpack_install_options.get(\"optionalFiles\", []):\n filtered_list.append(file)\n\n return filtered_list", "def FilterLogfiles(files):\n log_files = list(files)\n for file_path in files:\n file_name = os.path.basename(file_path)\n if file_name == _KERNEL or file_name.endswith(_IMG_FILE_EXTENSION):\n log_files.remove(file_path)\n return log_files" ]
[ "0.70751107", "0.69046474", "0.68281114", "0.6743084", "0.66904205", "0.66463363", "0.6558046", "0.65148014", "0.6402117", "0.6395745", "0.6389003", "0.62757593", "0.621235", "0.62065065", "0.6146359", "0.6127292", "0.60841036", "0.60451764", "0.5971961", "0.5971407", "0.5970291", "0.59625816", "0.59556544", "0.5892354", "0.58910906", "0.5890766", "0.58895147", "0.58846396", "0.5874542", "0.58683026" ]
0.76014906
0
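For symmetry, a minimal sketch of the exclusion variant from the row above; again the names and patterns are placeholders and only the standard library is assumed.

```python
import fnmatch

names = ['main.c', 'main.o', 'util.h', 'util.o']
patterns = ['*.o']  # hypothetical exclude patterns

# Drop every name matching any exclude pattern, keeping the rest in order.
excluded = set()
for pattern in patterns:
    excluded |= set(fnmatch.filter(names, pattern))
print([n for n in names if n not in excluded])  # ['main.c', 'util.h']
```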
CopyPath from src to dst. Copy a fully specified src to a fully specified dst. If src and dst are both files, the dst file is removed first to prevent error. If an include or exclude list is provided, the destination is first matched against that filter.
def CopyPath(options, src, dst):
  if options.includes:
    if not IncludeFiles(options.includes, [src]):
      return

  if options.excludes:
    if not ExcludeFiles(options.excludes, [src]):
      return

  if options.verbose:
    print('cp %s %s' % (src, dst))

  # If the source is a single file, copy it individually
  if os.path.isfile(src):
    # We can not copy over a directory with a file.
    if os.path.exists(dst):
      if not os.path.isfile(dst):
        msg = "cp: cannot overwrite non-file '%s' with file." % dst
        raise OSError(msg)
      # If the destination exists as a file, remove it before copying to avoid
      # 'readonly' issues.
      os.remove(dst)

    # Now copy to the non-existent fully qualified target
    shutil.copy(src, dst)
    return

  # Otherwise it's a directory, ignore it unless allowed
  if os.path.isdir(src):
    if not options.recursive:
      print("cp: omitting directory '%s'" % src)
      return

    # We can not copy over a file with a directory.
    if os.path.exists(dst):
      if not os.path.isdir(dst):
        msg = "cp: cannot overwrite non-directory '%s' with directory." % dst
        raise OSError(msg)
    else:
      # if it didn't exist, create the directory
      os.makedirs(dst)

    # Now copy all members
    for filename in os.listdir(src):
      srcfile = os.path.join(src, filename)
      dstfile = os.path.join(dst, filename)
      CopyPath(options, srcfile, dstfile)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_file(src, dst, ignore=None):\n # Sanity checkpoint\n src = re.sub('[^\\w/\\-\\.\\*]', '', src)\n dst = re.sub('[^\\w/\\-\\.\\*]', '', dst)\n if len(re.sub('[\\W]', '', src)) < 5 or len(re.sub('[\\W]', '', dst)) < 5:\n debug.log(\"Error: Copying file failed. Provided paths are invalid! src='%s' dst='%s'\"%(src, dst))\n else:\n # Check destination\n check = False\n if dst[-1] == '/':\n if os.path.exists(dst):\n check = True # Valid Dir\n else:\n debug.log(\"Error: Copying file failed. Destination directory does not exist (%s)\"%(dst)) #DEBUG\n elif os.path.exists(dst):\n if os.path.isdir(dst):\n check = True # Valid Dir\n dst += '/' # Add missing slash\n else:\n debug.log(\"Error: Copying file failed. %s exists!\"%dst)\n elif os.path.exists(os.path.dirname(dst)):\n check = True # Valid file path\n else:\n debug.log(\"Error: Copying file failed. %s is an invalid distination!\"%dst)\n if check:\n # Check source\n files = glob.glob(src)\n if ignore is not None: files = [fil for fil in files if not ignore in fil]\n if len(files) != 0:\n debug.log(\"Copying File(s)...\", \"Copy from %s\"%src, \"to %s\"%dst) #DEBUG\n for file_ in files:\n # Check file exists\n if os.path.isfile(file_):\n debug.log(\"Copying file: %s\"%file_) #DEBUG\n shutil.copy(file_, dst)\n else:\n debug.log(\"Error: Copying file failed. %s is not a regular file!\"%file_) #DEBUG\n else: debug.log(\"Error: Copying file failed. No files were found! (%s)\"%src) #DEBUG", "def copyDir(src, dst, includes, excludes = []):\n\tmultiFilesReplacements([], dst, src, includes, excludes)", "def copy_if_needed(src: str, dst: str, filter: str|List[str]|None = None) -> bool:\n #console(f'COPY {src} --> {dst}')\n if os.path.isdir(src):\n return copy_dir(src, dst, filter)\n else:\n return copy_file(src, dst, filter)", "def minify(src, dst, exclude_files=None, file_processors=None):\n\n LOGGER.info(\"copying files in <%s> to <%s>\", src, dst)\n\n file_processors = DEFAULT_PROCESSORS if file_processors is None else file_processors\n prefix = os.path.join(src, \"\")\n\n for src_path in _walk_files(src, exclude_files):\n assert src_path.startswith(prefix)\n\n dst_path = os.path.join(dst, src_path[len(prefix) :])\n dst_dir, dst_file = os.path.split(dst_path)\n os.makedirs(dst_dir, exist_ok=True)\n\n _, ext = os.path.splitext(dst_file)\n ext = ext[1:].lower() if ext else None\n processor = file_processors.get(ext, copyfileobj)\n\n LOGGER.debug(\n \"copying file <%s> to <%s> using processor %r\",\n src_path,\n dst_path,\n processor,\n )\n\n with open(src_path, \"rb\") as fsrc, open(dst_path, \"wb\") as fdst:\n processor(fsrc, fdst)", "def copy(src, dst, ignore=False, force=False):\n if not os.path.exists(src) and not ignore:\n raise FileNotFoundError(\"'%s' is not found.\" % src)\n\n if os.path.isfile(src):\n if os.path.exists(dst):\n if not ignore and not force:\n raise FileExistsError(\"'%s' is exist.\" % dst)\n elif force:\n shutil.copyfile(src, dst)\n else:\n shutil.copyfile(src, dst)\n elif os.path.isdir(src):\n if os.path.exists(dst):\n if not ignore and not force:\n try:\n shutil.copytree(src, dst)\n except OSError:\n raise FileExistsError(\"'%s' is exist.\" % dst)\n elif force:\n remove(dst)\n shutil.copytree(src, dst)\n else:\n shutil.copytree(src, dst)", "def copy_file(src: str, dst: str, filter: str|List[str]|None = None) -> bool:\n if _passes_filter(src, filter):\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n if _should_copy(src, dst):\n #console(f'copy {src}\\n --> {dst}')\n shutil.copyfile(src, dst, 
follow_symlinks=True)\n shutil.copystat(src, dst, follow_symlinks=True)\n return True\n return False", "def copyAsset(self, src, dst, **kw):\n if self.isfile(src):\n self.copyfile(src, dst)\n else:\n # copy folder\n if not self.exists(dst):\n self.makedirs(dst)\n for name in self.listdir(src):\n self.copyAsset(self.joinpath(src, name), self.joinpath(dst, name), copycache=0)\n\n # copy cache\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return\n\n cache_dst = self.cache_path(dst)\n cache_dst_parent = os.path.dirname(cache_dst)\n if not os.path.exists( cache_dst_parent ):\n os.makedirs(cache_dst_parent )\n if not os.path.exists(cache_dst):\n ucopytree(cache_src, cache_dst)", "def copy_ext(src, dst, include=None, exclude=None):\n # Find files from the specified extensions.\n files = find_files(src, include, exclude)\n # Transform all file paths in relative.\n rel = [os.path.relpath(file, src) for file in files]\n # Concatenate the relative path to the destination folder.\n dst = [f'{dst}\\\\{rel}' for rel in rel]\n # Run in a thread pool.\n parallel.run(copy, list(zip(files, dst)), thread=True)", "def safecopy(src, dst):\r\n abs_src = os.path.abspath(src)\r\n abs_dst = os.path.abspath(dst)\r\n if (abs_src != abs_dst) \\\r\n and os.path.isfile(abs_src): \r\n dirname = os.path.dirname(abs_dst)\r\n recurse_mkdir(dirname)\r\n shutil.copy(abs_src, abs_dst)", "def copy_path(srcdir, dstdir, path, newpath=None, *, exclude=None):\n if newpath is None:\n newpath = path\n\n src = osp.join(srcdir, path)\n dst = osp.join(dstdir, newpath)\n\n if exclude(src): # skip this path\n return []\n\n print(f'copying {src} to {dst}')\n\n if osp.isfile(src):\n shutil.copyfile(src, dst)\n return [newpath]\n elif osp.isdir(src):\n if osp.exists(dst): # if the destination directory exists, delete it\n shutil.rmtree(dst)\n\n os.mkdir(dst)\n\n files = []\n\n for fpath in os.listdir(src): # recursively copy each child path\n p = osp.join(path, fpath)\n np = osp.join(newpath, fpath)\n files += copy_path(srcdir, dstdir, p, np, exclude=exclude)\n\n return files\n else:\n raise RuntimeError(f'unknown path type {src}')", "def Copy(args):\n parser = argparse.ArgumentParser(usage='cp [Options] sources... 
dest',\n description=Copy.__doc__)\n parser.add_argument(\n '-R', '-r', '--recursive', dest='recursive', action='store_true',\n default=False,\n help='copy directories recursively.')\n parser.add_argument(\n '-v', '--verbose', dest='verbose', action='store_true',\n default=False,\n help='verbose output.')\n parser.add_argument(\n '--include', dest='includes', action='append', default=[],\n help='include files matching this expression.')\n parser.add_argument(\n '--exclude', dest='excludes', action='append', default=[],\n help='exclude files matching this expression.')\n parser.add_argument('srcs', nargs='+', help='files to copy')\n parser.add_argument('dest', help='destination')\n\n options = parser.parse_args(args)\n\n src_list = []\n for src in options.srcs:\n files = glob.glob(src)\n if not files:\n raise OSError('cp: no such file or directory: ' + src)\n if files:\n src_list.extend(files)\n\n for src in src_list:\n # If the destination is a directory, then append the basename of the src\n # to the destination.\n if os.path.isdir(options.dest):\n CopyPath(options, src, os.path.join(options.dest, os.path.basename(src)))\n else:\n CopyPath(options, src, options.dest)", "def copyanything(src, dst):\n try:\n copytree(src, dst, dirs_exist_ok=True)\n except FileExistsError as e: # noqa\n pass\n except OSError as err:\n # TODO(dittrich): This causes a pylint error\n # Not sure what test cases would trigger this, or best fix.\n if err.errno == os.errno.ENOTDIR: # type: ignore\n copy(src, dst)\n else:\n raise\n finally:\n remove_other_perms(dst)", "def command_copy(args):\n sources = args.sources\n destpath = args.destpath\n source_files = []\n for file_ in sources:\n if \"*\" in file_:\n selected = glob(file_)\n source_files.extend(selected)\n elif os.path.isfile(file_):\n source_files.append(file_)\n\n if destpath.endswith(\"/\") or os.path.isdir(destpath) or len(sources) > 1:\n # -- DESTDIR-MODE: Last argument is a directory.\n destdir = destpath\n else:\n # -- DESTFILE-MODE: Copy (and rename) one file.\n assert len(source_files) == 1\n destdir = os.path.dirname(destpath)\n\n # -- WORK-HORSE: Copy one or more files to destpath.\n if not os.path.isdir(destdir):\n sys.stdout.write(\"copy: Create dir %s\\n\" % destdir)\n os.makedirs(destdir)\n for source in source_files:\n destname = os.path.join(destdir, os.path.basename(source))\n sys.stdout.write(\"copy: %s => %s\\n\" % (source, destname))\n shutil.copy(source, destname)\n return 0", "def safe_copy(file_path, out_dir, dst=None):\n name = dst or os.path.basename(file_path)\n if not os.path.exists(os.path.join(out_dir, name)):\n shutil.copy(file_path, os.path.join(out_dir, name))", "def copy_paste(src_path, dst_path):\n shutil.copy2(src_path, dst_path)\n\n return True", "def CopyFiles(src_list, dst, src_base='',\n preserve=False, recursive=False,\n ignore_existence_check=False):\n if not src_list:\n return\n\n def _MakeDirs(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n def _CopyDir(src_dir, dst_dir):\n _MakeDirs(dst_dir)\n\n for filename in os.listdir(src_dir):\n src = os.path.join(src_dir, filename)\n _Copy(src, dst_dir)\n\n def _Copy(src, dst):\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n\n if os.path.isdir(src):\n _CopyDir(src, dst)\n else:\n shutil.copy(src, dst)\n\n # Copy update time and permission\n if preserve:\n shutil.copystat(src, dst)\n # Changes the file writable so we can overwrite it later.\n os.chmod(dst, os.stat(dst).st_mode | stat.S_IWRITE)\n\n def _ErrorCheck(src, src_base):\n if 
not os.path.exists(src):\n if ignore_existence_check:\n return False\n else:\n _ErrorExit('No such file or directory: \"%s\"' % src)\n\n if os.path.isdir(src) and not recursive:\n _ErrorExit('Cannot copy a directory: \"%s\"' % src)\n\n if not src.startswith(src_base):\n _ErrorExit('Source file does not start with src_base: \"%s\"' % src)\n\n return True\n\n dst = os.path.abspath(dst)\n if src_base:\n src_base = os.path.abspath(src_base)\n\n # dst may be a file instead of a directory.\n if len(src_list) == 1 and not os.path.exists(dst) and not src_base:\n src = os.path.abspath(src_list[0])\n if _ErrorCheck(src, src_base):\n _MakeDirs(os.path.dirname(dst))\n _Copy(src, dst)\n return\n\n # dst should be a directory here.\n for src in src_list:\n src = os.path.abspath(src)\n\n if src_base:\n dst_dir = os.path.join(dst,\n os.path.relpath(os.path.dirname(src), src_base))\n else:\n dst_dir = dst\n\n if _ErrorCheck(src, src_base):\n _MakeDirs(dst_dir)\n _Copy(src, dst_dir)", "def copy_file(src_file,dst_folder):\n from shutil import copyfile\n from os.path import split\n copyfile(src_file, dst_folder+split(src_file)[1])\n return", "def copy_subtree(src, dst):\n for src_f in os.listdir(src):\n src_path = os.path.join(src, src_f)\n if os.path.isdir(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copytree(src_path, dst_path)\n else:\n ProcessJson.copy_subtree(src_path, dst_path)\n elif os.path.isfile(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copy(src_path, dst_path)", "def copy_paths(src, dst, paths, *, exclude=None):\n files = []\n\n for path in paths:\n if isinstance(path, tuple):\n files += copy_path(src, dst, path[0], path[1], exclude=exclude)\n else:\n files += copy_path(src, dst, path, exclude=exclude)\n\n return files", "def copy_dir(src=\"\", dst=\"\", header=\"\", footer=\"\", clip=0, ext=\"\", test=False):\n failed = []\n nfiles = 0\n if not os.path.exists(dst):\n os.makedirs(dst)\n if not os.path.exists(src):\n raise argparse.ArgumentError(\"source does not exist! It must be a directory.\")\n else:\n for root, dirs, files in os.walk(src, topdown=False):\n for name in files:\n name_wo_ext, file_ext = os.path.splitext(name)\n\n src_path = os.path.join(root, name)\n dstfilename = header + os.path.join(root[len(src)+1:], name_wo_ext[clip:]) + footer + file_ext\n dst_path = os.path.join(dst, dstfilename)\n\n dst_pdir = os.path.dirname(dst_path)\n if not os.path.exists(dst_pdir):\n os.makedirs(dst_pdir)\n\n if not os.path.exists(dst_path):\n if ext == \"\" or ext == file_ext[1:]:\n try:\n shutil.copy(src_path, dst_path)\n except:\n failed.append(src_path)\n print(f\"... {src_path} failed\")\n else:\n print(f\"... {dst_path} already exists'. Skipping\")\n nfiles += 1\n\n if test:\n break\n if test:\n break\n print(f\"{nfiles - len(failed)} / {nfiles} files were copied.\")\n return failed", "def copy(self):\n source = os.path.abspath(self.path)\n destination = os.path.abspath(self.target)\n\n logger.info(\"Running Copy Method - SOURCE=\\\"{src}\\\" DESTINATION=\\\"{dst}\\\" IGNORE=\\\"{ignore}\\\"\".format(src=source, dst=destination, ignore=self.ignore))\n\n if not os.path.exists(source):\n logger.error(\"\\\"{source}\\\" PATH DOESN'T EXIST. PROGRAM TERMINATED. 
Please check log file.\".format(source=source))\n\n if self.rules is not None:\n files = self.rules\n else:\n self.create_packet_structure(source)\n files = self.files\n\n for (k,v) in files.items():\n src = os.path.join(source,k)\n dst = os.path.join(destination,v)\n dirpath = os.path.dirname(dst)\n if not os.path.isdir(dirpath):\n logger.info(\"Create directory - \\\"{dst}\\\"\".format(dst=dirpath))\n os.makedirs(dirpath)\n logger.info(\"copy from \\\"{f}\\\" to \\\"{t}\\\"\".format(f=src,t=dst))\n shutil.copyfile(src,dst)\n logger.info(\"OK\")", "def run_copy(self, src, dst):\n pass", "def _copy_dir(src, dst):\n if os.path.isdir(src):\n os.makedirs(dst, exist_ok=True)\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n\n if os.path.isdir(s):\n _copy_dir(s, d)\n else:\n shutil.copy2(s, d)\n\n else:\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n _delete_file(dst)\n shutil.copy2(src, dst)", "def copyFile(srcPath, destPath):\n shutil.copy(srcPath, destPath)", "def copy_files(src_path, dst_path):\r\n for folder in os.listdir(src_path):\r\n for file in os.listdir(os.path.join(src_path, folder)):\r\n source = os.path.join(os.path.join(src_path, folder), file)\r\n dest = os.path.join(dst_path, file)\r\n shutil.copy(source, dest)", "def _copy_file_with_parents(src, dst, ignore_no_src=False):\n if not os.path.isfile(src) and ignore_no_src:\n return\n\n dst_dir = os.path.dirname(dst)\n create_directories(dst_dir)\n\n copyfile(src, dst)", "def copy(self, source, dest, dry_run=False, after=False, force=False,\n include=None, exclude=None):\n source = self._map_files(source)\n dest = self._map_files(dest)\n \n eh = SimpleErrorHandler()\n self._client.execute('copy', source, dest, n=dry_run, A=after,\n f=force, I=include, X=exclude, eh=eh)\n\n return bool(eh)", "def copy(self, src_path: str, tgt_path: str) -> None:", "def copy_file_to_multiple_subfolders(src, dst, *args, **kwargs):\n print '\\nSource: {}\\nDestinations parent folder: {}'.format(src, dst)\n filename = os.path.basename(src)\n for folder in (d for d in os.listdir(dst) if os.path.isdir(d)):\n print '\\nCopying {} to {}...'.format(filename, folder)\n try:\n shutil.copy(src, os.path.abspath(dst) + '\\\\' + folder)\n except Exception as e:\n print e", "def copy(src, dst):\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n shutil.copy2(src, dst)" ]
[ "0.6933519", "0.67284834", "0.6617384", "0.6540199", "0.6409578", "0.63443166", "0.63072366", "0.62956154", "0.6293814", "0.6271471", "0.6246239", "0.60427904", "0.59695697", "0.5941309", "0.5925351", "0.5902801", "0.5888825", "0.58824974", "0.587282", "0.58454823", "0.5841725", "0.5806995", "0.57897377", "0.57891864", "0.5778245", "0.57608724", "0.57384235", "0.5716775", "0.5694107", "0.5640309" ]
0.7357302
0
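The row above combines per-file include/exclude filtering with a cp-like copy. Below is a simplified, hypothetical sketch of that idea; it matches patterns against basenames only and omits the directory-recursion and overwrite checks of the helper shown above.

```python
import fnmatch
import os
import shutil
import tempfile

def filtered_copy(src, dst_dir, includes=None, excludes=None):
    """Copy a single file into dst_dir only if it passes the filters."""
    name = os.path.basename(src)
    if includes and not any(fnmatch.fnmatch(name, p) for p in includes):
        return False
    if excludes and any(fnmatch.fnmatch(name, p) for p in excludes):
        return False
    shutil.copy(src, dst_dir)
    return True

src_dir, dst_dir = tempfile.mkdtemp(), tempfile.mkdtemp()
for name in ('keep.txt', 'skip.bin'):
    open(os.path.join(src_dir, name), 'w').close()

for name in sorted(os.listdir(src_dir)):
    ok = filtered_copy(os.path.join(src_dir, name), dst_dir, includes=['*.txt'])
    print(name, 'copied' if ok else 'skipped')
```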
MovePath from src to dst. Moves the src to the dst much like the Unix-style mv command, except it only handles one source at a time. Because of possible temporary failures due to locks (such as antivirus software on Windows), the function will retry up to five times.
def MovePath(options, src, dst):
  # if the destination is not an existing directory, then overwrite it
  if os.path.isdir(dst):
    dst = os.path.join(dst, os.path.basename(src))

  # If the destination exists, the remove it
  if os.path.exists(dst):
    if options.force:
      Remove(['-vfr', dst])
      if os.path.exists(dst):
        raise OSError('mv: FAILED TO REMOVE ' + dst)
    else:
      raise OSError('mv: already exists ' + dst)
  for _ in range(5):
    try:
      os.rename(src, dst)
      break
    except OSError as error:
      print('Failed on %s with %s, retrying' % (src, error))
      time.sleep(5)
  else:
    print('Gave up.')
    raise OSError('mv: ' + error)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def move_file(src, dst):\n # Sanity checkpoint\n src = re.sub('[^\\w/\\-\\.\\*]', '', src)\n dst = re.sub('[^\\w/\\-\\.\\*]', '', dst)\n if len(re.sub('[\\W]', '', src)) < 5 or len(re.sub('[\\W]', '', dst)) < 5:\n debug.log(\"Error: Moving file failed. Provided paths are invalid! src='%s' dst='%s'\"%(src, dst))\n else:\n # Check destination\n check = False\n if dst[-1] == '/':\n if os.path.exists(dst):\n check = True # Valid Dir\n else:\n debug.log(\"Error: Moving file failed. Destination directory does not exist (%s)\"%(dst)) #DEBUG\n elif os.path.exists(dst):\n if os.path.isdir(dst):\n check = True # Valid Dir\n dst += '/' # Add missing slash\n else:\n debug.log(\"Error: Moving file failed. %s exists!\"%dst)\n elif os.path.exists(os.path.dirname(dst)):\n check = True # Valid file path\n else:\n debug.log(\"Error: Moving file failed. %s is an invalid distination!\"%dst)\n if check:\n # Check source\n files = glob.glob(src)\n if len(files) != 0:\n debug.log(\"Moving File(s)...\", \"Move from %s\"%src, \"to %s\"%dst)\n for file_ in files:\n # Check if file contains invalid symbols:\n invalid_chars = re.findall('[^\\w/\\-\\.\\*]', os.path.basename(file_))\n if invalid_chars:\n debug.graceful_exit((\"Error: File %s contains invalid \"\n \"characters %s!\"\n )%(os.path.basename(file_), invalid_chars))\n continue\n # Check file exists\n if os.path.isfile(file_):\n debug.log(\"Moving file: %s\"%file_)\n shutil.move(file_, dst)\n else:\n debug.log(\"Error: Moving file failed. %s is not a regular file!\"%file_)\n else: debug.log(\"Error: Moving file failed. No files were found! (%s)\"%src)", "def mv(src_path, dest_path):\n try:\n os.rename(src_path, dest_path)\n except OSError:\n # this will happen on windows\n os.remove(dest_path)\n os.rename(src_path, dest_path)", "def move(self, name, source, dest):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n self._run(name, ['move', source, dest])\n self.m.path.mock_copy_paths(source, dest)\n self.m.path.mock_remove_paths(source)", "def safe_move(src: str, dst: str) -> None:\n try:\n os.rename(src, dst)\n except OSError as err:\n\n if err.errno == errno.EXDEV:\n # Generate a unique ID, and copy `<src>` to the target directory\n # with a temporary name `<dst>.<ID>.tmp`. Because we're copying\n # across a filesystem boundary, this initial copy may not be\n # atomic. 
We intersperse a random UUID so if different processes\n # are copying into `<dst>`, they don't overlap in their tmp copies.\n copy_id = uuid4()\n tmp_dst = \"%s.%s.tmp\" % (dst, copy_id)\n shutil.copyfile(src, tmp_dst)\n\n # Then do an atomic rename onto the new name, and clean up the\n # source image.\n os.rename(tmp_dst, dst)\n os.unlink(src)\n else:\n raise", "def RenameDir(srcdir, destdir):\n\n max_tries = 5\n num_tries = 0\n for num_tries in xrange(max_tries):\n try:\n RemoveDir(destdir)\n shutil.move(srcdir, destdir)\n return\n except OSError as err:\n if err.errno != errno.EACCES:\n raise err\n # If we are here, we didn't exit due to raised exception, so we are\n # handling a Windows flaky access error. Sleep one second and try\n # again.\n time.sleep(num_tries + 1)\n\n # end of while loop -- could not RenameDir\n raise Error('Could not RenameDir %s => %s after %d tries.\\n'\n 'Please check that no shells or applications '\n 'are accessing files in %s.'\n % (srcdir, destdir, num_tries, destdir))", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def copy_with_retry(src, dst):\n\n for _i in range(0, 5):\n try:\n if os.path.exists(dst):\n delete_with_retry(dst)\n\n shutil.copytree(src, dst)\n return\n except:\n time.sleep(0.1)\n\n print(\"Could not copy directory after 5 attempts\")\n sys.exit(1)", "def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)", "def move_file(original_path,final_path,max_attempts=30):\n assert_is_string(original_path)\n assert_is_string(final_path)\n\n attempt_counter = 0\n while attempt_counter < max_attempts:\n attempt_counter += 1\n if attempt_counter > 1:\n # Pause if something went wrong, (yt-dl is a suspect, might not be closing files?)\n time.sleep(attempt_counter)\n logging.debug(\"Attempt \"+repr(attempt_counter)+\" to move \"+repr(original_path)+\" to \"+repr(final_path))\n try:\n # Make sure output folder exists\n output_dir = os.path.dirname(final_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n assert(os.path.exists(output_dir))\n # Move file\n shutil.move(original_path, final_path)\n assert(not os.path.exists(original_path))\n assert(os.path.exists(final_path))\n return\n except WindowsError, err:\n logging.exception(err)\n logging.error(\"Failed to move file: \"+repr(original_path)+\" to \"+repr(final_path))\n continue\n # If we get here we already have an exception to re-raise\n logging.critical(\"move_file() Too many failed attempts to move a file!\")\n logging.critical(\"move_file()\"+repr(locals()))\n raise", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def SshMoveFile(host, src_path, dest_path):\n command = ['ssh', host, 'test', '-e', src_path]\n result = RunCommand(command)\n if result:\n # Nothing to do if src_path doesn't exist.\n return result\n\n command = ['ssh', host, 'mv', src_path, dest_path]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh mv \"%s\" -> \"%s\" on \"%s\" (%s)' %\n (src_path, dest_path, host, result))", "def mv(self, src: int, dest: int) -> bool:\n url = 
'https://webapi.115.com/files/move'\n result = self.s.post(url, data={'pid': dest, 'fid[0]': src}, headers={'Origin': origin['webapi'], 'Referer': referer['115'].format(self.default_dir)}).json()\n if result['errno'] == '':\n _ = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs) # TODO: need to test\n self._dirs_lookup[src] = self._dirs_lookup[dest].append(dest)\n parent = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs)\n if src not in parent:\n parent.update({src: _})\n else:\n parent.get(src).update(_)\n return True", "def move(self, dst, src): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def movefile(destpath,filename,sourcepath):\n\n\tcommand = 'mv ' + filename + ' ' + destpath\n\t\n\ttry :\n\t\tst = commands.getstatusoutput(command)\n\texcept Exception:\n\t\traise", "def hmove(src_path, res_path):\n os.rename(src_path, res_path)", "def Move(args):\n\n parser = argparse.ArgumentParser(usage='mv [Options] sources... dest',\n description=Move.__doc__)\n parser.add_argument(\n '-v', '--verbose', dest='verbose', action='store_true',\n default=False,\n help='verbose output.')\n parser.add_argument(\n '-f', '--force', dest='force', action='store_true',\n default=False,\n help='force, do not error it files already exist.')\n parser.add_argument('srcs', nargs='+')\n parser.add_argument('dest')\n\n options = parser.parse_args(args)\n\n if options.verbose:\n print('mv %s %s' % (' '.join(options.srcs), options.dest))\n\n for src in options.srcs:\n MovePath(options, src, options.dest)\n return 0", "def moveFile(source, dest):\n try:\n shutil.move(source, dest) \n except IOError as e:\n print (\"Unable to move file. %s\" %(e))", "def supportRecursiveMove(self, destPath):\r\n return True", "def supportRecursiveMove(self, destPath):\r\n return True", "def moveFile(sourceFullPath,targetDir):\n\n thisFunc = inspect.currentframe().f_code.co_name\n try:\n shutil.move(sourceFullPath,targetDir)\n return True\n except Exception as e:\n print(f\"{thisFunc} issue: {e}\")\n return False", "def MoveFile(from_path, to_path, check_conflicts=True):\n from_path = from_path.replace(\"/\", \"\\\\\")\n if check_conflicts and not os.path.isfile(from_path): # Don't move non-existant path\n raise FileNotFoundError(\"Path {} does not exist!\".format(from_path))\n to_path = to_path.replace(\"/\", \"\\\\\")\n if check_conflicts and not os.path.isdir(os.path.dirname(to_path)): # Don't move to non-existant dir\n raise FileNotFoundError(\"Path {} does not exist to move to!\".format(os.path.dirname(to_path)))\n values = __get_current_values()\n if check_conflicts and os.path.isfile(to_path): # Don't move to already-existing destination unless it will be deleted/moved\n values.reverse()\n try:\n to_path_index = values.index(\"\\\\??\\\\\" + to_path)\n except ValueError:\n to_path_index = -1\n if to_path_index % 2 == 0 or to_path_index == -1:\n raise FileExistsError(\"Path {} already exists and isn't already being deleted/moved!\".format(to_path))\n values.reverse()\n values.append(\"\\\\??\\\\\" + from_path)\n values.append(\"\\\\??\\\\\" + to_path)\n __set_registry(values)", "def move(queue: Queue,\n from_path: list,\n to_path: str\n ) -> None:\n if len(from_path) > 1: # In case files were got with mask\n for single_path in from_path:\n file = os.path.basename(os.path.normpath(single_path))\n files_location = os.path.commonpath(from_path)\n queue.put(file)\n Thread(target=move_file, args=(queue, files_location, to_path)).start()\n print('Files have been 
moved.')\n else: # In other cases there will be just one item in array\n source_location = from_path[0]\n if os.path.isdir(from_path[0]):\n files = os.listdir(source_location)\n folder_name = os.path.basename(os.path.normpath(source_location))\n path_to_folder = os.path.join(to_path, folder_name)\n\n if not os.path.exists(path_to_folder):\n os.mkdir(path_to_folder)\n\n threads = []\n for file in files:\n # Each file we put to a queue that has limited number of items.\n # And than it creates a separate thread for each file.\n queue.put(file)\n move_thread = Thread(target=move_file, args=(queue, source_location, path_to_folder))\n threads.append(move_thread)\n move_thread.start()\n # Make sure that all our thread are finished before removing original folder\n for thread in threads:\n thread.join()\n\n os.rmdir(source_location)\n print('Folder has been moved.')\n elif os.path.isfile(from_path[0]): # If it's a file we just copy it without any threads\n file_location = from_path[0]\n file_name = os.path.basename(os.path.normpath(file_location))\n if not os.path.exists(file_name):\n shutil.move(file_location, to_path)\n print(f'File {file_name} has been moved.')\n else:\n print(f'File {file_name} already exists')\n elif not os.path.exists(from_path[0]):\n raise NameError('No such files or folders.')", "def move_files(src_dir, dst_dir):\n for f in os.listdir(src_dir):\n try:\n name, season, episode = FILENAME_PATTERN.search(f).groups()\n except AttributeError:\n try:\n name, season, episode = FILENAME_PATTERN2.search(f).groups()\n except AttributeError:\n print \"Cannot parse\", f\n pass\n\n name = name.replace('.', ' ').replace('_', ' ').strip().title()\n\n dir_path = os.path.join(dst_dir, name, 'Season %02d' % int(season))\n full_path = os.path.join(dir_path, f)\n source_path = os.path.join(src_dir, f)\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, 0777)\n\n if not os.path.exists(full_path):\n shutil.move(source_path, full_path)\n os.symlink(full_path, source_path)", "def moveAsset(self, src, dst):\n if not self.exists( self.dirname(dst) ):\n self.makedirs( self.dirname(dst) )\n self.move(src, dst)\n\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return \n\n cache_dst = self.cache_path(dst)\n if not os.path.exists( os.path.dirname(cache_dst) ):\n os.makedirs( os.path.dirname(cache_dst) )\n shutil.move(cache_src, cache_dst)", "def MoveFile(path, new_path):\n try:\n RemoveFile(new_path)\n os.rename(path, new_path)\n except OSError, e:\n if e.errno != errno.ENOENT:\n raise", "def move_file(path):\n new_path = os.path.join(TEST_DIR, TEST_FILE)\n command = ['mv', TEST_FILE, new_path]\n file_operation(path, command)", "def move(self, dest_fqpath):\n ret = move_file(self._host, self._fqpath, dest_fqpath)\n\n if ret:\n # TODO: change this to use a setter/getter for heavy lifting once\n # and can reset everything from one place\n self._previous_fqpath = self._fqpath\n self._fqpath = dest_fqpath\n\n return True\n\n return False" ]
[ "0.6545725", "0.6515758", "0.6515758", "0.64532274", "0.6410229", "0.6364384", "0.6355971", "0.62232614", "0.6181752", "0.61022437", "0.60913193", "0.60819167", "0.5994191", "0.59898484", "0.5962427", "0.5898883", "0.58975774", "0.587177", "0.5837211", "0.5770024", "0.5693028", "0.5693028", "0.5687646", "0.5642633", "0.5524945", "0.5515548", "0.55140334", "0.5472311", "0.54663867", "0.5445323" ]
0.7736526
0
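The retry loop in the row above is a general pattern for riding out transient Windows file locks. Here is a small stand-alone sketch of that pattern; the attempt count loosely mirrors the row above, while the helper name, delay parameter, and demo paths are invented for illustration.

```python
import os
import tempfile
import time

def rename_with_retry(src, dst, attempts=5, delay=5):
    """Retry os.rename to survive transient locks such as antivirus scans."""
    last_error = None
    for _ in range(attempts):
        try:
            os.rename(src, dst)
            return
        except OSError as error:
            last_error = error
            print('Failed on %s with %s, retrying' % (src, error))
            time.sleep(delay)
    raise OSError('mv: ' + str(last_error))

# Tiny demo on a throwaway temp file (delay=0 keeps it fast).
fd, path = tempfile.mkstemp()
os.close(fd)
rename_with_retry(path, path + '.moved', delay=0)
```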
A Unix-style rm. Removes the list of paths. Because of possible temporary failures due to locks (such as antivirus software on Windows), the function will retry up to five times.
def Remove(args):
  parser = argparse.ArgumentParser(usage='rm [Options] PATHS...',
                                   description=Remove.__doc__)
  parser.add_argument(
      '-R', '-r', '--recursive', dest='recursive', action='store_true',
      default=False,
      help='remove directories recursively.')
  parser.add_argument(
      '-v', '--verbose', dest='verbose', action='store_true',
      default=False,
      help='verbose output.')
  parser.add_argument(
      '-f', '--force', dest='force', action='store_true',
      default=False,
      help='force, do not error it files does not exist.')
  parser.add_argument('files', nargs='+')
  options = parser.parse_args(args)

  try:
    for pattern in options.files:
      dst_files = glob.glob(pattern)
      if not dst_files:
        # Ignore non existing files when using force
        if options.force:
          continue
        raise OSError('rm: no such file or directory: ' + pattern)

      for dst in dst_files:
        if options.verbose:
          print('rm ' + dst)

        if os.path.isfile(dst) or os.path.islink(dst):
          for _ in range(5):
            try:
              # Check every time, since it may have been deleted after the
              # previous failed attempt.
              if os.path.isfile(dst) or os.path.islink(dst):
                os.remove(dst)
              break
            except OSError as error:
              print('Failed remove with %s, retrying' % error)
              time.sleep(5)
          else:
            print('Gave up.')
            raise OSError('rm: ' + str(error))

        if options.recursive:
          for _ in range(5):
            try:
              if os.path.isdir(dst):
                if sys.platform == 'win32':
                  # shutil.rmtree doesn't handle junctions properly. Let's just
                  # shell out to rd for this.
                  subprocess.check_call([
                      'rd', '/s', '/q', os.path.normpath(dst)], shell=True)
                else:
                  shutil.rmtree(dst)
              break
            except OSError as error:
              print('Failed rmtree with %s, retrying' % error)
              time.sleep(5)
          else:
            print('Gave up.')
            raise OSError('rm: ' + str(error))

  except OSError as error:
    print(error)

  return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RemoveDirectory(*path):\n file_path = os.path.join(*path)\n if not os.path.exists(file_path):\n return\n\n if sys.platform == 'win32':\n # Give up and use cmd.exe's rd command.\n file_path = os.path.normcase(file_path)\n for _ in xrange(3):\n print('RemoveDirectory running %s' % (' '.join(\n ['cmd.exe', '/c', 'rd', '/q', '/s', file_path])))\n if not subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]):\n break\n print(' Failed')\n time.sleep(3)\n return\n\n def RemoveWithRetry_non_win(rmfunc, path):\n if os.path.islink(path):\n return os.remove(path)\n else:\n return rmfunc(path)\n\n remove_with_retry = RemoveWithRetry_non_win\n\n def RmTreeOnError(function, path, excinfo):\n r\"\"\"This works around a problem whereby python 2.x on Windows has no ability\n to check for symbolic links. os.path.islink always returns False. But\n shutil.rmtree will fail if invoked on a symbolic link whose target was\n deleted before the link. E.g., reproduce like this:\n > mkdir test\n > mkdir test\\1\n > mklink /D test\\current test\\1\n > python -c \"import chromium_utils; chromium_utils.RemoveDirectory('test')\"\n To avoid this issue, we pass this error-handling function to rmtree. If\n we see the exact sort of failure, we ignore it. All other failures we re-\n raise.\n \"\"\"\n\n exception_type = excinfo[0]\n exception_value = excinfo[1]\n # If shutil.rmtree encounters a symbolic link on Windows, os.listdir will\n # fail with a WindowsError exception with an ENOENT errno (i.e., file not\n # found). We'll ignore that error. Note that WindowsError is not defined\n # for non-Windows platforms, so we use OSError (of which it is a subclass)\n # to avoid lint complaints about an undefined global on non-Windows\n # platforms.\n if (function is os.listdir) and issubclass(exception_type, OSError):\n if exception_value.errno == errno.ENOENT:\n # File does not exist, and we're trying to delete, so we can ignore the\n # failure.\n print('WARNING: Failed to list %s during rmtree. Ignoring.\\n' % path)\n else:\n raise\n else:\n raise\n\n for root, dirs, files in os.walk(file_path, topdown=False):\n # For POSIX: making the directory writable guarantees removability.\n # Windows will ignore the non-read-only bits in the chmod value.\n os.chmod(root, 0o770)\n for name in files:\n remove_with_retry(os.remove, os.path.join(root, name))\n for name in dirs:\n remove_with_retry(lambda p: shutil.rmtree(p, onerror=RmTreeOnError),\n os.path.join(root, name))\n\n remove_with_retry(os.rmdir, file_path)", "def RemoveDirectory(*path):\n file_path = os.path.join(*path)\n if not os.path.exists(file_path):\n return\n\n if sys.platform == 'win32':\n # Give up and use cmd.exe's rd command.\n file_path = os.path.normcase(file_path)\n for _ in xrange(3):\n print 'RemoveDirectory running %s' % (' '.join(\n ['cmd.exe', '/c', 'rd', '/q', '/s', file_path]))\n if not subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]):\n break\n print ' Failed'\n time.sleep(3)\n return\n\n def RemoveWithRetry_non_win(rmfunc, path):\n if os.path.islink(path):\n return os.remove(path)\n else:\n return rmfunc(path)\n\n remove_with_retry = RemoveWithRetry_non_win\n\n def RmTreeOnError(function, path, excinfo):\n r\"\"\"This works around a problem whereby python 2.x on Windows has no ability\n to check for symbolic links. os.path.islink always returns False. But\n shutil.rmtree will fail if invoked on a symbolic link whose target was\n deleted before the link. 
E.g., reproduce like this:\n > mkdir test\n > mkdir test\\1\n > mklink /D test\\current test\\1\n > python -c \"import chromium_utils; chromium_utils.RemoveDirectory('test')\"\n To avoid this issue, we pass this error-handling function to rmtree. If\n we see the exact sort of failure, we ignore it. All other failures we re-\n raise.\n \"\"\"\n\n exception_type = excinfo[0]\n exception_value = excinfo[1]\n # If shutil.rmtree encounters a symbolic link on Windows, os.listdir will\n # fail with a WindowsError exception with an ENOENT errno (i.e., file not\n # found). We'll ignore that error. Note that WindowsError is not defined\n # for non-Windows platforms, so we use OSError (of which it is a subclass)\n # to avoid lint complaints about an undefined global on non-Windows\n # platforms.\n if (function is os.listdir) and issubclass(exception_type, OSError):\n if exception_value.errno == errno.ENOENT:\n # File does not exist, and we're trying to delete, so we can ignore the\n # failure.\n print 'WARNING: Failed to list %s during rmtree. Ignoring.\\n' % path\n else:\n raise\n else:\n raise\n\n for root, dirs, files in os.walk(file_path, topdown=False):\n # For POSIX: making the directory writable guarantees removability.\n # Windows will ignore the non-read-only bits in the chmod value.\n os.chmod(root, 0770)\n for name in files:\n remove_with_retry(os.remove, os.path.join(root, name))\n for name in dirs:\n remove_with_retry(lambda p: shutil.rmtree(p, onerror=RmTreeOnError),\n os.path.join(root, name))\n\n remove_with_retry(os.rmdir, file_path)", "def rm(*fns):\n for fn in fns:\n try:\n os.remove(fn)\n except FileNotFoundError:\n pass", "def rm_rf(path, max_retries=5, trash=True, clean_empty_parents=False, *args, **kw):\n try:\n path = abspath(path)\n if isdir(path) and not islink(path):\n rmtree(path)\n elif lexists(path):\n unlink_or_rename_to_trash(path)\n finally:\n if lexists(path):\n return False\n if clean_empty_parents:\n remove_empty_parent_paths(path)\n return True", "def cleanFiles(a_file_list):\n for entry in a_file_list:\n cmd = 'sudo rm ' + entry\n os.system(cmd)", "def _clean_up(paths):\n print('Cleaning up')\n # Iterate over the given paths, unlinking them\n for path in paths:\n if os.path.exists(path):\n print('Removing %s' % path)\n os.unlink(path)\n else:\n print('%s Not found. Skipped.' 
% path)", "def rm(path):\n try:\n shutil.rmtree(path)\n except Exception as e:\n print(\"* [Error] occured: {}\\n\".format(e))\n else:\n print(\"* Done.\\n\")", "def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)", "def rm_r(sftp, path):\n files = sftp.listdir(path)\n for f in files:\n filepath = os.path.join(path, f)\n logger.info('Deleting: %s' % (filepath))\n try:\n sftp.remove(filepath)\n except IOError:\n rm_r(sftp, filepath)", "def delete_files_or_dirs(delete_list):\n try:\n from os import unlink\n from shutil import rmtree\n except ImportError, ie:\n log.err(ie)\n\n for temp in delete_list:\n try:\n unlink(temp)\n except OSError:\n rmtree(temp, ignore_errors=True)", "def safe_rm(path_to_rm):\n # just return if path doesn't exist\n if not os.path.exists(path_to_rm):\n return\n # handle directory\n if os.path.isdir(path_to_rm):\n files_to_rm = [f'{path_to_rm}/{fname}' for fname in os.listdir(path_to_rm)]\n dir_to_rm = path_to_rm\n else:\n files_to_rm = [path_to_rm]\n dir_to_rm = None\n # clear out files\n for file_to_rm in files_to_rm:\n if os.path.isfile(file_to_rm) and os.path.basename(file_to_rm) in REMOVABLE_PATHS:\n os.remove(file_to_rm)\n assert not os.path.exists(file_to_rm), f'Error removing: {file_to_rm}'\n # clear out directory\n if dir_to_rm is not None and os.path.isdir(dir_to_rm):\n os.rmdir(dir_to_rm)\n assert not os.path.exists(dir_to_rm), f'Error removing: {dir_to_rm}'", "def delete_with_retry(folder):\n\n for _i in range(0, 5):\n try:\n if os.path.exists(folder):\n shutil.rmtree(folder)\n\n return\n except:\n time.sleep(0.1)\n\n print(\"Could not delete directory after 5 attempts: %s\" % folder)\n sys.exit(1)", "def remove_files(file_list):\n###############################################################################\n for fpath in file_list:\n if os.path.exists(fpath):\n os.remove(fpath)\n # End if\n # End for", "def fake_sudo_rm(self, cmd):\n for filename in cmd[2:]:\n if os.path.exists(filename):\n os.remove(filename)", "def rm_subdirs(path, onerror=None):\r\n\r\n # NOTE this code is adapted from the one in shutil.rmtree, and is\r\n # just as fast\r\n\r\n names = []\r\n try:\r\n names = os.listdir(path)\r\n except os.error as err:\r\n if onerror is not None:\r\n onerror(os.listdir, path, sys.exc_info())\r\n else:\r\n raise\r\n\r\n for name in names:\r\n fullname = os.path.join(path, name)\r\n if os.path.isdir(fullname):\r\n if onerror is not None:\r\n shutil.rmtree(fullname, False, onerror)\r\n else:\r\n # allow the rmtree to fail once, wait and re-try.\r\n # if the error is raised again, fail\r\n err_count = 0\r\n while True:\r\n try:\r\n shutil.rmtree(fullname, False, None)\r\n break\r\n except os.error:\r\n if err_count > 0:\r\n raise\r\n err_count += 1\r\n time.sleep(RM_SUBDIRS_RETRY_TIME)", "def rm(args):\n try:\n opts, args = getopt(args, \"rRf\", [\"force\", \"recursive\"])\n except GetoptError as e:\n raise errors.PythonError(\"rm: %s\" % e, 1)\n force = False\n recursive = False\n for o, a in opts:\n if o in ('-f', '--force'):\n force = True\n elif o in ('-r', '-R', '--recursive'):\n recursive = True\n for f in args:\n if os.path.isdir(f):\n if not recursive:\n raise errors.PythonError(\"rm: cannot remove '%s': Is a directory\" % f, 1)\n else:\n shutil.rmtree(f, force)\n elif os.path.exists(f):\n try:\n os.unlink(f)\n except:\n if not force:\n raise errors.PythonError(\"rm: failed to remove '%s': %s\" % (f, sys.exc_info()[0]), 1)\n elif not force:\n raise errors.PythonError(\"rm: cannot 
remove '%s': No such file or directory\" % f, 1)", "def rm_r(to_rm, ignore_missing=True):\n def actually_rm_r(task):\n for f in sugar_list(to_rm):\n logging.getLogger(__name__).info(\"Removing recursively: \"+f)\n shutil.rmtree(f, ignore_errors=ignore_missing)\n return actually_rm_r", "def delete_files(pths):\n for f in pths:\n try:\n os.remove(f)\n except OSError:\n log.debug(\"Found and ignored Error when deleting file %s\" % f)\n pass\n log.debug(\"deleted %d files\" % len(pths))", "def cleanup_mounts(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start cleanup mounts\")\n all_mounts_procs = []\n valid_mounts = []\n for mount_obj in mounts:\n g.log.info(\"Cleaning up data from %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n if (not mount_obj.mountpoint or\n (os.path.realpath(os.path.abspath(mount_obj.mountpoint))\n is '/')):\n g.log.error(\"%s on %s is not a valid mount point\",\n mount_obj.mountpoint, mount_obj.client_system)\n continue\n cmd = \"rm -rf %s/*\" % (mount_obj.mountpoint)\n proc = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n all_mounts_procs.append(proc)\n valid_mounts.append(mount_obj)\n g.log.info(\"rm -rf on all clients is complete. Validating \"\n \"deletion now...\")\n\n # Get cleanup status\n _rc_rmdir = True\n for i, proc in enumerate(all_mounts_procs):\n ret, out, err = proc.async_communicate()\n if ret != 0 or out or err:\n g.log.error(\"Deleting files/dirs Failed on %s:%s\",\n valid_mounts[i].client_system,\n valid_mounts[i].mountpoint)\n _rc_rmdir = False\n else:\n g.log.info(\"Deleting files/dirs is successful on %s:%s\",\n valid_mounts[i].client_system,\n valid_mounts[i].mountpoint)\n if _rc_rmdir:\n g.log.info(\"Successfully deleted files/dirs from all mounts\")\n else:\n g.log.error(\"Deleting files/dirs failed on some of the mounts\")\n\n # Check if mount points are empty\n ignore_dirs_list = [\".trashcan\"]\n ignore_dirs = \"\\|\".join(ignore_dirs_list)\n all_mounts_procs = []\n for mount_obj in mounts:\n cmd = (\"find %s -mindepth 1 | grep -ve '%s'\" %\n (mount_obj.mountpoint, ignore_dirs))\n proc = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n all_mounts_procs.append(proc)\n\n # Get cleanup status\n _rc_lookup = True\n for i, proc in enumerate(all_mounts_procs):\n ret, out, err = proc.async_communicate()\n if ret == 0:\n g.log.error(\"Mount %s on %s is still having entries:\\n%s\",\n mounts[i].mountpoint, mounts[i].client_system, out)\n _rc_lookup = False\n else:\n g.log.info(\"Mount %s on %s is cleaned up\\n%s\",\n mounts[i].mountpoint, mounts[i].client_system, out)\n if _rc_lookup:\n g.log.info(\"All the mounts are successfully cleaned up\")\n else:\n g.log.error(\"Failed to cleanup all mounts\")\n\n # List mounts entries\n g.log.info(\"Listing mounts entries:\")\n list_all_files_and_dirs_mounts(mounts)\n\n return _rc_lookup", "def _remove_tmpfiles():\n for f in tmpfiles:\n try:\n os.remove(f)\n except OSError:\n pass", "def trial_reset():\n commands = [\n 'rm -f temp/*',\n ]\n\n for cmd in commands:\n p = Popen(cmd, shell=True)\n p.wait()", "def remove_old_files(filelist):\n\n for filename in filelist:\n if path.exists(filename):\n try:\n remove(filename)\n print \"%s deleted\" % filename \n except Exception: #TODO Exception spesifik.\n stderr.write(\"%s cannot remove. 
Please check your priviledge\\n\"\n % filename)\n exit(1)", "def rm(*dest):\n for name in dest:\n try:\n _os.unlink(native(name))\n except OSError as e:\n if _errno.ENOENT != e.errno:\n raise", "def rm(cli):\n __check_in_autonotes_dir()\n\n # File args\n files = cli.config.rm.file\n\n # Remove the files\n __rm(files)", "def DeleteFile(*path):\n\n for line in path:\n if os.path.isdir(line):\n shutil.rmtree(line)\n if os.path.isfile(line):\n os.remove(line)\n\n return 0", "def del_tmp() -> None:\n for elem in os.listdir('./tmp'):\n path = f\"./tmp/{elem}\"\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)", "def rmdirs(path):\n\n for f in file_walker(path,follow_links=False,report_dirs='last'):\n if f[-1]==os.sep:\n if f!=os.sep:\n #print \"os.rmdir(%r)\"%(f[:-1],)\n os.rmdir(f[:-1])\n else:\n #print \"os.remove(%r)\"%(f,)\n os.remove(f)", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def del_files_from_disk(path):\n\n shutil.rmtree(path) #,ignore_errors=True)", "def clean():\n folders = ['utils_dfn/temp', 'utils_dfn/img', 'utils_dfn/mask', 'utils_dfn/output']\n for folder in folders:\n for item in os.listdir(folder):\n item_path = os.path.join(folder, item)\n if os.path.isdir(item_path):\n shutil.rmtree(item_path)\n elif os.path.isfile(item_path):\n os.remove(item_path)" ]
[ "0.67757183", "0.67640287", "0.6433328", "0.63947535", "0.633532", "0.6212685", "0.6200401", "0.61364526", "0.6127158", "0.6074084", "0.60736746", "0.60635823", "0.60595375", "0.5995567", "0.59570605", "0.5914583", "0.58932567", "0.5856356", "0.58533007", "0.5850695", "0.5818536", "0.5804432", "0.57646465", "0.5738665", "0.57215565", "0.5693605", "0.56820965", "0.56755626", "0.5673507", "0.5652234" ]
0.6825982
0
A Unix style zip. Compresses the listed files.
def Zip(args): parser = argparse.ArgumentParser(description=Zip.__doc__) parser.add_argument( '-r', dest='recursive', action='store_true', default=False, help='recurse into directories') parser.add_argument( '-q', dest='quiet', action='store_true', default=False, help='quiet operation') parser.add_argument('zipfile') parser.add_argument('filenames', nargs='+') options = parser.parse_args(args) src_files = [] for filename in options.filenames: globbed_src_args = glob.glob(filename) if not globbed_src_args: if not options.quiet: print('zip warning: name not matched: %s' % filename) for src_file in globbed_src_args: src_file = os.path.normpath(src_file) src_files.append(src_file) if options.recursive and os.path.isdir(src_file): for root, dirs, files in os.walk(src_file): for dirname in dirs: src_files.append(os.path.join(root, dirname)) for filename in files: src_files.append(os.path.join(root, filename)) # zip_data represents a list of the data to be written or appended to the # zip_stream. It is a list of tuples: # (OS file path, zip path/zip file info, and file data) # In all cases one of the |os path| or the |file data| will be None. # |os path| is None when there is no OS file to write to the archive (i.e. # the file data already existed in the archive). |file data| is None when the # file is new (never existed in the archive) or being updated. zip_data = [] new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files] zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i]) for i in range(len(src_files))) write_mode = 'a' if os.path.exists(options.zipfile): with zipfile.ZipFile(options.zipfile, 'r') as zip_stream: try: files_to_update = set(new_files_to_add).intersection( set(zip_stream.namelist())) if files_to_update: # As far as I can tell, there is no way to update a zip entry using # zipfile; the best you can do is rewrite the archive. # Iterate through the zipfile to maintain file order. write_mode = 'w' for zip_path in zip_stream.namelist(): if zip_path in files_to_update: os_path = zip_path_to_os_path_dict[zip_path] zip_data.append((os_path, zip_path, None)) new_files_to_add.remove(zip_path) else: file_bytes = zip_stream.read(zip_path) file_info = zip_stream.getinfo(zip_path) zip_data.append((None, file_info, file_bytes)) except IOError: pass for zip_path in new_files_to_add: zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None)) if not zip_data: print('zip error: Nothing to do! (%s)' % options.zipfile) return 1 with zipfile.ZipFile(options.zipfile, write_mode, zipfile.ZIP_DEFLATED) as zip_stream: for os_path, file_info_or_zip_path, file_bytes in zip_data: if isinstance(file_info_or_zip_path, zipfile.ZipInfo): zip_path = file_info_or_zip_path.filename else: zip_path = file_info_or_zip_path if os_path: st = os.stat(os_path) if stat.S_ISDIR(st.st_mode): # Python 2.6 on the buildbots doesn't support writing directories to # zip files. This was resolved in a later version of Python 2.6. # We'll work around it by writing an empty file with the correct # path. (This is basically what later versions do anyway.) 
zip_info = zipfile.ZipInfo() zip_info.filename = zip_path zip_info.date_time = time.localtime(st.st_mtime)[0:6] zip_info.compress_type = zip_stream.compression zip_info.flag_bits = 0x00 zip_info.external_attr = (st[0] & 0xFFFF) << 16 zip_info.CRC = 0 zip_info.compress_size = 0 zip_info.file_size = 0 zip_stream.writestr(zip_info, '') else: zip_stream.write(os_path, zip_path) else: zip_stream.writestr(file_info_or_zip_path, file_bytes) if not options.quiet: if zip_path in new_files_to_add: operation = 'adding' else: operation = 'updating' zip_info = zip_stream.getinfo(zip_path) if (zip_info.compress_type == zipfile.ZIP_STORED or zip_info.file_size == 0): print(' %s: %s (stored 0%%)' % (operation, zip_path)) elif zip_info.compress_type == zipfile.ZIP_DEFLATED: print(' %s: %s (deflated %d%%)' % (operation, zip_path, 100 - zip_info.compress_size * 100 / zip_info.file_size)) return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zip_files(files, empty_files, output):\n with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED) as ziph:\n for dest in empty_files:\n info = zipfile.ZipInfo(filename=dest, date_time=(1980, 1, 1, 0, 0, 0))\n info.external_attr = 0777 << 16L # give full access to included file\n ziph.writestr(info, '')\n for (src, dest) in files:\n info = zipfile.ZipInfo(filename=dest, date_time=(1980, 1, 1, 0, 0, 0))\n info.external_attr = 0777 << 16L # give full access to included file\n with open(src, 'r') as fh:\n ziph.writestr(info, fh.read())", "def zipfiles (downloadable, name):\n\n print \"compressing files. almost done.\"\n import zipfile\n for book in downloadable:\n if (os.path.exists(os.path.join(name, book[1]))):\n files = os.listdir(os.path.join(name, book[1]))\n cbz = zipfile.ZipFile(os.path.join(name, name + '-' + book[1] + '.cbz'), 'w')\n for file in files:\n cbz.write(os.path.join(name, book[1],file))\n cbz.close()", "def create_zip_from_files(files: List[Path]) -> Any:\n temp = tempfile.NamedTemporaryFile()\n with zipfile.ZipFile(temp, 'w') as handle:\n for f in files:\n filename = f.name\n handle.write(f, arcname=filename)\n temp.flush()\n return temp", "def pack_zip(output_filename, sources):\n previous_dir = os.getcwd()\n if not isinstance(sources, (list, tuple)) and \\\n isinstance(sources, str):\n sources = [sources]\n zip_ds = zipfile.ZipFile(output_filename, 'w', zipfile.ZIP_DEFLATED)\n for source in sources:\n os.chdir(os.path.dirname(source))\n if os.path.isdir(source):\n for root, dirs, files in os.walk(os.path.basename(source)):\n for file in files:\n zip_ds.write(os.path.join(root, file))\n else:\n zip_ds.write(os.path.basename(source))\n zip_ds.close()\n os.chdir(previous_dir)", "def zip_package(paths: List[Path], fp, compression=zipfile.ZIP_DEFLATED):\n\n with zipfile.ZipFile(\n file=fp, mode=\"w\", compression=compression, compresslevel=9\n ) as z:\n for path in paths:\n (local_path, zip_path) = path\n z.write(filename=str(path[0]), arcname=str(path[1]))", "def _cat_multi_vol_zip(src, dst):\n concat_cmd = \"zip -s 0 {} --out {}\".format(src, dst)\n os.system(concat_cmd)", "def zip_files(file_list, output_path):\n bname = os.path.basename # for efficiency\n with zipfile.ZipFile(output_path, mode='w') as zf:\n # adding all fasta files\n for file_name in file_list:\n zf.write(file_name, bname(file_name))\n return output_path", "def _zip_files(files: Iterable[str], root: str) -> Tuple[bytes, str]:\n zip_data = StringIO()\n files = list(files) # create copy of list also converts generator to list\n with ZipFile(zip_data, \"w\", ZIP_DEFLATED) as zip_file:\n for file_name in files:\n zip_file.write(os.path.join(root, file_name), file_name)\n\n # Fix file permissions to avoid any issues - only care whether a file\n # is executable or not, choosing between modes 755 and 644 accordingly.\n for zip_entry in zip_file.filelist:\n perms = (zip_entry.external_attr & ZIP_PERMS_MASK) >> 16\n new_perms = 0o755 if perms & stat.S_IXUSR != 0 else 0o644\n if new_perms != perms:\n LOGGER.debug(\n \"fixing perms: %s: %o => %o\", zip_entry.filename, perms, new_perms\n )\n new_attr = (zip_entry.external_attr & ~ZIP_PERMS_MASK) | (\n new_perms << 16\n )\n zip_entry.external_attr = new_attr\n\n contents = zip_data.getvalue()\n zip_data.close()\n content_hash = _calculate_hash(files, root)\n\n return contents, content_hash", "def _make_archive(file_list, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in file_list:\n zipf.write(f, os.path.relpath(f, 
root))", "def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n self.package_name + '.zip'\n )\n logger.info('Creating zip file: %s', zip_file)\n\n shutil.make_archive(zip_file.with_suffix(''), 'zip', self.build_directory)\n shutil.move(str(zip_file), self.build_directory)", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def zip_file(backup_objects):\n\n # Get name from date_time\n name_of_zip_file = (get_date(\"%d%m%Y_%H.%S\") + '.zip')\n # put files in zip archiv\n z = zipfile.ZipFile(name_of_zip_file, 'a', zipfile.ZIP_DEFLATED) # create archive\n for i in backup_objects:\n if os.path.isdir(i):\n for root, dirs, files in os.walk(i): # get list of files in folder\n for file in files:\n z.write(os.path.join(root, file)) # Создание относительных путей и запись файлов в архив\n else:\n z.write(i)\n z.close()\n if zipfile.is_zipfile(name_of_zip_file):\n notest_file(\"arckhiving is conplite! Created file\" + name_of_zip_file)\n return name_of_zip_file", "def export_code(file_list, output_file):\r\n if not output_file.endswith('.zip'):\r\n output_file += '.zip'\r\n ofile = output_file\r\n counter = 0\r\n while os.path.exists(ofile):\r\n counter += 1\r\n ofile = output_file.replace('.zip', '_{}.zip'.format(counter))\r\n zipf = zipfile.ZipFile(ofile, mode=\"w\", compression=zipfile.ZIP_DEFLATED)\r\n for f in file_list:\r\n zipf.write(f)\r\n zipf.close()", "def make_zip(self):\n shutil.make_archive(self.name, 'zip', self.name)", "def process_into_zip(input_root, output_root, file_list):\n output_path = os.path.join(output_root, \"UNF_Backup.zip\")\n new_zip = zipfile.ZipFile(output_path, \"w\")\n for backup_id, backup_file in file_list.items():\n if backup_file.is_dir is not True:\n zinfo = backup_file.get_zipinfo()\n data = get_file_data(backup_file, input_root)\n if data is None:\n logging.warning(f\"Unable to find data: {backup_file.file_id} ({backup_file.relative_path})\")\n continue\n else:\n new_zip.writestr(zinfo, data)\n new_zip.close()", "def zip_dir(dir_list, zipfilename):\n\tfilelist = []\n\tflag_regex = False\n\tfor dirname, regex in dir_list:\n\t\tif regex:\n\t\t\tflag_regex = True\n\t\tif os.path.isfile(dirname):\n\t\t\tfilelist.append(dirname)\n\t\telse:\n\t\t\tfor root, dirs, files in os.walk(dirname):\n\t\t\t\tfor name in files:\n\t\t\t\t\tm = re.match(regex, name)\n\t\t\t\t\tif m is not None:\n\t\t\t\t\t\tfilelist.append(os.path.join(root, name))\n\t\t\t\tfor name in dirs:\n\t\t\t\t\tfilelist.append(os.path.join(root, name))\n\tzf = zipfile.ZipFile(zipfilename, \"w\")\n\tfor tar in filelist:\n\t\tif not flag_regex:\n\t\t\tarcname = os.path.basename(dirname) + tar[len(dirname):]\n\t\t\tzf.write(tar, arcname)\n\t\telse:\n\t\t\tzf.write(tar,tar)\n\tzf.close()", "def zip(self):\n global pointer\n global error_flag\n global totalFiles\n while pointer < len(self.files) and ((self.t and not error_flag) or not self.t):\n # Se o modo e' t e a error_flag nao for false entao pode avancar\n # Se o modo nao for t pode avancar sem restricoes\n self.sem.acquire()\n iterator = pointer\n pointer += 1\n self.sem.release()\n if iterator < len(self.files): # Iterator e' 
o ficheiro que deve ser utilizado pela thread\n File = self.files[iterator]\n if os.path.isfile(File): # Ver se o ficheiro existe\n with ZipFile(File + '.zip', 'w') as zipfile:\n zipfile.write(File) # Zip\n self.totalFilesSem.acquire()\n totalFiles += 1\n self.totalFilesSem.release()\n else:\n print \"O ficheiro\", File, \"não existe.\" # Se nao existir, avisa o utilizador\n error_flag = True # Atualiza a sua propria flag", "def make_zip_file(file_folder_path,file_name_list,output_file):\n ffp = file_folder_path\n if ffp is None:\n ffp = \"\"\n else:\n ffp += \"/\"\n with zipfile.ZipFile(output_file, 'w') as zf:\n for file_name in file_name_list:\n fpath = ffp + str(file_name)\n if not os.path.isfile(fpath):\n continue\n file_data = open(fpath,'r').read() \n data = zipfile.ZipInfo(file_name)\n data.compress_type = zipfile.ZIP_DEFLATED\n zf.writestr(data, file_data)", "def create_archive(filelist):\n\t\n\n\ttmp = tempfile.NamedTemporaryFile()\n\t# with tempfile.SpooledTemporaryFile() as tmp:\n\twith zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED) as archive:\n\t\tarcname = './docs/'\n\t\tfor x in filelist:\n\t\t\tfilename = os.path.basename(x[1])\n\t\t\t_file = x[0]\n\t\t\t# make sure we're at the start...\n\t\t\t_file.seek(0)\n\t\t\tarchive.write(_file.name, arcname=os.path.join(arcname, filename))\n\n\t# Reset file pointer\n\ttmp.seek(0)\n\n\treturn tmp\n\n\t\t# Write file data to response\n\t\t# return HttpResponse(tmp.read(), content_type='application/x-zip-compressed')", "def zip_output(directory):\n #directory = client_variables.output_zip_folder\n #create the zip archive\n zip = zipfile.ZipFile('outputs.zip', 'w')\n\n # add all files in specified folder\n for name in glob.glob(directory + '\\\\*'):\n zip.write(name, os.path.basename(name), zipfile.ZIP_DEFLATED)\n zip.close()", "def _get_compressed_file(files, password=None):\n multiple_files = len(files) > 1\n # Replace the data and report type with just `.zip`.\n zipfile = re.sub(r'(_(\\w+))?\\.(\\w+)$', '.zip', files[0].name)\n compression = pyminizip.compress_multiple if multiple_files else pyminizip.compress\n compression([f.name for f in files] if multiple_files else files[0].name, zipfile, password, COMPRESSION_LEVEL)\n return zipfile", "def zip(zipfilename, srcdir): # , recursive=True):\n\tsrcdir = uniformpath(srcdir)\n\trootdir = os.path.dirname(srcdir) # \"...doc/Java\" gives doc\n\trootnameindex = len(rootdir) + 1 # \"...doc/Java\" gives start of \"Java\"\n\twith zipfile.ZipFile(zipfilename, mode=\"w\", compression=zipfile.ZIP_DEFLATED) as z:\n\t\tfor f in allfiles(srcdir):\n\t\t\tz.write(f, f[rootnameindex:])", "def _create_zip_file(self, dest, paths):\n with zipfile.ZipFile(dest, 'w') as zip_file:\n for path in paths:\n zip_file.write(path, os.path.basename(path))", "def compress_files(self):\n archive_file_path = tkinter.filedialog.asksaveasfilename(parent=self,\n defaultextension=\".zip\",\n filetypes=[(\"Zip File\", \"*.zip\")])\n treeview_items = self.files_treeview.get_children()\n if archive_file_path and treeview_items:\n with ZipFile(archive_file_path, \"w\", ZIP_DEFLATED) as archive:\n for row in treeview_items:\n file_path = self.files_treeview.item(row, \"values\")[0]\n file_name = os.path.basename(file_path)\n archive.write(file_path, arcname=file_name)", "def compress_multiple_files(output_fname, filenames):\n try:\n import zlib\n mode = zipfile.ZIP_DEFLATED\n except ImportError:\n mode = zipfile.ZIP_STORED\n try:\n zf = zipfile.ZipFile(output_fname, 'a', mode, allowZip64=True)\n for filename in filenames:\n 
zf.write(filename=filename, arcname=os.path.basename(filename))\n except Exception as e:\n logger.error('Error trying to zip multiple log files: {}'.format(e))\n else:\n zf.close()", "def zip_(input_file, output_file, chunk_size, mode):\n output_file = validator.validate_zip(input_file, output_file)\n process = subprocess.Popen([PBWT_BIN, 'zip', input_file, output_file,\n str(chunk_size), mode], stdout=subprocess.PIPE)\n process_results(str(process.communicate()[0]), input_file, output_file)", "def archive_files(archive_fileprefix, flist, zip_type, reldir, prefix=\"\"):\n def archive_filter(tinfo):\n fdir, fbase = os.path.split(tinfo.name)\n archpath = os.path.join(prefix, os.path.relpath(tinfo.name, reldir))\n tinfo.name = archpath\n return tinfo\n write_type = 'w:'+zip_type\n\n if zip_type:\n archive_filename = '{}.tar.{}'.format(archive_fileprefix, zip_type)\n else:\n archive_filename = '{}.tar'.format(archive_fileprefix)\n\n with tarfile.open(archive_filename, write_type) as out_file:\n for f in flist:\n out_file.add(f, filter=archive_filter)", "def add_to_zip(zipfile, zippath, files):\n \n if zipfile == 'n' or zipfile == '':\n return False\n zippath = os.path.normpath(zippath) + '/'\n if os.path.isdir(zippath):\n z = subprocess.call(['zip', '-0', zippath + zipfile] + files)\n if z != 0:\n print(\"zip returned\", z)\n return False\n else:\n print(\"Could not create zip. Not a valid directory:\", zippath)\n return False\n return True", "def zipdir(path, ziph):\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file),\n arcname=os.path.join(os.path.relpath(root, path), file))", "def create_zip(file_dir):\n curr_path = os.getcwd()\n os.chdir(file_dir)\n zip_name = 'files_archive_{}.zip'.format(\n str(datetime.datetime.now())[5:16].replace(' ', \"_\"))\n files = os.listdir()\n print(\"Creating zipfile from files in...\", file_dir)\n with zipfile.ZipFile(zip_name, 'w') as zip:\n for f in files:\n zip.write(f)\n print(\"Added file: \", f)\n\n zip_path = file_dir + \"/\" + zip_name\n os.chdir(curr_path)\n # double check if path is absolute\n if os.path.isabs(zip_path):\n return zip_path\n else:\n return os.getcwd() + \"/\" + zip_name" ]
[ "0.733347", "0.72406137", "0.7125013", "0.7061359", "0.70390105", "0.7023428", "0.70149624", "0.6967614", "0.6934503", "0.6867805", "0.6803735", "0.67500716", "0.6682049", "0.6674095", "0.6657717", "0.6655772", "0.6642757", "0.6613005", "0.66035706", "0.65900576", "0.64857435", "0.64827025", "0.6472941", "0.6423958", "0.64062876", "0.64011437", "0.63811594", "0.6368326", "0.63207996", "0.63123864" ]
0.7761106
0
Creates a dictionary of features.
def to_feature_dict(self): return {feature:self.get_feature(feature) for feature in self._FEATURES}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_features(self):\n to_return = dict()\n\n to_return['bias'] = 1.0\n to_return['user:' + self.user] = 1.0\n to_return['format:' + self.format] = 1.0\n to_return['token:' + self.token.lower()] = 1.0\n\n to_return['part_of_speech:' + self.part_of_speech] = 1.0\n for morphological_feature in self.morphological_features:\n to_return['morphological_feature:' + morphological_feature] = 1.0\n to_return['dependency_label:' + self.dependency_label] = 1.0\n\n return to_return", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def _get_feature_map():\n return {\n # 3 sparse feature with variable length. Use this if you have a\n # variable number or more than 1 feature value per example.\n \"feature_1\":\n tf.io.VarLenFeature(dtype=tf.int64),\n \"feature_2\":\n tf.io.VarLenFeature(dtype=tf.int64),\n \"feature_3\":\n tf.io.VarLenFeature(dtype=tf.int64),\n \"label\":\n tf.io.FixedLenFeature([1], dtype=tf.int64),\n }", "def getFeatureDicts(self):\n feature_dicts = super().getFeatureDicts()\n feature_dicts.extend([self.__suffixes, self.__prefixes, self.__tags, self.__numbers, self.__caps, self.__caps_no_start])\n return feature_dicts", "def getFeatureDicts(self):\n pass", "def _make_features(self):\n self.features = {}\n self.labels = {}\n for key in ['train', 'cv', 'test']:\n if self.radius is not None:\n feat, label = self._sliding_window(self.images[key], self.masks[key], window_radius=self.radius)\n self.features[key] = feat\n self.labels[key] = label\n else:\n self.features[key] = self.images[key].reshape(-1, 3)\n self.labels[key] = self.masks[key].ravel()", "def __call__(self, *args, **kwargs):\n self.features = dict((k, v()) for k, v in self.features.items())\n return self.features", "def get_features(self):\n features = {}\n for i in self.binaries:\n features[i] = self.binaries[i].features\n return features", "def get_name_to_features(self):\n name_to_features = {\n 'input_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),\n 'label_ids': tf.io.FixedLenFeature([], tf.int64),\n }\n return name_to_features", "def create_feature(example):\n input_ids, label_ids = encode_fn(\n example['tokens'], example['labels'])\n\n features = {\n 'input_ids': int64_feature(input_ids),\n 'label_ids': int64_feature(label_ids)\n }\n\n return features", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}", "def features(self) -> datasets.Features:\n return datasets.Features(\n {\n \"sequence\": datasets.Value(\"string\"),\n \"description\": datasets.Value(\"string\"),\n \"id\": datasets.Value(\"string\"),\n }\n )", "def save_feature(self):\n feature_dict = {\n 'name': self.name,\n 'preActionDes': self.pre_action_des,\n 'inActionDes': self.in_action_des,\n 'postActionDes': self.post_action_des,\n 'actionable': self.actionable,\n 'usable': self.usable,\n 'state': self.state,\n 'featureId': self.feature_id\n }\n return feature_dict", "def features(self) 
-> Union[np.ndarray, Dict[str, np.ndarray]]:\n return self._features", "def example_feature_columns(self):\n example_feature = {}\n feature_names = self.example_features\n if self.sparse_features is None:\n example_feature = {name: tf.feature_column.numeric_column(name, shape=(1,), default_value=0.0)\n for name in feature_names}\n else:\n for name in feature_names:\n if name not in self.sparse_features:\n example_feature.update(\n {name: tf.feature_column.numeric_column(name, shape=(1,), default_value=0.0)})\n else:\n sparse_column = tf.feature_column.categorical_column_with_identity(name, 8)\n sparse_embedding_column = tf.feature_column.embedding_column(\n sparse_column, self.emb_dims)\n example_feature.update({name: sparse_embedding_column})\n return example_feature", "def to_dict(self):\n\n # base features\n dict_ = {\n 'name': self.name,\n 'desc_short': self.desc_short,\n 'desc_long': self.desc_long,\n 'rating_fun': self.rating_fun,\n 'rating_scenic': self.rating_scenic,\n 'rating_aerobic': self.rating_aerobic,\n 'rating_technical': self.rating_technical\n }\n \n # composition features\n for component_name, val in self.composition.items():\n dict_['composition_' + component_name] = val\n\n return dict_", "def features(self):\r\n dict_data = []\r\n my_dict = {\r\n \":IODisc\":\"\",\r\n \"Group\": \"$System\",\r\n \"Comment\": \"\",\r\n \"Logged\": \"No\",\r\n \"EventLogged\": \"No\",\r\n \"EventLoggingPriority\": 0,\r\n \"RetentiveValue\": \"No\",\r\n \"InitialDisc\": \"Off\",\r\n \"OffMsg\": \"\",\r\n \"OnMsg\": \"\",\r\n \"AlarmState\": \"None\",\r\n \"AlarmPri\": 1,\r\n \"DConversion\": \"Direct\",\r\n \"AccessName\": \"HC\",\r\n \"ItemUseTagname\": \"No\",\r\n \"ItemName\": \"\",\r\n \"ReadOnly\": \"No\",\r\n \"AlarmComment\": \"\",\r\n \"AlarmAckModel\": 0,\r\n \"DSCAlarmDisable\": 0,\r\n \"DSCAlarmInhibitor\": \"\",\r\n \"SymbolicName\": \"\"\r\n }\r\n\r\n dict_data.append(my_dict)\r\n\r\n return(my_dict)", "def get_simple_features(self):\n simple_features = {}\n for i in self.binaries:\n simple_features[i] = self.binaries[i].basic_features\n return simple_features", "def feature_dict(sent, i):\n # WORK HERE!!\n return {}", "def get_features(smiles: str, pad: bool = False, **kwargs) -> dict:\n features = preprocessor(smiles, train=False, **kwargs)\n\n if not pad:\n return features\n\n # We have to offset the connectivity array by 1 since we're adding a phantom atom\n # at position 0\n features[\"connectivity\"] += 1\n\n def pad_value(val):\n return np.pad(val, [(1, 0)] + [(0, 0)] * (val.ndim - 1))\n\n return {key: pad_value(val) for key, val in features.items()}", "def get_features(self):\n return []", "def getFeatureDicts(self):\n return [self.data.getWordTagDict(), self.data.tags_trigrams, self.data.tags_bigrams]", "def _create_feature_dict(feature_table_file) -> dict:\n feature_dict = dict()\n with open(feature_table_file, \"r\") as feature_table:\n csv_in = csv.reader(feature_table, delimiter=\"\\t\")\n\n header = [x.lower() for x in next(csv_in)]\n accession_idx = header.index(\"accession\")\n type_idx = header.index(\"type\")\n type_specific_idx = header.index(\"type_specific\")\n description_idx = header.index(\"description\")\n identifier = 2\n for line in csv_in:\n if line[accession_idx] not in feature_dict:\n feature_dict[line[accession_idx]] = dict()\n\n if line[1] not in feature_dict[line[accession_idx]]:\n feature_dict[line[accession_idx]][line[type_idx]] = []\n\n # Insert feature entry\n feature_dict[line[0]][line[1]].append(\n (line[type_specific_idx].split(\",\"), 
line[description_idx], str(identifier))\n )\n identifier += 1\n\n return feature_dict", "def generateFeatures(self, data):\n pass", "def create_new_features(self):\n train = self.train\n \n train['is_context'] = train['context_type'].isin(CONTEXT_TYPE_TEST)\n train['is_context_flow'] = train['listen_type'] * train['is_context']\n \n train['is_listened_context'] = train['is_listened'] * train['is_context']\n train['is_listened_flow'] = train['is_listened'] * train['listen_type']\n train['is_listened_context_flow'] = train['is_listened'] * train['is_context_flow']\n \n for feature in self.categorize_features:\n gby_feat = train.groupby(feature)\n new_features(train, gby_feat, feature, feature in self.listen_type_features, self.context_features, self.flow_features, self.fillna)\n \n # Variable combinations\n for feat1 in self.combo_features1:\n for feat2 in self.combo_features2:\n gby_feat = train.groupby([feat1, feat2])\n name = feat1 + '_' + feat2\n new_features(train, gby_feat, name, feat1 in self.listen_type_features, self.context_features, self.flow_features, self.fillna)", "def observation_features_to_dict(obs_features: ObservationFeatures) -> Dict[str, Any]:\n return {\n \"__type\": obs_features.__class__.__name__,\n \"parameters\": obs_features.parameters,\n \"trial_index\": obs_features.trial_index,\n \"start_time\": obs_features.start_time,\n \"end_time\": obs_features.end_time,\n \"random_split\": obs_features.random_split,\n \"metadata\": obs_features.metadata,\n }", "def feature_list(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___FeatureList]:", "def feat_dict(pos_feat,text):\n dict = {}\n bigrams = ngrams(word_tokenize(text),2)\n trigrams = ngrams(word_tokenize(text),3)\n \n for feat in pos_feat:\n dict[feat]=features(feat,text,bigrams,[],[])\n return dict", "def _parse_features(cls, node: OMNode) -> Dict[str, Dict[str, bool]]:\n features = {}\n for sectname in node:\n section = node[sectname]\n if not isinstance(section, dict) or '_types' not in section:\n continue\n features[sectname] = {}\n for opt in section:\n if not opt.startswith('has'):\n continue\n value = section[opt]\n if not isinstance(value, bool):\n continue\n option = opt[3:]\n features[sectname][option] = value\n return features", "def get_feature_set_PA(tweet):\n features= {}\n return features" ]
[ "0.7605807", "0.75966465", "0.7547767", "0.73763937", "0.7233561", "0.7123075", "0.70871353", "0.7015776", "0.700296", "0.6997553", "0.694234", "0.6940923", "0.6922698", "0.6909729", "0.69082737", "0.6811572", "0.67771375", "0.67759395", "0.6761649", "0.6675939", "0.6673177", "0.6671326", "0.6660682", "0.6616825", "0.65522", "0.65511763", "0.6530181", "0.65122867", "0.6470891", "0.6457479" ]
0.79030627
0
Method to collect Stock data from AlphaVantage. Method collects the Tweets specified for the specified ticker from Tweepy and saves them.
def collect_tweets(ticker): # Authenticate Tweepy credentials auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_SECRET_CONSUMER_KEY) auth.set_access_token(settings.TWITTER_TOKEN_KEY, settings.TWITTER_SECRET_TOKEN_KEY) api = tweepy.API(auth) stock = Stock.objects.get(ticker=ticker) # Search for recent Tweets with the specific ticker collected_tweets = api.search(q=ticker, result_type='recent', count=100) # Iterate over the collected Tweets and save them for tweet in collected_tweets: try: Tweet.objects.create( text=tweet.text, created_at=tweet.created_at, user_id=tweet.user.id, user_screen_name=tweet.user.screen_name, verified=tweet.user.verified, followers_count=tweet.user.followers_count, friends_count=tweet.user.friends_count, favourites_count=tweet.user.favourites_count, retweet_count=tweet.retweet_count, stock=stock, ) except IntegrityError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_stock_data(tickers, save=True):\n prices = pd.DataFrame()\n ts = TimeSeries(key='EY2QBMV6MD9FX9CP', output_format='pandas')\n\n for ticker in tickers:\n successful_grab = False\n ticker_daily_adj = None\n\n while successful_grab is not True:\n try:\n ticker_daily_adj = ts.get_daily_adjusted(ticker, outputsize='full')[0]\n successful_grab = True\n except ValueError:\n print('Waiting for API to let me in')\n time.sleep(10)\n\n ticker_daily_adj.loc[:, '0. ticker'] = ticker\n ticker_daily_adj = ticker_daily_adj[sorted(ticker_daily_adj.columns)]\n\n prices = pd.concat([prices, ticker_daily_adj])\n\n prices.sort_index(inplace=True)\n prices.reset_index(inplace=True)\n prices['date'] = pd.to_datetime(prices['date'])\n if save:\n prices.to_csv('stockdata.csv', index=True)\n\n return prices", "def gatherData():\n\n # connect to database, set up the tweepy API object, and find the next date to search\n\n cnx = sqlite3.connect(DB_FILE)\n api = generateAPI(wait_on_rate_limit=True, wait_on_rate_limit_notify=True, **CREDENTIALS)\n\n nextdate = findNextDate(cnx, FIRSTDATE)\n year = nextdate[:4]\n\n # attempt to scrape box office data\n\n bodata = getTopMovies(BO_ENDPOINT, nextdate, CNT_MOVIES)\n\n if not bodata.empty:\n bodata.to_sql('boxoffice', ENGINE, if_exists='append', index=False)\n print(\"Box Office Data for [{0}] Written to Database\".format(nextdate))\n else:\n raise BOError(\"Error Scraping/Writing Box Office Data for [{0}]\".format(nextdate))\n\n # attempt to collect tweet data\n\n for movie in bodata.title:\n try:\n tweets = searchMovie(api, movie, nextdate, MAX_TWEETS)\n if not tweets.empty:\n tweets.to_sql('tweets', ENGINE, if_exists='append', index=False)\n print(\"Tweets for [{0}] Written to Database\".format(movie))\n else:\n raise TweetError(\"Error Fetching/Writing Tweets for [{0}]\".format(movie))\n except tweepy.error.TweepError:\n raise TweetError(\"Error Fetching/Writing Tweets for [{0}]\".format(movie))\n\n # attempt to collect movie metadata\n\n for movie in bodata.title:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), year)\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), str(int(year)-1))\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n print(\"Movie: [{0}] Could Not be Found via OMDB\".format(movie))\n\n # commit changes and close DB connection\n\n cnx.commit()\n cnx.close()\n\n print(\"\\nAll Data for {0} Successfully Added to the Database!\\n\".format(nextdate))\n return nextdate", "def store_stock_data(stock_name = 'TSLA'):\n stonk = yf.Ticker(stock_name) # gets stock data from yahoo\n hist = stonk.history(period=\"max\") # historical stock prices\n hist.reset_index(inplace=True) # takes the date stamp out of the index column\n hist.rename(columns = {'Date':\"DateTime\"},inplace=True) # Changes the name of the date column\n hist['DateTime'] = pd.to_datetime(hist['DateTime'],utc=True) # Changes the timestamps to UTC\n hist.to_csv('../data/raw/'+stock_name+'_stock_price.csv')\n return", "def collect_all_stock_data(self):\n for stock in self.stocks:\n self.add_stock(stock)", "async def stocks(self, ctx):\n\t\tpass", "def update_data(self):\n data, meta_data = ts.get_daily(symbol=self.stock_ticker, outputsize='full')\n self.data = data\n self.meta_data = meta_data", "def _collect_and_train(self) -> None:\n self.info_process('\\n\\n')\n self.info_process('Performing daily data collection and model training...')\n\n for symbol in Settings.get_symbols(self):\n # 
Interrupt collection if the collection loop was stopped\n if not self._running:\n break\n\n # Revert data to last stable day.\n date_last_collected_for = self.time().now().date()\n # If it's past midnight, move back a day.\n if self.time().get_secs_to_open() < timedelta(hours=9, minutes=30).total_seconds():\n date_last_collected_for -= timedelta(days=1)\n # Move back two market days from the most recent market day.\n date_last_collected_for = self.time().get_prev_mkt_day(date_last_collected_for)\n date_last_collected_for = self.time().get_prev_mkt_day(date_last_collected_for)\n # Remove mongo price data after the stable day.\n self.mongo().remove_price_data_after(symbol, date_last_collected_for, today=self.time().now().today())\n date_rest_available_for = self.time().get_next_mkt_day(date_last_collected_for)\n\n # Collect yesterday's polygon-rest data and train on it.\n if self._train_on_rest_data(symbol, date_rest_available_for):\n self.info_process(f'Trained {symbol} on yesterday\\'s polygon rest data')\n else:\n self.warn_process(f'Invalid {symbol} rest data collected for {date_rest_available_for}. '\n f'Discarding them and attempting to use cached stream data instead')\n if self._train_on_stream_data(symbol, date_rest_available_for):\n self.info_process(f'Trained {symbol} on yesterday\\'s polygon stream data')\n else:\n self.warn_process(f'Invalid {symbol} candles cached for {date_rest_available_for}. '\n f'Could not find valid data to train on yesterday!')\n\n # Load today's polygon-stream data and train on it.\n date_cache_available_for = self.time().get_next_mkt_day(date_rest_available_for)\n if self._train_on_stream_data(symbol, date_cache_available_for):\n self.info_process(f'Trained {symbol} on today\\'s polygon stream data')\n else:\n self.warn_process(f'Invalid {symbol} candles cached for {date_rest_available_for}. 
'\n f'Could not find valid data to train on today!')", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def load_data(self):\n try:\n df = self.live_quote_arg_func(self.tickers)\n for index, ticker in enumerate(self.tickers):\n ticker_info = df.loc[index]\n self.ticker_dict[ticker].append(ticker_info['price'],\n ticker_info['volume'],\n ticker_info['amount'],\n ticker_info['time'])\n except Exception:\n raise ValueError('Polling thread exception')", "def tick_task(*args):\n markets = fetch_markets()\n map(store_tick_data, markets)\n return", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. 
The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def __update_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'w')\n f_tweeted = open(f'{TWEETED}', 'w')\n try:\n f_tweets.write(json.dumps(self.tweets, sort_keys=True, indent=4))\n f_tweeted.write(json.dumps(self.tweeted, sort_keys=True, indent=4))\n finally:\n f_tweets.close()\n f_tweeted.close()", "def get_data_logic():\r\n global input_exchange\r\n global input_symbols\r\n global all_symbols\r\n global input_timeframe\r\n\r\n # create exchange connection\r\n exchange = Exchange(input_exchange)\r\n\r\n # perform check that exchange can grab price data\r\n if exchange.connection.has['fetchOHLCV']:\r\n\r\n # user ticked 'All Symbols?', so includes all symbols in\r\n # exchange_tickers.py for the particular exchange\r\n if all_symbols:\r\n symbol_list = SymbolList(symbols='auto', exchange=exchange)\r\n # user didn't tick 'All Symbols?', so create unpopulated symbol list\r\n else:\r\n symbol_list = SymbolList(exchange=exchange)\r\n # add all symbols user inputted\r\n for s in input_symbols:\r\n symbol_list.input_symbol(s)\r\n\r\n # get auto timeframe and check it is valid\r\n timeframe = Timeframe(timeframe=input_timeframe, exchange=exchange)\r\n while not timeframe.check_timeframe():\r\n timeframe.input_timeframe() # default to asking for input\r\n\r\n print(f\"Pulling data on the {timeframe.tf} timeframe for...\")\r\n print(symbol_list.symbols)\r\n\r\n # get current time in UTC in milliseconds\r\n now = datetime.now().astimezone(pytz.timezone('UTC'))\r\n now = int(now.timestamp()*1000)\r\n\r\n # loop over each symbol and pull new data\r\n for sym in symbol_list.symbols:\r\n # create csv filename and path\r\n file_sym = sym.replace('/', '')\r\n file_sym = file_sym.replace('-', '')\r\n filename = f\"{exchange.name}_{file_sym}_{timeframe.tf}.csv\" # generate filename from given information\r\n csv_path = f\"{exchange.name}/{timeframe.tf}/{filename}\"\r\n\r\n # get most recent price data and append it to existing data\r\n # (if it exists)\r\n price_data = PriceData(exchange=exchange, tf=timeframe.tf,\r\n sym=sym, now=now, path=csv_path)\r\n\r\n # check if price data csv already exists\r\n if price_data.exists():\r\n price_data.get_current()\r\n # get new data as far back as possible if csv does not exist\r\n else:\r\n price_data.get_new()\r\n\r\n # keep updating price_data until current time\r\n price_data.update()\r\n\r\n # write to csv\r\n price_data.write()\r\n\r\n print(\"Finished writing files!\")", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n 
tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def run(self):\n page = self.fetch_data(self.url)\n stock_list = self.pop_stock_list(page)\n self.write_csv(stock_list)", "def run(self):\n new_tweets = pd.DataFrame(\n columns=[\"tweet_id\", \"congress\", \"session\", \"date\", \"vote\"],\n dtype=str,\n )\n for item in self.senate_data[\"vote_summary\"][\"votes\"][\"vote\"]:\n query = (\n \"congress == @self.congress \"\n \"and session == @self.session \"\n \"and date == @item['vote_date'] \"\n \"and vote == @item['vote_number']\"\n )\n\n # If the current vote isn't already processed, then process it\n if self.tweets.query(query).empty:\n try:\n text, party_data, vote_data = self.senate_obj.process_vote(\n item\n )\n status = self.twitter_api.update_status(text)\n # Keep track of new tweets to be reconciled with old\n # tweets later\n new_tweets = new_tweets.append(\n {\n \"tweet_id\": status.id_str,\n \"congress\": self.congress,\n \"session\": self.session,\n \"date\": item[\"vote_date\"],\n \"vote\": item[\"vote_number\"],\n **party_data,\n **vote_data,\n },\n ignore_index=True,\n )\n except Exception as e:\n # Tweet failed for some reason\n logging.error(\"Tweet failed\")\n logging.error(item)\n logging.error(e)\n\n # Only process a limited number of tweets in a single run\n if len(new_tweets) == self.MAX_TWEETS:\n break\n\n if not new_tweets.empty:\n logging.info(f\"Tweeted {len(new_tweets)} new votes\")\n self.__save(self.tweets.append(new_tweets))\n # Function needs to return something to work as a Cloud Function\n return new_tweets[\"tweet_id\"].to_json()\n else:\n return \"{}\" # Empty JSON object", "def __refresh_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'r')\n f_tweeted = open(f'{TWEETED}', 'r')\n\n try:\n self.tweets = json.load(f_tweets)\n self.tweeted = json.load(f_tweeted)\n finally:\n f_tweets.close()\n f_tweeted.close()", "def get_data_from_yahoo():\n try:\n ticker = input('Enter the ticker symbol: ').upper()\n start = dt.datetime(2004, 8, 19)\n end = dt.datetime.today()\n\n df = web.DataReader(ticker, 'yahoo', start, end)\n df.to_csv('stock_data.csv')\n except Exception as e:\n print(e)\n exit()", "def taq_quotes_trades_year_statistics_data(tickers, year):\n\n function_name = taq_quotes_trades_year_statistics_data.__name__\n\n # Create a file to save the info\n file = open('../taq_quotes_trades_year_statistics_data.csv', 'a+')\n file.write('Ticker, avg_quotes, avg_trades, avg_spread\\n')\n\n for ticker in tickers:\n\n taq_data_tools_statistics \\\n .taq_function_header_print_data(function_name, ticker, ticker,\n year, '', '')\n\n dates = taq_data_tools_statistics.taq_bussiness_days(year)\n\n stat = []\n args_prod = iprod([ticker], dates)\n\n # Parallel computation of the statistics. 
Every result is appended to\n # a list\n with mp.Pool(processes=mp.cpu_count()) as pool:\n stat.append(pool.starmap(taq_quotes_trades_day_statistics_data,\n args_prod))\n\n # To obtain the average of the year, I average all the results of the\n # corresponding values (number quotes, trades and avg spread)\n stat_year = np.nanmean(stat[0], axis=0)\n\n # Write data in file\n file.write(f'{ticker}, {stat_year[0]:.0f}, {stat_year[1]:.0f},'\n + f' {stat_year[2]:.2f}\\n')\n\n file.close\n\n return None", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def reqData(self):\r\n #self.reqGlobalCancel()\r\n #self.add_historical(\"Stock('TSLA', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('IBM', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('MSFT', 'SMART', 'USD')\")\r\n self.add_historical(\"Stock('FB', 'SMART', 'USD')\")", "def giveAllHistoricalData(stocksToGet = None):\n if stocksToGet == None:\n stocksToGet = db.STOCK_MAP.keys()\n\n conn = r.connect(\n db = db.DB\n )\n updateThread = threading.Thread(\n target = updateAllHistorical\n )\n\n historicalData = [\n r.table(db.HISTORICAL_TABLE).get(stockName).run(conn)\n for stockName in stocksToGet\n ]\n\n if not db.UPDATING_HISTORICAL:\n db.UPDATING_HISTORICAL = True\n updateThread.start()\n\n return json.dumps(historicalData)", "async def stock(self, ctx, ticker: str):\n symbols = await self.bot.aiojson(\"https://api.robinhood.com/quotes/\"\\\n f\"?symbols={ticker.upper()}\")\n if not symbols:\n await ctx.send(\"Stock not found. 
This stock is probably not tradeable on robinhood.\")\n return\n symbols_result = symbols[\"results\"][0]\n instrument = await self.bot.aiojson(symbols_result[\"instrument\"])\n fundamentals = await self.bot.aiojson(\n f\"https://api.robinhood.com/fundamentals/{ticker.upper()}/\")\n\n current_price = (symbols_result[\"last_trade_price\"] if\n \"last_extended_hours_trade_price\" in symbols_result\n else symbols_result[\"last_extended_hours_trade_price\"])\n diff = Decimal(Decimal(current_price) -\n Decimal(symbols_result[\"previous_close\"]))\n percentage = str(100 * diff / Decimal(current_price))[:6]\n\n if not percentage.startswith(\"-\"):\n percentage = \"+\" + percentage\n\n current_price_string = self.format_currency(current_price)\n diff_string = self.format_currency(diff)\n bid_price_string = self.format_currency(Decimal(symbols_result[\"bid_price\"]))\n ask_price_string = self.format_currency(Decimal(symbols_result[\"ask_price\"]))\n tradeable_string = (\n \":white_check_mark:\" if instrument[\"tradeable\"] else \":x:\")\n\n update_timestamp = parser.parse(symbols_result[\"updated_at\"])\n\n symbol = symbols_result[\"symbol\"]\n change_color = await self.get_stock_change_color(symbol)\n\n embed = discord.Embed(title=f\"{symbol}'s stocks info\",\n color=change_color,\n timestamp=update_timestamp)\n\n embed.add_field(name=\"Name\", value=instrument[\"name\"])\n embed.add_field(name=\"Current Price\", value=current_price_string)\n embed.add_field(name=\"Change from yesterday\", value=f\"{diff_string} ({percentage}%)\")\n embed.add_field(name=\"Bid size\", value=f\"{symbols_result['bid_size']} ({bid_price_string})\")\n embed.add_field(name=\"Ask size\", value=f\"{symbols_result['ask_size']} ({ask_price_string})\")\n embed.add_field(name=\"Current Volume\", value=fundamentals[\"volume\"])\n embed.add_field(name=\"Average Volume\", value=fundamentals[\"average_volume\"])\n embed.add_field(name=\"Tradeable on Robinhood\", value=tradeable_string)\n embed.add_field(name=\"Country\", value=f\":flag_{instrument['country'].lower()}:\")\n\n await ctx.send(embed=embed)", "def tweet(self):\n library = os.path.join(os.path.dirname(__file__),\n \"..//libraries//reuse//\")\n \n ql = QuickList().open(os.path.join(library,\"pool.xls\"))\n \n ql.shuffle()\n \n for r in ql:\n file_loc = os.path.join(library,r[\"file_name\"])\n text = r[\"nice_title\"]\n tags = [\n r['title'],\n str(r['year']),\n \"culture reuse\" \n ]\n \n name, gif_url = self._upload_gif(file_loc)\n\n \n #embed_code = \"<img class='gfyitem' data-id='JoyfulCircularHamster' />\".format(gif_url)\n embed_code = \"<img class='gfyitem' data-id='{0}' />\".format(name)\n \n tumblr_text = embed_code + '<p>{0}</p><p><a href=\"{1}\">get from gfycat</a></p>'.format(text,gif_url)\n \n tumblr_link = self._tumblr(tumblr_text,tags=tags,keyword=name) #video_url=str(file_loc)\n if tumblr_link:\n text += \" {0}\".format(tumblr_link)\n tweets = self._tweet_video(text,file_loc)\n \n break\n \n return tweets", "def collect_twitter_sentiment():\r\n # Open/create a file to append data to\r\n csvFile = open(NAME+'_posts.csv', 'a')\r\n # Use csv writer\r\n csvWriter = csv.writer(csvFile)\r\n # Calling the user function with current parameters\r\n results = twitter.user_timeline(id=NAME, count=TWEET_COUNT)\r\n for tweet in results:\r\n print(tweet.created_at, tweet.text)\r\n csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8')])\r\n return csvFile", "def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in 
api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', '.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)", "def get_data(retrieve = False, start='2019-01-01', comp = False):\r\n if retrieve == True:\r\n tickers = retrieve_sp500()\r\n else:\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n if not os.path.exists('sp500_data'):\r\n os.mkdir('sp500_data')\r\n exchg_close = dt.time(16,0,0,0)\r\n # use todays date if markets have closed.\r\n if dt.datetime.today().time() > exchg_close:\r\n end = dt.datetime.now()\r\n # use yesterdays dates if markets have not yet closed.\r\n else: \r\n end = dt.datetime.now() - dt.timedelta(1)\r\n for ticker in tickers:\r\n # updates data for tickers not currently stored.\r\n if not os.path.exists('sp500_data/{}.csv'.format(ticker)):\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # updates data for tickers that have not been updated today.\r\n elif dt.datetime.fromtimestamp(os.path.getmtime('sp500_data/{}.csv'.format(ticker))).day != dt.datetime.today().day:\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # prints out data that was not and does not need udpating.\r\n else:\r\n print('{} is already saved'.format(ticker))\r\n if comp == True:\r\n compile_data()", "def graph_data(self, ticker):\r\n key = 'GLC0GTVKR51SY1V'\r\n\r\n url = 'https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY&symbol=IBM&apikey=demo'\r\n response = requests.get(url)\r\n string = response.json()\r\n\r\n ticker = string['Meta Data']['2. Symbol']\r\n dic = string['Monthly Time Series']\r\n keys = string['Monthly Time Series'].keys()\r\n key_list = list(keys)\r\n\r\n key_data = []\r\n date_list = []\r\n open_list = []\r\n high_list = []\r\n low_list = []\r\n close_list = []\r\n volume_list = []\r\n\r\n for x in range(len(key_list)-1, 0, -1):\r\n\r\n date = key_list[x]\r\n Open = dic[key_list[x]]['1. open']\r\n High = dic[key_list[x]]['2. high']\r\n Low = dic[key_list[x]]['3. low']\r\n Close = dic[key_list[x]]['4. close']\r\n Volume = dic[key_list[x]]['5. 
volume']\r\n\r\n entry = date + \",\" + Open\r\n key_data.append(entry)\r\n date_list.append(date)\r\n open_list.append(float(Open))\r\n high_list.append(float(High))\r\n low_list.append(float(Low))\r\n close_list.append(float(Close))\r\n volume_list.append(float(Volume))\r\n\r\n date, price = np.loadtxt(reversed(key_data), delimiter=',', unpack=True, converters={0: self.bytes_to_dates})\r\n\r\n # datelist_strs = []\r\n #\r\n # for date in date_list:\r\n # new_date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\r\n # datelist_strs.append(new_date)\r\n\r\n date_objects = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in date_list]\r\n\r\n dictionary = {'Date': date_objects, 'Open': open_list, 'High': high_list, 'Low': low_list, 'Close': close_list,\r\n 'Volume': volume_list}\r\n\r\n df = pd.DataFrame.from_dict(dictionary)\r\n df.set_index('Date', inplace=True)\r\n\r\n self.df = df\r\n self.date = date\r\n self.price = price\r\n self.date_list = date_list\r\n self.generate_graph(ticker)", "def update_binance_data(tickers_intervals):\n for ticker, interval in tickers_intervals:\n try:\n path_to_file = os.path.join(DIR_PATH, interval, f\"{ticker}.csv\")\n csvfile = pd.read_csv(path_to_file, index_col=False, delimiter=\",\")\n starting_date = datetime.utcfromtimestamp(\n csvfile.iloc[-1][\"Open time\"]\n ).strftime(\"%d %b, %Y\")\n print(\n \"Getting historical data for the ticker {} with {} interval starting from {}\".format(\n ticker, interval, starting_date\n )\n )\n\n candlesticks = client.get_historical_klines(\n ticker,\n interval,\n starting_date,\n datetime.now().strftime(\"%d %b, %Y\"),\n limit=1000,\n )\n\n format_date = lambda t: t / 1000\n format_price = lambda p: float(f\"{float(p):.2f}\")\n\n # overriding the last row.\n candlesticks[0][0] = format_date(candlesticks[0][0])\n candlesticks[0][1] = format_price(candlesticks[0][1])\n candlesticks[0][2] = format_price(candlesticks[0][2])\n candlesticks[0][3] = format_price(candlesticks[0][3])\n candlesticks[0][4] = format_price(candlesticks[0][4])\n csvfile.loc[len(csvfile) - 1] = candlesticks[0][:-1]\n for candlestick in candlesticks[1:]:\n candlestick[0] = format_date(candlestick[0])\n candlestick[1] = format_price(candlestick[1])\n candlestick[2] = format_price(candlestick[2])\n candlestick[3] = format_price(candlestick[3])\n candlestick[4] = format_price(candlestick[4])\n csvfile.loc[len(csvfile)] = candlestick[:-1]\n\n csvfile.to_csv(path_to_file, index=False)\n except Exception as e:\n print(e)" ]
[ "0.6157226", "0.61464995", "0.59085757", "0.5862062", "0.5860293", "0.5832759", "0.58293784", "0.57610583", "0.56994617", "0.5661048", "0.56468123", "0.5610657", "0.5573914", "0.5555875", "0.55339915", "0.5507903", "0.5481855", "0.5459223", "0.5448679", "0.5437222", "0.543092", "0.54120666", "0.5393404", "0.5392012", "0.53833705", "0.5372896", "0.53710437", "0.5356475", "0.53548855", "0.53476226" ]
0.78968906
0
Register a callable object.
def register(self, obj): if not callable(obj): raise ValueError(f"object must be callable") obj_name = obj.__name__ if obj_name in self._obj_dict: pass # print(f"{obj_name} is already registered in {self.name}") # raise KeyError(f'{obj_name} is already registered in {self.name}') self._obj_dict[obj_name] = obj return obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register( key, obj ):\n global callbacks\n callbacks[ key ] = obj", "def register(self):\n REGISTERED_FUNCTIONS[self.path] = self", "def _register_callable(\n self,\n f: Any,\n name: str,\n aggregation: bool,\n parameters: List[Tuple[str, type]],\n return_type: type,\n replace: bool = False,\n ):\n lower_name = name.lower()\n if lower_name in self.functions:\n if replace:\n self.function_list = list(\n filter(lambda f: f.name.lower() != lower_name, self.function_list)\n )\n del self.functions[lower_name]\n\n elif self.functions[lower_name] != f:\n raise ValueError(\n \"Registering different functions with the same name is not allowed\"\n )\n\n self.function_list.append(\n FunctionDescription(name.upper(), parameters, return_type, aggregation)\n )\n self.function_list.append(\n FunctionDescription(name.lower(), parameters, return_type, aggregation)\n )\n self.functions[lower_name] = f", "def register_function(self, function, name=None):\n if name is None:\n name = function.__name__\n self.funcs[name] = function", "def register(func):\n PLUGINS[func.__name__] = func\n return func", "def register(func):\n plugins[func.__name__] = func\n return func", "def register(self, callback):\n self.callback = callback", "def register_object(self, obj):\n self.modules.append(obj)", "def register_function(self, *args):\n if len(args) == 1:\n function = args[0]\n try:\n name = function.fact_name\n except AttributeError:\n name = function.__name__\n if name is None:\n raise Exception(\"Function does not have a name\")\n else:\n name, function = args\n self.functions[name] = function", "def register_callback(self, func):\n self.callback = func", "def register_callback(self, obj, button, cmd):\n uid = self._uid(obj, button)\n if uid not in self._callbacks:\n self._callbacks[uid] = {}\n self._callbacks[uid] = cmd", "def register(name):\n def func(cls):\n \"\"\"\n See register\n \"\"\"\n REGISTRY[name] = cls()\n return cls\n return func", "def register_function(self, function, name=None):\n if name:\n self[name] = function\n else:\n self[function.__name__] = function", "def add_hook(self, name: str, callable):\n assert name in self.typenames, \"'{}' not in '{}'\".format(name, self.typenames)\n self.__hooks[name] = callable", "def __call__(self, func: Callable) -> Callable:\n NAME_TO_SYMBOL[self._exported_name] = Symbol.from_callable(\n self._exported_name, func)\n return func", "def register_function(\n self,\n f: Callable,\n name: str,\n parameters: List[Tuple[str, type]],\n return_type: type,\n replace: bool = False,\n ):\n self._register_callable(\n f,\n name,\n aggregation=False,\n parameters=parameters,\n return_type=return_type,\n replace=replace,\n )", "def register(self, callback, func = None):\n if self.conduit is None:\n self.conduit = RPCConnection(self.host, self.port)\n self.conduit.comm(dict(command = 'app-key', key = self.rpckey))\n self.events = deque()\n self.responses = deque()\n self.errors = deque()\n self.callbacks = defaultdict(set)\n self.receiver(start = True)\n if callback in self.callbacks:\n self.callbacks[callback].add(func)\n return\n # Accessing here just initializes an empty set. 
This prevents\n # multiple redundant calls from re-sending the callback-add, which\n # RPM reports an error to because the callback is already defined.\n if func is not None:\n self.callbacks[callback].add(func)\n else:\n self.callbacks[callback]\n self.conduit.send(dict(command = 'callback-add', callback = callback))", "def register(\n self, name: str, opset: OpsetVersion, func: Callable, custom: bool = True\n ) -> None:\n if \"::\" not in name:\n raise ValueError(\n f\"The name must be in the form of 'domain::op', not '{name}'\"\n )\n symbolic_functions = self._registry.setdefault(\n name, _SymbolicFunctionGroup(name)\n )\n if custom:\n symbolic_functions.add_custom(func, opset)\n else:\n symbolic_functions.add(func, opset)", "def register(self, obj, name=None):\n if not name:\n name = obj.__name__\n if name in self._registry:\n raise KeyError(\"Name '%s' has been registered in '%s'!\" %\n (name, self._name))\n\n # logging.vlog(1, \"Registering %s (%s) in %s.\", name, obj, self._name)\n self._registry[name] = obj", "def register(cls, claim_func):\n def _deco(serializer):\n cls._registered.insert(0, (claim_func, serializer))\n return serializer\n return _deco", "def register(self, param):\n\n def decorator(key, value):\n self[key] = value\n return value\n\n if callable(param):\n return decorator(None, param)\n return lambda x: decorator(param, x)", "def register_instance(self, obj):\n self.__instances.append(obj)\n self._proxy_class_methods(obj)", "def register_callback(self, callback):\n if not callable(callback):\n raise ValueError('callback is not a callable')\n\n self.callbacks.append(callback)", "def register(name, func):\n WebSocketRouter.funcmap[name] = func", "def register(self, event, who, callback=None):\n if callback == None:\n callback = getattr(who, event)\n\n self.get_subscribers(event)[who] = callback", "def register():\n PLUGINS = dict()\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n value = func(*args, **kwargs)\n PLUGINS[func.__name__] = func\n return value\n return wrapper\n return decorator", "async def register_event(self, event: str, func: callable, args: dict = None):\n if not callable(func):\n raise TypeError\n await self.subscribe(event, args or {})\n self._events[event.lower()] = func", "def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func", "def register_callback(self, callback: Callable[[], None]) -> None:\r\n print(\"register callback called\")\r\n self._callbacks.add(callback)", "def register_callback(cls, backend, callback):\n cls._callbacks[backend][cls] = callback" ]
[ "0.6782388", "0.6430132", "0.61815214", "0.61594105", "0.6158772", "0.6114551", "0.609576", "0.608244", "0.60799956", "0.60368454", "0.5975155", "0.5974563", "0.5970224", "0.5968892", "0.5927308", "0.5923337", "0.5915322", "0.59058833", "0.5901849", "0.58824235", "0.58619", "0.5851908", "0.58496106", "0.58480823", "0.5843822", "0.5840828", "0.5833644", "0.57897264", "0.57677996", "0.57676136" ]
0.7541181
0
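A minimal, self-contained sketch of how the register method in the record above is typically used. The Registry class name, its constructor, and the example function are assumptions for illustration; only the register logic mirrors the document, and the decorator form works because register returns obj unchanged.

# Minimal stand-in for the registry class the register() method above belongs to
# (class name and constructor are assumed; register() mirrors the record).
class Registry:
    def __init__(self, name):
        self.name = name
        self._obj_dict = {}

    def register(self, obj):
        if not callable(obj):
            raise ValueError("object must be callable")
        self._obj_dict[obj.__name__] = obj  # re-registering a name silently overwrites it
        return obj                          # returning obj is what enables decorator usage

registry = Registry("backbones")

@registry.register
def resnet18():
    return "resnet18 backbone"

assert registry._obj_dict["resnet18"] is resnet18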
Build a callable object from a configuration dict.
def build_from_config( config, registry, default_args=None, match_object_args=False ): if config is None: return None assert isinstance(config, dict) and "name" in config assert isinstance(default_args, dict) or default_args is None name = config["name"] name = name.replace("-", "_") obj = registry.get(name) if obj is None: raise KeyError(f"{name} is not in the {registry.name} registry") print(f"[Loaded {name} path] {inspect.getfile(obj)}") args = dict() if "params" in config: args.update(config["params"]) if default_args is not None: args.update(default_args) if match_object_args: if inspect.isclass(obj): obj_args = inspect.getfullargspec(obj.__init__).args else: obj_args = inspect.getfullargspec(obj).args valid_args = set(args.keys()) & set(obj_args) invalid_args = set(args.keys()) - set(obj_args) args = {k: v for k, v in args.items() if k in valid_args} if len(invalid_args): print(f"[Ignore args] {invalid_args}") if (name in kvt.registry.METRICS._obj_dict.keys()) and ( inspect.isfunction(obj) ): o = functools.partial(obj, **args) else: o = obj(**args) return o
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_callable_kwargs(config: InstConf, default_module: Union[str, ModuleType] = None) -> (type, dict):\n if isinstance(config, dict):\n key = \"class\" if \"class\" in config else \"func\"\n if isinstance(config[key], str):\n # 1) get module and class\n # - case 1): \"a.b.c.ClassName\"\n # - case 2): {\"class\": \"ClassName\", \"module_path\": \"a.b.c\"}\n m_path, cls = split_module_path(config[key])\n if m_path == \"\":\n m_path = config.get(\"module_path\", default_module)\n module = get_module_by_module_path(m_path)\n\n # 2) get callable\n _callable = getattr(module, cls) # may raise AttributeError\n else:\n _callable = config[key] # the class type itself is passed in\n kwargs = config.get(\"kwargs\", {})\n elif isinstance(config, str):\n # a.b.c.ClassName\n m_path, cls = split_module_path(config)\n module = get_module_by_module_path(default_module if m_path == \"\" else m_path)\n\n _callable = getattr(module, cls)\n kwargs = {}\n else:\n raise NotImplementedError(f\"This type of input is not supported\")\n return _callable, kwargs", "def _load_from_callable(name, kwds, converters={}):\n # See if we actually have the named object.\n dotted_name = kwds.pop(name, None)\n if dotted_name is None:\n return None\n obj = resolveDotted(dotted_name)\n # Extract any arguments for the callable.\n obj_kwds = {}\n prefix = name + \"_\"\n for key in kwds.keys():\n if key.startswith(prefix):\n obj_kwds[key[len(prefix):]] = kwds.pop(key)\n # To any type conversion on the arguments.\n for key, value in obj_kwds.iteritems():\n converter = converters.get(key)\n if converter is not None:\n obj_kwds[key] = converter(value)\n # Call it if callable.\n if callable(obj):\n obj = obj(**obj_kwds)\n elif obj_kwds:\n raise ValueError(\"arguments provided for non-callable %r\" % (name,))\n return obj", "def from_callable(cls, func):\n argspec = getargspec(func)\n return cls(\n func.__name__,\n argspec.args,\n argspec.varargs,\n argspec.keywords,\n argspec.defaults,\n )", "def from_config(func):\n\t\n\tdef decorator(filename):\n\t\twith open(filename, 'r') as file_in:\n\t\t\tconfig = json.load(file_in)\n\n\t\t#'**' takes a dict and extracts its contents and passes them as parameters to a function.\n\t\t#returns the intial function with new arguments????\n\t\treturn func(**config)\n\t\n\t## return the decorated input function\n\treturn decorator", "def deserialize(self, val):\n if val is None:\n return None\n\n val = config.decode(val).strip()\n if val == \"\":\n return None\n\n arr = val.split(',')\n fn_type = arr[0].strip()\n if fn_type == '':\n raise ValueError((\"empty fn_type for function config '{}' not\"\n + \" allowed\").format(val))\n del arr[0]\n fn_args = None\n if len(arr) > 0:\n try:\n fn_args = dict((k.strip(), literal_eval(v.strip())) for k, v in\n (pair.split('=') for pair in arr))\n except ValueError:\n raise ValueError((\"malformed function arguments for function\"\n + \" config '{}'\").format(val))\n else:\n fn_args = dict()\n\n return self.tuple_functionconfig(fn_type, fn_args)", "def instantiate_callable_class(builder: IRBuilder, fn_info: FuncInfo) -> Value:\n fitem = fn_info.fitem\n func_reg = builder.add(Call(fn_info.callable_class.ir.ctor, [], fitem.line))\n\n # Set the environment attribute of the callable class to point at\n # the environment class defined in the callable class' immediate\n # outer scope. Note that there are three possible environment\n # class registers we may use. 
This depends on what the encapsulating\n # (parent) function is:\n #\n # - A nested function: the callable class is instantiated\n # from the current callable class' '__call__' function, and hence\n # the callable class' environment register is used.\n # - A generator function: the callable class is instantiated\n # from the '__next__' method of the generator class, and hence the\n # environment of the generator class is used.\n # - Regular function: we use the environment of the original function.\n curr_env_reg = None\n if builder.fn_info.is_generator:\n curr_env_reg = builder.fn_info.generator_class.curr_env_reg\n elif builder.fn_info.is_nested:\n curr_env_reg = builder.fn_info.callable_class.curr_env_reg\n elif builder.fn_info.contains_nested:\n curr_env_reg = builder.fn_info.curr_env_reg\n if curr_env_reg:\n builder.add(SetAttr(func_reg, ENV_ATTR_NAME, curr_env_reg, fitem.line))\n return func_reg", "def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func", "def getCallable():", "def build_evaluator(cfg: CfgNode) -> EvaluatorBase:\n name = cfg[\"name\"]\n evaluator = simple_build(name, cfg, EVALUATORS)\n return evaluator", "def create(cls, obj, body, evaldict, defaults=None,\r\n doc=None, module=None, addsource=True,**attrs):\r\n if isinstance(obj, str): # \"name(signature)\"\r\n name, rest = obj.strip().split('(', 1)\r\n signature = rest[:-1] #strip a right parens \r\n func = None\r\n else: # a function\r\n name = None\r\n signature = None\r\n func = obj\r\n self = cls(func, name, signature, defaults, doc, module)\r\n ibody = '\\n'.join(' ' + line for line in body.splitlines())\r\n return self.make('def %(name)s(%(signature)s):\\n' + ibody, \r\n evaldict, addsource, **attrs)", "def configure_training_functions(self, config: ConfigDict):\n\n if \"lr_schedule\" in config:\n create_lr_schedule: Callable = config[\"lr_schedule\"]\n self.lr_schedule = create_lr_schedule(config)\n else:\n self.lr_schedule = create_cnst_lr_schedule(config)\n\n if \"criterion\" in config:\n self.criterion: Callable = config[\"criterion\"]\n else:\n self.criterion = mse_loss\n\n if \"create_train_state\" in config:\n self.create_train_state: Callable = config[\"create_train_state\"]\n else:\n self.create_train_state = create_basic_train_state\n\n if \"train_step_fn\" in config:\n self.train_step_fn: Callable = config[\"train_step_fn\"]\n else:\n self.train_step_fn = train_step\n\n if \"eval_step_fn\" in config:\n self.eval_step_fn: Callable = config[\"eval_step_fn\"]\n else:\n self.eval_step_fn = eval_step\n\n if \"metrics_fn\" in config:\n self.metrics_fn: Callable = config[\"metrics_fn\"]\n else:\n self.metrics_fn = compute_metrics\n\n self.post_lst: Optional[List[Callable]] = None\n if \"post_lst\" in config:\n self.post_lst = config[\"post_lst\"]", "def build_pfunc(cls, representation):\n if ut.is_str(representation):\n try:\n func = eval(representation)\n except:\n bf = 'cls.build_pfunc('\n af = ')'\n st = ut.parse_enclose_with_counter(representation , before = bf, after = af)\n func = eval(st)\n \n elif ut.is_dico(representation):\n name_func = representation['name_func']\n func = eval(name_func)(**representation)\n \n else:\n raise SystemError(\"build_custom_func can build a function from an \"\n \"object of tye {0}\".format(cls.__class__))\n \n return func", "def callableize(f_or_d):\n return f_or_d.get if isinstance(f_or_d,dict) else f_or_d", "def _from_yaml_to_func(method, params):\r\n prm = dict()\r\n if params is not None:\r\n for key, val in 
params.items():\r\n prm[key] = eval(str(val))\r\n return eval(method)(**prm)", "def _from_yaml_to_func(method, params):\n prm = dict()\n if params is not None:\n for key, val in params.iteritems():\n prm[key] = eval(str(val))\n return eval(method)(**prm)", "def create_from_json(cls, config_json: str) -> 'ResolverOp':\n return cls.create(**json_utils.loads(config_json))", "def from_config(cls, config: dict):\n\n func = cls.deserialize_func(config.pop('func'))\n\n # do not deserialize inverse_func here, it will be done in init method\n scaler = cls(func=func, inverse_func=config.pop('inverse_func'), **cls.deserialize(**config))\n\n setattr(scaler, '_from_config', True)\n\n return scaler", "def from_config(cls, config: Dict[str, Any]) -> \"CosineParamScheduler\":\n assert (\n \"start_value\" in config and \"end_value\" in config\n ), \"Cosine scheduler requires a start_value and a end_value\"\n\n return cls(\n start_value=config[\"start_value\"],\n end_value=config[\"end_value\"],\n update_interval=UpdateInterval.from_config(config, UpdateInterval.STEP),\n )", "def config(conf, ctx, pattern=None, desc=None, cast=None):\n\n def decorator(func):\n fninfo = _fn_get_info(func)\n fninfo.configs.append((conf, ctx, pattern, desc, cast))\n setattr(func, FNINFO_ATTR, fninfo)\n return func\n\n return decorator", "def loader(config_dict, engine): # (Need to match function signature) pylint: disable=unused-argument\n config = configobj.ConfigObj(config_dict)\n return MQTTSubscribeDriver(**config[DRIVER_NAME])", "def __init__(\n self,\n eval_fn: Callable[[Posting], Union[str, None]] = lambda p: None\n ):\n self.eval_fn = eval_fn", "def __init__(self, function: Optional[Callable] = None,\n kwargs: Optional[Dict] = None):\n self.function: Callable = function\n\n if kwargs is None:\n kwargs = dict()\n self.kwargs: Dict[str, Any] = kwargs", "def setup_callable_class(builder: IRBuilder) -> None:\n # Check to see that the name has not already been taken. If so,\n # rename the class. We allow multiple uses of the same function\n # name because this is valid in if-else blocks. 
Example:\n #\n # if True:\n # def foo(): ----> foo_obj()\n # return True\n # else:\n # def foo(): ----> foo_obj_0()\n # return False\n name = base_name = \"{}_obj\".format(builder.fn_info.namespaced_name())\n count = 0\n while name in builder.callable_class_names:\n name = base_name + \"_\" + str(count)\n count += 1\n builder.callable_class_names.add(name)\n\n # Define the actual callable class ClassIR, and set its\n # environment to point at the previously defined environment\n # class.\n callable_class_ir = ClassIR(name, builder.module_name, is_generated=True)\n\n # The functools @wraps decorator attempts to call setattr on\n # nested functions, so we create a dict for these nested\n # functions.\n # https://github.com/python/cpython/blob/3.7/Lib/functools.py#L58\n if builder.fn_info.is_nested:\n callable_class_ir.has_dict = True\n\n # If the enclosing class doesn't contain nested (which will happen if\n # this is a toplevel lambda), don't set up an environment.\n if builder.fn_infos[-2].contains_nested:\n callable_class_ir.attributes[ENV_ATTR_NAME] = RInstance(\n builder.fn_infos[-2].env_class\n )\n callable_class_ir.mro = [callable_class_ir]\n builder.fn_info.callable_class = ImplicitClass(callable_class_ir)\n builder.classes.append(callable_class_ir)\n\n # Add a 'self' variable to the environment of the callable class,\n # and store that variable in a register to be accessed later.\n self_target = add_self_to_env(builder.environment, callable_class_ir)\n builder.fn_info.callable_class.self_reg = builder.read(\n self_target, builder.fn_info.fitem.line\n )", "def get_callable_for_event(name, event_config, context=None):\n\n kwargs = {\n 'context': context,\n 'key': name,\n 'config': event_config,\n }\n\n if \"command\" in event_config:\n f = partial(do_shell, event_config[\"command\"], **kwargs)\n elif \"function\" in event_config:\n f = partial(get_callable_from_string(event_config[\"function\"]), **kwargs)\n elif \"method\" in event_config:\n f = partial(getattr(get_handler_object(event_config['method'][0]), event_config['method'][1]), **kwargs)\n else:\n raise AttributeError(\"%s have a class, method, function or command\" % name)\n\n return f", "def from_callable(\n cls, obj: Callable, gui_options: dict | None = None, **kwargs: Any\n ) -> Container:\n return magic_signature(obj, gui_options=gui_options).to_container(**kwargs)", "def build_transforms(transforms_config: List[Dict[str, Any]]) -> Callable:\n transform_list = [build_transform(config) for config in transforms_config]\n return transforms.Compose(transform_list)", "def build(self, consumer, definition):\n\n try:\n definition = definition.build()\n except exceptions.InvalidRequestDefinition as error:\n # TODO: Find a Python 2.7 compatible way to reraise\n raise exceptions.UplinkBuilderError(\n consumer.__class__, definition.__name__, error)\n\n return CallFactory(\n consumer,\n RequestPreparer(self, definition),\n definition\n )", "def from_config(config: dict):\n pass", "def from_config(cls, config):\n return cls(**config)", "def from_config(cls, config):\n return cls(**config)" ]
[ "0.58034515", "0.57293683", "0.5724343", "0.5703751", "0.5685489", "0.5578687", "0.5514746", "0.54510975", "0.54171664", "0.5389978", "0.53591245", "0.5319346", "0.53167105", "0.5313625", "0.5299541", "0.5285164", "0.5197402", "0.5194518", "0.5183609", "0.51273054", "0.5117182", "0.5114585", "0.51138043", "0.51050735", "0.50938934", "0.50686413", "0.5065824", "0.5058119", "0.50354964", "0.50354964" ]
0.5859784
0
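A short sketch of the config shape that build_from_config in the record above expects ("name" selects the registered object, "params" becomes keyword arguments). The registry contents here are assumptions, and this simplified lookup omits the default_args handling, argument matching, and the metrics special case performed by the real function.

# Assumed registry contents; the real code resolves names via registry.get(name)
# after replacing '-' with '_' in the configured name.
registry = {"resnet_18": lambda num_classes: f"resnet_18(num_classes={num_classes})"}

config = {"name": "resnet-18", "params": {"num_classes": 10}}
name = config["name"].replace("-", "_")
obj = registry[name](**config.get("params", {}))
print(obj)  # -> resnet_18(num_classes=10)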
Initialize a new FFMpeg wrapper object. Optional parameters specify the paths to ffmpeg and ffprobe utilities.
def __init__(self, ffmpeg_path=None, ffprobe_path=None): def which(name): path = os.environ.get('PATH', os.defpath) for d in path.split(':'): fpath = os.path.join(d, name) if os.path.exists(fpath) and os.access(fpath, os.X_OK): return fpath return None if ffmpeg_path is None: ffmpeg_path = 'ffmpeg' if ffprobe_path is None: ffprobe_path = 'ffprobe' if '/' not in ffmpeg_path: ffmpeg_path = which(ffmpeg_path) or ffmpeg_path if '/' not in ffprobe_path: ffprobe_path = which(ffprobe_path) or ffprobe_path self.ffmpeg_path = ffmpeg_path self.ffprobe_path = ffprobe_path if not os.path.exists(self.ffmpeg_path): raise FFMpegError("ffmpeg binary not found: " + self.ffmpeg_path) if not os.path.exists(self.ffprobe_path): raise FFMpegError("ffprobe binary not found: " + self.ffprobe_path) self.hwaccels = [] self.encoders = [] self.decoders = [] self._getcapabilities()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, executable=\"ffprobe\", global_options=\"\", inputs=None):\n super(FFprobe, self).__init__(\n executable=executable, global_options=global_options, inputs=inputs\n )", "def __init__(\n self, executable=\"ffmpeg\", global_options=None, inputs=None, outputs=None\n ):\n self.executable = executable\n self._cmd = [executable]\n\n global_options = global_options or []\n if _is_sequence(global_options):\n normalized_global_options = []\n for opt in global_options:\n normalized_global_options += shlex.split(opt)\n else:\n normalized_global_options = shlex.split(global_options)\n\n self._cmd += normalized_global_options\n self._cmd += _merge_args_opts(inputs, add_input_option=True)\n self._cmd += _merge_args_opts(outputs)\n\n self.cmd = subprocess.list2cmdline(self._cmd)\n self.process = None", "def __init__(self):\n \n app_name = 'FFMPEG_info'\n app_author = 'sksound'\n \n # The package \"appdirs\" allows an OS-independent implementation\n user_data_dir = appdirs.user_data_dir(app_name, app_author)\n if not os.path.exists(user_data_dir):\n os.makedirs(user_data_dir)\n self.config_file = os.path.join(user_data_dir, 'ffmpeg.json')\n \n if not os.path.exists(self.config_file):\n \n # Check if it is in the system path\n try:\n completed_process = subprocess.run('ffmpeg')\n completed_process = subprocess.run('ffplay')\n self.ffmpeg = 'ffmpeg'\n self.ffplay = 'ffplay'\n except FileNotFoundError:\n self.set()\n else:\n with open(self.config_file, 'r') as in_file:\n info = json.load(in_file)\n self.ffmpeg = info['ffmpeg']\n self.ffplay = info['ffplay']", "def start(self) -> None:\r\n self._spawn_ffmpeg()", "def test_ffmpeg_in_path(self) -> None:\n self.assertIsNotNone(which('ffmpeg'))", "def _spawn_ffmpeg(self) -> None:\r\n if self.ffmpeg_proc is not None:\r\n raise RuntimeError('_spawn_ffmpeg called when ffmpeg_proc is '\r\n + f'{self.ffmpeg_proc} (not None)')\r\n\r\n args = ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo',\r\n '-s', f'{self.frame_size[0]}x{self.frame_size[1]}',\r\n '-pix_fmt', 'rgba', '-r', str(self.fps),\r\n '-loglevel', 'quiet',\r\n '-i', 'pipe:0',\r\n '-vcodec', 'h264', '-pix_fmt', 'yuv420p',\r\n '-movflags', '+faststart']\r\n\r\n if self.bitrate > 0:\r\n args.extend(['-b', f'{self.bitrate}k'])\r\n args.extend(['-y', self.outfile])\r\n\r\n create_flags = sp.CREATE_NO_WINDOW if 'nt' in os.name else 0\r\n self.ffmpeg_proc = sp.Popen(args, shell=False, stdout=None, stderr=None,\r\n stdin=sp.PIPE, creationflags=create_flags)", "def ffmpeg(*options):\n\tffmpeg_command = [\"ffmpeg\"] + list(options)\n\tprint(\"Calling FFMPEG:\", \" \".join(ffmpeg_command))\n\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"Calling FFmpeg failed with exit code {exit_code}. CERR: {cerr} . COUT: {cout}\".format(exit_code=exit_code, cerr=str(cerr), cout=str(cout)))", "def main():\n print(\"This is a library for reading video sequences into python via ffmpeg. \")\n print(\"Provides the 'Video_Reader' iterator class. \")\n print(\"Requires ffmpeg be installed. 
\")", "def test_ffprobe_in_path(self) -> None:\n self.assertIsNotNone(which('ffprobe'))", "def __init__(self, filename, check_integrity=False, force_framerate=0):\n super(FMFCapture, self).__init__()\n\n self._mov = fmf.FlyMovie(filename, check_integrity)\n\n self._frame_timestamp = 0.0\n self._frame_number = -1\n if force_framerate > 0:\n self._frame_delay = 1./float(force_framerate)\n else:\n self._frame_delay = None\n\n #CaptureBase attributes\n self.frame_count = self._mov.n_frames\n self.frame_width = self._mov.width\n self.frame_height = self._mov.height\n self.is_video_file = True\n self.filename = filename", "def __init__(self,vid_path:str,num_frames:int=None,vid_flow_direction:str='left'):\n \n self.num_frames=num_frames\n if vid_path.split('.')[-1]=='cine' or vid_flow_direction!='left':\n #This is a cine file or needs to be rotated, convert to mp4\n print('Converting .cine file to mp4 (lossless)')\n #detect platform so we can correct file paths for ffmpeg\n is_win=re.compile('.*[Ww]in.*')\n if is_win.match(sys.platform):\n corrected_vid_path='\"'+vid_path+'\"'\n else:\n #Put escape characters in front of spaces in file name\n corrected_vid_path=[]\n for c in vid_path:\n if c==' ':\n corrected_vid_path.append('\\\\')\n corrected_vid_path.append(c)\n corrected_vid_path=''.join(corrected_vid_path)\n if vid_flow_direction=='up':\n rotate='-vf \"transpose=2\" '\n elif vid_flow_direction=='left':\n rotate=''\n elif vid_flow_direction=='right':\n rotate='-vf \"transpose=2,transpose=2\" '\n else:\n raise Exception(\"vid_flow_direction must be 'up', 'left' or 'right'\")\n if num_frames!=None:\n frames='-frames:v {0} '.format(num_frames)\n else:\n frames=''\n os_handle,new_file_path=tempfile.mkstemp(suffix='.mp4')\n #close file, we don't work with it directly\n os.close(os_handle)\n ffmpeg_command='ffmpeg -y -i {orig_file} {frames}{rotate}-f mp4 -crf 0 {new_file}'.format(orig_file=corrected_vid_path,rotate=rotate,new_file=new_file_path,frames=frames)\n print(ffmpeg_command)\n list(os.popen(ffmpeg_command))\n self.vid_path=new_file_path\n self.delete_file=True\n stats=os.stat(new_file_path)\n if stats.st_size==0:\n raise Exception('File conversion failed, check that ffmpeg is on PATH')\n else:\n #Not a cine\n self.vid_path=vid_path\n self.delete_file=False", "def __init__(self, inFile = None, inData = None, inRate = None):\n \n # Information about FFMPEG\n self.ffmpeg_info = FFMPEG_info()\n \n if inData is not None:\n if inRate is None:\n print('Set the \"rate\" to the default value (8012 Hz).')\n rate = 8012.0\n self.generate_sound(inData, inRate)\n else: \n if inFile is None:\n inFile = self._selectInput()\n if inFile == 0:\n return\n try:\n self.source = inFile\n self.read_sound(self.source)\n except FileNotFoundError as err:\n print(err)\n inFile = self._selectInput()\n self.source = inFile\n self.read_sound(self.source)", "def __init__(self, dirs):\n self.dirs = dirs\n self.fps = self.get_filepaths()", "def set(self):\n \n ffmpeg_installed = misc.askquestion(DialogTitle='FFMPEG Check',\n Question='Is FFMPEG installed?')\n \n if ffmpeg_installed:\n ffmpeg_dir = misc.get_dir(DialogTitle='Please select the directory where FFMPEG (binary) is installed:')\n \n if sys.platform=='win32':\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg.exe')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay.exe')\n else:\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay')\n \n if not os.path.exists(self.ffmpeg):\n print('Sorry, {0} does not 
exist!'.format(self.ffmpeg))\n return\n \n if not os.path.exists(self.ffplay):\n print('Sorry, {0} does not exist!'.format(self.ffplay))\n return\n \n else:\n self.ffmpeg = None\n self.ffplay = None\n \n # Save them to the default config file\n info = {'ffmpeg':self.ffmpeg, 'ffplay': self.ffplay}\n try:\n with open(self.config_file, 'w') as outFile:\n json.dump(info, outFile)\n print('Config information written to {0}'.format(os.path.abspath(self.config_file)))\n except PermissionError as e:\n curDir = os.path.abspath(os.curdir)\n print('Current directory: {0}'.format(curDir))\n print('Error: {0}'.format(e))\n \n return", "def __init__(self, video_folder, output_folder, output_file=None, height=320, width=480,\n sample_every=10, max_workers=32):\n self.video_folder = video_folder\n self.output_folder = output_folder\n self.output_file = output_file\n print(\n f\"Video Preprocessor created with video_folder = {video_folder} , output_folder = {output_folder}, output_file = {output_file}\")\n\n self.height = height\n self.width = width\n self.sample_every = sample_every\n self.max_workers = max_workers\n print(f\"Frames will be created with height = {height} , width = {width} , sample_every = {sample_every}\")", "def __init__(self, outfile, width=512, height=512, framerate=25, videoformat=\"flv\"):\n self.outfile = outfile\n self.width = width\n self.height = height\n self.framerate = framerate\n self.videoformat = videoformat", "def __init__(self, features=\"mfcc\", keywords_split=\"one_shot_evaluation\",\n embed_dir=None, preprocess_func=None, speaker_mode=\"baseline\",\n **kwargs):\n super().__init__(**kwargs)\n\n logging.log(logging.INFO, f\"Creating Flickr audio experiment\")\n\n assert features in [\"mfcc\", \"fbank\"]\n\n assert keywords_split in [\n \"one_shot_evaluation\", \"one_shot_development\", \"background_train\",\n \"background_dev\", \"background_test\"]\n\n assert speaker_mode in [\"baseline\", \"difficult\", \"distractor\"]\n\n if keywords_split == \"background_test\":\n subset = \"test\"\n elif keywords_split == \"background_dev\":\n subset = \"dev\"\n else: # rest fall under train subset\n subset = \"train\"\n\n self.speaker_mode = speaker_mode\n\n # load Flickr 8k keywords set\n keywords_path = os.path.join(\n \"data\", \"splits\", \"flickr8k\", f\"{keywords_split}.csv\")\n keywords_set = file_io.read_csv(keywords_path, skip_first=True)\n\n # load aligned Flickr Audio UIDs and metadata\n faudio_path = os.path.join(\n \"data\", \"splits\", \"flickr8k\", f\"faudio_{keywords_split}.txt\")\n faudio_uids = file_io.read_csv(faudio_path)[0]\n self.faudio_uids = np.asarray(faudio_uids)\n\n self.faudio_metadata = flickr_audio.extract_all_uid_metadata(\n self.faudio_uids)\n\n # load audio paths\n audio_paths = flickr_audio.fetch_audio_paths(\n os.path.join(\"data\", \"processed\", \"flickr_audio\", features, subset),\n self.faudio_uids)\n\n # load audio embedding paths if specified\n if embed_dir is not None:\n embed_paths = []\n for audio_path in audio_paths:\n embed_paths.append(\n os.path.join(\n embed_dir, \"flickr_audio\", f\"{keywords_split}\",\n f\"{os.path.split(audio_path)[1]}.tfrecord\"))\n assert os.path.exists(embed_paths[-1])\n\n self.keywords_set = tuple(np.asarray(x) for x in keywords_set)\n self.audio_paths = np.asarray(audio_paths)\n self.embed_paths = None\n if embed_dir is not None:\n self.embed_paths = np.asarray(embed_paths)\n\n # get unique keywords and keyword class label lookup dict\n self.keywords = sorted(np.unique(self.keywords_set[3]).tolist())\n 
self.keyword_labels = {\n keyword: idx for idx, keyword in enumerate(self.keywords)}\n\n # get unique speakers and valid distractor speakers and labels\n self.speakers = np.unique(self.faudio_metadata[2])\n\n distractor_speaker_labels = {}\n for speaker in self.speakers:\n speaker_idx = np.where(\n self.faudio_metadata[2] == speaker)[0]\n unique_keywords, counts = np.unique(\n self.keywords_set[3][speaker_idx], return_counts=True)\n\n speaker_labels = []\n for keyword, count in zip(unique_keywords, counts):\n if count > 5: # constrain min. training samples per keyword\n speaker_labels.append(keyword)\n\n if len(speaker_labels) < 10: # constrain min. keywords per speaker\n continue\n else:\n distractor_speaker_labels[speaker] = speaker_labels\n\n self.distractor_speaker_labels = distractor_speaker_labels\n\n # get lookup for unique indices per class label\n self.class_unique_indices = {}\n for keyword in self.keywords:\n cls_idx = np.where(self.keywords_set[3] == keyword)[0]\n\n self.class_unique_indices[keyword] = cls_idx\n\n # set speech data as raw paths or extracted embedding paths\n if self.embed_paths is None:\n self.speech_data = self.audio_paths\n else:\n self.speech_data = self.embed_paths\n\n if preprocess_func is not None:\n self.speech_data = preprocess_func(self.speech_data)", "def __init__(self, initial_state=True):\n self.ffmpeg = None\n self.initial_state = initial_state", "def __init__(self, subtitle_zip_files_dir, target_dir, temp_storage_dir):\n self._video_formats = ('avi', 'mp4', 'mov', 'mkv', 'mk3d', 'webm', \\\n 'ts', 'mts', 'm2ts', 'ps', 'vob', 'evo', 'mpeg', 'mpg', \\\n 'm1v', 'm2p', 'm2v', 'm4v', 'movhd', 'movx', 'qt', \\\n 'mxf', 'ogg', 'ogm', 'ogv', 'rm', 'rmvb', 'flv', 'swf', \\\n 'asf', 'wm', 'wmv', 'wmx', 'divx', 'x264', 'xvid')\n\n self.subtitle_zip_files_dir = pathlib.Path(subtitle_zip_files_dir)\n self.target_dir = pathlib.Path(target_dir)\n self.temp_storage_dir = pathlib.Path(temp_storage_dir)", "def __init__(self):\n if not os.path.isfile(CONFIG_FILENAME):\n first_time_run()\n raise SystemExit()\n\n \"\"\"\n Init file is present, read and parse it:\n \"\"\"\n conf = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation(), inline_comment_prefixes='#')\n conf.read(CONFIG_FILENAME)\n \"\"\"\n Process certain paths:\n \"\"\"\n\n path = conf['Paths']\n self.ffc_dir = path['FlatFieldCalDir']\n self.capture_dir = path['CaptureDir']\n self.image_dir = path['ImageDir']\n\n \"\"\"\n Process options\n \"\"\"\n\n self.cal_auto_save = conf.getboolean('Options', 'CalAutoSave', fallback=True)\n self.cal_auto_load = conf.getboolean('Options', 'CalAutoLoad', fallback=True)\n self.sound_on_capture = conf.getboolean('Options', 'SoundOnCapture', fallback=True)\n self.exp_init1 = conf.getint('Options', 'ExpInit1', fallback=100)\n self.exp_init2 = conf.getint('Options', 'ExpInit2', fallback=100)\n self.black_correct = conf.getboolean('Options', 'BlackCorrect', fallback=True)\n # Setup square window, default of full-screen height\n self.tiff_seq_x_window = conf.getint('Options', 'TiffSeqXWindow', fallback=cameras.FRAME_HEIGHT)\n self.tiff_seq_y_window = conf.getint('Options', 'TiffSeqYWindow', fallback=cameras.FRAME_HEIGHT)\n self.tiff_seq_rebin = conf.getint('Options', 'TiffSeqRebin', fallback = 2)", "def __init__(self, filepath: str):\n # parse the path and protocol (e.g. 
file, http, s3, etc.)\n protocol, path = get_protocol_and_path(filepath)\n self._protocol = protocol\n self._filepath = PurePosixPath(path)\n self._fs = fsspec.filesystem(self._protocol)", "def probe(self, fname) -> FFprobeParser:\n\n if not os.path.exists(fname):\n raise FileNotFoundError\n\n p = self._spawn([self.ffprobe_path,\n '-show_format', '-show_streams', '-hide_banner', '-print_format', 'json', fname])\n stdout_data, _ = p.communicate()\n stdout_data = stdout_data.decode(console_encoding, errors='ignore')\n parser = FFprobeParser(stdout_data)\n\n return parser", "def __init__(self, fn, name=\"No name\", labels=None, labels_in_file=False):\n self.filename = fn\n self.name = name\n\n (base, extension) = os.path.splitext(self.filename)\n if extension == \".mp3\":\n try:\n print \"Creating wav from {}\".format(self.filename)\n new_fn = base + '.wav'\n subprocess.check_output(\"lame --decode \\\"{}\\\" \\\"{}\\\"\".format(\n self.filename, new_fn), shell=True)\n self.filename = new_fn\n except:\n print \"Could not create wav from mp3\"\n raise\n\n self.sound = Sndfile(self.filename, 'r')\n self.current_frame = 0\n self.channels = self.sound.channels\n\n if labels is not None and labels_in_file:\n raise Exception(\n \"Must only define one of labels and labels_in_file\")\n if labels_in_file and not LIBXMP:\n raise Exception(\n \"Cannot use labels_in_file without python-xmp-toolkit\")\n if labels_in_file and LIBXMP:\n self.labels = self._extract_labels(fn)\n else:\n self.labels = labels", "def _have_ffmpeg(self):\n from sage.misc.sage_ostools import have_program\n return have_program('ffmpeg')", "def __init__(self, paths):\n Process.__init__(self)\n self.paths = paths", "def __init__(self, path=\"\"):\n super().__init__(\"/run/fio/*\", \"fio\", path)\n\n # fio command-line options\n self.debug = FormattedParameter(\"--debug={}\")\n self.parse_only = FormattedParameter(\"--parse-only\", False)\n self.output = FormattedParameter(\"--output={}\")\n self.bandwidth_log = FormattedParameter(\"--bandwidth-log\", False)\n self.minimal = FormattedParameter(\"minimal\", False)\n self.output_format = FormattedParameter(\"--output-format={}\")\n self.terse_version = FormattedParameter(\"--terse-version={}\")\n self.version = FormattedParameter(\"--version\", False)\n self.fio_help = FormattedParameter(\"--help\", False)\n self.cpuclock_test = FormattedParameter(\"--cpuclock-test\", False)\n self.crctest = FormattedParameter(\"--crctest={}\")\n self.cmdhelp = FormattedParameter(\"--cmdhelp={}\")\n self.enghelp = FormattedParameter(\"--enghelp={}\")\n self.showcmd = FormattedParameter(\"--showcmd={}\")\n self.eta = FormattedParameter(\"--eta={}\")\n self.eta_newline = FormattedParameter(\"--eta-newline={}\")\n self.status_interval = FormattedParameter(\"--status-interval={}\")\n self.readonly = FormattedParameter(\"--readonly\", False)\n self.section = FormattedParameter(\"--section={}\")\n self.alloc_size = FormattedParameter(\"--alloc-size={}\")\n self.warnings_fatal = FormattedParameter(\"--warnings-fatal\", False)\n self.max_jobs = FormattedParameter(\"--max-jobs={}\")\n self.server = FormattedParameter(\"--server={}\")\n self.daemonize = FormattedParameter(\"--daemonize={}\")\n self.client = FormattedParameter(\"--client={}\")\n self.remote_config = FormattedParameter(\"--remote-config={}\")\n self.idle_prof = FormattedParameter(\"--idle-prof={}\")\n self.inflate_log = FormattedParameter(\"--inflate-log={}\")\n self.trigger_file = FormattedParameter(\"--trigger-file={}\")\n self.trigger_timeout = 
FormattedParameter(\"--trigger-timeout={}\")\n self.trigger = FormattedParameter(\"--trigger={}\")\n self.trigger_remote = FormattedParameter(\"--trigger-remote={}\")\n self.aux_path = FormattedParameter(\"--aux-path={}\")\n\n # Middleware to use with fio. Needs to be configured externally prior to calling run()\n self.api = BasicParameter(None, \"POSIX\")\n\n # List of fio job names to run\n self.names = BasicParameter(None)\n self._jobs = {}\n\n # List of hosts on which the fio command will run\n self._hosts = None", "def async_start_ffmpeg(self):\n raise NotImplementedError()", "def __init__(self, input: Union[BinaryIO, str, os.PathLike], skip_frames: bool = False):\n self.start = None\n self.frames = []\n self.end = None\n self.metadata = None\n self.metadata_raw = None\n\n parse(input, {\n ParseEvent.START: lambda x: setattr(self, 'start', x),\n ParseEvent.FRAME: self._add_frame,\n ParseEvent.END: lambda x: setattr(self, 'end', x),\n ParseEvent.METADATA: lambda x: setattr(self, 'metadata', x),\n ParseEvent.METADATA_RAW: lambda x: setattr(self, 'metadata_raw', x)},\n skip_frames)", "def __init__(self, device='/dev/video0', output_filename='/dev/null',\n width=320, height=240, framerate=30,\n window_title='webcam', image_controls=None,\n ):\n # Store params\n self.device = device\n self.output_filename = output_filename\n self.width = width\n self.height = height\n self.framerate = framerate\n self.window_title = window_title\n \n if self.output_filename is None:\n self.output_filename = '/dev/null'\n \n # Image controls\n self.image_controls = {\n 'gain': 3,\n 'exposure': 20,\n 'brightness': 40,\n 'contrast': 50,\n 'saturation': 69,\n 'hue': 0,\n 'white_balance_automatic': 0,\n 'gain_automatic': 0,\n 'auto_exposure': 1, # flipped\n }\n if image_controls is not None:\n self.image_controls.update(image_controls)\n \n self.read_stderr = None\n self.ffplay_stderr = None\n self.ffplay_stdout = None\n \n self.ffplay_proc = None\n self.read_proc = None\n self.tee_proc = None", "def __init__(self, dataPath, transformImage=None):\r\n self.dataPath = dataPath\r\n self.transformImage = transformImage\r\n self.videos = sorted(os.listdir(self.dataPath))\r\n self.queryDir = 'cast'\r\n self.candDir = 'candidates'\r\n self.len = len(self.videos)" ]
[ "0.6561802", "0.64016914", "0.6363934", "0.5849916", "0.58092856", "0.573448", "0.5698482", "0.5652191", "0.56246364", "0.55915654", "0.54411674", "0.5407398", "0.53775966", "0.5373673", "0.5371317", "0.53640306", "0.52425927", "0.5226642", "0.5160404", "0.514741", "0.5144468", "0.5111974", "0.51079077", "0.5083627", "0.5054557", "0.4994342", "0.499169", "0.49837944", "0.4976582", "0.49761415" ]
0.80697465
0
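An illustrative instantiation of the FFMpeg wrapper from the record above. The explicit binary paths are assumptions about the local system; with no arguments the constructor falls back to locating "ffmpeg" and "ffprobe" on PATH and raises FFMpegError if either binary cannot be found.

# Paths below are assumed; omit them to rely on PATH lookup instead.
ff = FFMpeg(ffmpeg_path="/usr/local/bin/ffmpeg",
            ffprobe_path="/usr/local/bin/ffprobe")
print(ff.hwaccels, ff.encoders, ff.decoders)  # presumably filled in by _getcapabilities() (not shown in the record)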
Convert the source media (infile) according to the specified options (a list of ffmpeg switches as strings) and save the result to outfile. convert() returns a generator that needs to be iterated to drive the conversion process. The generator periodically yields the timecode of the currently processed part of the file (i.e. the second of the content the conversion is currently at). The optional timeout argument specifies how long the operation may block in case ffmpeg gets stuck and doesn't report back. See the documentation in Converter.convert() for more details about this option. >>> conv = FFMpeg().convert('test.ogg', '/tmp/output.mp3', ... ['acodec libmp3lame', 'vn'])
def convert(self, infile, outfile, opts, timeout=10, preopts=None, postopts=None): if os.name == 'nt': timeout = 0 if not os.path.exists(infile): raise FFMpegError("Input file doesn't exist: " + infile) cmds = [self.ffmpeg_path] if preopts: cmds.extend(preopts) cmds.extend(['-i', infile]) # Move additional inputs to the front of the line for ind, command in enumerate(opts): if command == '-i': cmds.extend(['-i', opts[ind + 1]]) del opts[ind] del opts[ind] cmds.extend(opts) if postopts: cmds.extend(postopts) cmds.extend(['-y', outfile]) if timeout: def on_sigalrm(*_): signal.signal(signal.SIGALRM, signal.SIG_DFL) raise Exception('timed out while waiting for ffmpeg') signal.signal(signal.SIGALRM, on_sigalrm) try: p = self._spawn(cmds) except OSError: raise FFMpegError('Error while calling ffmpeg binary') yielded = False buf = '' total_output = '' pat = re.compile(r'time=([0-9.:]+) ') while True: if timeout: signal.alarm(timeout) ret = p.stderr.read(10) if timeout: signal.alarm(0) if not ret: # For small or very fast jobs, ffmpeg may never output a '\r'. When EOF is reached, yield if we haven't yet. if not yielded: yielded = True yield 10 break try: ret = ret.decode(console_encoding) except UnicodeDecodeError: try: ret = ret.decode(console_encoding, errors="ignore") except: pass total_output += ret buf += ret if '\r' in buf: line, buf = buf.split('\r', 1) tmp = pat.findall(line) if len(tmp) == 1: timespec = tmp[0] if ':' in timespec: timecode = 0 for part in timespec.split(':'): timecode = 60 * timecode + float(part) else: timecode = float(tmp[0]) yielded = True yield timecode if timeout: signal.signal(signal.SIGALRM, signal.SIG_DFL) p.communicate() # wait for process to exit if total_output == '': raise FFMpegError('Error while calling ffmpeg binary') cmd = ' '.join(cmds) if '\n' in total_output: line = total_output.split('\n')[-2] if line.startswith('Received signal'): # Received signal 15: terminating. raise FFMpegConvertError(line.split(':')[0], cmd, total_output, pid=p.pid) if line.startswith(infile + ': '): err = line[len(infile) + 2:] raise FFMpegConvertError('Encoding error', cmd, total_output, err, pid=p.pid) if line.startswith('Error while '): raise FFMpegConvertError('Encoding error', cmd, total_output, line, pid=p.pid) if not yielded: raise FFMpegConvertError('Unknown ffmpeg error', cmd, total_output, line, pid=p.pid) if p.returncode != 0: raise FFMpegConvertError('Exited with code %d' % p.returncode, cmd, total_output, pid=p.pid) return outfile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert(from_: Path,\n to_: Path,\n *,\n force: bool = False) -> None:\n if not from_.exists():\n raise FileNotFoundError(f\"'{from_}' doesn't exist\")\n if to_.exists():\n if not force:\n raise exceptions.FileEvenExistsError(f\"'{to_}' even exists\")\n if not (is_video(from_) and is_video(to_)):\n raise exceptions.WrongExtensionError(\n f\"'{from_.suffix}' or '{to_.suffix}' is wrong extension\")\n\n logger.debug(f\"Converting {get_info(from_)}\")\n\n try:\n ff = ffmpy.FFmpeg(\n inputs={from_: None},\n outputs={to_: None}\n )\n\n ff.run()\n except Exception as e:\n logger.error(f\"{e}\\n while converting '{from_}' file\")\n raise\n\n logger.debug(f\"Converting {get_info(from_, to_)} completed\")", "def convert_files(enumerated_src_file):\n i, src_file = enumerated_src_file\n src_file = src_file.strip()\n file_extension, acodec, quality = audio_codec()\n\n dst_file = '.'.join(src_file.split('.')[:-1]) + file_extension\n sys.stdout.write(str(i + 1) + ': ' + src_file + ' -> ' + dst_file + '\\n')\n subprocess.call(['ffmpeg', '-i', src_file, '-vn', '-acodec',\n acodec, '-aq', quality, dst_file, '-loglevel', 'quiet'])\n return src_file", "def convert(fname_src, verbose=False):\n if not os.path.isfile(fname_src):\n raise IOError('File not found: %s' % fname_src)\n\n # File names.\n b, e = os.path.splitext(fname_src)\n fname_dst = b + '.m4a'\n\n # Build command.\n cmd = 'ffmpeg -y -i \"%s\" \"%s\"' % (fname_src, fname_dst)\n\n t0 = time.time()\n std_out, std_err = run_cmd(cmd)\n dt = time.time() - t0\n\n if dt < 0.01:\n raise Exception('Problem processing file: %s %s %s %s' % (fname_src, std_out, std_err, cmd))\n\n if std_out.lower().find('error') >= 0:\n raise Exception('Problem processing file: %s %s %s %s' % (fname_src, std_out, std_err, cmd))\n\n # Done.\n return fname_dst", "def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. 
if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) == 0:\n\t\treturn [file_b]\n\telse:\n\t\treturn files_b", "def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin", "def convert (self, lossless=False):\n self._has_errors = False\n if self._progress:\n max_val = 0\n for root, dirs, files in os.walk(self._in_dir):\n max_val += len(files)\n self._bar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar()],\n maxval=max_val).start()\n pool = multiprocessing.Pool()\n command = CONVERT_TO_JP2_LOSSY\n if lossless:\n command = CONVERT_TO_JP2_LOSSLESS\n for root, dirs, files in os.walk(self._in_dir):\n out_rel_path = os.path.relpath(root, self._in_dir)\n out_full_path = os.path.abspath(\n os.path.join(self._out_dir, out_rel_path))\n try:\n os.mkdir(out_full_path)\n except OSError:\n # It is not an error for the directory to already exist.\n pass\n for name in files:\n basename = os.path.splitext(name)[0]\n in_file = os.path.join(root, name)\n base_out_file = os.path.join(out_full_path, basename)\n tiff_file = '%s.tif' % base_out_file\n jp2_file = '%s.jp2' % 
base_out_file\n if self._force or not(os.path.isfile(jp2_file)):\n params = (in_file, tiff_file, jp2_file, command)\n pool.apply_async(self._convert, params,\n callback=self._result_callback)\n elif self._progress:\n self._bar.update(self._bar.currval + 1)\n pool.close()\n pool.join()\n if self._progress:\n self._bar.finish()\n return not(self._has_errors)", "def transcode(path, outpath):\n\n needs_transcode = determine_transcode(path)\n logger.info(f\"Transcoding {path} to {outpath}...\")\n\n cmd = [\n \"ffmpeg\", \"-y\",\n \"-i\", path,\n \"-an\",\n \"-metadata:s\", \"handler_name=tator\",\n \"-vcodec\", \"libx264\",\n \"-g\", \"25\",\n \"-preset\", \"fast\",\n \"-pix_fmt\", \"yuv420p\",\n \"-vf\", \"pad=ceil(iw/2)*2:ceil(ih/2)*2\",\n \"-movflags\",\n \"faststart+frag_keyframe+empty_moov+default_base_moof\",\n \"-tune\", \"fastdecode\",\n ]\n\n if needs_transcode[1]:\n #Resize to 720p\n cmd.extend([\"-vf\", \"scale=-2:720\"])\n\n cmd.append(outpath)\n logger.info('ffmpeg cmd = {}'.format(cmd))\n subprocess.run(cmd, check=True)\n logger.info(\"Transcoding finished!\")", "def convert(self):\n #lame --mp3input --silent -h -b BITRATE SOURCE TARGET\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n msg('command', command)\n error = check_call(command)\n if error != 0:\n raise TaskError(subprocess.CalledProcessError)\n self.success = True", "def main():\n convert(\"env_100000.mp4\", TargetFormat.GIF)", "def main_convert():\n\n verbose = True\n\n # Build parser.\n parser = argparse.ArgumentParser()\n\n parser.add_argument('fname_pattern', action='store', help='File name pattern')\n parser.add_argument('-R', '--recursive', action='store_true', default=True,\n help='Search several subdirectories')\n\n # Run parser, extract arguments.\n args = parser.parse_args()\n\n # List of files.\n pattern = os.path.normpath(unicode(args.fname_pattern))\n\n if os.path.isdir(pattern):\n pattern = os.path.join(pattern, '*')\n fname_list = glob.glob(pattern)\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n else:\n fname_list = glob.glob(pattern)\n\n to_be_removed = []\n for f in fname_list:\n if os.path.isdir(f):\n to_be_removed.append(f)\n\n for f in to_be_removed:\n fname_list.remove(f)\n\n # Do the work.\n num_files = len(fname_list)\n for k, f_src in enumerate(fname_list):\n f_src = os.path.abspath(f_src)\n\n b_src, e = os.path.splitext(f_src)\n\n folder = os.path.basename(os.path.dirname(f_src))\n if (e == '.mp3' or e == '.wma' or e == '.wav' or e == '.aiff') and b_src != 'tmp' and folder != '.audio_convert':\n\n if verbose:\n try:\n print('%3d/%d: [%s -> .m4a] %s' % (k, num_files, e, os.path.basename(b_src)))\n except Exception as e:\n val = repr(f_src)\n raise Exception('Problem processing file: %s' % val)\n\n # Temporary working copy.\n path_work = os.path.dirname(f_src)\n f_tmp_src = os.path.join(path_work, 'tmp' + e)\n shutil.copy(f_src, f_tmp_src)\n\n # Transcode file format.\n f_tmp_dst = convert(f_tmp_src, verbose=verbose)\n\n # Finish.\n b_tmp_dst, e_dst = os.path.splitext(f_tmp_dst)\n\n f_dst = b_src + e_dst\n if os.path.isfile(f_dst):\n os.remove(f_dst)\n os.rename(f_tmp_dst, f_dst)\n\n if os.path.isfile(f_tmp_src):\n os.remove(f_tmp_src)\n\n if os.path.isfile(f_dst):\n 
move_processed_file(f_src)\n\n # Done.", "def convert_to_mp4(filepath=''):\n print \"Converting \" + filepath + \" to mp4...\"\n base = os.path.splitext(filepath)\n basename = base[0]\n subprocess.call([\n 'convert',\n '-coalesce',\n '-background',\n 'white',\n filepath,\n basename + '%05d.png'\n ])\n\n frame_rate = get_frame_rate(filepath)\n print \"Using frame rate of \" + frame_rate\n\n # avconv -r 8 -i frame%02d.png -qscale 4 test.mp4\n # convert frames to avi\n subprocess.call([\n 'avconv',\n '-r',\n frame_rate,\n '-i',\n basename + '%05d.png',\n '-qscale',\n '4',\n '-b:a',\n '192k',\n '-y',\n '-loglevel',\n 'quiet',\n '-vf',\n 'scale=trunc(iw/2)*2:trunc(ih/2)*2',\n basename + '.mp4'\n ])\n\n # clean up\n for fl in glob.glob(basename + '*png'):\n os.remove(fl)\n return basename + '.mp4'", "def convert_to_ogg(self, path, filename):\n\n codec = \"libvorbis\"\n ogg_filename = filename + \".ogg\"\n\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-aq\", \"60\",\n \"-vn\",\n \"-ac\", \"2\",\n ogg_filename\n ]\n\n return command", "def gifmp4converter(path,fpsOut):\n# import moviepy.editor as mp\n \n print(\"=========================================\")\n print(\"GIF-MP4 Converter Started!\")\n\n clip = mp.VideoFileClip(path)\n #Get output fps\n fpsIn = int(clip.fps)\n fps=fpsOut\n if fpsOut != fpsIn:\n print(\"Conflict in fps! \\n\",\n \"[0] Use fps of input file;\\n\",\n \"[1] Use desired fps w/o speedup;\\n\",\n \"[2] Use desired fps w/ speedup:\")\n k = input('Input your selection: ')\n if k == 2:\n sf = fpsOut/fpsIn\n fps = fpsOut \n clip =clip.fx(mp.vfx.speedx, sf)\n elif k == 0:\n fps = fpsIn\n \n# Converting formats\n if path.endswith('.gif'):\n pathout = path[:-4]+'_cv'+'.mp4'\n clip.write_videofile(pathout,fps=fps,codec='libx264', bitrate='32 M',preset='ultrafast')\n elif path.endswith('.mp4'):\n pathout = path[:-4]+'_cv'+'.gif'\n clip.write_gif(pathout,fps=fps)\n clip.reader.close()# To fix handel error problem\n print(\"=========================================\")\n print(\"GIF-MP4 Converter Done!\")", "async def convert(self, ctx: Context, argument: str) -> ConverterOutputT:\n raise NotImplementedError", "def convert(inputpath, targetformat):\n outputpath = os.path.splitext(inputpath)[0] + targetformat\n print(\"converting {0} to {1}\".format(inputpath, outputpath))\n\n reader = imageio.get_reader(inputpath)\n fps = reader.get_meta_data()['fps']\n\n writer = imageio.get_writer(outputpath, fps=fps)\n for i, im in enumerate(reader):\n sys.stdout.write(\"\\rframe {0}\".format(i))\n sys.stdout.flush()\n writer.append_data(im)\n print(\"Finalizing...\")\n writer.close()\n print(\"Done.\")", "def to_audio(self, _in, _out, bitrate, file_format):\n\n # Default output parameter\n # If not current directory, append '/'\n if os.path.isdir(_out):\n _out = '' if _out == '.' else _out + '/'\n _out += self.get_name_from_path(_in,\n replace=True) + '.' 
+ file_format\n _out = _out.replace('//', '/')\n self.out = _out\n\n # File format unchecked for single inputs\n if not check_is_video(_in):\n msg = \" is not a supported media type\"\n self.abort_conversion(\n self.get_name_from_path(_in) + msg)\n\n \"\"\"\n else:\n base_name = os.path.basename(_out)\n ext = os.path.splitext(base_name)[1]\n _out = _out.replace(ext, '.mp3')\n \"\"\"\n commands = ['ffmpeg', '-i', _in,\n '-vn', '-ar', '44100',\n '-ac', '2', '-ab',\n bitrate, _out]\n try:\n self.run_convert_commands(commands)\n except FileNotFoundError as er:\n res = require_ffmepg()\n\n if not res:\n self.abort_conversion(\"Dependecy not installed.\")", "def transcode(self) -> None:\n # Get source mediainfo to use in validation\n source_media_info = self.get_media_info(self.source)\n\n # Common ffmpeg flags\n ff = FFMPEG(overwrite=True, loglevel='repeat+level+info')\n # Init source file\n ff < SourceFile(self.source)\n # Scaling\n fc = ff.init_filter_complex()\n fc.video | Scale(**TRANSCODING_OPTIONS[SCALE]) | fc.get_video_dest(0)\n\n # set group of pixels length to segment size\n gop = math.floor(source_media_info[VIDEO_FRAME_RATE] * GOP_DURATION)\n # preserve source audio sampling rate\n arate = source_media_info[AUDIO_SAMPLING_RATE]\n # preserve original video FPS\n vrate = source_media_info[VIDEO_FRAME_RATE]\n # codecs, muxer and output path\n\n cv0 = VideoCodec(\n gop=gop,\n vrate=vrate,\n **TRANSCODING_OPTIONS[VIDEO_CODEC])\n ca0 = AudioCodec(\n arate=arate,\n **TRANSCODING_OPTIONS[AUDIO_CODEC])\n out0 = Muxer(self.destination, format='mp4')\n\n # Add output file to ffmpeg\n ff.add_output(out0, cv0, ca0)\n\n # Run ffmpeg\n self.run(ff)\n\n # Get result mediainfo\n dest_media_info = self.get_media_info(self.destination)\n\n # Validate ffmpeg result\n self.validate(source_media_info, dest_media_info)", "def list_files_to_convert():\n for root, dirs, files in os.walk(video_dir):\n file_list = [name for name in files if not name.endswith('.mp3')]\n for name in file_list:\n filepath = os.path.join(root, name)\n media_info = MediaInfo.parse(filepath, library_file=dll_path)\n for track in media_info.tracks:\n if 'Audio' in track.track_type:\n # print(track.track_type, track.bit_rate)\n # print(filepath, \"Is an Audio/Video file, and should be converted because a sound track is found\")\n yield dict(path=filepath, info=media_info)", "def tiff2mp4(path):\n video = tifffile.imread(path)\n nFrames, h,w = video.shape\n fps = int(input('Input desired output fps:'))\n # dur=1/fps \n pathout =path[:-4]+'_'+str(fps)+'.mp4' \n # pathout2 =path[:-4]+'_St.tif'\n codec = cv2.VideoWriter_fourcc(*'H264')\n out = cv2.VideoWriter(pathout, codec , fps, (w, h))\n print(\"---------------------------------------------\")\n print('Converting Tiff stack to the movie') \n for i in tqdm.tqdm(range(nFrames)): \n img=video[i] \n out.write(img)\n out.release()\n cv2.destroyAllWindows()\n print(\"==============================================\")\n print(\"MP4 convertion Done!\")", "def video_to_gif(input_file, output_file,\n start_time: Union[str, int] = 0, duration=0,\n overwrite=True, open_output=False):\n start_time = f'-ss {start_time} ' if start_time else ''\n duration = f'-t {duration} ' if duration else ''\n overwrite = f'-y ' if overwrite else '-n '\n stream = os.popen('ffmpeg '\n '-filter_complex \"[0:v] fps=12,scale=w=480:h=-1,split [a][b];[a] '\n 'palettegen=stats_mode=single [p];[b][p] paletteuse=new=1\" '\n f'{overwrite}'\n f'{start_time} {duration} '\n f'-i \"{input_file}\" \"{output_file}\"')\n 
stream.read()\n\n # Open output file\n if open_output:\n os.popen(f'xdg-open \"{Path(output_file).resolve()}\"')", "def convert_multiple(self, video_files, out, brate, _format):\n\n for video in video_files:\n self.to_audio(os.path.abspath(video),\n out, brate, _format)", "def convert_file_to_mp4(from_: Path,\n to_: Path = None) -> None:\n if from_.suffix == '.mp4':\n os.rename(from_, to_)\n logger.info(f\"{from_} is even a mp4 file, move it to destination\")\n return\n\n if to_ is not None and to_.suffix != '.mp4':\n logger.error(f\"Destination file must have .mp4 extension, \"\n f\"but '{to_.suffix}' found in '{to_}'\")\n return\n\n to_ = to_ or change_suffix_to_mp4(from_)\n try:\n convert(from_, to_)\n except Exception:\n pass\n else:\n # move processed video\n os.rename(from_, CONVERTED_VIDEOS_FOLDER / from_)\n logger.debug(\n f\"Converted successfully, source file {short_filename(from_, 8)} \"\n f\"moved to {CONVERTED_VIDEOS_FOLDER}\"\n )", "def convert_video_path_and_save(video_path, output_path=\"output.mp4\", temp_folder = \"./temp\",\n frame_frequency=24, image_reducer=100, fontSize=10, spacing=1.1, maxsize=None, chars=\" .*:+%S0#@\",\n logs=False, processes=4, progress_tracker=None):\n\n if logs:\n start_time = time.time()\n print (\"Converting video...\")\n \n # set up a capture temporarily so we can grab some basic info about it\n capture = cv2.VideoCapture(video_path)\n if not capture.isOpened():\n print (\"Could not read video. Please enter a valid video file!\")\n exit(0)\n\n fps = capture.get(cv2.CAP_PROP_FPS)\n bitrate = int(capture.get(cv2.CAP_PROP_BITRATE))\n total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n frames_included = int(total_frames / frame_frequency)\n # total_frames / fps gives us our video duration.\n video_duration = total_frames / fps\n # frames included / video duration gives new fps\n new_fps = (total_frames / frame_frequency) / video_duration\n\n capture.release()\n\n # First, we grab all the frames we need and store them in a temp folder\n # After that, we convert all the image frames in the temp folder, and save them back in the temp folder\n # Then, we write them to video and save to disk\n # To utilize mutli processing, we separate grabbing frames and converting the frames into batches\n\n while os.path.isdir(temp_folder):\n temp_folder += \"_\"\n temp_folder += \"/\"\n os.mkdir(temp_folder)\n\n # initial setup\n # we divide our work into batches\n batches = processes\n frames_per_batch = int(total_frames / batches / frame_frequency)\n if progress_tracker is None:\n progress_tracker = Value(\"f\", 0, lock=True)\n # progress: saved frames + converted frames + written frames\n progress_step = 100 / (frames_included * 3)\n\n # grab the frames, and write to separate batch folders\n save_frames_processes = []\n for batch in range(batches):\n starting_frame = batch * frames_per_batch * frame_frequency\n batch_folder = temp_folder + str(batch) + \"/\"\n os.mkdir(batch_folder)\n args = (\n starting_frame,\n starting_frame + frames_per_batch * frame_frequency,\n video_path,\n batch_folder,\n frame_frequency,\n logs,\n progress_tracker,\n progress_step\n )\n p = Process(target=_save_frames, args=args)\n p.daemon = True\n p.start()\n save_frames_processes.append(p)\n for p in save_frames_processes:\n p.join()\n\n # convert all the frames in each batch folder\n convert_processes = []\n for batch in range(batches):\n batch_folder = temp_folder + str(batch) + \"/\"\n args = (\n batch_folder,\n frames_per_batch,\n image_reducer,\n fontSize, spacing, 
maxsize, chars,\n logs, progress_tracker, progress_step\n )\n p = Process(target=_convert_batch, args=args)\n p.daemon = True\n p.start()\n convert_processes.append(p)\n for p in convert_processes:\n p.join()\n\n # if no extension was assigned, automatically assign .mp4\n output_name, output_ext = os.path.splitext(output_path)\n if output_ext == \"\":\n output_ext = \".mp4\"\n # if final output path was specified, then modify it (append _Copy to it)\n final_output_path = output_name + output_ext\n while os.path.isfile(final_output_path):\n if logs : print (final_output_path, \"already exists!\")\n final_output_path = os.path.splitext(final_output_path)[0] + \"_Copy\" + output_ext\n\n # video settings\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video_out = imageio.get_writer(final_output_path, fps=new_fps, quality=None, bitrate=(bitrate * 1024 * 2.5))\n size = None\n\n # write images to new video\n for batch in range(1, batches + 1):\n batch_folder = temp_folder + str(batch - 1) + \"/\"\n for i in range(1, frames_per_batch + 1):\n img = cv2.imread(batch_folder + str(i) + \".jpg\", 2)\n if size is None:\n height, width = img.shape\n size = (width, height)\n video_out.append_data(img)\n with progress_tracker.get_lock():\n progress_tracker.value += progress_step\n if logs : print (\"Progress: %.4f%%\" % progress_tracker.value, end=\"\\r\")\n video_out.close()\n shutil.rmtree(temp_folder)\n\n # when we are done, there might be some rounding errors when converting some stuff to integers, thus it doesn't appear to be done\n # So we just simply set it to 100\n with progress_tracker.get_lock():\n progress_tracker.value = 100\n\n if logs:\n print (\"=\" * 30)\n print (\"SUMMARY:\")\n print (\"-\" * 20)\n print (\"Progress: %.4f%%\" % progress_tracker.value)\n print (\"Total frames found:\", str(total_frames))\n print (\"Frames included and converted:\", str(frames_per_batch * batches))\n print (\"Original FPS:\", str(fps))\n print(\"New FPS:\", str(new_fps))\n print (\"Resolution:\", str(size))\n print (\"Saved to\", final_output_path)\n print (\"Time took: %.4f secs\" % (time.time() - start_time))", "def convert(processed_dir: str, video_file: str):\n\n video_name = osp.splitext(osp.basename(video_file))[0]\n out_dir = processed_dir + video_name\n\n # create img dir\n if not osp.exists(processed_dir):\n os.mkdir(processed_dir)\n\n # Create dir for video file if not existent\n # this is where we save our images\n if not osp.exists(out_dir):\n os.mkdir(out_dir)\n\n if osp.exists(out_dir):\n os.mkdir(out_dir + \"/kermit/\")\n os.mkdir(out_dir + \"/not_kermit/\")\n\n # open video file for processing\n cap = cv.VideoCapture(video_file)\n frame_rate = cap.get(5) # frame rate\n\n sec = 0\n total_count = (60*25)+50 # just an approximation\n pbar = tqdm.tqdm(total=total_count, leave=False)\n\n count = 0\n while (cap.isOpened()):\n frame_id = cap.get(1) # current frame number\n frame_exists, curr_frame = cap.read()\n\n if not frame_exists:\n break\n else:\n if (frame_id % math.floor(frame_rate) == 0):\n # output is : video_file/<video_file>_frameNr.jpg\n cv.imwrite(osp.join(out_dir, '{}_{}.jpg'.format(video_name,count)), curr_frame)\n count = count + 1\n pbar.update(1)\n\n pbar.close()\n # release resources\n cap.release()", "def ffmpeg(self, savefile=None, show_path=False, output_format=None,\n ffmpeg_options='', delay=None, iterations=0, pix_fmt='rgb24'):\n if not self._have_ffmpeg():\n msg = \"\"\"Error: ffmpeg does not appear to be installed. 
Saving an animation to\na movie file in any format other than GIF requires this software, so\nplease install it and try again.\"\"\"\n raise OSError(msg)\n else:\n if savefile is None:\n if output_format is None:\n output_format = '.mpg'\n else:\n if output_format[0] != '.':\n output_format = '.'+output_format\n savefile = graphics_filename(ext=output_format)\n else:\n if output_format is None:\n suffix = os.path.splitext(savefile)[1]\n if len(suffix) > 0:\n output_format = suffix\n else:\n output_format = '.mpg'\n if not savefile.endswith(output_format):\n savefile += output_format\n early_options = ''\n if output_format == '.gif':\n # We try to set reasonable options for gif output.\n #\n # Older versions of ffmpeg (before 0.9, summer 2011)\n # use the option -loop_output instead of -loop.\n # Setting iterations=None is a way of preventing sage\n # from adding the -loop option. A separate\n # -loop_output option can be added with the\n # ffmpeg_options argument.\n if iterations is not None:\n loop_cmd = '-loop {0} '.format(iterations)\n else:\n loop_cmd = ''\n # A pix_fmt value is required for some but not all\n # ffmpeg installations. Setting pix_fmt=None will\n # prevent sage from adding this option, and it may be\n # controlled separately through ffmpeg_options.\n if pix_fmt is not None:\n pix_fmt_cmd = '-pix_fmt {0} '.format(pix_fmt)\n else:\n pix_fmt_cmd = ''\n ffmpeg_options += ' {0}{1}'.format(pix_fmt_cmd,loop_cmd)\n if delay is not None and output_format != '.mpeg' and output_format != '.mpg':\n early_options += ' -r %s ' % int(100/delay)\n savefile = os.path.abspath(savefile)\n pngdir = self.png()\n pngs = os.path.join(pngdir, \"%08d.png\")\n # For ffmpeg, it seems that some options, like '-g ... -r\n # ...', need to come before the input file names, while\n # some options, like '-pix_fmt rgb24', need to come\n # afterwards. 
Hence 'early_options' and 'ffmpeg_options'\n cmd = 'cd \"%s\"; sage-native-execute ffmpeg -y -f image2 %s -i %s %s %s' % (pngdir, early_options, pngs, ffmpeg_options, savefile)\n from subprocess import check_call, CalledProcessError, PIPE\n try:\n if sage.misc.misc.get_verbose() > 0:\n set_stderr = None\n else:\n set_stderr = PIPE\n sage.misc.misc.verbose(\"Executing '%s'\" % cmd,level=1)\n sage.misc.misc.verbose(\"\\n---- ffmpeg output below ----\\n\")\n check_call(cmd, shell=True, stderr=set_stderr)\n if show_path:\n print(\"Animation saved to file %s.\" % savefile)\n except (CalledProcessError, OSError):\n print(\"Error running ffmpeg.\")\n raise", "def mpg_convert(input_path: str, output_path: str, check=True,\n verbose_level=0):\n if verbose_level > 0:\n v = '-v'\n else:\n v = '-q'\n os.system('mpg123 ' + v + ' -w ' + output_path + ' ' + input_path)\n if check:\n if not os.path.isfile(output_path):\n raise RuntimeError('Not able to convert file', input_path,\n output_path)", "def convert(input, output, delimiter, encoding, verbose, prefix_strip, fields, start_line, skip_end_rows, start_page, tagname, format_in, format_out, zipfile):\n if verbose:\n enableVerbose()\n options = {}\n options['delimiter'] = delimiter\n options['encoding'] = encoding\n options['prefix_strip'] = prefix_strip\n options['start_line'] = start_line\n options['skip_end_rows'] = skip_end_rows\n options['start_page'] = start_page\n options['tagname'] = tagname\n options['fields'] = fields\n options['format_in'] = format_in\n options['format_out'] = format_out\n options['zipfile'] = zipfile\n acmd = Converter()\n acmd.convert(input, output, options)\n pass", "def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == \"win32\":\n print(\"Running on windows... on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()", "def to_voice(item):\r\n item.seek(0)\r\n item = AudioSegment.from_file(item)\r\n m = io.BytesIO()\r\n m.name = \"voice.ogg\"\r\n item.split_to_mono()\r\n dur = len(item) / 1000\r\n item.export(m, format=\"ogg\", bitrate=\"64k\", codec=\"libopus\")\r\n m.seek(0)\r\n return m, dur", "def seqIo_toVid(fName, ext='avi'):\n\n assert fName[-3:]=='seq', 'Not a seq file'\n sr = seqIo_reader(fName)\n N = sr.header['numFrames']\n h = sr.header['height']\n w = sr.header['width']\n fps = sr.header['fps']\n\n out = fName[:-3]+ext\n sw = skvideo.io.FFmpegWriter(out)\n # sw = cv2.VideoWriter(out, -1, fps, (w, h))\n timer = pb.ProgressBar(widgets=['Converting ', pb.Percentage(), ' -- ',\n pb.FormatLabel('Frame %(value)d'), '/',\n pb.FormatLabel('%(max)d'), ' [', pb.Timer(), '] ',\n pb.Bar(), ' (', pb.ETA(), ') '], maxval=N)\n\n for f in range(N):\n I, ts = sr.getFrame(f)\n sw.writeFrame(Image.fromarray(I))\n # sw.write(I)\n timer.update(f)\n timer.finish()\n # cv2.destroyAllWindows()\n # sw.release()\n sw.close()\n sr.close()\n print(out + ' converted')" ]
[ "0.56139004", "0.53700686", "0.52830744", "0.5208299", "0.51823676", "0.50471944", "0.5017532", "0.50037694", "0.49034742", "0.4880749", "0.47778627", "0.47714263", "0.4754747", "0.46861088", "0.4681197", "0.4670765", "0.46648082", "0.45863622", "0.45649004", "0.45644942", "0.45608425", "0.4558464", "0.45532772", "0.45368737", "0.4523044", "0.45087826", "0.45023882", "0.44820517", "0.4480209", "0.44798154" ]
0.6806187
0
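
The convert method in the entry above is a generator: it spawns ffmpeg, scans stderr for `time=` progress markers, and yields each parsed timecode in seconds. A minimal consumption sketch, assuming `ff` is an instance of the wrapper class that defines convert() and that the clip duration is already known from a probe step — the file names, options, and duration below are illustrative assumptions, not part of the dataset row:

# Hypothetical usage of the generator-style convert() shown above.
infile, outfile = 'input.mkv', 'output.mp4'
opts = ['-c:v', 'libx264', '-c:a', 'aac']
total_duration = 120.0  # seconds; assumed to come from a prior ffprobe call

for timecode in ff.convert(infile, outfile, opts, timeout=10):
    # Each yielded value is ffmpeg's current position, so progress is a simple ratio.
    percent = min(100.0, 100.0 * timecode / total_duration)
    print('converted %.1f%%' % percent)
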
Register a new handler for a specific slash command
def register(self, command: str, handler: Any):
    if not command.startswith("/"):
        command = f"/{command}"
    LOG.info("Registering %s to %s", command, handler)
    self._routes[command].append(handler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _register(self, comm, handler):", "def _register_handler(self, callback, cmd, helphint, hidden, handlers,\n synonyms=(), plugin=None):\n # Register any synonyms (done before we frig with the handlers)\n for entry in synonyms:\n self._register_handler(callback, entry, helphint, True, handlers,\n plugin=plugin)\n\n # Allow simple commands to be passed as strings\n cmd = cmd.split() if isinstance(cmd, (str, unicode)) else cmd\n\n for part in cmd:\n handlers = handlers.subcommands.setdefault(part, Handlers([], {}))\n handlers.handlers.append(Registration(callback, \" \".join(cmd),\n helphint, hidden, plugin))", "def register_handler(self, handler):\r\n self.handler = handler", "def _command(self, *cmd, handler=None):", "def register_command_route(self, route: CommandRoute) -> None:\n self.__command_routes.append(route)\n self.__tg.add_handler(tg_ext.CommandHandler(command=route.command,\n callback=self.__serve_command_route))", "def wrapper(callback):\n self.commands[name] = SlashCommand(callback, name, description, options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)", "def _register(cls):\r\n command_name = cls.__dict__.get('__command__', None)\r\n if command_name:\r\n Command._commands[command_name] = cls", "def register_handler(self, token, handler):\r\n self._handlers[token] = handler", "def _addCommand(self, command):\n self.updater.dispatcher.add_handler(command)", "def add_handler(self, handler):\n pass", "def register_handler(self, method, handler):\n self.handlers[method] = handler", "def register_handler(self, regex, handler):\n regex = re.compile(\"^\" + regex + \"$\")\n self.handlers.append((regex, handler))", "def register_handler(config):\n\n @respond_to(\".*\")\n def handle(message):\n \"\"\"Respond to every Slack message and dispatch to another handler based\n on the contents of the message.\n\n This duplicates a little bit of the work that slackbot does, but allows\n us to define handlers dynamically based on the job config.\n \"\"\"\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)", "def __init__(self, command_handler_name):\n\n # Set the command handler attributes\n self.name = command_handler_name", "def add(self, method: str, pattern: str, handler: Callable) -> None:", "def make_new_handler(self, *args, **kwargs):", "def add_handler(self, path, handler) -> None:\n if self.__test_path(path) and self.__test_path(handler):\n path_parts = self.__split_path(path) # Splits parts into constituent components\n self.route_trie.insert(path_parts, handler) # Passes parts on for addition to the trie", "def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())", "def register(self, command_name, command):\n self._commands[command_name] = command", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n 
self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def addhandler(self, txt, handler):\n self.handlers[txt] = handler\n rlog(0, 'webserver', '%s handler added' % txt)", "def register_command(self, func):\n self.commands[func.__name__] = func", "def register(self, handler):\n self.handlers.add(handler)\n return self", "def add_command_handler(self,command,command_handler):\n\t\tif(callable(command_handler)):\n\t\t\tif isinstance(command, str):\n\t\t\t\tself.command_handlers[command] = command_handler\n\t\t\telse:\n\t\t\t\traise NotAStringException(\"{} isn't a valid command name. Command names must be string\")\n\t\telse:\n\t\t\traise NotCallableException(\"{} is not a function\".format(command_handler))", "def __setupCommandHandlerTypes(self):\n # dict saving all command handler types\n self.__commandHandlers = {'channel': {}, 'query': {}, 'not_authed_dcc': {}, 'authed_dcc': {}}", "def handler(self, command, args=[]):\n ###\n # command parsing and handling logic to be implemented by child\n ###\n if not command and not hasattr(self, 'handle_'):\n return f'Service {str(self.__class__.__name__)}: {self.__doc__ or \"\"}'\n methodname = 'handle_{}'.format(command or '')\n logger.info('method name: {}'.format(methodname))\n logger.info('args: {}'.format(args))\n method = self.__getattribute__(methodname)\n return method(args)", "def route(self, method, pattern, handler):\n pass", "def register_handler(self, method, path, fn):\n if not(method in self.handlers):\n self.handlers[method] = {}\n self.handlers[method][path] = fn", "def __addCommandHandler(self, command, type = 'channel', requiresdb = False):\n try:\n # ensure we are dealing with booleans\n if not requiresdb:\n requiresdb = False\n else:\n requiresdb = True\n\n # add the handler\n # check for existing command type\n if self.__commandHandlerTypeExists(type):\n cmdExec = self.__getFullCommandName(command, type)\n\n # if database required but no database available raise exception\n if requiresdb and not self.__databaseAvailable:\n raise ConfigurationException(CONFIG_DATABASE_NOT_AVAILABLE % cmdExec)\n\n # add handler only if the correct method exists\n if self.__commandExists(command, type):\n cmdHandler = {'func': getattr(self, cmdExec),\n 'db': requiresdb}\n self.__commandHandlers[type][command] = cmdHandler\n else:\n raise ConfigurationException(CONFIG_COMMAND_EXEC_NOT_FOUND % cmdExec)\n else:\n raise ConfigurationException(CONFIG_COMMAND_TYPE_NOT_FOUND % type)\n\n except ConfigurationException, (e):\n print 'Configuration failed: ',\n print 'Could not add the command handler for %s: ' % command\n print e.parameter", "def add_handler(self, path, handler):\n if path: # guard against Null path, we assume handler could be Null\n path_list = self.split_path(path)\n self.trie.insert(step_list=path_list, handler=handler)" ]
[ "0.66050607", "0.6488118", "0.6455913", "0.63224286", "0.62794155", "0.62301844", "0.6192825", "0.61768305", "0.6152243", "0.61095166", "0.61037755", "0.60059524", "0.60009134", "0.5937418", "0.5923331", "0.59176046", "0.59151715", "0.5913708", "0.58904934", "0.58497286", "0.5820212", "0.58184415", "0.57910275", "0.5785638", "0.57818395", "0.5759306", "0.57545763", "0.57510227", "0.5745916", "0.57293266" ]
0.78429383
0
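
The register method in the entry above only normalizes the leading slash and appends the handler to a per-command list. The sketch below shows the surrounding router it implies; the Router class, its defaultdict-backed `_routes`, and the dispatch helper are assumptions added for illustration — only register() itself comes from the row:

import logging
from collections import defaultdict
from typing import Any

LOG = logging.getLogger(__name__)


class Router:
    """Hypothetical container for the register() method shown above."""

    def __init__(self):
        self._routes = defaultdict(list)  # "/command" -> [handler, ...]

    def register(self, command: str, handler: Any):
        if not command.startswith("/"):
            command = f"/{command}"
        LOG.info("Registering %s to %s", command, handler)
        self._routes[command].append(handler)

    def dispatch(self, command: str, payload: dict):
        # Fan the payload out to every handler registered for this command.
        for handler in self._routes.get(command, []):
            handler(payload)


router = Router()
router.register("deploy", lambda payload: print("deploying", payload))
router.dispatch("/deploy", {"env": "staging"})
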
Read a Waymo ego_pose file
def parse_ego_pose_file(ego_pose_file):
    ego_poses = {}
    with open(ego_pose_file, 'r') as f:
        for line in f.readlines():
            line = line.rstrip()
            line = line.split(',')
            timestamp_micro = int(line[0])
            pose = np.array([float(x) for x in line[1:]]).reshape(4, 4)
            waymo_pose = WaymoPose(timestamp_micro, pose)
            ego_poses[timestamp_micro] = waymo_pose
    return ego_poses
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_poses(self):\n print('Loading poses for sequence ' + self.sequence + '...')\n\n pose_file = os.path.join(self.pose_path, self.sequence + '.txt')\n\n # Read and parse the poses\n try:\n self.T_w_cam0 = []\n with open(pose_file, 'r') as f:\n for line in f.readlines():\n T = np.fromstring(line, dtype=float, sep=' ')\n T = T.reshape(3, 4)\n T = np.vstack((T, [0, 0, 0, 1]))\n self.T_w_cam0.append(T)\n print('done.')\n\n except FileNotFoundError:\n print('Ground truth poses are not avaialble for sequence ' +\n self.sequence + '.')", "def _read_eeg(eeg_file):\r\n pass", "def readOdom(msg):\n global pose\n global xPosition\n global yPosition\n global theta\n global odom_list\n global odom_tf\n try:\n pose = msg.pose\n geo_quat = pose.pose.orientation\n q = [geo_quat.x, geo_quat.y, geo_quat.z, geo_quat.w]\n odom_tf.sendTransform((pose.pose.position.x, pose.pose.position.y, 0), \n (pose.pose.orientation.x, pose.pose.orientation.y,pose.pose.orientation.z,pose.pose.orientation.w),rospy.Time.now(),\"base_footprint\",\"odom\")\n #Convert transform to global usable coordinates (x, y, theta)\n (trans, rot) = odom_list.lookupTransform('map', 'base_footprint', rospy.Time(0))\n roll, pitch, yaw = euler_from_quaternion(rot)\n theta = yaw * (180.0/math.pi)\n xPosition = trans[0]\n yPosition = trans[1]\n except:\n print \"waiting\"", "def read_poses_for_camera(record_path, camera_name):\n\n # Resolve pose.txt file path for camera\n poses_path = os.path.join(record_path, camera_name, 'pose.txt')\n if os.path.exists(poses_path):\n poses = read_poses_dict(poses_path)\n else:\n # Sample type dataset (aka zpark-sample)\n poses_path = os.path.join(record_path, camera_name + '.txt')\n poses = read_poses_dict_6(poses_path)\n return poses", "def read(self):\n stable_poses = []\n f = open(self.filepath_, \"r\")\n data = [line.split() for line in f]\n for i in range(len(data)):\n if len(data[i]) > 0 and data[i][0] == \"p\":\n p = float(data[i][1])\n r = [[data[i+1][1], data[i+1][2], data[i+1][3]], [data[i+2][0], data[i+2][1],\n data[i+2][2]], [data[i+3][0], data[i+3][1], data[i+3][2]]]\n r = np.array(r).astype(np.float64)\n x0 = np.array([data[i+4][1], data[i+4][2], data[i+4][3]]).astype(np.float64)\n stable_poses.append(sp.StablePose(p, r, x0))\n return stable_poses", "def read_data(path):\n with h5py.File(path, \"r\") as f:\n transformed_poses = np.array(f['transformed_poses'])\n extracted_poses = np.array(f['poses'])\n target = np.array(f['target'])\n \n return extracted_poses, transformed_poses, target", "def get_poses(traj_num_str):\n x = []\n y = []\n theta = []\n\n # Get x, y\n file_path = \"./dataset/poses/\" + traj_num_str.zfill(2) + \".txt\"\n x_ind = 3\n y_ind = 11\n with open(file_path, \"r\") as fid:\n for i, line in enumerate(fid):\n row = [float(s) for s in line.split(\" \")]\n x.append(row[x_ind])\n y.append(row[y_ind])\n\n # Get theta from pose transformation matrix\n # Add 90 degrees to transpose to world frame\n if np.arcsin(row[0]) > 0:\n theta.append(np.arccos(row[0]) + np.pi/2)\n else:\n theta.append(np.arccos(row[0]) * -1 + np.pi/2)\n\n return x, y, theta", "def read_positions():\n return np.genfromtxt(\"POSITIONS.OUT\").transpose()", "def read_map_file(path):\r\n with open(path) as f:\r\n dir_name = os.path.dirname(path)\r\n img = cv2.imread(dir_name + '/' + f.readline().strip())\r\n assert img.shape[0] > 0 and img.shape[1] > 0, 'Can not open image file'\r\n meter_per_pixel = float(f.readline().strip())\r\n ori_str = f.readline().strip().split()\r\n origin = np.array([int(ori_str[0]), 
int(ori_str[1])])\r\n init_heading = float(ori_str[2])\r\n return img, meter_per_pixel, origin, init_heading", "def read_proj_info(fname, nangles=None, convert_vec=True):\n \n if fname[-4:] == 'toml':\n proj_geom = read_from_toml(fname, nangles=nangles)\n if proj_geom['type'] == 'cone' and convert_vec:\n proj_geom = geom_2vec(proj_geom)\n\n elif fname[-3:] == 'pkl':\n with open(fname, 'rb') as f:\n proj_geom = pickle.load(f)\n proj_geom['nangles'] = proj_geom['Vectors'].shape[0]\n \n # if toml and cone beam, c\n \n return proj_geom", "def loadPose(modelname, posename):\n\n load_file = bUtils.readTextFile(modelname + '::poses')\n if load_file == '':\n log('No poses stored.', 'ERROR')\n return\n\n loadedposes = json.loads(load_file)\n if posename not in loadedposes:\n log('No pose with name ' + posename + ' stored for model ' + modelname, 'ERROR')\n return\n prev_mode = bpy.context.mode\n pose = loadedposes[posename]\n\n # apply rotations to all joints defined by the pose\n try:\n bpy.ops.object.mode_set(mode='POSE')\n for obj in sUtils.getObjectsByPhobostypes(['link']):\n if nUtils.getObjectName(obj, 'joint') in pose['joints']:\n obj.pose.bones['Bone'].rotation_mode = 'XYZ'\n obj.pose.bones['Bone'].rotation_euler.y = float(\n pose['joints'][nUtils.getObjectName(obj, 'joint')]\n )\n except KeyError as error:\n log(\"Could not apply the pose: \" + str(error), 'ERROR')\n finally:\n # restore previous mode\n bpy.ops.object.mode_set(mode=prev_mode)", "def read_kitti_Tr_velo_to_cam(filename):\n\n with open(filename) as f:\n for line in f:\n data = line.split(' ')\n if data[0] == 'Tr_velo_to_cam:':\n calib = np.array([float(x) for x in data[1:13]])\n calib = calib.reshape(3, 4)\n return _extend_matrix(calib)\n\n raise Exception(\n 'Could not find entry for P2 in calib file {}'.format(filename))", "def read_phosim_seg_file(seg_file):\n my_self = FocalPlaneReadout()\n with open(seg_file, 'r') as f:\n lines = [line for line in f.readlines() if not line.startswith('#')]\n i = -1\n while True:\n try:\n i += 1\n sensor_props = SensorProperties(lines[i])\n my_self.sensors[sensor_props.name] = sensor_props\n for j in range(sensor_props.num_amps):\n i += 1\n amp_props = AmplifierProperties(lines[i])\n my_self.amps[amp_props.name] = amp_props\n sensor_props.append_amp(amp_props)\n except IndexError:\n break\n return my_self", "def eeg_readelp(file):\t\n\t\n\tf = open(file,'r')\n\tphi = np.zeros(33)\n\ttheta = np.zeros(33)\n\tr = np.ones(33)\n\tchan = []\n\tfor i in range(33):\n\t\tline=f.readline()\n\t\tstr = string.split(line)\n\t\t#chan[i]=str[1]\n\t\tphi[i]=float(str[2])\n\t\ttheta[i]=float(str[3])\n\t\tchan.append(str[1])\n\t\n\tf.close()\n\t# theta and phi are in degrees -> convert to radians\n\tx,y,z = eeg_sph2cart(theta*np.pi/180,phi*np.pi/180,r)\n\treturn chan,x,y,z,theta,phi", "def parse_geometry(path: str) -> Optional[Dict[str, tuple]]:\n if not os.path.isfile(path):\n raise InputError(f'Could not find file {path}')\n if path.endswith('.yml'):\n content = read_yaml_file(path)\n if isinstance(content, dict):\n if 'xyz' in content.keys():\n return content['xyz'] if isinstance(content['xyz'], dict) else str_to_xyz(content['xyz'])\n elif 'opt_xyz' in content.keys():\n return content['opt_xyz'] if isinstance(content['opt_xyz'], dict) else str_to_xyz(content['opt_xyz'])\n software = identify_ess(path)\n xyz_str = ''\n if software == 'xtb':\n lines = _get_lines_from_file(path)\n final_structure, coord, first_line = False, False, True\n for line in lines:\n if '$' in line or 'END' in line or 
len(line.split()) < 10:\n coord = False\n if coord:\n splits = line.split()\n xyz_str += f'{qcel.periodictable.to_E(splits[3])} {splits[0]} {splits[1]} {splits[2]}\\n'\n if final_structure and ('$coord' in line or len(line.split()) > 15):\n coord = True\n if len(line.split()) > 15 and first_line:\n splits = line.split()\n xyz_str += f'{qcel.periodictable.to_E(splits[3])} {splits[0]} {splits[1]} {splits[2]}\\n'\n first_line = False\n if 'final structure:' in line:\n final_structure = True\n return str_to_xyz(xyz_str)\n\n log = ess_factory(fullpath=path, check_for_errors=False)\n try:\n coords, number, _ = log.load_geometry()\n except LogError:\n logger.debug(f'Could not parse xyz from {path}')\n\n # Try parsing Gaussian standard orientation instead of the input orientation parsed by Arkane.\n lines = _get_lines_from_file(path)\n for i in range(len(lines)):\n if 'Standard orientation:' in lines[i]:\n xyz_str = ''\n j = i\n while len(lines) and not lines[j].split()[0].isdigit():\n j += 1\n while len(lines) and '-------------------' not in lines[j]:\n splits = lines[j].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n j += 1\n break\n\n if xyz_str:\n return str_to_xyz(xyz_str)\n return None\n\n return xyz_from_data(coords=coords, numbers=number)", "def extract_pose(np_img):\n\n # Prepare extraction\n current_dir = os.getcwd()\n os.chdir(OPEN_FACE_BINARY_PATH)\n timestamp = datetime.now().timestamp()\n\n # Extract action units using OpenFace\n np_img *= 255\n np_img = np_img.astype(np.uint8)\n imsave('{}.jpg'.format(timestamp), np_img)\n exit_code = os.system('./FaceLandmarkImg -f {}.jpg >/dev/null'.format(timestamp))\n csv_data = pd.read_csv('processed/{}.csv'.format(timestamp), sep=',\\s', engine='python')\n pose_params = csv_data[['pose_Rx', 'pose_Ry', 'pose_Rz']].iloc[0].tolist()\n\n # Convert pose parameters to numpy arrays\n pose_params = np.array(pose_params)\n\n # Delete temporary files created during action unit extract_action_units\n exit_code = os.system('rm -r {}.jpg >/dev/null'.format(timestamp))\n exit_code = os.system('rm -r processed/{}* >/dev/null'.format(timestamp))\n\n os.chdir(current_dir)\n\n return pose_params", "def get_goal_ee_pose(self):\n #self.target_endpoint = #magic tf call that I can add ie the pose of the palm from camera aruco detection\n while True:\n try:\n translation, rotation = self.listener.lookupTransform('world_frame', 'palm_frame_camera', rospy.Time()) # ee_frame_camera_flipped\n break # once the transform is obtained move on\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue # if it fails try again\n point = [translation[0], translation[1], translation[2]]\n self.target_endpoint = np.array(point)\n # rospy.logerr(self.target_endpoint)", "def readmovie(addr, ftype='xyz', frameformat='aio'):\n \n f = open(addr, 'r')\n allcoords = []\n \n if ftype == 'xyz':\n \n while True:\n \n try:\n natoms = int(f.readline())\n f.readline()\n atoms, coords = [], []\n \n for x in range(natoms):\n line = f.readline().split()\n atoms.append(line[0])\n coords.append(line[1:])\n \n allcoords.append(coords)\n \n except:\n \n break\n \n # Assemble the read coordinates into a dictionary\n nframes = len(allcoords)\n out = {}\n out['atoms'] = atoms\n \n if frameformat == 'aio':\n out['frames'] = np.asarray(allcoords, dtype='float64')\n elif frameformat == 'sep':\n for i in range(nframes):\n out[str(i)] = np.asarray(allcoords[i], dtype='float64')\n \n return out", "def 
get_ef_pose(pose_listener): \n if LOCAL_TEST: # dummy\n return np.array([[-0.1915, 0.8724, -0.4498, 0.6041],\n [ 0.7355, 0.4309, 0.5228, -0.0031],\n [ 0.6499, -0.2307, -0.7242, 0.3213],\n [ 0., 0., 0., 1. ]])\n else:\n base_frame = 'measured/base_link'\n target_frame = 'measured/panda_hand'\n try:\n tf_pose = pose_listener.lookupTransform(base_frame, target_frame, rospy.Time(0))\n pose = make_pose(tf_pose)\n except (tf2_ros.LookupException,\n tf2_ros.ConnectivityException,\n tf2_ros.ExtrapolationException):\n pose = None\n print('cannot find end-effector pose')\n sys.exit(1)\n return pose", "def readogle(filename, **kw):\n \n # 2008-12-21 18:53 IJC: Created\n\n f = open(filename, 'r')\n raw = f.readlines()\n f.close()\n\n nstars = len(raw)\n\n raw2 = array([line.split() for line in raw])\n ra = raw2[:,1]\n dec = raw2[:,2]\n xref = raw2[:,3]\n yref = raw2[:,4]\n vmag = raw2[:,5]\n imag = raw2[:,7]\n \n xref = [map(float, [x]) for x in xref]\n yref = [map(float, [y]) for y in yref]\n vmag = [map(float, [v]) for v in vmag]\n imag = [map(float, [i]) for i in imag]\n\n return (ra, dec, xref, yref, vmag, imag)", "def reformat_pose_to_dict(self, now_pose):\n # now_pose è un dict in particolare { pose : [ {position : [{x : value , y:value , z:value} ] } , {orientation : [] } }\n # devo convertire i quaternioni in amgoli di eulero...estrarre i quaternioni da pose_now e convertirli in angoli RPY\n\n lato_corto_2 = 1.65 #1.45 # offset parcheggio\n \n #correggo gli offset x centrare le macchine nei parcheggi\n\n if abs(round(now_pose.position.x,2)) == 22.45:\n if now_pose.position.x < 0 :\n now_pose.position.x+=lato_corto_2\n now_pose.position.y-=0.4\n else :\n now_pose.position.x-=lato_corto_2\n now_pose.position.y+=0.4\n \n if abs(round(now_pose.position.y,2)) == 22.45:\n if now_pose.position.y < 0 :\n now_pose.position.y+=lato_corto_2\n now_pose.position.x+=0.4\n else :\n now_pose.position.y-=lato_corto_2\n now_pose.position.x-=0.4\n\n # correggo la z per renderla uguale all'asfalto che viene spownata nel mondo\n\n offset_asfalto = 0.3\n\n x = now_pose.position.x\n y = now_pose.position.y\n z = now_pose.position.z + offset_asfalto\n\n q1 = now_pose.orientation.x\n q2 = now_pose.orientation.y\n q3 = now_pose.orientation.z\n q4 = now_pose.orientation.w\n\n\n # converto i quaternioni in angoli di rulero RPY in radianti\n orientation_list = [q1,q2,q3,q4]\n\n euler = euler_from_quaternion( orientation_list )\n roll = euler[0]\n pitch = euler[1]\n yaw = round(euler[2],2) + np.pi\n\n\n # creo la lista dei parametri che mi servono nel campo pose:[] del file .yaml\n\n lista_parametri = [x ,y ,z ,roll ,pitch ,yaw ]\n\n # creo un dict con tutti i campi di cui ho bisogno nel file .yaml\n # settare le chiavi 'name' , ' type ' , 'package' , ' pose ' secondo le proprie necessità\n # i due stili sono equivalenti : usare quello preferito\n \"\"\"\n {\"name\" : \"park1\" , \n \"type\" : \"sdf\" , \n \"package\" : \"object_spawner\" , \n \"pose \":self.seq(lista_parametri) \n }\n \n \"\"\"\n lista_veicoli = ['macchina','pickup','ferrari','prius_hybrid','car_lexus','car_polo','car_volvo','car_golf']\n num_veicoli = 1\n\n #modificare qui implementando una funzione randomica se si vogliono piu veicoli casuali spawnati\n elemento_lista = {'name' : lista_veicoli[3],\n 'type': 'sdf',\n 'package': 'object_spawner',\n 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n #\"\"\"\n # elemento_lista = {'name' : 'ferrari',\n # 'type': 'urdf',\n # 'package': 'autopark',\n # 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n\n 
return elemento_lista", "def _read_pose_data(self, pose_arr, input_data_mode):\n if input_data_mode == InputDataMode.TF_IMAGE:\n return pose_arr[:,2:3]\n elif input_data_mode == InputDataMode.TF_IMAGE_PERSPECTIVE:\n return np.c_[pose_arr[:,2:3], pose_arr[:,4:6]]\n elif input_data_mode == InputDataMode.RAW_IMAGE:\n return pose_arr[:,:4]\n elif input_data_mode == InputDataMode.RAW_IMAGE_PERSPECTIVE:\n return pose_arr[:,:6]\n elif input_data_mode == InputDataMode.REGRASPING:\n # depth, approach angle, and delta angle for reorientation\n return np.c_[pose_arr[:,2:3], pose_arr[:,4:5], pose_arr[:,6:7]]\n else:\n raise ValueError('Input data mode %s not supported' %(input_data_mode))", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. 
/ eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def get_ego_poses(self):\n ego_poses = []\n existing_samp = []\n \n for i, samp_data in enumerate(nusc.sample_data):\n samp = nusc.get('sample', samp_data['sample_token'])\n \n # add ego poses from key frames for each sample without duplicates\n if samp_data['is_key_frame'] == True and samp in self.samples and samp not in existing_samp:\n ego_poses.append(nusc.ego_pose[i])\n existing_samp.append(samp)\n \n return ego_poses", "def eeg_readswf(file):\t\t\n\tf=open(file,'r')\t\n\tfirstline = f.readline() # ntpts TSB info etc\n\tstr = string.split(firstline)\n\tntpts = int(str[1])\t\n\ttsb = float(str[3])\n\tdi = float(str[5])\t\n\ttim = np.arange(tsb,ntpts*di+tsb,di)\t\n\tline = f.readline()\t\n\tstr = string.split(line)\n\teeg0 = np.array(map(float,str[1:]))\n\tline = f.readline()\t\n\tstr = string.split(line)\n\teeg1 = np.array(map(float,str[1:]))\n\teeg = np.zeros([2,ntpts])\n\teeg[0,:]=eeg0\n\teeg[1,:]=eeg1\n\treturn [eeg,tim,ntpts]", "def load_pose(color_dir, view, depth_scale, device):\n pose_file = os.path.join(color_dir, '%05d.pose' % view)\n if os.path.exists(pose_file):\n with open(pose_file, \"rb\") as fh:\n pose = pickle.load(fh)\n pose[:3,3:] *= depth_scale\n pose = torch.tensor(\n np.concatenate(\n (pose, np.array([0, 0, 0, 1]).reshape(1, 4)),\n axis=0\n ),\n dtype=torch.float32,\n device=device,\n )\n else:\n error(\"Pose file '%s' does not exist.\" % pose_file)\n return pose", "def _read_vee(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 2:\n size1, size2 = int(line[0]), int(line[1])\n vee = NP.zeros((size1, size1, size2, size2), dtype=NP.float64)\n elif len(line) == 5:\n mu, nu, lmda, sgma, val = int(line[0]) - 1, int(line[1]) - 1, int(line[2]) - 1, int(line[3]) - 1, NP.float64(line[4])\n vee[mu,nu,lmda,sgma] = \\\n vee[nu,mu,lmda,sgma] = \\\n vee[mu,nu,sgma,lmda] = \\\n vee[nu,mu,sgma,lmda] = \\\n vee[lmda,sgma,mu,nu] = \\\n vee[sgma,lmda,mu,nu] = \\\n vee[lmda,sgma,nu,mu] = \\\n vee[sgma,lmda,nu,mu] = \\\n val\n return vee", "def update_pose(self, data):\n # self.pose = data\n self.x_position = round(data.pose.pose.position.x, 4)\n self.y_position = round(data.pose.pose.position.y, 4)\n [yaw, _, _] = quaternion_to_euler(\n data.pose.pose.orientation.x, \n data.pose.pose.orientation.y, \n data.pose.pose.orientation.z, \n data.pose.pose.orientation.w\n )\n \n self.theta = round(yaw, 4)\n print(f'(Reading) X: {data.pose.pose.position.x}\\t Y:{data.pose.pose.position.y}')\n # self.theta = round(data.pose.pose.orientation.z, 4)", "def readVP(self,species): \n f = open('VPparams.txt', 'rU')\n lines = f.readlines()\n f.close()\n \n parsing = False\n for i in np.arange(len(lines)):\n if lines[i].startswith(species):\n parsing = True\n else:\n parsing = False\n if parsing:\n data = lines[i].split()\n \n lnC, L0, Rv, da, db = data[1:len(data)]\n self.lnC, self.L0, self.Rv, self.da, self.db = \\\n float(lnC), float(L0), float(Rv), float(da), float(db)", "def read_track(fp, 
colnames=None):\n # read lines\n f = open(fp, \"r+\")\n s = f.readlines()\n\n # get info\n MIST_version = re.split(r\"\\s+\", s[0].strip())[-1]\n MESA_revision = re.split(r\"\\s+\", s[1].strip())[-1]\n\n Yinit, Zinit, FeH, aFe, vvcrit = re.split(r\"\\s+\", s[4].strip())[1:]\n Yinit = np.float(Yinit)\n Zinit = np.float(Zinit)\n FeH = np.float(FeH)\n aFe = np.float(aFe)\n vvcrit = np.float(vvcrit)\n\n initial_mass, N_pts, N_EEP, N_col, phase, type_ = \\\n re.split(r\"\\s+\", s[7].strip())[1:]\n initial_mass = np.float(initial_mass)\n N_pts = np.int(N_pts)\n N_EEP = np.int(N_EEP)\n N_col = np.int(N_col)\n\n # get eep info\n EEPs = tuple([np.int(_) for _ in re.split(r\"\\s+\", s[8].strip())[2:]])\n # eep = np.arange(EEPs[0], EEPs[-1] + 1) sometimes inconsistent with data\n\n # add eep column\n # _eep\n t = Table.read(s[11:], format=\"ascii.commented_header\")\n eep = np.arange(EEPs[0], EEPs[0] + len(t))\n eep_ok = eep[-1] == EEPs[-1] + 1\n t.add_column(Column(eep, \"_eep\"))\n # _lgmass\n t.add_column(Column(np.ones(len(t), )*np.log10(initial_mass), \"_lgmass\"))\n # _lgage\n t.add_column(Column(np.log10(t[\"star_age\"].data), \"_lgage\"))\n # _feh\n t.add_column(Column(np.ones(len(t), ) * FeH, \"_feh_ini\"))\n t.add_column(Column(t[\"log_surf_z\"]-np.log10(Zsun), \"_feh\"))\n\n # add meta info\n meta = OrderedDict(\n MIST_version=MIST_version,\n MESA_revision=MESA_revision,\n Yinit=Yinit,\n Zinit=Zinit,\n FeH=FeH,\n aFe=aFe,\n vvcrit=vvcrit,\n initial_mass=initial_mass,\n N_pts=N_pts,\n N_EEP=N_EEP,\n N_col=N_col,\n phase=phase,\n type_=type_,\n EEPs=EEPs,\n EEP0=EEPs[0],\n EEP1=EEPs[-1],\n EEP1ACT=EEPs[0] + len(t),\n EEPOK=eep_ok,\n INTERP=(\"_INTERP\" in fp)\n )\n t.meta = meta\n\n if colnames is None:\n return t\n else:\n for colname in colnames:\n try:\n assert colname in t.colnames\n except AssertionError as ae:\n raise(ae(\"{} not in track.colnames!!!\".format(colname)))\n return t[colnames]" ]
[ "0.64440465", "0.642082", "0.6161004", "0.59746593", "0.5859893", "0.5809093", "0.5723148", "0.56689817", "0.56615466", "0.5603466", "0.5602688", "0.55753565", "0.55375", "0.5500092", "0.54698247", "0.545957", "0.5449294", "0.5443041", "0.54186076", "0.53790516", "0.53673875", "0.5343579", "0.53205895", "0.53046834", "0.52910256", "0.527676", "0.52744806", "0.52649623", "0.52623147", "0.5261153" ]
0.75926197
0
Convert a NuScenesObject to a Bbox3D
def waymo_object_to_bbox3d(o, frame_index):
    return Bbox3D(o.x, o.y, o.z, o.l, o.w, o.h, o.yaw, frame=o.ref_frame,
                  obj_type=o.obj_type, stamp=frame_index, score=o.score)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def object_label_to_box_3d(obj_label):\n\n fc.check_object_label_format(obj_label)\n\n box_3d = np.zeros(7)\n\n box_3d[0:3] = obj_label.t\n box_3d[3] = obj_label.l\n box_3d[4] = obj_label.w\n box_3d[5] = obj_label.h\n box_3d[6] = obj_label.ry\n\n return box_3d", "def b_transform_cube(b_obj):\n \n b_scale_object()\n b_scale_single_face(b_obj)", "def _load_bboxes_3d(self, results):\n results[\"gt_bboxes_3d\"] = results[\"ann_info\"][\"gt_bboxes_3d\"]\n results[\"bbox3d_fields\"].append(\"gt_bboxes_3d\")\n return results", "def objects_to_bmesh(objs, transform=True):\n\n # CAUTION: Removes/destroys custom layer props\n\n # Creates the mesh used to merge the entire scene\n bm_all = bmesh.new()\n\n # Adds the objects\" meshes to the bmesh\n for obj in objs:\n dprint(\"Preparing object {} for export...\".format(obj.name))\n # Creates a bmesh from the supplied object\n bm = bmesh.new()\n bm.from_mesh(obj.data)\n\n # Makes sure all layers exist so values don't get lost while exporting\n uv_layer = bm.loops.layers.uv.get(\"UVMap\")\n tex_layer = bm.faces.layers.tex.get(\"UVMap\")\n vc_layer = (bm.loops.layers.color.get(\"Col\") or\n bm.loops.layers.color.new(\"Col\"))\n env_layer = (bm.loops.layers.color.get(\"Env\") or\n bm.loops.layers.color.new(\"Env\"))\n env_alpha_layer = (bm.faces.layers.float.get(\"EnvAlpha\") or\n bm.faces.layers.float.new(\"EnvAlpha\"))\n va_layer = (bm.loops.layers.color.get(\"Alpha\") or\n bm.loops.layers.color.new(\"Alpha\"))\n texnum_layer = bm.faces.layers.int.get(\"Texture Number\")\n type_layer = (bm.faces.layers.int.get(\"Type\") or\n bm.faces.layers.int.new(\"Type\"))\n material_layer = (bm.faces.layers.int.get(\"Material\") or\n bm.faces.layers.int.new(\"Material\"))\n\n # Removes the parent for exporting and applies transformation\n parent = obj.parent\n if parent:\n mat = obj.matrix_world.copy()\n old_mat = obj.matrix_basis.copy()\n obj.parent = None\n obj.matrix_world = mat\n\n spc = obj.matrix_basis\n bmesh.ops.scale(\n bm,\n vec=obj.scale,\n space=spc,\n verts=bm.verts\n )\n if transform:\n bmesh.ops.transform(\n bm,\n matrix=Matrix.Translation(obj.location),\n space=spc,\n verts=bm.verts\n )\n bmesh.ops.rotate(\n bm,\n cent=obj.location,\n matrix=obj.rotation_euler.to_matrix(),\n space=spc,\n verts=bm.verts\n )\n\n # Restores the parent relationship\n if parent and not obj.parent:\n obj.parent = parent\n obj.matrix_basis = old_mat\n\n # Converts the transformed bmesh to mesh\n new_mesh = bpy.data.meshes.new(\"ncp_export_temp\")\n bm.to_mesh(new_mesh)\n\n # Adds the transformed mesh to the big bmesh\n bm_all.from_mesh(new_mesh)\n\n # Removes unused meshes\n bpy.data.meshes.remove(new_mesh, do_unlink=True)\n bm.free()\n\n return bm_all", "def load_obj_render_BSR(objfile):\n #load a 3d model and render it in 2D\n obj = object3d() \n obj.load(objfile)\n \n #obj.scale_pts( (.1,.2,.1) )\n #obj.rotate_pts( (.1,.1,.1) )\n\n #obj2 = object3d() \n #obj2.load('objects/monkey.obj')\n\n bloody_simple_2drender('2d_render.png', obj=[obj], gridsize=100)", "def get_bmesh(obj):\n bm = bmesh.new()\n bm.from_mesh(obj.data)\n\n # Scene update necessary, as matrix_world is updated lazily\n bpy.context.scene.update()\n\n return bm", "def box_3d_to_object_label(box_3d, obj_type='Car'):\n\n fc.check_box_3d_format(box_3d)\n\n obj_label = obj_utils.ObjectLabel()\n\n obj_label.type = obj_type\n\n obj_label.t = box_3d.take((0, 1, 2))\n obj_label.l = box_3d[3]\n obj_label.w = box_3d[4]\n obj_label.h = box_3d[5]\n obj_label.ry = box_3d[6]\n\n return obj_label", "def get_3d_bb(doc, 
obj, step_ang):\n # get the aligned bounding box first for checking later\n a_min_pt, a_max_pt = obj.GetBoundingBox()\n # compute the minimum bounding box.\n doc.StartUndoMark()\n na_min_pt, na_max_pt = get_min_3d_bb(obj, step_ang)\n doc.EndUndoMark()\n doc.SendCommand(\"_undo\\n\\n\")\n\n a_vol = get_volume_from_points(a_min_pt, a_max_pt)\n na_vol = get_volume_from_points(na_min_pt, na_max_pt)\n if a_vol < na_vol:\n min_pt = a_min_pt\n max_pt = a_max_pt\n else:\n min_pt = na_min_pt\n max_pt = na_max_pt\n return min_pt, max_pt", "def cast(obj: 'itkLightObject') -> \"itkMeshSourceMUC3 *\":\n return _itkMeshSourcePython.itkMeshSourceMUC3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkMeshSourceMUS3 *\":\n return _itkMeshSourcePython.itkMeshSourceMUS3_cast(obj)", "def create_b_obj(ob_name, b_obj_data):\n b_obj = bpy.data.objects.new(ob_name, b_obj_data)\n bpy.context.scene.objects.link(b_obj)\n bpy.context.scene.objects.active = b_obj\n b_obj.select = True\n return b_obj", "def cast(obj: 'itkLightObject') -> \"itkMeshSourcePSUC3 *\":\n return _itkMeshSourcePython.itkMeshSourcePSUC3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkMeshSourcePSUS3 *\":\n return _itkMeshSourcePython.itkMeshSourcePSUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkMeshSourceMF3 *\":\n return _itkMeshSourcePython.itkMeshSourceMF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkMeshSourceMD3 *\":\n return _itkMeshSourcePython.itkMeshSourceMD3_cast(obj)", "def add_object(self, obj): # DEFINE OBJ!\n obj.spritesheet_width = self.spritesheet.size['width']\n obj.spritesheet_height = self.spritesheet.size['height']\n \n obj._layer_added(self)\n \n\n obj.buffer_index = len(self.objects)\n self.objects.append(obj)\n\n x = obj.x\n y = obj.y\n \n self.verts.extend(((x, y, 0.0), (x+obj.width, y, 0.0), (x+obj.width, y-obj.height, 0.0), (x, y-obj.height, 0.0)))\n self.texcoords.extend(obj.uv_texture)\n self.norms.extend(((0, 0, -1), (0, 0, -1), (0, 0, -1), (0, 0, -1)))\n\n if pi3d.PLATFORM == pi3d.PLATFORM_PI:\n self.inds.append((self.a,self.b,self.c))\n self.inds.append((self.d,self.a,self.c))\n else:\n self.inds.extend((self.a,self.b,self.c))\n self.inds.extend((self.d,self.a,self.c))\n\n self.a += 4\n self.b += 4\n self.c += 4\n self.d += 4\n\n \n #~ return len(self.sprites)-1", "def _packb3(obj, **options):\n fp = io.BytesIO()\n _pack3(obj, fp, **options)\n return fp.getvalue()", "def box_3d_to_3d_iou_format(boxes_3d):\n boxes_3d = np.asarray(boxes_3d)\n fc.check_box_3d_format(boxes_3d)\n\n iou_3d_boxes = np.zeros([len(boxes_3d), 7])\n iou_3d_boxes[:, 4:7] = boxes_3d[:, 0:3]\n iou_3d_boxes[:, 1] = boxes_3d[:, 3]\n iou_3d_boxes[:, 2] = boxes_3d[:, 4]\n iou_3d_boxes[:, 3] = boxes_3d[:, 5]\n iou_3d_boxes[:, 0] = boxes_3d[:, 6]\n\n return iou_3d_boxes", "def b_create_base_geometry(b_name):\n b_obj = b_create_cube(b_name)\n return b_obj", "def cast(obj: 'itkLightObject') -> \"itkMeshSourceMSS3 *\":\n return _itkMeshSourcePython.itkMeshSourceMSS3_cast(obj)", "def marching_cubes_to_obj(marching_cubes_out, output_file):\n\n verts, faces, normals, _ = marching_cubes_out\n with open(output_file, \"w\") as f:\n for item in verts:\n f.write(f\"v {item[0]} {item[1]} {item[2]}\\n\")\n for item in normals:\n f.write(f\"vn {item[0]} {item[1]} {item[2]}\\n\")\n for item in faces:\n f.write(\n f\"f {item[0]}//{item[0]} {item[1]}//{item[1]} \"\n f\"{item[2]}//{item[2]}\\n\"\n )\n f.close()", "def cast(obj: 'itkLightObject') -> \"itkMeshSourcePSD3 *\":\n return _itkMeshSourcePython.itkMeshSourcePSD3_cast(obj)", "def 
cast(obj: 'itkLightObject') -> \"itkMeshSourcePSF3 *\":\n return _itkMeshSourcePython.itkMeshSourcePSF3_cast(obj)", "def cube_from_bbox(bbox):\n cube = pm.polyCube(\n width=bbox.width(),\n height=bbox.height(),\n depth=bbox.depth(),\n ch=False\n )\n cube[0].setAttr('t', bbox.center())\n return cube[0]", "def itkMeshSourceMUS3_cast(obj: 'itkLightObject') -> \"itkMeshSourceMUS3 *\":\n return _itkMeshSourcePython.itkMeshSourceMUS3_cast(obj)", "def grb_to_grid(grb_obj):\n #from scollis\n n_levels = len(grb_obj)\n levels = np.array([grb_element['level'] for grb_element in grb_obj])\n indexes = np.argsort(levels)[::-1] # highest pressure first\n cube = np.zeros([n_levels, grb_obj[0].values.shape[0], grb_obj[1].values.shape[1]])\n for i in range(n_levels):\n cube[i,:,:] = grb_obj[indexes[i]].values\n cube_dict = {'data' : cube, 'units' : grb_obj[0]['units'],\n 'levels' : levels[indexes]}\n return cube_dict", "def nms_3d(boxes: torch.Tensor, scores: torch.Tensor, iou_threshold: float) -> torch.Tensor:\n return _C.nms_3d(boxes, scores, iou_threshold)", "def itkMeshSourceMUC3_cast(obj: 'itkLightObject') -> \"itkMeshSourceMUC3 *\":\n return _itkMeshSourcePython.itkMeshSourceMUC3_cast(obj)", "def itkMeshSourceMF3_cast(obj: 'itkLightObject') -> \"itkMeshSourceMF3 *\":\n return _itkMeshSourcePython.itkMeshSourceMF3_cast(obj)", "def make_multi_object_scene(self):\n multi1 = Scene3D()\n box = self.objects[0]\n box.set_location(1, 0, 0)\n box.set_size(0.4, 0.4, 0.1)\n multi1.add_object(box)\n\n box = self.objects[1]\n box.set_location(-1, 0, 0)\n multi1.add_object(box)\n\n self.scenes.append(multi1)" ]
[ "0.61987334", "0.61626554", "0.60703915", "0.60206556", "0.58586913", "0.57093024", "0.5580755", "0.55471516", "0.55148286", "0.5499369", "0.5482052", "0.5461504", "0.5441759", "0.54299873", "0.54185647", "0.5400274", "0.5377804", "0.5348333", "0.5327999", "0.53279144", "0.5320906", "0.5318295", "0.52907765", "0.5277979", "0.5269867", "0.52485114", "0.5245621", "0.52399325", "0.5216757", "0.520968" ]
0.69672847
0
this is a function that is used to verify if the Exit edit mode button is displayed in TEAMS.
def check_TEAMS_exit_edit_mode_Button(driver = None,intervalWaitForPage = None,output = None):
    global verify, log_path
    pageLoadWaitInterval = intervalWaitForPage if intervalWaitForPage != None else 5
    if (driver == None or output == None):
        print "ERROR in check_TEAMS_exit_edit_mode_Button(): Please send webdriver, and output as arguments."
    else:
        driver.set_page_load_timeout(pageLoadWaitInterval)
        try:
            verify = 0
            #Admin Gear test
            try:
                editButton = WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.ID,"QA:CentricView:exitEditButton")))
                if editButton.is_displayed() == False:
                    output = writer("VERIFY:\texitEditButton Absent\tFAIL",output)
                elif editButton.is_displayed() == True:
                    output = writer("VERIFY:\texitEditButton Present\tPASS",output)
                    verify = 1
            except TimeoutException:
                output = writer("INFO:\tCatastrophic DOM Error",output)
            #-------------------------
        except TimeoutException:
            output = writer("INFO:\tgo to Admin iframe failed",output)
    return verify
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confirm_exit(self):\n return True", "def state_preview_exit(cfg, app, win):", "def is_edit(self):\n return self._tag == 'edit'", "def exit_check(self):\n if self.changed:\n msg = \"The current object has not been saved - would you like to exit?\"\n reply = QMessageBox.question(self.parent, 'Message', msg, QMessageBox.Yes, QMessageBox.No)\n return reply == QMessageBox.Yes\n else:\n return True", "def _test_display_up_button(self):\n return (self.product_displays.top_index > 0)", "def isExit(self):\n return self.exit", "def is_exit_command(self, event):\n return event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)", "def verify_popup(self, type):", "def IsOk(self):\r\n \r\n return self.window != None", "def state_chosen_exit(cfg, app, win):", "def state_choose_exit(cfg, app, win):", "def check_for_exit(self):\n keys = pg.key.get_pressed()\n if (keys[K_ESCAPE]):\n self._exit = True\n for event in pg.event.get():\n if event.type == QUIT:\n self._exit = True", "def is_exp_set(self):\n if self.exp_id is None:\n return False\n if self.working_dir is None:\n return False\n if self.id != str(self.Id_widget.text()).strip():\n return False\n return True", "def is_exit(self):\n # TODO: 存在意義があやしい\n return self._data_handler.is_exit()", "def isHelp():\n return (True)", "def readExitButtonStatus(self):\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp", "def exit(self):\n ans = messagebox.askokcancel('Verify exit', 'Really quit?')\n if ans:\n self._master.destroy()\n return True\n else:\n return False", "def do_exits(self, arg):\r\n global showFullExits\r\n showFullExits = not showFullExits\r\n if showFullExits:\r\n print('Showing full exit descriptions.')\r\n else:\r\n print('Showing brief exit descriptions.')", "def verifyTeamOpenClose(self):\n element = self.getElement(locator=self._userProfile_membersTable)\n value = self.getAttribute(attribute=\"class\", element=element)\n if \"show\" in value:\n self.log.info(\"Verify Team details window result: \" + str(True))\n return True\n else:\n self.log.info(\"Verify Team details window result: \" + str(False))\n return False", "def __window_confirm(self, text):\n return True", "def test_ui_menu(test):\n assert hl.test_help_ui_menu(test) == test", "def unsaved_details_exist(self):\r\n return (self.talkDetailsWidget.saveButton.isEnabled() and\r\n (self.talkDetailsWidget.titleLineEdit.text() or\r\n self.talkDetailsWidget.presenterLineEdit.text() or\r\n self.talkDetailsWidget.categoryLineEdit.text() or\r\n self.talkDetailsWidget.descriptionTextEdit.toPlainText()))", "def hasEditVariable(self, variable: Variable, /) -> bool:\n ...", "def popup(self):\r\n return self.exec_() == QDialog.Accepted", "def can_exit(self) -> bool:\n return False", "def check_cannot_edit_team_alert(driver = None,intervalWaitForPage = None,output = None):\r\n\tglobal verify, log_path\r\n\t#pageLoadWaitInterval = intervalWaitForPage if intervalWaitForPage != None else 5\r\n\t#driver.set_page_load_timeout(pageLoadWaitInterval)\r\n\tverify=0\r\n\tif (driver == None or output == None):\r\n\t\tprint \"ERROR in check_TEAMS_exit_edit_mode_Button(): Please send webdriver, and output as arguments.\"\r\n\telse:\r\n\t\ttry:\r\n\t\t\tWebDriverWait(driver, intervalWaitForPage).until(EC.alert_is_present())\r\n\t\t\tcannotEditTeamAlert = driver.switch_to_alert().dismiss()\r\n\t\t\toutput = writer(\"VERIFY:\\tDismissed the alert.\\tPASS\",output)\r\n\t\t\tverify = 
1\r\n\t\texcept (TimeoutException, NoAlertPresentException):\r\n\t\t\toutput = writer(\"VERIFY:\\tDid NOT find and dismiss Alert: You do not have permissions for this team.\\tFAIL\", output)\r\n\t\t\t#-------------------------\r\n\treturn verify", "def _enter_edit_mode(self):\n edit_mode = self.UTILS.element.getElement(DOM.DownloadManager.download_edit_button,\n \"Download edit button\", True, 10)\n edit_mode.tap()\n self.UTILS.element.waitForElements(DOM.DownloadManager.downloads_edit_header_title,\n \"Edit downloads header\")", "def exit_openbox(self):\n if self.are_you_sure(\"Bist du sicher, dass du zur Konsole zurück willst?\")=='yes':\n ret = os.system(\"openbox --exit\")", "def is_exit(self):\n return self._exit", "def can_edit(self):\n return self.state not in (\n 'scanning', 'resulted', 'cancelled', 'aborted')" ]
[ "0.6533513", "0.64115244", "0.62993443", "0.6278795", "0.627007", "0.626141", "0.61285347", "0.612151", "0.60363686", "0.6028714", "0.60132474", "0.5996961", "0.59871906", "0.59546524", "0.5943055", "0.59272546", "0.5925924", "0.5906537", "0.5845683", "0.5838273", "0.582006", "0.58041984", "0.57851005", "0.5760026", "0.57481354", "0.56862736", "0.56833375", "0.56829053", "0.567559", "0.5671534" ]
0.74778014
0
Writes all connections of a molecule into one line and returns as np array
def write_conns_in_one_line(data, N_max, n_per_conn):
    # write all connections of a molecule into one line
    data = data.groupby('molecule_name').apply(lambda x: x.values.reshape(-1))
    # pad with nans
    mol_list = []
    for i in range(len(data)):
        n = N_max * (n_per_conn+1) - len(data[i])
        mol_list.append(np.pad(data[i], (0, n), 'constant', constant_values=(0, np.nan)))
    # build np array
    data = np.vstack(mol_list)
    del mol_list
    # remove molecule names
    data = np.delete(data, [(n_per_conn+1)*i for i in range(N_max)], 1)
    # convert to np array
    return data.astype(float)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connectivity_matrix(self):\n # TODO: make this more memory efficient by ordering i,j in code when needed.\n temp = []\n for i in range(self.n_atom):\n for j in range(i+1, self.n_atom):\n if self.bond(i, j):\n temp.append([i+1, j+1])\n self.connect = np.asarray(temp)", "def _write_conne(parameters):\n from ._common import connections\n\n # Reorder connections\n if parameters[\"connections_order\"] is not None:\n order = parameters[\"connections_order\"]\n else:\n order = parameters[\"connections\"].keys()\n\n # Format\n label_length = len(max(parameters[\"connections\"], key=len)) // 2\n fmt = block_to_format[\"CONNE\"]\n fmt = str2format(fmt[label_length])\n\n out = []\n for k in order:\n data = deepcopy(connections)\n data.update(parameters[\"connections\"][k])\n\n values = [\n k,\n data[\"nseq\"],\n data[\"nadd\"][0] if data[\"nadd\"] is not None else None,\n data[\"nadd\"][1] if data[\"nadd\"] is not None else None,\n data[\"permeability_direction\"],\n data[\"nodal_distances\"][0],\n data[\"nodal_distances\"][1],\n data[\"interface_area\"],\n data[\"gravity_cosine_angle\"],\n data[\"radiant_emittance_factor\"],\n ]\n out += write_record(values, fmt)\n\n return out", "def connect_fully(self, num_atoms ,coordinate):\n # Initialize all edges: no self-edges\n \n adjacency = {}\n for i in range(num_atoms):\n for j in range(num_atoms):\n if i != j and self.euler_distance(coordinate[i],coordinate[j]) < 8:\n adjacency[(i, j)] = 1\n\n # Convert to numpy arrays\n \n src = []\n dst = []\n w = []\n for edge, weight in adjacency.items():\n src.append(edge[0])\n dst.append(edge[1])\n w.append(weight)\n\n return np.array(src), np.array(dst), np.array(w)", "def _raw_to_arrays(self):\n self.update_geometry()\n if isinstance(self, Molecule):\n # normal qcdb.Molecule\n geom = self.geometry(np_out=True)\n else:\n # psi4.core.Molecule\n geom = np.array(self.geometry())\n mass = np.asarray([self.mass(at) for at in range(self.natom())])\n elem = np.asarray([self.symbol(at) for at in range(self.natom())])\n elez = np.asarray([self.Z(at) for at in range(self.natom())])\n uniq = np.asarray(\n [hashlib.sha1((str(elem[at]) + str(mass[at])).encode('utf-8')).hexdigest() for at in range(self.natom())])\n\n return geom, mass, elem, elez, uniq", "def get_conn_matrix_vector(self):\n\n vect = []\n for line in sorted(self.connection_matrix):\n for item in self.connection_matrix[line]:\n vect.append(item)\n\n return vect", "def str_conn_matrix(self):\n\n st = 'Connection Matrix:\\n'\n for line in self.connection_matrix:\n st += line + \"= [ \"\n for j in range(len(self.connection_matrix[line])):\n st += str(self.connection_matrix[line][j]) + \" \"\n\n st += \"], \"\n\n return st", "def get_conect_records(self) -> List[str]:\n conect_records = []\n adjacency_matrix = self._structure.adjacency_matrix\n for x_serial, row in enumerate(adjacency_matrix, 1):\n connections = np.flatnonzero(row) + 1\n fmt = \"{: 5d}\" if x_serial < 100000 else \"{: 5X}\"\n first_atom = fmt.format(x_serial)\n connection_list = [\"CONECT\", first_atom]\n for connection in connections:\n fmt = \"{: 5d}\" if connection < 100000 else \"{: 5X}\"\n connection_list.append(fmt.format(connection))\n connection_list.append(\"\\n\")\n conect_records.append(\"\".join(connection_list))\n return conect_records", "def read_conn(path, simulation):\n with h5py.File(path+simulation+'-conn','r') as f: \n print(path+simulation+'-conn' )\n out_deg=np.array(f['conn']) \n return out_deg", "def plt_connecting_lines():\n\n for i in range(0, 
Molecule.connection_count):\n tmp1 = Molecule.right_endpt[Molecule.left_connection[i] - 1]\n tmp2 = Molecule.left_endpt[Molecule.right_connection[i] - 1]\n tmp3 = Molecule.energy[Molecule.left_connection[i] - 1]\n tmp4 = Molecule.energy[Molecule.right_connection[i] - 1]\n\n plt.plot([tmp1, tmp2], [tmp3, tmp4], color=PlotParameter.connection_line_color,\n lw=PlotParameter.connection_line_width, linestyle='--')\n\n return None", "def numpy_from_output_graph(input_file, sep=\" \"):\n # convert input from csv file to numpy matrix, then return\n out = nx.read_weighted_edgelist(\n input_file, delimiter=sep\n ) # TODO: make sure this outputs the same thing regardless of delimiter\n out = nx.to_numpy_matrix(out)\n np.fill_diagonal(out, 0) # for staging branch\n return np.array(out)", "def format_molecule_for_numpy(self, npobj=True):\n factor = 1.0 if self.PYunits == 'Angstrom' else psi_bohr2angstroms\n self.update_geometry()\n\n # TODO fn title is format_mol... but return args not compatible\n geo = []\n for i in range(self.natom()):\n [x, y, z] = self.atoms[i].compute()\n geo.append([self.Z(i), x * factor, y * factor, z * factor])\n\n nparr = np.array(geo)\n return nparr if npobj else np.array_repr(nparr)", "def config_connection_matrix(self):\n for leg in self.legs.values():\n for m in leg[\"muscles\"]:\n if \"brain_sig\" and \"name\" in m:\n self.connection_matrix[m[\"name\"]] = [0] * self.brain[\"n_osc\"]\n self.connection_matrix[m[\"name\"]][m[\"brain_sig\"] - 1] = 1.", "def exportConnections(source=None, filePath=None, disc=False):\n connections = {}\n connections[\"joints\"] = []\n connections[\"attrs\"] = []\n dm_nodes = []\n if not source:\n source = pm.selected()\n for x in source:\n if not x.name().startswith(\"blend_\"):\n connections[\"joints\"].append(x.name())\n attrs_list = []\n for chn in SRT_CHANNELS:\n at = x.attr(chn)\n at_cnx = pm.listConnections(\n at, p=True, type=\"mgear_matrixConstraint\")\n if not at_cnx:\n at_cnx = pm.listConnections(\n at, p=True, type=\"decomposeMatrix\")\n attrs_list.append(at_cnx)\n\n parentInv_attr = pm.listConnections(\n x.parentInverseMatrix[0], d=True, p=True)\n attrs_list.append(parentInv_attr)\n\n attrs_list_checked = []\n for at in attrs_list:\n if at:\n attrs_list_checked.append(at[0].name())\n dm_nodes.append(at[0].node())\n else:\n attrs_list_checked.append(None)\n\n connections[\"attrs\"].append(attrs_list_checked)\n\n data_string = json.dumps(connections, indent=4, sort_keys=True)\n if not filePath:\n filePath = pm.fileDialog2(fileMode=0,\n fileFilter=' Shifter joint cnx matrix'\n ' .jmm (*%s)' % \".jmm\")\n if not filePath:\n return\n if not isinstance(filePath, string_types):\n filePath = filePath[0]\n\n if connections[\"joints\"]:\n with open(filePath, 'w') as f:\n f.write(data_string)\n\n if disc:\n disconnect(connections)\n # we need to return the decompose matrix nodes to track it at export time.\n return set(dm_nodes)", "def add_pconn(self):\n self.use_pconn= True\n self.pconn = []\n for i,c in enumerate(self.conn):\n atoms_pconn = []\n atoms_image = []\n for ji, j in enumerate(c):\n # If an atom or vertex is connected to another one multiple times (in an image), this\n # will be visible in the self.conn attribute, where the same neighbour will be listed\n # multiple times.\n # Sometimes, the distances are a bit different from each other, and in this case, we\n # have to increase the threshold, until the get_distvec function will find all imgis.\n n_conns = c.count(j)\n t = 0.01\n while True:\n d,r,imgi = 
self.get_distvec(i,j,thresh=t)\n t += 0.01\n if n_conns == len(imgi):\n break\n if len(imgi) == 1:\n # only one neighbor .. all is fine\n atoms_pconn.append(images[imgi[0]])\n atoms_image.append(imgi[0])\n else:\n # we need to assign an image to each connection\n # if an atom is connected to another atom twice this means it must be another\n # image\n for ii in imgi:\n # test if this image is not used for this atom .. then we can use it\n if atoms_image.count(ii)==0:\n atoms_image.append(ii)\n atoms_pconn.append(images[ii])\n else:\n # ok, we have this image already\n use_it = True\n #print(c, \"=>\", j)\n #print(atoms_image)\n for k, iii in enumerate(atoms_image):\n #print('k',k)\n if (iii == ii) and (c[k] == j): use_it=False\n if use_it:\n atoms_image.append(ii)\n atoms_pconn.append(images[ii])\n self.pconn.append(atoms_pconn)\n #if len(atoms_pconn) != len(c): print(\"AOSUHDAPUFHPOUFHPWOUFHPOUDHSPUODHASIUDHAUSIDHSD\")\n return\n\n # 'na',lower(label),xyz,i,j)", "def construct_fast_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n coord_list_tree = scipy.spatial.cKDTree(coord_list)\n for j, data in enumerate(coord_list):\n '''save nodes which are in range'''\n connections_ckd = coord_list_tree.query_ball_point(data, radie)\n for i in connections_ckd:\n #only save upper half of the matrix\n if i > j:\n #save the connection\n connection.append([j, i])\n #save the relative distance of the nodes\n connection_distance.append(np.hypot(coord_list[i,0]-data[0], coord_list[i,1]-data[1]))\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n\n\n return connection, connection_distance", "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the y component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[2])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the 
data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the z component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[1])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "def extract_diagram(self):\n nodes = []\n edges = []\n \n for clump in self.clumps:\n new_nodes, new_edges = clump.get_diagram_representation()\n nodes.extend(new_nodes)\n edges.extend(new_edges)\n #nodes.append(backend.JunctionNode(clump))\n # TODO: move to Tunnel.get_diagram_representation()\n for tunnel in self.tunnels:\n# print tunnel\n edges.append(TunnelEdge(tunnel))\n return nodes, edges", "def writeline(self):\n if self.record == 'ATOM' and not self.atom.startswith('H') and len(self.atom) < 4:\n atom = ' {0}'.format(self.atom)\n else:\n atom = self.atom\n aline = (\n \"{0:6s} {1:5d} {2:5s} {3} {4}{5:5d} {6:10.5f}{7:10.5f}\"\n \"{8:10.5f} {9:5s}{10:3d}{11:2d} {12:8.5f} {13} 0\\n\"\n ).format(\n self.record, self.natom, atom, self.res, self.chain, self.nres,\n self.x, self.y, self.z, self.fftype, self.nbond, self.nlonepair,\n self.charge, self.fixed\n )\n cline = \"CONECT{0:6d}{1}\\n\".format(\n self.natom,\n ''.join('{0:6d}'.format(num) for num in self.connections)\n )\n return aline, cline", "def send_coords(self) -> np.ndarray:\n natom = len(self.molecule.geometry)\n\n coords = np.reshape(self.molecule.geometry, (3 * natom))\n MDI_Send(coords, 3 * natom, MDI_DOUBLE, self.comm)\n\n return coords", "def generate_full_adj(self):\n edges = np.zeros(shape=(self.n_balls, self.n_balls))\n row_idx = 0 # start filling adjacency mat from root node\n col_idx = 1 # skip the root node and start from 2nd node\n for l in range(self.nl):\n for n in range(self.nn[l]):\n edges[row_idx, col_idx:col_idx + self.nc[l]] = 1\n # Increase counters after filling connections for a parent node\n col_idx += self.nc[l]\n row_idx += 1\n return edges", "def construct_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n for j, data in enumerate(coord_list):\n '''Calculate the relative distance of the nodes'''\n distance = np.hypot(coord_list[:,0]-data[0], coord_list[:,1]-data[1])\n '''save nodes which are in range'''\n #for i, data in enumerate(distance):\n for i in range(j+1, len(distance)):\n data = distance[i]\n if data < radie:\n connection.append([j, i])\n connection_distance.append(data)\n\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n return connection, connection_distance", "def get_oc_oc_connections(self, random_conn=False):\n\n print \"Drawing OC - OC connections .... 
\"\n abstract_weights_non_negative = np.loadtxt(self.params['oc_oc_abstract_weights_fn'])\n abstract_weights = self.take_log_weights(abstract_weights_non_negative)\n if random_conn:\n rnd.shuffle(abstract_weights)\n rnd.seed(self.params['random_oc_oc_seed'])\n np.savetxt(self.params['oc_oc_abstract_weights_fn'].rsplit('.dat')[0] + '_random.dat', abstract_weights)\n\n assert (abstract_weights[:,0].size == self.params['n_hc'] * self.params['n_mc'])\n assert (abstract_weights[0,:].size == self.params['n_hc'] * self.params['n_mc'])\n w_max_abstract = abstract_weights.max()\n w_min_abstract = abstract_weights.min()\n\n w_pyr_pyr_global_max = self.params['w_pyr_pyr_global_max']\n w_pyr_rsnp_max = self.params['w_pyr_rsnp_max']\n output_pyr_pyr = \"\"\n line_cnt_pyr_pyr = 0\n output_pyr_rsnp = \"\"\n line_cnt_pyr_rsnp = 0\n cnt_discarded_conn = 0\n for src_mc in xrange(abstract_weights[:, 0].size):\n for tgt_mc in xrange(abstract_weights[:, 0].size):\n if (src_mc != tgt_mc):\n w_in = abstract_weights[src_mc, tgt_mc]\n if (w_in > 0): # draw several pyr -> pyr connections between the two MC\n src_tgt_dict = {} # src_tgt_dict[src_gid] = [tgt_gid_0, ...] multiple connections between the same source and the same target are forbiddden\n w_out = (w_in / w_max_abstract) * w_pyr_pyr_global_max\n src_pyrs = rnd.randint(0, self.params['n_pyr_per_mc'], self.params['n_pyr_pyr_between_2mc'])\n for src in np.unique(src_pyrs):\n src_tgt_dict[src] = []\n for src in src_pyrs:\n src_pyr = src + src_mc * self.params['n_pyr_per_mc'] + self.params['pyr_offset']\n tgt_pyr = rnd.randint(0, self.params['n_pyr_per_mc']) + tgt_mc * self.params['n_pyr_per_mc'] + self.params['pyr_offset']\n src_tgt_dict[src].append(tgt_pyr)\n\n # remove multiple instances of the same src-tgt connection\n for src in src_pyrs:\n n1 = len(src_tgt_dict[src])\n src_tgt_dict[src] = np.unique(src_tgt_dict[src]).tolist()\n cnt_discarded_conn += n1 - len(src_tgt_dict[src])\n for tgt_pyr in src_tgt_dict[src]:\n w_noise = self.draw_connection(1.0, w_out, noise=self.params['w_pyr_pyr_global_sigma'])\n if (w_noise > self.params['weight_threshold']):\n output_pyr_pyr += \"%d %d %.6e\\n\" % (src_pyr, tgt_pyr, w_noise)\n line_cnt_pyr_pyr += 1\n\n elif (w_in < 0):\n w_out = (w_in / w_min_abstract) * w_pyr_rsnp_max\n src_pyrs = self.get_rnd_targets(self.params['n_pyr_per_mc'], self.params['n_pyr_rsnp_between_2mc']) # avoid double connections\n for src in src_pyrs:\n src_pyr = src + src_mc * self.params['n_pyr_per_mc'] + self.params['pyr_offset'] \n tgt_rsnp = rnd.randint(0, self.params['n_rsnp_per_mc']) + tgt_mc * self.params['n_rsnp_per_mc'] + self.params['rsnp_offset']\n w_noise = self.draw_connection(1.0, w_out, noise=self.params['w_pyr_rsnp_sigma'])\n if (w_noise > self.params['weight_threshold']):\n output_pyr_rsnp += \"%d %d %.6e\\n\" % (src_pyr, tgt_rsnp, w_noise)\n line_cnt_pyr_rsnp += 1\n\n print 'Number of discarded pyr-pyr connections:', cnt_discarded_conn\n print 'Number of pyr-rsnp connections:', line_cnt_pyr_rsnp\n print 'Number of pyr-pyr connections:', line_cnt_pyr_pyr\n print 'Number of OC-OC connections:', line_cnt_pyr_pyr + line_cnt_pyr_rsnp\n output_fn_pyr_pyr = self.params['conn_list_pyr_pyr']\n output_file_pyr_pyr = open(output_fn_pyr_pyr, 'w')\n output_file_pyr_pyr.write(\"%d\\t%d\\n\" % (line_cnt_pyr_pyr, 3))\n output_file_pyr_pyr.write(output_pyr_pyr)\n output_file_pyr_pyr.close()\n\n output_fn_pyr_rsnp = self.params['conn_list_pyr_rsnp']\n output_file_pyr_rsnp = open(output_fn_pyr_rsnp, 'w')\n output_file_pyr_rsnp.write(\"%d\\t%d\\n\" 
% (line_cnt_pyr_rsnp, 3))\n output_file_pyr_rsnp.write(output_pyr_rsnp)\n output_file_pyr_rsnp.close()", "def stordir_to_connectivity(self, stordir):\n\n output,suffix = [],'/chargemol_analysis/final/bonds.json'\n with open(stordir+suffix,'r') as f: bond_dicts = json.load(f)\n n = len(self.stordir_to_atoms(stordir))\n for i in range(n):\n newatom = []\n for bond in bond_dicts:\n if bond['fromNode'] == i:\n if bond['bondorder'] > 0.01:\n newatom.append((bond['bondorder'],bond['distance'],bond['toNode']))\n\n sorted_newatom = list(reversed(sorted(newatom))) # bonds in decreasing strength\n maxind = min(12,len(newatom)) # take UP TO 12 bonds\n out_list = [(n,b,d) for b,d,n in sorted_newatom[:maxind]]\n output.append(np.array(out_list))\n return output", "def connect_cells(self):\n self.nclist = []\n N = self._N\n for i in range(N):\n src = self.cells[i]\n tgt_syn = self.cells[(i+1)%N].synlist[0]\n nc = src.connect2target(tgt_syn)\n nc.weight[0] = self.syn_w\n nc.delay = self.syn_delay\n\n nc.record(self.t_vec, self.id_vec, i)\n self.nclist.append(nc)", "def report(self):\n s = \"Conn %s\" % (self.shape,)\n if hasattr(self,'eltype'):\n s += \", eltype=%s\" % self.eltype\n s += '\\n'\n return s + ndarray.__str__(self)", "def connectedLineElems(elems):\n elems = Connectivity(elems).copy() # make copy to avoid side effects\n parts = []\n while elems.size != 0:\n loop = findConnectedLineElems(elems)\n parts.append(loop[(loop!=-1).any(axis=1)])\n elems = elems[(elems!=-1).any(axis=1)]\n return parts", "def __repr__(self):\n weight = self.weight * self.connectivity\n reprio = io.StringIO()\n remain_lines = self.dim_node\n for row in map(iter, weight):\n reprio.write('{:6.1f}'.format(next(row)))\n for ele in row:\n reprio.write(' {:6.1f}'.format(ele))\n if remain_lines > 1:\n reprio.write('\\n')\n remain_lines -= 1\n return reprio.getvalue()", "def _flatten_to_arrays_and_conns(cls, network_model):\n component_arrays = {}\n connection_groups = {}\n # Create flattened component with all synapses combined with the post-\n # synaptic cell dynamics using MultiDynamics\n for pop in network_model.populations:\n # Get all the projections that project to/from the given population\n receiving = [p for p in network_model.projections\n if (pop == p.post or\n (p.post.nineml_type == 'Selection' and\n pop in p.post.populations))]\n sending = [p for p in network_model.projections\n if (pop == p.pre or\n (p.pre.nineml_type == 'Selection' and\n pop in p.pre.populations))]\n # Create a dictionary to hold the cell dynamics and any synapse\n # dynamics that can be flattened into the cell dynamics\n # (i.e. linear ones).\n sub_components = {cls.CELL_COMP_NAME: pop.cell}\n # All port connections between post-synaptic cell and linear\n # synapses and port exposures to pre-synaptic cell\n internal_conns = []\n exposures = []\n\n def add_exposures(exposures_to_add):\n \"\"\"\n Adds exposures to a \"set\" of exposures. 
If 9ML objects were\n hashable could use a 'set'.\n \"\"\"\n for pe in exposures_to_add:\n if pe not in exposures:\n exposures.append(pe)\n\n synapses = []\n connection_property_sets = []\n # FIXME: There has to be a way of avoiding this name clash\n if any(p.name == cls.CELL_COMP_NAME for p in receiving):\n raise Pype9RuntimeError(\n \"Cannot handle projections named '{}' (why would you \"\n \"choose such a silly name?;)\".format(cls.CELL_COMP_NAME))\n for proj in receiving:\n # Flatten response and plasticity into single dynamics class.\n # TODO: this should be no longer necessary when we move to\n # version 2 as response and plasticity elements will be\n # replaced by a synapse element in the standard. It will need\n # be copied at this point though as it is modified\n synapse, proj_conns = cls._flatten_synapse(proj)\n # Get all connections to/from the pre-synaptic cell\n pre_conns = [pc for pc in proj_conns\n if 'pre' in (pc.receiver_role, pc.sender_role)]\n # Get all connections between the synapse and the post-synaptic\n # cell\n post_conns = [pc for pc in proj_conns if pc not in pre_conns]\n # Mapping of port connection role to sub-component name\n role2name = {'post': cls.CELL_COMP_NAME}\n # If the synapse is non-linear it can be combined into the\n # dynamics of the post-synaptic cell.\n try:\n if not synapse.component_class.is_linear():\n raise Pype9UnflattenableSynapseException()\n role2name['synapse'] = proj.name\n # Extract \"connection weights\" (any non-singular property\n # value) from the synapse properties\n connection_property_sets.extend(\n cls._extract_connection_property_sets(synapse,\n proj.name))\n # Add the flattened synapse to the multi-dynamics sub\n # components\n sub_components[proj.name] = synapse.clone()\n # Convert port connections between synpase and post-\n # synaptic cell into internal port connections of a multi-\n # dynamics object\n internal_conns.extend(pc.assign_names_from_roles(role2name)\n for pc in post_conns)\n # Expose ports that are needed for the pre-synaptic\n # connections\n except Pype9UnflattenableSynapseException:\n # All synapses (of this type) connected to a single post-\n # synaptic cell cannot be flattened into a single component\n # of a multi- dynamics object so an individual synapses\n # must be created for each connection.\n synapse_conns = [\n pc.append_namespace_from_roles(\n {'post': cls.CELL_COMP_NAME,\n 'pre': cls.CELL_COMP_NAME,\n 'synapse': proj.name}) for pc in post_conns]\n synapses.append(SynapseProperties(proj.name, synapse,\n synapse_conns))\n # Add exposures to the post-synaptic cell for connections\n # from the synapse\n add_exposures(chain(*(\n pc.expose_ports({'post': cls.CELL_COMP_NAME})\n for pc in post_conns)))\n # Add exposures for connections to/from the pre synaptic cell\n add_exposures(\n chain(*(pc.expose_ports(role2name)\n for pc in pre_conns)))\n role2name['pre'] = cls.CELL_COMP_NAME\n # Add exposures for connections to/from the pre-synaptic cell in\n # populations.\n for proj in sending:\n # Not required after transition to version 2 syntax\n synapse, proj_conns = cls._flatten_synapse(proj)\n # Add send and receive exposures to list\n add_exposures(chain(*(\n pc.expose_ports({'pre': cls.CELL_COMP_NAME})\n for pc in proj_conns)))\n # Add all cell ports as multi-component exposures that aren't\n # connected internally in case the user would like to save them or\n # play data into them\n internal_cell_ports = set(chain(\n (pc.send_port_name for pc in internal_conns\n if pc.sender_name == 
cls.CELL_COMP_NAME),\n (pc.receive_port_name for pc in internal_conns\n if pc.receiver_name == cls.CELL_COMP_NAME)))\n add_exposures(\n BasePortExposure.from_port(p, cls.CELL_COMP_NAME)\n for p in pop.cell.ports if p.name not in internal_cell_ports)\n dynamics_properties = MultiDynamicsProperties(\n name=pop.name + '_cell', sub_components=sub_components,\n port_connections=internal_conns,\n port_exposures=exposures)\n component = MultiDynamicsWithSynapsesProperties(\n dynamics_properties.name,\n dynamics_properties, synapse_propertiess=synapses,\n connection_property_sets=connection_property_sets)\n array_name = pop.name\n component_arrays[array_name] = ComponentArray9ML(\n array_name, pop.size, component)\n selections = {}\n for sel in network_model.selections:\n selections[sel.name] = Selection9ML(\n sel.name, Concatenate9ML(component_arrays[p.name]\n for p in sel.populations))\n arrays_and_selections = dict(\n chain(iter(component_arrays.items()), iter(selections.items())))\n # Create ConnectionGroups from each port connection in Projection\n for proj in network_model.projections:\n _, proj_conns = cls._flatten_synapse(proj)\n # Get all connections to/from the pre-synaptic cell\n pre_conns = [pc for pc in proj_conns\n if 'pre' in (pc.receiver_role, pc.sender_role)]\n # Create a connection group for each port connection of the\n # projection to/from the pre-synaptic cell\n for port_conn in pre_conns:\n ConnectionGroupClass = (\n EventConnectionGroup9ML\n if port_conn.communicates == 'event'\n else AnalogConnectionGroup9ML)\n if len(pre_conns) > 1:\n name = ('__'.join((proj.name,\n port_conn.sender_role,\n port_conn.send_port_name,\n port_conn.receiver_role,\n port_conn.receive_port_name)))\n else:\n name = proj.name\n if port_conn.sender_role == 'pre':\n connectivity = proj.connectivity\n # If a connection from the pre-synaptic cell the delay\n # is included\n # TODO: In version 2 all port-connections will have\n # their own delays\n delay = proj.delay\n else:\n # If a \"reverse connection\" to the pre-synaptic cell\n # the connectivity needs to be inverted\n connectivity = InversePyNNConnectivity(\n proj.connectivity)\n delay = 0.0 * un.s\n # Append sub-component namespaces to the source/receive\n # ports\n ns_port_conn = port_conn.append_namespace_from_roles(\n {'post': cls.CELL_COMP_NAME,\n 'pre': cls.CELL_COMP_NAME,\n 'synapse': proj.name})\n conn_group = ConnectionGroupClass(\n name,\n arrays_and_selections[proj.pre.name],\n arrays_and_selections[proj.post.name],\n source_port=ns_port_conn.send_port_name,\n destination_port=(ns_port_conn.receive_port_name),\n connectivity=connectivity,\n delay=delay)\n connection_groups[conn_group.name] = conn_group\n return component_arrays, connection_groups, selections", "def _create_arrays(network):\n\n # Vertices\n\n n = network.number_of_vertices()\n B = zeros((n, 3))\n P = zeros((n, 3))\n X = zeros((n, 3))\n S = zeros((n, 3))\n V = zeros((n, 3))\n k_i = network.key_index()\n for key in network.vertices():\n i = k_i[key]\n vertex = network.vertex[key]\n B[i, :] = vertex['B']\n P[i, :] = vertex['P']\n X[i, :] = [vertex[j] for j in 'xyz']\n Pn = normrow(P)\n\n # Edges\n\n m = len(network.edges())\n E = zeros((m, 1))\n A = zeros((m, 1))\n s0 = zeros((m, 1))\n l0 = zeros((m, 1))\n u = []\n v = []\n ind_c = []\n ind_t = []\n edges = []\n uv_i = network.uv_index()\n for ui, vi in network.edges():\n i = uv_i[(ui, vi)]\n edge = network.edge[ui][vi]\n edges.append([k_i[ui], k_i[vi]])\n u.append(k_i[ui])\n v.append(k_i[vi])\n E[i] = edge['E']\n A[i] = 
edge['A']\n s0[i] = edge['s0']\n if edge['l0']:\n l0[i] = edge['l0']\n else:\n l0[i] = network.edge_length(ui, vi)\n if edge['ct'] == 'c':\n ind_c.append(i)\n elif edge['ct'] == 't':\n ind_t.append(i)\n f0 = s0 * A\n ks = E * A / l0\n\n # Arrays\n\n C = connectivity_matrix(edges, 'csr')\n Ct = C.transpose()\n M = mass_matrix(Ct, E, A, l0, f0, c=1, tiled=False)\n rows, cols, vals = find(Ct)\n\n return B, P, Pn, S, X, V, f0, l0, ind_c, ind_t, C, Ct, ks, array(u), array(v), M, rows, cols, vals, E, A" ]
[ "0.5919387", "0.58005446", "0.55780476", "0.553181", "0.5501123", "0.54834193", "0.5477078", "0.5464846", "0.533232", "0.52614045", "0.5224273", "0.52067107", "0.5179197", "0.5155321", "0.51372504", "0.5133801", "0.51067024", "0.51050067", "0.50948536", "0.5088741", "0.50608027", "0.5032769", "0.50324", "0.50102365", "0.5000231", "0.49930158", "0.4975115", "0.49623442", "0.49576896", "0.49553755" ]
0.6940061
0
Standardizes scc by type and saves everything
def standardize_and_save_train(data, scc, type_data, N_max, n_per_conn, path):
    size = (N_max*n_per_conn - np.isnan(data).sum(axis=1)) // n_per_conn
    # remove size 1 molecules
    data = data[size!=1]
    scc = scc[size!=1]
    type_data = type_data[size!=1]
    size = size[size!=1]
    # standardize
    type_mean = []
    type_std = []
    for i in range(8):
        type_mean.append(scc[type_data == i].mean())
        type_std.append(scc[type_data == i].std())
        scc[type_data == i] -= type_mean[i]
        scc[type_data == i] /= type_std[i]
    # convert to Tensor
    data = torch.Tensor(data)
    scc = torch.Tensor(scc)
    type_data = torch.from_numpy(type_data.astype(np.int32))
    size = torch.from_numpy(size.astype(np.int32))
    # save
    torch.save(data, os.path.join(path, 'train.pt'))
    torch.save(scc, os.path.join(path, 'scc.pt'))
    torch.save(type_data, os.path.join(path, 'train_type.pt'))
    torch.save(size, os.path.join(path, 'size.pt'))
    # save mean and std of scc
    np.savetxt(os.path.join(path, 'scc_mean_std.csv'), np.array([type_mean, type_std]), delimiter=',')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _SaveSuspectedCLs(\n suspected_cls, master_name, builder_name, build_number, failure_type):\n for suspected_cl in suspected_cls:\n suspected_cl_util.UpdateSuspectedCL(\n suspected_cl['repo_name'], suspected_cl['revision'],\n suspected_cl['commit_position'], analysis_approach_type.HEURISTIC,\n master_name, builder_name, build_number, failure_type,\n suspected_cl['failures'], suspected_cl['top_score'])", "def csc():\n endcaps = [1,2]\n disks = [1,2,3,4]\n rings = {1:[1,2,3], # different rings for different disks\n 2:[1,2], \n 3:[1,2],\n 4:[1,2]}\n\n csc_info = {\n \"endcaps\":endcaps,\n \"disks\": disks,\n \"rings\": rings}\n\n return csc_info", "def convert_to_coverage_model():\r\n logging.info(\"converting non-coverage model to coverage model..\")\r\n\r\n # initialize an entire coverage model from scratch\r\n sess = tf.Session(config=util.get_config())\r\n print(\"initializing everything...\")\r\n sess.run(tf.global_variables_initializer())\r\n\r\n # load all non-coverage weights from checkpoint\r\n saver = tf.train.Saver([v for v in tf.global_variables() if \"coverage\" not in v.name and \"Adagrad\" not in v.name])\r\n print(\"restoring non-coverage variables...\")\r\n curr_ckpt = util.load_ckpt(saver, sess)\r\n print(\"restored.\")\r\n\r\n # save this model and quit\r\n new_fname = curr_ckpt + '_cov_init'\r\n print(\"saving model to %s...\" % (new_fname))\r\n new_saver = tf.train.Saver() # this one will save all variables that now exist\r\n new_saver.save(sess, new_fname)\r\n print(\"saved.\")\r\n exit()", "def translate_scc(scc_content, brackets='[]'):\n opening_bracket, closing_bracket = brackets if brackets else ('', '')\n scc_elements = set(scc_content.split())\n for elem in scc_elements:\n name = COMMAND_LABELS.get(elem, ALL_CHARACTERS.get(elem))\n # If a 2 byte command was not found, try retrieving 1 byte characters\n if not name:\n char1 = ALL_CHARACTERS.get(elem[:2])\n char2 = ALL_CHARACTERS.get(elem[2:])\n if char1 is not None and char2 is not None:\n name = f\"{char1}{char2}\"\n if name:\n scc_content = scc_content.replace(\n elem, f\"{opening_bracket}{name}{closing_bracket}\")\n return scc_content", "def _inspec_report_to_scc(report, scc_source):\n global scc\n if not scc:\n scc = SecurityCenterClient()\n\n # get the set of existing SCC Findings for this source\n current_findings = _get_findings(scc, scc_source)\n\n # InSpec tests are grouped into profiles and controls\n for prof in report['profiles']:\n controls = prof['controls']\n for control in controls:\n _add_control_findings(scc, scc_source, control, current_findings)", "def save(self,fout):\n\n # only process 0 should save\n if COMM_WORLD.rank == 0:\n\n # The file format is:\n # L,nterms,masks,signs,coefficients\n # where each is just a binary blob, one after the other.\n\n # do this first so that we haven't already created the file if\n # it fails for some reason\n msc = self.get_MSC()\n\n with open(fout,mode='wb') as f:\n\n # write the chain length to the file. 
This is the only parameter\n # that we save other than the MSC representation.\n L = self.L\n if L is None:\n raise ValueError('L must be set before saving to disk.')\n\n # cast it to the type that C will be looking for\n int_t = msc.dtype[0].type\n L = int_t(L)\n\n f.write(L.tobytes())\n\n # write out the length of the MSC representation\n size = int_t(msc.size)\n f.write(size.tobytes())\n\n f.write(msc['masks'].tobytes())\n f.write(msc['signs'].tobytes())\n f.write(msc['coeffs'].tobytes())\n\n COMM_WORLD.barrier()", "def main(s):\n w = s.wildcards\n i = s.input\n\n sample = f\"{w.sample}_{w.tumor}-{w.normal}\"\n show_output(f\"Loading {i.CNV} of sample {sample} for ASCAT conversion\")\n cnv_df = pd.read_csv(i.CNV, sep=\"\\t\", compression=\"gzip\")\n write_ASCAT(cnv_df, sample=sample, outpath=f\"CNV/{w.sample}/pre\")", "def save_crops(self, workspace):\n objects_name = self.objects_name.value\n objects = workspace.object_set.get_objects(objects_name)\n bit_depth = self.bit_depth.value\n if self.input_type == IF_IMAGE:\n image_name = self.image_name.value\n image = workspace.image_set.get_image(image_name)\n pixels = image.pixel_data\n elif self.input_type == IF_OBJECTS:\n obj_name = self.input_object_name.value\n inp_obj = workspace.object_set.get_objects(obj_name)\n pixels = inp_obj.get_segmented()\n else:\n raise (\"invalid choice of input\")\n\n filename = self.get_filename(workspace)\n object_extension = self.object_extension.value\n if filename is None: # failed overwrite check\n return\n\n slices = ndi.find_objects(objects.segmented)\n slices, labels = zip(\n *[(s, label) for label, s in enumerate(slices) if s is not None]\n )\n\n ext_slices = [\n self._extend_slice_touple(\n sl, object_extension, [pixels.shape[0], pixels.shape[1]]\n )\n for sl in slices\n ]\n out_folder = os.path.dirname(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n # the stack for imctools needs to be cxy, while it is xyc in cp\n if len(pixels.shape) == 2:\n stack = pixels.reshape([1] + list(pixels.shape))\n else:\n stack = np.rollaxis(pixels, 2, 0)\n\n # fix the dtype\n if bit_depth == BIT_DEPTH_8:\n stack = skimage.util.img_as_ubyte(stack)\n elif bit_depth == BIT_DEPTH_16:\n stack = skimage.util.img_as_uint(stack)\n elif bit_depth == BIT_DEPTH_FLOAT:\n stack = skimage.util.img_as_float(stack).astype(np.float32)\n\n self._save_object_stack(out_folder, basename, stack, ext_slices, labels)\n self.save_filename_measurements(workspace)\n if self.show_window:\n workspace.display_data.wrote_image = True", "def pre_processing(self, whole_dataset, type=None):\n # for svm\n X = whole_dataset\n if self._scaler == None:\n self._scaler = preprocessing.StandardScaler().fit(X)\n else:\n basic.outputlogMessage('warning, StandardScaler object already exist, this operation will overwrite it')\n self._scaler = preprocessing.StandardScaler().fit(X)\n # save\n joblib.dump(self._scaler, scaler_saved_path)", "def convert_to_COCO_type(\n input_list: Sequence[np.ndarray],\n height: int,\n width: int,\n segmentation_type: str,\n ) -> Dict:\n if segmentation_type == \"uncompressed_RLE\":\n output_list = [\n MetricsInstanceSegmentaion.convert_uncompressed_RLE_COCO_type(\n element, height, width\n )\n for element in input_list\n ]\n elif segmentation_type == \"polygon\":\n output_list = [\n MetricsInstanceSegmentaion.convert_polygon_COCO_type(\n element, height, width\n )\n for element in input_list\n ]\n elif segmentation_type == \"pixel_map\":\n output_list = [\n 
MetricsInstanceSegmentaion.convert_pixel_map_COCO_type(element)\n for element in input_list\n ]\n\n return output_list", "def scc (self, reset=False):\n if reset or (self.__scc is None):\n self.tarjan(reset)\n return self.__scc", "def write_compact(self, fout):\n for ignored in self.ignored_rules:\n fout.write(ignored)\n self.sort_decls()\n for (pos, (ss, pp)) in enumerate(self.cliques):\n fout.write(','.join(sorted(ss)))\n fout.write('{')\n fout.write(';'.join(pp))\n fout.write('}')", "def make_struc(alat,atom,clat):\r\n if atom == 'Cu' or atom == 'Au':\r\n fcccell = bulk(atom, 'fcc', a=alat)\r\n write('fcc.cif', fcccell)\r\n print(fcccell, fcccell.get_atomic_numbers())\r\n structure = Struc(ase2struc(fcccell))\r\n elif atom == 'CuAu':\r\n lattice = alat * numpy.identity(3)\r\n lattice[2][2] = clat\r\n symbols = ['Cu','Au']\r\n sc_pos = [[0,0,0],[0.5,0.5,0.5]]\r\n bctcell = Atoms(symbols=symbols, scaled_positions=sc_pos, cell=lattice)\r\n write('bct.cif', bctcell)\r\n print(bctcell, bctcell.get_atomic_numbers())\r\n structure = Struc(ase2struc(bctcell))\r\n # check how your cell looks like\r\n print(structure.species)\r\n return structure", "def unify_coco(coco):\n coco = coco.replace('_', ' ')\n if coco == \"tvmonitor\":\n return \"tv\"\n elif coco == \"aeroplane\":\n return \"airplane\"\n elif coco == \"diningtable\":\n return \"dining table\"\n elif coco == \"sofa\":\n return \"couch\"\n elif coco == \"motorbike\":\n return \"motorcycle\"\n elif coco == \"pottedplant\":\n return \"potted plant\"\n else:\n return coco", "def getStandards(self):\n\n print(\"StandardQuery.getStandards\")\n\n print(\"StandardQuery.getStandards: self.opts = %r\" % self.opts)\n\n print(\"StandardQuery.getStandards: Running query: %s\" % self.toUrlForm())\n results = requests.get(self.toUrlForm())\n statusCode = results.status_code\n j = utils.getJson(results)\n print(\"StandardQuery.getStandards: j=\",j)\n response = j[\"response\"]\n\n # Get standards\n standards = []\n for item in response:\n print\n itemId = item[\"props\"][\"urn:lri:property_type:id\"]\n print(\"StandardQuery.getStandards: Processing: %s\" % itemId)\n\n # If item has a CCID it is a CCSS type\n if \"urn:ccss:property_type:ccid\" not in item[\"props\"]:\n # Custom standards will not have ccid\n print(\"StandardQuery.getStandards: Processing Custom item: %s\" % itemId)\n\n # Custom standards will be type competency\n customType = \"urn:lri:entity_type:competency\"\n if customType in item[\"props\"][\"urn:lri:property_type:types\"]:\n standards.append(item)\n print(\"StandardQuery.getStandards: Added Custom standard: %s\" % itemId)\n else:\n print(\"StandardQuery.getStandards: ERROR: type: %s not in: %r\" % (customType,\n item[\"props\"][\"urn:lri:property_type:types\"]))\n\n else:\n ccid = item[\"props\"][\"urn:ccss:property_type:ccid\"]\n print(\"StandardQuery.getStandards: Processing CCSS item: %s\" % ccid)\n\n if ccid.find(\"Math\") != -1:\n # Prune CCSS Math structure\n print(\"StandardQuery.getStandards: Processing CCSS Math item: %s\" % ccid)\n if \"urn:ccss:entity_type:standard\" in item[\"props\"][\"urn:lri:property_type:types\"]:\n standards.append(item)\n print(\"StandardQuery.getStandards: Added CCSS Math standard: %s\" % itemId)\n\n elif ccid.find(\"ELA-Literacy\") != -1:\n # Prune CCSS ELA structure\n print(\"StandardQuery.getStandards: Processing CCSS ELA item: %s\" % ccid)\n\n cbProp = \"urn:lri:property_type:contained_by\"\n containedBy = item[\"props\"][cbProp]\n if type(containedBy) == str or type(containedBy) == 
unicode:\n containedBy = [containedBy]\n\n # Handle contained_by domain or grade_level\n for cb in containedBy:\n print(\"StandardQuery.getStandards: contained_by = %s\" % cb)\n\n if cb.find(\":domain:\") != -1:\n # contained_by domain means item is grade_level\n # grade_level contains standard\n gradeId = item[\"props\"][\"urn:lri:property_type:id\"]\n print(\"StandardQuery.getStandards: Checking: (%s, %s)\" % (self.grade_level, gradeId))\n if gradeId == self.grade_level:\n print(\"StandardQuery.getStandards: Processing grade_level: %s\" % gradeId)\n\n for standardId in item[\"props\"][\"urn:lri:property_type:contains\"]:\n print(\"StandardQuery.getStandards: Processing CCSS ELA standard: %s\" % standardId)\n\n standard = self.getEntityById(standardId)\n standards.append(standard)\n newId = standard[\"props\"][\"urn:lri:property_type:id\"]\n if newId != standardId:\n print(\"StandardQuery.getStandards: ERROR: %s != %s\" % (newId, standardId))\n print(\"StandardQuery.getStandards: Added CCSS ELA standard: %s, (domain = %s, grade = %s)\" % (newId, cb, gradeId))\n else:\n print(\"StandardQuery.getStandards: Skipping grade_level: %s\" % gradeId)\n\n\n elif cb.find(\":grade\") != -1:\n # contained_by grade_level means item is standard\n standard = self.getEntityById(itemId)\n standards.append(standard)\n newId = standard[\"props\"][\"urn:lri:property_type:id\"]\n if newId != itemId:\n print(\"StandardQuery.getStandards: ERROR: %s != %s\" % (newId, itemId))\n print(\"StandardQuery.getStandards: Added CCSS ELA standard: %s\" % newId)\n\n print(\"StandardQuery.getStandards: Processed %d standards\" % len(standards))\n\n # Get components\n if self.getChildren:\n print(\"StandardQuery.getStandards: Getting standard_components\")\n\n for std in standards:\n if not \"urn:lri:property_type:contains\" in std[\"props\"]:\n continue\n\n stdId = std[\"props\"][\"urn:lri:property_type:id\"]\n print(\"StandardQuery.getStandards: Getting components of: %s\" % stdId)\n\n components = []\n print(\"StandardQuery.getStandards: contains: \")\n print(std[\"props\"][\"urn:lri:property_type:contains\"])\n print(\"StandardQuery.getStandards: /contains\")\n contains = std[\"props\"][\"urn:lri:property_type:contains\"]\n if type(contains) == str or type(contains) == unicode:\n contains = [contains]\n for componentId in contains:\n print(\"StandardQuery.getStandards: Getting standard_component: %s\" % componentId)\n\n component = self.getEntityById(componentId)\n components.append(component)\n newId = component[\"props\"][\"urn:lri:property_type:id\"]\n if newId != componentId:\n print(\"StandardQuery.getStandards: ERROR: %s != %s\" % (newId, standardId))\n std[\"props\"][\"urn:lri:property_type:contains\"] = components\n\n print(\"StandardQuery.getStandards: Added: %s components to standard: %s\" % (len(std[\"props\"][\"urn:lri:property_type:contains\"]), stdId))\n\n # Get anchors\n if self.getAnchors:\n print(\"StandardQuery.getStandards: Getting anchor_standards\")\n\n for std in standards:\n if not \"urn:ccss:property_type:is_anchored_to\" in std[\"props\"]:\n continue\n\n stdId = std[\"props\"][\"urn:lri:property_type:id\"]\n print(\"StandardQuery.getStandards: Getting anchor for: %s\" % stdId)\n\n anchorId = std[\"props\"][\"urn:ccss:property_type:is_anchored_to\"]\n anchor = self.getEntityById(anchorId)\n newId = anchor[\"props\"][\"urn:lri:property_type:id\"]\n if newId != anchorId:\n print(\"StandardQuery.getStandards: ERROR: %s != %s\" % (newId, anchorId))\n 
std[\"props\"][\"urn:ccss:property_type:is_anchored_to\"] = anchor\n\n print(\"StandardQuery.getStandards: Set is_anchored_to: %s\" % anchor)\n \n # Flatten\n r = {}\n r[\"response\"] = []\n r[\"response\"].append({})\n r[\"response\"][0][\"standards\"] = standards\n r[\"status\"] = \"success\"\n r[\"status_code\"] = statusCode\n\n return r", "def to_swc(self, contributors=\"\"):\n from . import __version__\n sx, sy, sz = np.diag(self.transform)[:3]\n\n swc_header = f\"\"\"# ORIGINAL_SOURCE CloudVolume {__version__}\n# CREATURE \n# REGION\n# FIELD/LAYER\n# TYPE\n# CONTRIBUTOR {contributors}\n# REFERENCE\n# RAW \n# EXTRAS \n# SOMA_AREA\n# SHINKAGE_CORRECTION \n# VERSION_NUMBER {__version__}\n# VERSION_DATE {datetime.datetime.utcnow().isoformat()}\n# SCALE {sx:.6f} {sy:.6f} {sz:.6f}\n\"\"\"\n\n def generate_swc(skel, offset):\n if skel.edges.size == 0:\n return \"\"\n\n index = defaultdict(set)\n visited = defaultdict(bool)\n for e1, e2 in skel.edges:\n index[e1].add(e2)\n index[e2].add(e1)\n\n stack = [ skel.edges[0,0] ]\n parents = [ -1 ]\n\n swc = \"\"\n\n while stack:\n node = stack.pop()\n parent = parents.pop()\n\n if visited[node]:\n continue\n\n swc += \"{n} {T} {x:0.6f} {y:0.6f} {z:0.6f} {R:0.6f} {P}\\n\".format(\n n=(node + 1 + offset),\n T=skel.vertex_types[node],\n x=skel.vertices[node][0],\n y=skel.vertices[node][1],\n z=skel.vertices[node][2],\n R=skel.radii[node],\n P=parent if parent == -1 else (parent + 1 + offset),\n )\n\n visited[node] = True\n \n for child in index[node]:\n stack.append(child)\n parents.append(node)\n\n return swc\n\n skels = self.components()\n\n swc = swc_header + \"\\n\"\n offset = 0\n for skel in skels:\n swc += generate_swc(skel, offset) + \"\\n\"\n offset += skel.vertices.shape[0]\n\n return swc", "def fix_paper_types(cleaner):\n # Record info about types of conferneces\n # true_confval = entry[pubkey].replace('{', '').replace('}', '')\n pubval = cleaner.standard_pubval()\n type_key = 'ENTRYTYPE'\n\n # article = journal\n # inprocedings = converence paper\n\n # FIX ENTRIES THAT SHOULD BE CONFERENCES\n entry = cleaner.entry\n if pubval in constants_tex_fixes.CONFERENCE_LIST:\n if entry[type_key] == 'inproceedings':\n pass\n elif entry[type_key] == 'article':\n entry['booktitle'] = entry['journal']\n del entry['journal']\n elif entry[type_key] == 'incollection':\n pass\n else:\n raise AssertionError('UNKNOWN TYPE: %r' % (entry[type_key],))\n if 'booktitle' not in entry:\n print('DOES NOT HAVE CORRECT CONFERENCE KEY')\n print(ub.repr2(entry))\n assert 'journal' not in entry, 'should not have journal'\n entry[type_key] = 'inproceedings'\n\n # FIX ENTRIES THAT SHOULD BE JOURNALS\n if pubval in constants_tex_fixes.JOURNAL_LIST:\n if entry[type_key] == 'article':\n pass\n elif entry[type_key] == 'inproceedings':\n pass\n #print(ut.dict_str(entry))\n elif entry[type_key] == 'incollection':\n pass\n else:\n raise AssertionError('UNKNOWN TYPE: %r' % (entry['type'],))\n if 'journal' not in entry:\n print('DOES NOT HAVE CORRECT CONFERENCE KEY')\n print(ut.dict_str(entry))\n assert 'booktitle' not in entry, 'should not have booktitle'", "def semcor2conc(args):\r\n input_files = list_files(*args.input_files)\r\n types = list(args.types)\r\n output_file = args.output_file or output_default / '{}_conc.csv'.format('_'.join(types))\r\n output_file = Path(output_file)\r\n left_context = args.left\r\n right_context = args.right\r\n separator = args.separator\r\n filter_pos = args.pos\r\n kind_id = args.kind_id\r\n with output_file.open('w') as file:\r\n x = 
'last\\tnext\\tlemma' if args.add_closest else 'lemma'\r\n file.write('\\t'.join(['concordance', 'file', 'token_id', 'left', 'wordform', 'right', x, 'pos', 'sense_key\\n']))\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n tokenlist = list(generate_tokenlist(corpus_file.text))\r\n chosen_words = [index for (index, token) in enumerate(tokenlist) if token.lemma in types]\r\n for word in chosen_words:\r\n node = tokenlist[word]\r\n pos = node.pos\r\n if filter_pos and not re.match(r'{}'.format([x for x in filter_pos]), pos):\r\n continue\r\n if kind_id == 'lemma_pos':\r\n wordtype = '/'.join([node.lemma, node.pos])\r\n elif kind_id == 'wordform':\r\n wordtype = node.wordform\r\n else:\r\n wordtype = node.lemma\r\n token_id = '/'.join([wordtype, corpus_file.shortname, str(word + 1)])\r\n left, right = generate_context(tokenlist, word, left_context, right_context, separator, len(tokenlist))\r\n if args.add_closest:\r\n last = tokenlist[word-1].wordform\r\n following = tokenlist[word+1].wordform\r\n line = [corpus_file.concordance, corpus_file.shortname, token_id, left, node.wordform, right, last, following, node.lemma, pos, node.sense_key or 'NA']\r\n else:\r\n line = [corpus_file.concordance, corpus_file.shortname, token_id, left, node.wordform, right, node.lemma, pos, node.sense_key or 'NA']\r\n file.write('\\t'.join(line) + '\\n')\r\n print('File \"{}\" processed.'.format(input_file.stem))", "def main():\n\n # parse arguments\n args = parseArguments()\n\n # read stac specification \n with open( args.config_file, 'r' ) as f:\n root = yaml.safe_load( f )\n\n # generate nested stac hierarchy\n obj = getStacObject( root )\n\n # create out path if required\n if not os.path.exists ( args.out_path ):\n os.makedirs( args.out_path )\n\n # generate nested stac hierarchy\n obj.normalize_and_save( root_href=args.out_path, \n catalog_type=pystac.CatalogType.SELF_CONTAINED)\n\n return", "def scc(self):\n return self.to_ddm().scc()", "def serotype_escherichia(metadata, analysistype):\n for sample in metadata:\n # Initialise negative results to be overwritten when necessary\n sample[analysistype].best_o_pid = '-'\n sample[analysistype].o_genes = ['-']\n sample[analysistype].o_set = ['-']\n sample[analysistype].best_h_pid = '-'\n sample[analysistype].h_genes = ['-']\n sample[analysistype].h_set = ['-']\n if sample.general.bestassemblyfile != 'NA':\n if sample.general.closestrefseqgenus in ['Escherichia', 'Shigella']:\n o = dict()\n h = dict()\n for result, percentid in sample[analysistype].blastresults.items():\n if 'O' in result.split('_')[-1]:\n o.update({result: float(percentid)})\n if 'H' in result.split('_')[-1]:\n h.update({result: float(percentid)})\n # O\n try:\n sorted_o = sorted(o.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_o_pid = str(sorted_o[0][1])\n\n sample[analysistype].o_genes = [gene for gene, pid in o.items()\n if str(pid) == sample[analysistype].best_o_pid]\n sample[analysistype].o_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].o_genes))\n except (KeyError, IndexError):\n pass\n # H\n try:\n sorted_h = sorted(h.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_h_pid = str(sorted_h[0][1])\n sample[analysistype].h_genes = [gene for gene, pid in h.items()\n if str(pid) == sample[analysistype].best_h_pid]\n sample[analysistype].h_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].h_genes))\n except (KeyError, IndexError):\n pass\n return metadata", 
"def get_poly_by_type(cst, nmsk, sarr, use_anc=True):\n # create save directory for SNP types\n snp_dir = root_dir + '/compress/snp_type/'\n if not os.path.isdir(snp_dir):\n os.mkdir(snp_dir)\n # create format for saved files\n save_fmt = snp_dir + cst.chrom + '.{}.{}.npz'\n\n # get SNP data included ref/alt bases for chrom\n snppos, rbase, rcount, abase, acount = snpcount(cst.snp_files, True)\n # create the array used for compression\n snps = np.column_stack((snppos, rcount, acount))\n # get anc sequence\n anc = fasta_array(cst.chrom, cst.ancs_files)\n\n # use ancestor sequence to polarize data\n if use_anc:\n # use a different save file name\n save_fmt = snp_dir + cst.chrom + '.{}.{}.fix_anc.npz'\n # get ancestor allele at each snp pos (adjusted for 0-based index)\n snpanc = anc[snppos-1]\n # mask for sites where anc matches alt\n anc_alt = (snpanc == abase)\n # store the alt where it matches anc and ref where it doesnt\n switched_ref = abase[anc_alt]\n switched_alt = rbase[anc_alt]\n # switch alt>ref for sites where alt matches anc\n rbase[anc_alt] = switched_ref\n abase[anc_alt] = switched_alt\n # write err message for number of swithces done\n msg = 'ancestor correction to reference affected {} out of {} sites.\\n'\n stderr.write(msg.format(np.sum(anc_alt), len(anc_alt)))\n stdout.flush()\n\n # combine SNP pairs into 2-char string array\n pairs = np.core.defchararray.add(rbase, abase)\n\n # for each SNP type, get compressed poly data\n snp_types = ['AC', 'AG', 'AT', 'CA', 'CG', 'CT']\n for snptype in snp_types:\n # make a copy of the neutral mask for filtering originating base types\n cur_nmsk = np.copy(nmsk)\n # # set the base composition background\n # base = snptype[0]\n # # remove neutral sites that don't match the background\n # base_mask = (anc == base) | (anc == complement_base(base))\n # cur_nmsk[~base_mask] = False\n # get complement SNP type\n complement = complement_strand(snptype)\n # create label for current SNP type\n lbl = '{}_{}'.format(snptype, complement)\n # get mask for the snptype and its complement\n smsk = (pairs == snptype) | (pairs == complement)\n # get polymorphism & divergence summaries compressed into segments\n hom, het = compress_data(cur_nmsk, snps[smsk], sarr)[0][:2]\n # save poly data to array\n f_save = save_fmt.format(cst.tkn, lbl)\n np.savez_compressed(f_save, np.column_stack((hom, het)))\n # calculate pi for current SNP type\n pi = 1.0 * np.sum(het) / np.sum(het+hom)\n # write err msg with pi and snp type\n msg = '{} {} pi = {}\\n'.format(cst.chrom, lbl, pi)\n stderr.write(msg)\n stdout.flush()\n\n return None", "def _transform_sym_type(sym):\n func_types = ['T', 'W']\n obj_types = ['B', 'D', 'R', 'V', 'S']\n if sym['type'] in func_types:\n sym['type'] = 'FUNC'\n elif sym['type'] in obj_types:\n sym['type'] = 'OBJECT'\n return sym", "def save(self):\n for t in self.ace_types:\n self.api.api_request(\"PUT\", self.url + t, data={t: self[t]})", "def preprocess_sf(bd, var):\n filepath_svf = f\"team67-ptp/data/{var}.ftr\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[var]\n df2 = df2.to_frame()\n if df2[var].dtype is \"category\":\n df2[var] = df2[var].astype(\"category\").cat.codes\n filename = filepath_svf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")\n else:\n filename = filepath_svf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")", "def helper_cccc(standardized_output: dict):\n\n for module in standardized_output[\"classes\"]:\n WMC = 0\n n_func = 0\n 
module_name = module[\"class name\"]\n for file in standardized_output[\"files\"]:\n for func in file[\"functions\"]:\n if \"class name\" in func and func[\"class name\"] == module_name:\n WMC += func[\"CC\"]\n n_func += 1\n module[\"WMC\"] = WMC\n module[\"no. functions\"] = n_func", "def type_to_sc_type(type_, prefix='sc'):\n return '{}{}'.format(prefix.upper(), type_.title())", "def to_ACEScg():\r\n selNodes = nuke.selectedNodes()\r\n for node in selNodes:\r\n if node.Class() == 'Read':\r\n inputDataType = {\r\n '8-bit fixed': 169, '16-bit fixed': 169,\r\n '16-bit half float': 163, '32-bit float': 163\r\n }\r\n bitDepth = node.metadata('input/bitsperchannel')\r\n node['colorspace'].setValue(inputDataType[bitDepth])\r\n fileParm = node['file'].value()\r\n fileName = str(fileParm.split('/')[-1])\r\n newName = str(fileName.split('.')[0] + '_ACEScg')\r\n fileName = fileName.replace(str(fileName.split('.')[0]), newName)\r\n filename, fileExt = os.path.splitext(fileName)\r\n newFileName = filename + '.exr'\r\n newPath = fileParm.replace(\r\n str(fileParm.split('/')[-1]), newFileName)\r\n\r\n # Create write node and save out as ACEScg\r\n wNode = nuke.nodes.Write()\r\n wNode.setInput(0, node)\r\n wNode['file'].setValue(newPath)\r\n wNode['file_type'].setValue(3)\r\n wNode['colorspace'].setValue(16)\r\n nuke.execute(wNode, start=1, end=1, incr=1)", "def _convert(self, coco_cat_id):\n map = {\n # coco: voc\n 5 : 1,\n 2 : 2,\n 15: 3,\n 9 : 4,\n 40: 5,\n 6 : 6,\n 3 : 7,\n 16: 8,\n 57: 9,\n 20: 10,\n 61: 11,\n 17: 12,\n 18: 13,\n 4 : 14,\n 1 : 15,\n 59: 16,\n 19: 17,\n 58: 18,\n 7 : 19,\n 63: 20,\n }\n\n if not coco_cat_id in map:\n voc_cat_id = None\n else:\n voc_cat_id = map[coco_cat_id]\n\n return voc_cat_id", "def save_model(self, request, obj, form, change):\n if change:\n # if form is changed\n if set(form.changed_data).intersection(set(['code', 'subcode'])):\n # if code or subcode is altered\n # delete all the previous references belongs to this object\n from nsl.cache import nsl_cache_keys\n key = '{0}-{1}'.format(form.cleaned_data.get('code'),\n form.cleaned_data.get('subcode'))\n nsl_cache_keys(key, 'del')\n old_nsl_obj = NSLCode.objects.get(id=obj.id)\n for rel_nsl_obj in old_nsl_obj.dependents.all():\n rel_nsl_obj.clearance_code.remove(old_nsl_obj)\n rel_nsl_obj.save()\n\n if not change or set(form.changed_data).intersection(\n set(['code', 'subcode'])):\n # if form not changed i.e creating a new object or\n # check if newely assigned code and subcode combination\n # exists or not\n nsl_qs = NSLCode.objects.filter(\n code=form.cleaned_data.get('code'),\n subcode=form.cleaned_data.get('subcode'))\n if nsl_qs.count():\n # if exists then link the same to this newely\n # created or altered object.\n for nsl_obj in nsl_qs[0].dependents.all():\n super(NSLCodeAdmin, self).save_model(\n request, obj, form, change)\n nsl_obj.clearance_code.add(obj)\n nsl_obj.save()\n # Modify clearance_code before saving/deleting so that it accepts its\n # relevant clearance codes based on the combination of code and subcode\n form.cleaned_data['clearance_code'] = NSLCode.objects.filter(\n code__in=form.cleaned_data['clearance_code'].values('code'),\n subcode__in=form.cleaned_data['clearance_code'].values('subcode'))\n super(NSLCodeAdmin, self).save_model(request, obj, form, change)" ]
[ "0.53527796", "0.48657048", "0.4840384", "0.48110694", "0.4809684", "0.4795651", "0.472314", "0.47031", "0.4693356", "0.46858063", "0.46800965", "0.46186087", "0.46148878", "0.46077034", "0.46013483", "0.4600135", "0.4594256", "0.45909703", "0.45855772", "0.45818898", "0.45770308", "0.4537289", "0.4534955", "0.45349473", "0.45275006", "0.4520285", "0.45194662", "0.44998157", "0.44858676", "0.4481456" ]
0.6809345
0
Return the PermClass containing all permutations up to the given length.
def all(cls, max_length): return PermClass([PermSet.all(length) for length in range(max_length + 1)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_permutatation_by_length(length, permutation_set):\n pass", "def guess_basis(self, max_length=6):\n assert (\n max_length <= self.max_len\n ), \"The class is not big enough to check that far!\"\n\n # Find the first length at which perms are missing.\n for length, S in enumerate(self):\n if len(S) < factorial(length):\n start_length = length\n break\n else:\n # If we're here, then `self` is the class of all permutations.\n return PermSet()\n\n # Add missing perms of minimum length to basis.\n missing = PermSet.all(start_length) - self[start_length]\n basis = missing\n\n length = start_length\n current = PermSet.all(length - 1)\n current = current.right_extensions(basis=basis)\n\n # Go up in length, adding missing perms at each step.\n while length < max_length:\n length += 1\n current = current.right_extensions(basis=basis)\n\n for perm in list(current):\n if perm not in self[length]:\n basis.add(perm)\n current.remove(perm)\n\n return basis", "def possible_motifs_by_length(length, base_set=\"ACGU\"):\n args = [base_set for i in xrange(length)]\n for permutation in itertools.product(*args):\n yield \"\".join(permutation)", "def distribution_for_length(\n self, n: int, perm_class: Optional[Av] = None\n ) -> List[int]:\n iterator = perm_class.of_length(n) if perm_class else Perm.of_length(n)\n cnt = Counter(self.func(p) for p in iterator)\n lis = [0] * (max(cnt.keys(), default=0) + 1)\n for key, val in cnt.items():\n lis[key] = val\n return lis", "def sum_closure(self, max_len=8):\n assert max_len <= self.max_len, \"Can't make a sum-closure of that size!\"\n L = []\n for length in range(max_len + 1):\n new_set = PermSet()\n for p in Permutation.gen_all(length):\n if all(q in self for q in set(p.sum_decomposition())):\n new_set.add(p)\n L.append(new_set)\n\n return PermClass(L)", "def permute(p,l,length):\n assert length >= 0\n if length == 0:\n\tprint p\n\treturn\n\n for i in range(0,length):\n\tn = p + (l[i],) \n\tpermute(n,l[0:i]+l[i+1:],length-1)", "def permutation(self, size=None, n=1, ndim=None, dtype='int64'):\r\n return self.gen(permutation, size, n, ndim=ndim, dtype=dtype)", "def to_permutation(self):\n sp = SetPartitions(self.parent()._n)(self)\n perm = sp.to_permutation().to_cycles()\n return perm", "def bruteForcePopulation(N):\n return list(itertools.permutations(range(N), N))", "def skew_closure(self, max_len=8):\n assert max_len <= self.max_len, \"Can't make a skew-closure of that size!\"\n L = []\n for length in range(max_len + 1):\n new_set = PermSet()\n for p in Permutation.gen_all(length):\n if all(q in self for q in set(p.skew_decomposition())):\n new_set.add(p)\n L.append(new_set)\n\n return PermClass(L)", "def get_perms(n):\n \n from itertools import permutations\n bases = 'CATGN'\n return [''.join(perm) for perm in permutations(bases, n)]", "def distinct(length, digits=DIGITS):\n return (int(''.join(p)) for p in permutations(digits, length))", "def list_permutations(self):\n return self.permutations(self._char_counts)", "def reduced(self):\n from reduced import ReducedPermutationLI\n\n return ReducedPermutationLI(self.list(),alphabet=self._alphabet, reduced=True)", "def guess_basis(self, max_length=6, search_mode=False):\n\n t = time.time()\n\n assert max_length < len(self), 'class not big enough to check that far'\n\n if search_mode:\n max_length = len(self)-1\n\n # Find the first length at which perms are missing.\n not_all_perms = [i for i in range(len(self)) if i >= 1 and len(self[i]) != factorial(i)]\n\n # If no perms are missing, we have all perms, so 
return empty basis.\n if len(not_all_perms) == 0:\n return permset.PermSet([])\n\n # Add missing perms of minimum length to basis.\n start_length = min(not_all_perms)\n basis = permset.PermSet(permutation.Permutation.listall(start_length)).difference(self[start_length])\n\n if search_mode:\n print('\\t'+str(len(basis))+' basis elements of length '+str(start_length)+'\\t\\t'+(\"{0:.2f}\".format(time.time() - t)) + ' seconds')\n t = time.time()\n\n basis_elements_so_far = len(basis)\n\n current_length = start_length + 1\n\n # Go up in length, adding missing perms at each step.\n while current_length <= max_length:\n C = avclass.AvClass(basis, current_length)\n basis = basis.union(C[-1].difference(self[current_length]))\n\n if search_mode:\n print('\\t'+str(len(basis)-basis_elements_so_far)+' basis elements of length ' + str(current_length) + '\\t\\t' + (\"{0:.2f}\".format(time.time() - t)) + ' seconds')\n t = time.time()\n\n basis_elements_so_far = len(basis)\n\n current_length += 1\n\n return basis", "def permutationSizeList(c, size=1, prev=[]):\n for n in xrange(len(c)):\n if size == 1:\n yield prev + [c[n]]\n else:\n for p in permutationSizeList(c, size-1, prev + [c[n]]):\n yield p", "def permutations(a):\n n = len(a)\n return _heap_perm_(n, a)", "def confound_restricted_permutations(target, confounds):\n\n raise NotImplementedError()", "def permutations(iterable):\n pass", "def permute(seq, permutation):\n return [seq[i] for i in permutation]", "def _get_fresh_permutations(self):\n self.permutations = []\n for i in self.permutation_numbers:\n self.permutations.append(copy.copy(self.content.find(\"permutation\", number=i)))", "def part_1():\n return itertools.permutations(range(5))", "def number_of_permutations(self) -> int:\n perms = math.factorial(len(self._word))\n for v in self._char_counts.values():\n if v > 1:\n perms /= math.factorial(v)\n return perms", "def r_permutations(n, r):\n return math.factorial(n) / math.factorial(n - r)", "def distribution_up_to(\n self, n: int, perm_class: Optional[Av] = None\n ) -> List[List[int]]:\n return [self.distribution_for_length(i, perm_class) for i in range(n + 1)]", "def AllPermutations(data):\n if len(data) <= 1:\n return data\n\n return [p for p in itertools.permutations(data)]", "def permute(l):\n perm = []\n if len(l) == 0:\n perm.append([])\n else:\n first_element = l[0]\n after_first = slice(1, None)\n sub_permutes = permute(l[after_first])\n for p in sub_permutes:\n for j in range(0, len(p) + 1):\n r = copy.deepcopy(p)\n r.insert(j, first_element)\n perm.append(r)\n return perm", "def expand(self): #uninformed\n children = []\n index = self._find0()\n if index >= self.size: return children\n for change in range(1, self.size + 1):\n child = Permutation(self.size)\n elements = self.getElements()\n elements[index] = change\n child.setElements(elements)\n children.append(child)\n return children", "def gen_permutations(outcomes, length):\r\n \r\n ans = set([()])\r\n for dummy_idx in range(length):\r\n temp = set()\r\n for seq in ans:\r\n for item in outcomes:\r\n new_seq = list(seq)\r\n if new_seq.count(item) == 0:\r\n new_seq.append(item)\r\n temp.add(tuple(new_seq))\r\n ans = temp\r\n return ans", "def trivial_permutation(n: int) -> List[int]:\n return list(range(n))" ]
[ "0.71164477", "0.6639119", "0.6269324", "0.6241461", "0.622866", "0.616659", "0.61158764", "0.60792464", "0.6075472", "0.6008308", "0.6007796", "0.59446824", "0.59340173", "0.58739895", "0.5801515", "0.57797754", "0.5774455", "0.5759966", "0.57079226", "0.56256104", "0.56016207", "0.5596002", "0.559148", "0.55599385", "0.5550428", "0.5526983", "0.5524448", "0.54894155", "0.5462438", "0.5454154" ]
0.78563213
0
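The `PermClass.all` record above constructs the class of all permutations length by length, as a list of per-length sets (`PermSet.all(k)` for k = 0..max_length). A minimal self-contained sketch of the same idea, using plain Python sets of tuples rather than the library's `PermSet` (the helper name `all_perms_up_to` is illustrative, not part of the library):

```python
from itertools import permutations

def all_perms_up_to(max_length):
    """Entry k of the returned list holds every permutation of length k."""
    # range(max_length + 1) mirrors the record above: lengths 0 through max_length.
    return [set(permutations(range(k))) for k in range(max_length + 1)]

levels = all_perms_up_to(3)
assert [len(level) for level in levels] == [1, 1, 2, 6]  # 0!, 1!, 2!, 3!
```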
Modify `self` by removing permutations that do not satisfy the `property`.
def filter_by(self, property): for length in range(len(self)): for p in list(self[length]): if not property(p): self[length].remove(p)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filtered_by(self, property):\n C = copy.deepcopy(self)\n C.filter_by(property)\n return C", "def removeDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return self[ind[ok]]", "def clearProperty(*args):", "def clearProperty(*args):", "def clearProperty(*args):", "def clearProperty(*args):", "def reset_property(self, _, prop):\n dst = prop.get_merged_equivalent().clone()\n create_pseudo_values([dst])\n cmd = commands.ReplaceObject(obj=prop, repl=dst)\n self.execute(cmd)\n\n # Reset the view to make sure the changes are properly displayed.\n self.reset_value_view(None)", "def reset(self):\n self.valid_passes = set()\n self.property_set.clear()", "def test_analysis_pass_remove_property(self):\n qr = QuantumRegister(1, \"qr\")\n circuit = QuantumCircuit(qr, name=\"MyCircuit\")\n property_set = {\"to remove\": \"value to remove\", \"to none\": \"value to none\"}\n\n pass_e = PassN_AP_NR_NP(\"to remove\", \"to none\")\n with self.assertLogs(\"LocalLogger\", level=\"INFO\") as cm:\n result = pass_e(circuit, property_set)\n\n self.assertMessageLog(\n cm,\n [\n \"run analysis pass PassN_AP_NR_NP\",\n \"property to remove deleted\",\n \"property to none noned\",\n ],\n )\n self.assertEqual(property_set, PropertySet({\"to none\": None}))\n self.assertIsInstance(property_set, dict)\n self.assertEqual(circuit, result)", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def clean(self):\n self.unique_combinations = {}\n self.reverse_combinations = []\n self.label_count = None", "def clear_properties(self):\n self.properties.clear()", "def clearProperties(*args):", "def clearProperties(*args):", "def clearProperties(*args):", "def clearProperties(*args):", "def removeDeadProperty(self, property):\n if self.hasDeadProperty(property):\n if type(property) is tuple:\n qname = property\n else:\n qname = property.qname()\n\n self.deadProperties().delete(qname)", "def simple_mutator(chromosome, genes, properties):\n mutated_chromosome = list(chromosome)\n for i in range(len(chromosome)):\n if random.random() < properties.mutation_probability:\n mutated_chromosome[i] = random.choice(genes)\n return mutated_chromosome", "def _reset_derived_prop_(self):\n self._derived_properties[\"photosamplers\"] = None", "def permute(self):\n raise NotImplementedError()", "def reduced(self):\n from reduced import ReducedPermutationLI\n\n return ReducedPermutationLI(self.list(),alphabet=self._alphabet, reduced=True)", "def clear_proportions(self):\n\n self._proportions = [{}, {}]", "def reset(self):\n newPerm = randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = np.asarray(newPerm)", "def _mutate(self, individuals):\n for cur in individuals:\n if random.random() < self.mutation_probability:\n self.op.mutate(cur['individual'])\n cur['fitness'] = None", "def self_mutate(self) -> 'Individual':\n self.mutator_cls.mutate_inplace(self.chromosome)\n self.fitness.cache_clear()\n return self", "def permutation(self, individual):\n genotype = numpy.array(individual.genotype, copy=True)\n [idx1, idx2] = numpy.random.randint(0, len(genotype), 2)\n aux = individual.genotype[idx1]\n numpy.put(genotype, [idx1], [genotype[idx2]])\n numpy.put(genotype, [idx2], [aux])\n return optimization.Individual(genotype, individual.fitness_evaluator, individual.crossover_method, individual.mutation_method)", "def mutant(self):\n _mutant = []\n _wt = self.wildtype\n for i in range(0, len(self.mutations)):\n site = _wt[i]\n options = 
self.mutations[i]\n if options is None:\n _mutant.append(_wt[i])\n else:\n for o in options:\n if o != site:\n _mutant.append(o)\n return \"\".join(_mutant)", "def unset(self, role, *permissions):\n for perm in permissions:\n for rec in self:\n if role is not None and rec[1] != role:\n continue\n\n if rec[2] is ALL_PERMISSIONS or perm is ALL_PERMISSIONS:\n rec[2] = set()\n else:\n if perm in rec[2]:\n rec[2].remove(perm)\n\n records = []\n for rec in self:\n if rec[2]:\n records.append(rec)\n self[:] = records", "def bitFlip_mutation(population, **kwargs):\r\n new_pop = []\r\n for indiv in population:\r\n mutation_mask = np.random.random(size=indiv.shape) < kwargs['mutation_prob']\r\n indiv[mutation_mask] = 1 - indiv[mutation_mask]\r\n new_pop.append(indiv.copy())\r\n return new_pop", "def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)" ]
[ "0.58707166", "0.5852598", "0.58254266", "0.58254266", "0.58254266", "0.58254266", "0.5796714", "0.57930195", "0.57692075", "0.5452164", "0.54516786", "0.54340243", "0.5398869", "0.5398869", "0.5398869", "0.5398869", "0.53718495", "0.5368747", "0.53646755", "0.53518665", "0.5325577", "0.5304527", "0.52967864", "0.5293067", "0.52747583", "0.5251408", "0.5221104", "0.52202183", "0.520007", "0.519128" ]
0.7654139
0
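The `filter_by` record above mutates the class in place: it walks each length and removes every permutation that fails the predicate. A rough, self-contained equivalent over plain sets of tuples (an illustrative stand-in, not the library's code):

```python
from itertools import permutations

def filter_by(levels, predicate):
    """Remove, in place, every permutation that does not satisfy the predicate."""
    for level in levels:
        for perm in list(level):  # iterate over a snapshot so removal is safe
            if not predicate(perm):
                level.discard(perm)

levels = [set(permutations(range(k))) for k in range(4)]
filter_by(levels, lambda p: len(p) < 2 or p[0] < p[-1])
assert (0, 1, 2) in levels[3] and (2, 1, 0) not in levels[3]
```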
Return a copy of `self` that has been filtered using the `property`.
def filtered_by(self, property): C = copy.deepcopy(self) C.filter_by(property) return C
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allow_filtering(self):\r\n clone = copy.deepcopy(self)\r\n clone._allow_filtering = True\r\n return clone", "def filter_by(self, property):\n for length in range(len(self)):\n for p in list(self[length]):\n if not property(p):\n self[length].remove(p)", "def filter(self, observable):", "def filter(self):\n return self._filter", "def __get__(self, model_instance, model_class):\r\n if model_instance is not None:\r\n query = Query(self.__model)\r\n if type(self.__property) == list:\r\n props = []\r\n for prop in self.__property:\r\n props.append(\"%s =\" % prop)\r\n return query.filter(props, model_instance)\r\n else:\r\n return query.filter(self.__property + ' =', model_instance)\r\n else:\r\n return self", "def copy(self):\n return properties.copy(self)", "def clone(self):\n clone = super(Property, self).clone()\n clone.fget = self.fget\n clone.fset = self.fset\n clone.cached = self.cached\n return clone", "def filter(self, func=bool):\n return _(filter(func, self._))", "def filter_by_property(self, properties=None, **kwargs):\r\n\t\tif properties is None:\r\n\t\t\tproperties = {}\r\n\t\tproperties.update(kwargs)\r\n\t\tresult_list = ElementList()\r\n\t\tfor element in self:\r\n\t\t\tif all(k in element.properties and element.properties[k] == v\r\n\t\t\t\t\tfor k, v in properties.items()):\r\n\t\t\t\tresult_list.append(element)\r\n\t\treturn result_list", "def filter(self, predicate):\n def _filter(iterator):\n while True:\n item = next(iterator)\n if predicate(item):\n return item\n return self.__class__(self, _filter)", "def _chain(self):\n obj = self._clone()\n if obj._sticky_filter:\n obj.query.filter_is_sticky = True\n obj._sticky_filter = False\n return obj", "def copy(self) -> \"FilterAlgorithmState\":\n\n # NB: This is untested and might not be optimal tbh\n return deepcopy(self)", "def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))", "def filter(self, *args, **kwargs):", "def copy(self):\n new_filter = BloomFilter(self.capacity, self.error_rate)\n new_filter.filter = self.filter.copy()\n return new_filter", "def _copy_(self):\n return copy.copy(self)", "def __copy__(self):\n return self.copy()", "def filter(self, predicate: Callable[[Cut], bool]) -> None:\n self._filter_fn = predicate\n self.provide_len = False", "def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter", "def keep(self, predicate=None):\n self.__ff.append(\n lambda c: filter(\n self._as_callable(predicate) if (predicate is not None) else None,\n c\n )\n )\n return self", "def copy(self):\n copied = super().copy()\n copied.anonymize()\n return copied", "def filter(self, *args, **kwargs):\n clone = self._clone()\n for f in args:\n clone.filter_obj.add_filter(f)\n for key, value in kwargs.items():\n clone.filter_obj.add_filter_param(key, value)\n return clone", "def filter(self, *args):\n from .elements import EqualClauseElement\n for a in args:\n for c in self._criterion:\n if isinstance(c, EqualClauseElement) and isinstance(a, EqualClauseElement) and \\\n c.attribute.node == a.attribute.node and c.attribute.label == a.attribute.label:\n c.value = a.value\n break\n else:\n self._criterion.append(a)\n return self", "def 
filter_thing2(x):\r\n return x._thing2", "def distinct(self):\n qs = copy(self)\n qs._distinct = True\n return qs", "def _filterfunc(self,*args,**kwargs):\n self._filterfunc = self.f\n return self.f(*args,**kwargs)", "def filter(self, filtered=None, **kwargs):\n \"\"\"whose attributes match the given keyword arguments.\n \"\"\"\n if filtered is None:\n filtered = self._objects\n try:\n key, value = kwargs.popitem()\n except KeyError:\n # We're out of filters, return\n return filtered\n\n def get_match(obj):\n return key in obj and obj.get(key) == value\n\n return self.filter(filtered=filter(get_match, filtered), **kwargs)", "def copy(self):\n return set(self)", "def copy_with(self):\n return self.copy()", "def filter(self, predicate):\n self.children = [c for c in self.children if predicate(c)]\n for c in self.children:\n c.filter(predicate)" ]
[ "0.71182835", "0.68695885", "0.62238836", "0.6067556", "0.5971925", "0.59450746", "0.588143", "0.5807814", "0.5804987", "0.5721194", "0.56818587", "0.56648064", "0.55875915", "0.55407995", "0.54704565", "0.54670125", "0.54438066", "0.5432381", "0.54123914", "0.5409055", "0.5392494", "0.5312273", "0.5311437", "0.5310213", "0.530996", "0.53058153", "0.5302468", "0.53023803", "0.53020227", "0.52742195" ]
0.8608328
0
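`filtered_by` in the record above is the non-destructive twin of `filter_by`: deep-copy the class, filter the copy, and return it, leaving the original untouched. Continuing the same illustrative stand-in structure:

```python
import copy
from itertools import permutations

def filter_by(levels, predicate):
    for level in levels:
        for perm in list(level):
            if not predicate(perm):
                level.discard(perm)

def filtered_by(levels, predicate):
    """Return a filtered copy; the original per-length sets are left untouched."""
    clone = copy.deepcopy(levels)
    filter_by(clone, predicate)
    return clone

levels = [set(permutations(range(k))) for k in range(4)]
kept = filtered_by(levels, lambda p: not p or p[0] % 2 == 0)
assert len(levels[3]) == 6 and len(kept[3]) == 4  # original left intact
```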
Guess a basis for the class up to "max_length" by iteratively generating the class with basis elements known so far (initially the empty set) and adding elements that should be avoided to the basis. Search mode goes up to the max length in the class and prints out the number of basis elements of each length on the way.
def guess_basis(self, max_length=6): assert ( max_length <= self.max_len ), "The class is not big enough to check that far!" # Find the first length at which perms are missing. for length, S in enumerate(self): if len(S) < factorial(length): start_length = length break else: # If we're here, then `self` is the class of all permutations. return PermSet() # Add missing perms of minimum length to basis. missing = PermSet.all(start_length) - self[start_length] basis = missing length = start_length current = PermSet.all(length - 1) current = current.right_extensions(basis=basis) # Go up in length, adding missing perms at each step. while length < max_length: length += 1 current = current.right_extensions(basis=basis) for perm in list(current): if perm not in self[length]: basis.add(perm) current.remove(perm) return basis
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def guess_basis(self, max_length=6, search_mode=False):\n\n t = time.time()\n\n assert max_length < len(self), 'class not big enough to check that far'\n\n if search_mode:\n max_length = len(self)-1\n\n # Find the first length at which perms are missing.\n not_all_perms = [i for i in range(len(self)) if i >= 1 and len(self[i]) != factorial(i)]\n\n # If no perms are missing, we have all perms, so return empty basis.\n if len(not_all_perms) == 0:\n return permset.PermSet([])\n\n # Add missing perms of minimum length to basis.\n start_length = min(not_all_perms)\n basis = permset.PermSet(permutation.Permutation.listall(start_length)).difference(self[start_length])\n\n if search_mode:\n print('\\t'+str(len(basis))+' basis elements of length '+str(start_length)+'\\t\\t'+(\"{0:.2f}\".format(time.time() - t)) + ' seconds')\n t = time.time()\n\n basis_elements_so_far = len(basis)\n\n current_length = start_length + 1\n\n # Go up in length, adding missing perms at each step.\n while current_length <= max_length:\n C = avclass.AvClass(basis, current_length)\n basis = basis.union(C[-1].difference(self[current_length]))\n\n if search_mode:\n print('\\t'+str(len(basis)-basis_elements_so_far)+' basis elements of length ' + str(current_length) + '\\t\\t' + (\"{0:.2f}\".format(time.time() - t)) + ' seconds')\n t = time.time()\n\n basis_elements_so_far = len(basis)\n\n current_length += 1\n\n return basis", "def __init__(self, num_beams, max_length, length_penalty, early_stopping):\n self.max_length = max_length - 1 # ignoring bos_token\n self.length_penalty = length_penalty\n self.early_stopping = early_stopping\n self.num_beams = num_beams\n self.beams = []\n self.worst_score = 1e9", "def get_best_guess(self, lst):\n maxlen = 0\n pass\n #for elem in lst:", "def classes(self):\n #print \"making classes again!\"\n l = []\n for p in self.marks:\n l.append(psi_class(self,p))\n for d in range(1, self.dimension + 1):\n l.append(kappa_class(self,d))\n for i in range(1, self.genus+1):\n l.append(chern_char(self, 2*i-1))\n if True:#self.genus != 0:\n l.append(irreducible_boundary(self))\n marks = set(self.marks)\n reducible_boundaries = []\n if self.n != 0:\n first_mark_list = [marks.pop()] \n for g1 in range(0, self.genus + 1):\n for p in subsets(marks):\n r_marks = set(first_mark_list + p)\n if 3*g1 - 3 + len(r_marks) + 1 >= 0 and 3*(self.genus-g1) - 3 + self.n - len(r_marks) + 1 >= 0:\n reducible_boundaries.append( reducible_boundary(self, Mgn(g1, r_marks)) )\n \n reducible_boundaries.sort(key = lambda b: sorted(list(b.component1.marks)))\n reducible_boundaries.sort(key = lambda b: len(b.component1.marks))\n reducible_boundaries.sort(key = lambda b: b.component1.genus)\n \n else: #self.n == 0\n for g1 in range(1, floor(self.genus/2.0)+1):\n reducible_boundaries.append(reducible_boundary(self, Mgn(g1, []))) \n \n \n l += reducible_boundaries \n \n for i in range(1,self.genus+1):\n l.append(lambda_class(self,i))\n return l", "def lazy_greedy_max(self, budget):\r\n\r\n classes, no_elements = torch.unique(self.y_trn, return_counts=True)\r\n len_unique_elements = no_elements.shape[0]\r\n per_class_bud = int(budget / len(classes))\r\n final_per_class_bud = []\r\n _, sorted_indices = torch.sort(no_elements, descending = True)\r\n\r\n if self.selection_type == 'PerClass':\r\n \r\n total_idxs = 0\r\n for n_element in no_elements:\r\n final_per_class_bud.append(min(per_class_bud, torch.IntTensor.item(n_element)))\r\n total_idxs += min(per_class_bud, torch.IntTensor.item(n_element))\r\n \r\n if total_idxs < 
budget:\r\n bud_difference = budget - total_idxs\r\n for i in range(len_unique_elements):\r\n available_idxs = torch.IntTensor.item(no_elements[sorted_indices[i]])-per_class_bud \r\n final_per_class_bud[sorted_indices[i]] += min(bud_difference, available_idxs)\r\n total_idxs += min(bud_difference, available_idxs)\r\n bud_difference = budget - total_idxs\r\n if bud_difference == 0:\r\n break\r\n\r\n total_greedy_list = []\r\n for i in range(len_unique_elements):\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n \r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=final_per_class_bud[i])\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn[idxs].numpy())\r\n greedyList = self.get_index(self.x_trn[idxs].numpy(), x_sub)\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n greedyList = list(np.argmax(sim_sub, axis=1))\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n elif self.selection_type == 'Supervised':\r\n \r\n \r\n if self.submod == 'feature_based':\r\n \r\n class_map = {}\r\n for i in range(len_unique_elements):\r\n class_map[torch.IntTensor.item(classes[i])] = i #Mapping classes from 0 to n\r\n \r\n sparse_data = torch.zeros([self.x_trn.shape[0], self.x_trn.shape[1]*len_unique_elements])\r\n for i in range(self.x_trn.shape[0]):\r\n \r\n start_col = class_map[torch.IntTensor.item(self.y_trn[i])]*self.x_trn.shape[1]\r\n end_col = start_col+self.x_trn.shape[1]\r\n sparse_data[i, start_col:end_col] = self.x_trn[i, :]\r\n\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n x_sub = fl.fit_transform(sparse_data.numpy())\r\n total_greedy_list = self.get_index(sparse_data.numpy(), x_sub)\r\n\r\n else:\r\n for i in range(len(classes)):\r\n \r\n if i == 0:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = idxs.repeat_interleave(N)\r\n col = idxs.repeat(N)\r\n data = self.dist_mat.cpu().numpy().flatten()\r\n else:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)\r\n col = torch.cat((col, idxs.repeat(N)), dim=0)\r\n data = np.concatenate([data, self.dist_mat.cpu().numpy().flatten()], axis=0)\r\n \r\n \r\n sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))\r\n #self.dist_mat = sparse_simmat\r\n\r\n if self.submod == 'facility_location':\r\n fl = 
apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n sim_sub = fl.fit_transform(sparse_simmat)\r\n total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))\r\n\r\n\r\n if self.selection_type == 'Full':\r\n \r\n\r\n total_greedy_list = []\r\n idx_end = self.x_trn.shape[0] - 1\r\n idxs = torch.linspace(0, idx_end, self.x_trn.shape[0]).long()\r\n\r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn.numpy())\r\n total_greedy_list = self.get_index(self.x_trn.numpy(), x_sub)\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n total_greedy_list = list(np.argmax(sim_sub, axis=1))\r\n\r\n return total_greedy_list", "def create_program(fe: FitnessEvaluator, max_len: int) -> str:\n\n # mut_prob = {\"<\": 0.8, \">\": 0.8, \"+\": 0.6, \"-\": 0.6, \"[\": 0.1, \"]\": 0.1}\n\n # new_population: List[Program] = []\n\n # k = 1000\n # N = 0.5 # N is top percentile for selection process\n\n converges = True\n gen_no = 0\n\n while 1:\n k = 1000 # k represents the initial population size\n gen_no = gen_no + 1\n print(gen_no)\n if gen_no == 100:\n converges = True\n gen_no = 0\n\n # generate initial random, score initial random, add to population\n if converges:\n converges = False\n population: List[Program] = []\n res = generate_random(fe, max_len, k, population)\n if res != \"\":\n # print(\"from RANDOM\")\n return res\n\n new_population: List[Program] = []\n ct = [0]\n\n while ct[0] != k:\n weights = populate_weights(k, population)\n\n population.sort(key=lambda program: program.score)\n\n selected = random.choices(population, weights=weights, k=k//2)\n selected.sort(key=lambda program: program.score)\n\n if bad_average(selected):\n k = 0\n converges = True\n gen_no = False\n break\n\n res = select(new_population, selected, fe, k//2, ct)\n if res != \"\":\n return res\n\n for i in range(k):\n population[i] = new_population[i]", "def __init__(self, n_hyp, max_len, length_penalty, early_stopping):\n self.max_len = max_len - 1 # ignoring <BOS>\n self.length_penalty = 
length_penalty\n self.early_stopping = early_stopping\n self.n_hyp = n_hyp\n self.hyp = []\n self.worst_score = 1e9", "def test_we_get_all_W_mers_we_asked_for(self):\n fasta_file = os.path.normpath(get_fasta_file('T00759-small.fa'))\n num_sites = [2, 4, 8, 16, 32]\n self.options.max_num_sites = max(num_sites)\n self.options.min_num_sites = min(num_sites)\n \n #\n # Load sequences and build index\n #\n algorithm = stempy.Algorithm(self.options)\n algorithm._initialise(fasta_file)\n data = algorithm.input_sequences.data\n\n for seed in (\n 'GCTAGCTAGCGG',\n 'ATGCAGAAAAATTAAG',\n 'TTTAAAATACTTTAAA',\n ):\n # seed a model\n logging.info('Using seed %s', seed)\n W = len(seed)\n model = algorithm.create_model_of_input(W)\n model.bs.seed(seed, True)\n model.set_lambda_for_sites(data.num_sequences)\n \n for num_to_find in num_sites:\n # look for best W-mers under model\n best_w_mer_finder = stempy.create_best_w_mer_finder(data, model, num_to_find)\n best_w_mer_finder()\n if len(best_w_mer_finder.best_w_mers) < num_to_find:\n if len(best_w_mer_finder.best_w_mers) != model.num_W_mers:\n logging.warning('Found %d W-mers', len(best_w_mer_finder.best_w_mers))\n logging.warning('%d W-mers available', model.num_W_mers)\n logging.warning('Wanted %d W-mers', num_to_find)\n raise ValueError('Did not find enough W-mers')", "def possible_motifs_by_length(length, base_set=\"ACGU\"):\n args = [base_set for i in xrange(length)]\n for permutation in itertools.product(*args):\n yield \"\".join(permutation)", "def test_limit_build() -> None:\n tknzr = WsTknzr(is_uncased=False, max_vocab=10, min_count=0)\n tknzr.build_vocab([chr(i) for i in range(65536)])\n assert tknzr.vocab_size == 10", "def optimalize(): \n start = time()\n max = 0\n maxn=2\n maxm=3\n check = [(n,m) for n in range(24,30) for m in range(3,20)]\n dict = {}\n print \"start optimalization of: bigram-features,uniqueness\"\n for n,m in check:\n score=0\n print \">lem>>n(uniqueness):\"+str(n)\n print \">lem>>m(commonness):\"+str(m)\n wrds = common_but_unique(ngrams_dict(1,authors,compactcorpus,n,False),m)\n bigrams = common_but_unique(ngrams_dict(2,authors,compactcorpus,n,False),m)\n trigrams = common_but_unique(ngrams_dict(3,authors,compactcorpus,n,False),m)\n #pos_feat = [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)]\n pos_feat = [\"bi:(\"+str(bi[0])+\",\"+str(bi[1])+\")>\"+str(num) for bi in bigrams for num in range(0,1)] + [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)] + [\"tri:(\"+str(tri[0])+\",\"+str(tri[1])+\",\"+str(tri[2])+\")>\"+str(num) for tri in trigrams for num in range(0,1)]\n\n print \"number of features AFTER selection:\" + str(len(pos_feat))\n for x in range(0,4):\n data = split_train_test_data(authors, corp,45)\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n test_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"test\"]]\n classifier1 = NaiveBayesClassifier.train(train_set)\n acc = nltk.classify.accuracy(classifier1,test_set)\n print \"accuracy:\"+str(acc)\n score +=acc\n print \"time elapsed: \"+str(time()-start)\n print \"score(\" + str(n) +\")=\"+str(score/4)\n classifier1.show_most_informative_features(8)\n dict[(n,m)]=(score/4)\n if(score/4)>max:\n max = (score/4)\n maxn =n\n maxm = m\n print \"max score=\"+str(max)\n print \"where n = \"+str(maxn)\n print \"where m = \"+str(maxm)\n print \"time:\"+str(time()-start)\n 
writetofile(dict,\"optimalizedict_commonwrdsandbigrams_latest_lem.pkl\")", "def iterate_continutations(model, radix, whitelist, topk):\n for size in sorted(model.keys(), reverse=True):\n probas = dict()\n left = (\"^\" + radix)[-size:]\n count = 0\n for right in model[size].get(left, dict()):\n if whitelist.includes(LetterBag(right)):\n probas.setdefault(right, 0)\n probas[right] += math.exp(model[size][left][right])\n count += 1\n if count > 0:\n k = 0\n for selection, proba in sorted(probas.items(), key=lambda x: -x[1]):\n k += 1\n if k > topk:\n break\n yield selection, math.log(proba / count)\n break\n yield None, 0", "def class_size(self):\n if not self.is_mutation_finite():\n return infinity\n\n # type A (finite and affine)\n if self._letter == 'A':\n # the formula is taken from Torkildsen - Counting\n # cluster-tilted algebras of type A\n if self.is_finite():\n n = self._rank\n a = binomial( 2*(n+1), n+1 ) // (n+2)\n if n % 2 == 1:\n a += binomial( n+1, (n+1)//2 )\n if n % 3 == 0:\n a += 2 * binomial( 2*n//3, n//3 )\n return a // (n+3)\n # the formula is taken from Bastian, Prellberg, Rubey, Stump\n elif self.is_affine():\n i,j = self._bi_rank\n i = ZZ(i)\n j = ZZ(j)\n n = i+j\n f = Euler_Phi()\n if i == j:\n return ( binomial( 2*i,i ) +\n sum( f(k) * binomial(2*i//k,i//k)**2\n for k in [k for k in i.divisors()\n if k in j.divisors()] ) // n ) // 4\n else:\n return sum( f(k) * binomial(2*i//k,i//k) *\n binomial(2*j//k,j//k)\n for k in [k for k in i.divisors()\n if k in j.divisors()] ) // ( 2 * n )\n\n # types B and C (finite and affine)\n elif self._letter in ['B', 'C']:\n # this formula is proven but nowhere published correctness\n # is clear enough that I don't think a warning is needed\n if self.is_finite():\n n = self._rank\n return binomial(2 * n, n) // (n + 1)\n\n elif self._letter in ['BB','CC']:\n # these two formulas are not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 1\n if n%2==1:\n return binomial( 2*n-1, n-1 )\n else:\n return binomial( 2*n-1, n-1 ) + binomial( n-1, n//2 -1 )\n\n # type BC (affine)\n elif self._letter == 'BC':\n # this formula is not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 1\n return binomial( 2*n, n )\n\n # types BD and CD (affine)\n elif self._letter in ['BD','CD']:\n # this formula is not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 2\n return 2*binomial( 2*n, n )\n\n # type D (finite and affine)\n elif self._letter == 'D':\n # the formula is taken from Bastian, Prellberg, Rubey, Stump\n if self.is_finite():\n if self._rank == 4:\n return 6\n else:\n f = Euler_Phi()\n n = ZZ(self._rank)\n return sum( f( n//k ) * binomial( 2*k, k )\n for k in n.divisors() ) // (2*n)\n # this formula is not yet proven\n elif self.is_affine():\n n = self._rank - 3\n if n == 2:\n return 9\n else:\n print(Warning (\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if n%2==1:\n return 2*binomial(2*n,n)\n else:\n return 2*binomial(2*n,n) + binomial(n, n//2)\n\n # the exceptional types are hard-coded\n # type E (finite, affine and elliptic)\n elif self._letter == 'E':\n if self.is_finite():\n if self._rank == 6:\n return 67\n elif self._rank == 7:\n 
return 416\n elif self._rank == 8:\n return 1574\n elif self.is_affine():\n if self._rank == 7:\n return 132\n elif self._rank == 8:\n return 1080\n elif self._rank == 9:\n return 7560\n elif self.is_elliptic():\n if self._rank == 8:\n return 49\n elif self._rank == 9:\n return 506\n elif self._rank == 10:\n return 5739\n\n # type F\n elif self._letter == 'F':\n if self.is_finite():\n return 15\n elif self.is_affine():\n return 60\n elif self.is_elliptic():\n if self._twist == [1,2]:\n return 90\n if self._twist == [1,1] or self._twist == [2,2]:\n return 35\n\n # type G\n elif self._letter == 'G':\n if self.is_finite():\n return 2\n elif self.is_affine():\n return 6\n elif self.is_elliptic():\n if self._twist == [1,3]:\n return 7\n if self._twist == [1,1] or self._twist == [3,3]:\n return 2\n\n # type X\n elif self._letter == 'X':\n if self._rank == 6:\n return 5\n elif self._rank == 7:\n return 2\n\n # otherwise the size is returned to be unknown\n else:\n print(\"Size unknown\")\n return NotImplemented", "def brute(limit):\n c_lengths = {s: collatz_length(s) for s in range(1, limit+1)}\n return max(c_lengths, key=lambda x: c_lengths[x])", "def all(cls, max_length):\n return PermClass([PermSet.all(length) for length in range(max_length + 1)])", "def guess_key_length(self, min_len=1, max_len=9, display=False):\n\n res = {}\n max_ic = 0\n probable_key_length = 0\n # We try different key lengths\n for i in range(min_len, max_len+1):\n\n if self._len < i*2:\n continue\n ics = []\n for j in range(i):\n var = []\n for k in range(self._len//i):\n var.append(self._s[k*i + j])\n text = VigenereLikeCipher(''.join(var))\n ics.append(text.get_ic())\n total_ic = round(sum(ics)/len(ics),4)\n if total_ic > max_ic:\n max_ic = total_ic\n probable_key_length = i\n res[i] = total_ic\n if display:\n print \"\\n[+] Visual key length IC correspondance\"\n for k,v in res.items():\n v = int(round(v*1000,0))\n print str(k) + (int(math.floor(math.log10(len(res))))+1-len(str(k)))*\" \",\n print ''.join(['|' for i in range(v//2)])\n print \"\"\n return probable_key_length", "def num_accepts(self, max_len: int, bound: Sequence[Text] = ()) -> Tuple[int, int, int]:\n lt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n lt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1[frozenset(self.start_nodes)] = 1\n num_accepted_le = int(self.accepts(\"\"))\n num_accepted_gt = 0\n for c in itertools.islice(itertools.chain(bound, itertools.repeat(None)), 0, max_len):\n for nodes, count in lt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n lt2[next_nodes] += count\n for nodes, count in eq1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n if c is None or (element is not None and element > c):\n gt2[next_nodes] += count\n elif element == c:\n eq2[next_nodes] += count\n else:\n lt2[next_nodes] += count\n for nodes, count in gt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n gt2[next_nodes] += count\n num_accepted_le += self._sum_tables(eq2)\n num_accepted_le += self._sum_tables(lt2)\n num_accepted_gt += self._sum_tables(gt2)\n if not lt2 and not 
eq2 and not gt2:\n break # Exit early if we know this regex cannot accept anymore strings.\n lt1, lt2 = lt2, collections.defaultdict(int)\n eq1, eq2 = eq2, collections.defaultdict(int)\n gt1, gt2 = gt2, collections.defaultdict(int)\n num_accepted_eq = int(len(bound) <= max_len and self.accepts(bound))\n return num_accepted_le - num_accepted_eq, num_accepted_eq, num_accepted_gt", "def get_n_best(self):\n pass", "def generate_mis(G, sample_size, nodes=None):\n\n # list of maximal independent sets\n max_ind_set_list=[]\n\n # iterates from 0 to the number of samples chosen\n for i in range(sample_size):\n\n # for each iteration generates a random maximal independent set that contains\n # UnitedHealth and Amazon\n max_ind_set = nx.maximal_independent_set(G, nodes=nodes, seed=i)\n\n # if set is not a duplicate\n if max_ind_set not in max_ind_set_list:\n\n # appends set to the above list\n max_ind_set_list.append(max_ind_set)\n\n # otherwise pass duplicate set\n else:\n pass\n\n # list of the lengths of the maximal independent sets\n mis_len_list=[]\n\n # iterates over the above list\n for i in max_ind_set_list:\n\n # appends the lengths of each set to the above list\n mis_len_list.append(len(i))\n\n # extracts the largest maximal independent set, i.e., the maximum independent set (MIS)\n ## Note: this MIS may not be unique as it is possible there are many MISs of the same length\n max_ind_set = max_ind_set_list[mis_len_list.index(max(mis_len_list))]\n\n return max_ind_set", "def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)", "def solution(max_base: int = 5) -> int:\n freqs = defaultdict(list)\n num = 0\n\n while True:\n digits = get_digits(num)\n freqs[digits].append(num)\n\n if len(freqs[digits]) == max_base:\n base = freqs[digits][0] ** 3\n return base\n\n num += 1", "def run(self, generations=1000):\n gcount = 0\n \n while gcount<=generations:\n try:\n print \"Gen: \"+str(gcount),\n self.population = zip (self.population, [self.target]*len(self.population))\n self.population = self.pool.map(f, self.population)\n except:\n pass\n for i in self.population:\n print i[0],i[1]\n self.population = [organism.Organism(x[0], x[1]) for x in self.population]\n self.population.sort()\n print \" Max fitness: \"+str(self.population[::-1][1].fitness)\n try:\n if self.population[0] <= self.ppop[0]:\n self.ppop = self.population[::-1][0:10] # The top ten organisms\n else:\n self.population = self.ppop # We got worse! 
go back!\n except:\n self.ppop = self.population\n self.population = self.population[::-1][0:10]\n try:\n self.breed()\n except:\n print \"Breeding error\"\n gcount+=1", "def puzzle_02() -> None:\n\n containers = load_containers()\n combinations_lengths = tuple(map(\n lambda combination: len(combination),\n filter(lambda combination: sum(combination) == EGGNOG_LITRES,\n [combination\n for i in range(len(containers))\n for combination in combinations(containers, i)])))\n print_puzzle_solution(combinations_lengths.count(min(combinations_lengths)))", "def __init__(self, min_length=2):\n\n sw = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven', 'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren', 'won', 'wouldn']\n self.stopwords = sw\n self.MIN_WORD_LENGTH = min_length", "def generate_repeats(min_size, max_size):\n generated_repeats = []\n alphabet = ['A', 'C', 'G', 'T']\n expanded_set = set()\n repeat_set = set()\n for i in range(min_size, max_size+1):\n for combination in product(alphabet, repeat=i):\n repeat = ''.join(combination)\n repeat_revcomp = rev_comp(repeat)\n expanded = expand_repeat(repeat, max_size)\n if expanded in expanded_set:\n continue\n else:\n repeat_cycles = get_cycles(repeat)\n for cycle in repeat_cycles:\n strand = '+'\n string = expand_repeat(cycle, max_size)\n expanded_set.add(string)\n if cycle not in repeat_set:\n repeat_set.add(cycle)\n generated_repeats.append('\\t'.join([cycle, repeat, str(len(cycle)), strand]))\n if repeat_revcomp == repeat:\n continue\n repeat_cycles = get_cycles(repeat_revcomp)\n for cycle in repeat_cycles:\n strand = '-'\n string = expand_repeat(cycle, max_size)\n expanded_set.add(string)\n if cycle not in repeat_set:\n repeat_set.add(cycle)\n generated_repeats.append('\\t'.join([cycle, repeat, str(len(cycle)), strand]))\n return generated_repeats", "def solve(chars, length):\n return generate_greedy(generate_string_list(length, chars))", "def sample_many(self, start=\"\", max_len=20):\n\n n = max(min(self.n, max_len-1), 2)\n\n start = tuple(start.split())\n if start:\n ngrams = []\n compls = ()\n else:\n ngrams = [self._table.get_start_gram(n)]\n compls = ngrams[0].words\n\n for i in range(5*max_len):\n text = start + compls\n disfavor = self._table.normalized_completions(suffix(n, text)) if len(text) >= n else {}\n for k in range(n-1, 0, -1):\n ngram = self.sample(suffix(k, text), try_end=(i >= max_len), disfavor=disfavor)\n if ngram is not None:\n break\n\n if not ngram:\n 
break\n ngrams.append(ngram)\n if ngram.compl is END:\n break\n compls += (ngram.compl,)\n\n logging.info(\"max_len {}, length {}, average ngram {:.2}\".format(\n max_len, len(ngrams),\n sum(len(ngram.words) for ngram in ngrams)/len(ngrams) if len(ngrams) else 0.\n ))\n return compls, ngrams", "def generate(self, text, max_sen=1000):\n model = markovify.Text(text)\n count = 1\n tries = 5\n while count <= max_sen and tries > 0:\n sen = model.make_short_sentence(100)\n if sen:\n count += 1\n self.sentences.append(\n self.analyze_text(count, sen)\n )\n else:\n tries -= 1\n self._logger.debug('sen count:%s tries:%s', count, tries)", "def run(self):\n values_to_set = self._load().get_initial_values()\n\n best_data = []\n worst_data = []\n found = False\n overall_nb_generations_done = 0\n restart_counter = 0\n\n while overall_nb_generations_done < self._max_nb_generations and not found:\n new_population = ga_utils.create_generation(self._population_size, values_to_set)\n\n nb_generations_done = 0\n remember_the_best = 0\n nb_generations_without_improvement = 0\n\n # Loop until max allowed generations is reached or a solution is found\n while nb_generations_done < self._max_nb_generations and not found:\n # Rank the solutions\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n best_score = best_solution.fitness()\n worst_score = ranked_population[-1].fitness()\n best_data.append(best_score)\n worst_data.append(worst_score)\n\n # Manage best value and improvements among new generations over time\n if remember_the_best == best_score:\n nb_generations_without_improvement += 1\n else:\n remember_the_best = best_score\n if 0 < self._restart_after_n_generations_without_improvement < nb_generations_without_improvement:\n print(\"No improvement since {} generations, restarting the program\".\n format(self._restart_after_n_generations_without_improvement))\n restart_counter += 1\n break\n\n # Check if problem is solved and print best and worst results\n if best_score > 0:\n print(\"Problem not solved on generation {} (restarted {} times). Best solution score is {} and \"\n \"worst is {}\".format(nb_generations_done, restart_counter, best_score, worst_score))\n # Not solved => select a new generation among this ranked population\n # Retain only the percentage specified by selection rate\n next_breeders = ga_utils.pick_from_population(ranked_population, self._selection_rate,\n self._random_selection_rate)\n\n children = ga_utils.create_children_random_parents(next_breeders, self._nb_children)\n new_population = ga_utils.mutate_population(children, self._mutation_rate)\n\n nb_generations_done += 1\n overall_nb_generations_done += 1\n else:\n print(\"Problem solved after {} generations ({} overall generations)!!! Solution found is:\".\n format(nb_generations_done, overall_nb_generations_done))\n best_solution.display()\n found = True\n print(\"It took {} to solve it\".format(tools.get_human_readable_time(self._start_time, time())))\n\n if not found:\n print(\"Problem not solved after {} generations. 
Printing best and worst results below\".\n format(overall_nb_generations_done))\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n worst_solution = ranked_population[-1]\n print(\"Best is:\")\n best_solution.display()\n print(\"Worst is:\")\n worst_solution.display()\n\n graphics.draw_best_worst_fitness_scores(best_data, worst_data)", "def get_words(self, max_length: int = -1):\n nullables = self.get_nullable_symbols()\n if self.start_symbol in nullables:\n yield []\n if max_length == 0:\n return\n cfg = self.to_normal_form()\n productions = cfg.productions\n gen_d = {}\n # Look for Epsilon Transitions\n for production in productions:\n if production.head not in gen_d:\n gen_d[production.head] = [[]]\n if len(production.body) == 2:\n for obj in production.body:\n if obj not in gen_d:\n gen_d[obj] = [[]]\n # To a single terminal\n for production in productions:\n body = production.body\n if len(body) == 1:\n if len(gen_d[production.head]) == 1:\n gen_d[production.head].append([])\n if body not in gen_d[production.head][-1]:\n gen_d[production.head][-1].append(list(body))\n if production.head == cfg.start_symbol:\n yield list(body)\n # Complete what is missing\n current_length = 2\n total_no_modification = 0\n while current_length <= max_length or max_length == -1:\n was_modified = False\n for gen in gen_d.values():\n if len(gen) != current_length:\n gen.append([])\n for production in productions:\n body = production.body\n if len(gen_d[production.head]) != current_length + 1:\n gen_d[production.head].append([])\n if len(body) != 2:\n continue\n for i in range(1, current_length):\n j = current_length - i\n for left in gen_d[body[0]][i]:\n for right in gen_d[body[1]][j]:\n new_word = left + right\n if new_word not in gen_d[production.head][-1]:\n was_modified = True\n gen_d[production.head][-1].append(new_word)\n if production.head == cfg.start_symbol:\n yield new_word\n if was_modified:\n total_no_modification = 0\n else:\n total_no_modification += 1\n current_length += 1\n if total_no_modification > current_length / 2:\n return" ]
[ "0.75921", "0.61178046", "0.5979814", "0.5643039", "0.5527009", "0.5503019", "0.54588383", "0.5352529", "0.53102463", "0.5286225", "0.52744913", "0.527253", "0.52582645", "0.52432245", "0.52397716", "0.5235602", "0.5205583", "0.51763463", "0.51498365", "0.5075549", "0.5072652", "0.5058915", "0.50543565", "0.50275874", "0.50147164", "0.49999252", "0.4973078", "0.49718773", "0.4969288", "0.49623057" ]
0.6833219
1
Return the union of the two permutation classes.
def union(self, other): return PermClass([S_1 + S_2 for S_1, S_2 in zip(self, other)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def union(set1, set2):", "def union(a, b):\r\n return list(set(a) | set(b))", "def union(a, b):\n return list(set(a) | set(b))", "def union(a, b):\n return list(set(a) | set(b))", "def _control_union(self, entities_1: List[str], entities_2: List[str]):\n return list(set(entities_1).union(set(entities_2)))", "def union(self, other):\n # initialize new Set from the elements in the first Set\n union_set = Set(self.get_elements())\n\n # add every element in the second Set to a new Set and return it\n for element in other.get_elements():\n union_set.add(element)\n return union_set", "def __or__(self, other):\n\n union = list(self)\n union.extend([value for value in other if value not in union])\n\n return union", "def union(s1, s2):\n \"*** YOUR CODE HERE ***\"\n s = set()\n for member in s1:\n s.add(member)\n for member in s2:\n s.add(member)\n return s", "def union(A, B, *C):\n return setutils(\"union\", A, B, *C)", "def _union(cls, s1, s2):\n return s1.union(s2)", "def union(self, other, check_convex=False):\n return union(self, other, check_convex)", "def union(self, other, check_convex=False):\n return union(self, other, check_convex)", "def __or__(self, other):\n return self.union(other)", "def __or__(self, other):\n return self.union(other)", "def union(self, *args):\n return self.phy2abs.union(*args)", "def union(self, iterable):\n pass", "def union(a, b):\n if a == b:\n return a\n elif not a:\n return b\n elif not b:\n return a\n a = Const.unwrap(a)\n b = Const.unwrap(b)\n # TODO(robertwb): Work this into the Union code in a more generic way.\n if type(a) == type(b) and element_type(a) == typehints.Union[()]:\n return b\n elif type(a) == type(b) and element_type(b) == typehints.Union[()]:\n return a\n return typehints.Union[a, b]", "def union(p1: Iterator[Posting], p2: Iterator[Posting]) -> Iterator[Posting]:\n raise NotImplementedError(\"You need to implement this as part of the assignment.\")", "def union(self, *lists):\n if self.is_a(set):\n return _(self._.union(*lists))\n return _(_union(self._, *lists))", "def union(A,B):\n set_A = A\n set_B = B\n sorted_union = []\n for elements in set_A:\n if elements not in sorted_union:\n sorted_union.append(elements)\n for elements in set_B:\n if elements not in sorted_union:\n sorted_union.append(elements)\n return sorted_union", "def union(self, other, inplace=True):\n if self.target != other.target:\n raise ValueError('target mismatch (%s != %s)' % (\n self.target, other.target))\n\n union = self if inplace else copy.deepcopy(self)\n\n sections = [name for name in self.sections if name != 'target']\n for name in sections:\n mine = getattr(union, name)\n yours = getattr(other, name)\n setattr(union, name, mine | yours)\n\n return union", "def union(self, other):\n\n return self.intersect(other, op=np.union1d)", "def union(self, other):\n return Union(self, other)", "def union(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set(lhs | rhs)", "def union(first, second):\n # Put your code here.", "def union(self, p, q):\n pass", "def make_union(self, *args, **kwargs): # real signature unknown\n pass", "def union(self, other):\n p = AddPermission(*self.needs.union(other.needs))\n p.excludes.update(self.excludes.union(other.excludes))\n return p", "def union(self, *others):\r\n return self.r.sunion(self.r_key, *[o.r_key for o in others])", "def union(self, *other):\n \n new_ordered_set = OrderedSet()\n\n for element in self:\n new_ordered_set.add(element)\n\n for obj 
in other:\n for element in obj:\n new_ordered_set.add(element)\n\n return new_ordered_set" ]
[ "0.6826644", "0.66227067", "0.658717", "0.658717", "0.64879555", "0.64367926", "0.6419794", "0.63842964", "0.63831425", "0.63432133", "0.6324109", "0.6324109", "0.6262606", "0.6262606", "0.62462246", "0.6229771", "0.6208712", "0.6176275", "0.6149572", "0.6138486", "0.61124706", "0.609291", "0.6050086", "0.6043872", "0.6039484", "0.6030387", "0.5990845", "0.5978278", "0.5944831", "0.591958" ]
0.751311
0
Given a fitted classification model with feature importances, creates a horizontal barplot of the top 10 most important features.
def plot_importances(model, X_train_df, count = 10, return_importances = False, model_name = None, save_fig = False): importances = pd.Series(model.feature_importances_, index= X_train_df.columns) importances = importances.sort_values(ascending = False) top_imp = list(importances.sort_values(ascending = False).index[:count]) for i in range(len(top_imp)): if top_imp[i] == 'minimum_nights_avg_ntm': top_imp[i] = 'minimum_avg_nights.' if top_imp[i] == 'maximum_nights_avg_ntm': top_imp[i] = 'maximum_avg_nights.' if top_imp[i] == 'host_listings_count': top_imp[i] = 'number_of_host_regional_listings' if top_imp[i] == 'host_total_listings_count': top_imp[i] = 'number_of_host_overall_listings' if top_imp[i] == 'num_bathrooms': top_imp[i] = 'number_of_bathrooms' if top_imp[i] == 'host_is_superhost': top_imp[i] = 'is_superhost' top_imp[i] = top_imp[i].replace("_", " ").title() ax = importances[:count].plot(kind= 'barh') ax.set(title=f'Top {count} Strongest Predictors', xlabel='Strength', yticklabels=top_imp) if save_fig == True: plt.savefig(f'{model_name}_feat_imp.png') if return_importances == True: return importances
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_feature_importance(model, x_train, n=12):\n # extract and sort the feature importance\n features = model.feature_importances_\n feature_names = x_train.columns.values\n\n # combine the features importance and column names into a matrix and sort them\n feature_matrix = np.array([features, feature_names])\n feature_matrix = feature_matrix.transpose()\n feature_matrix = feature_matrix[feature_matrix[:, 0].argsort()]\n\n # divide the column names and feature importance\n sorted_feat = feature_matrix[:, 0]\n sorted_columns = feature_matrix[:, 1]\n\n # plot the features\n plt.figure(figsize=(14, 10))\n if n > len(sorted_feat):\n plt.barh(sorted_columns, sorted_feat, align='center')\n else:\n plt.barh(sorted_columns[-n:], sorted_feat[-n:], align='center')\n\n # add label and titles\n plt.yticks(sorted_columns[-n:], sorted_columns[-n:])\n plt.title('Feature Importances', fontsize=18)\n plt.xlabel('Feature Importance', fontsize=16)\n plt.ylabel('Features', fontsize=16)\n return", "def plot_feature_importance(clf, feature_names, ndisplay=20):\n feature_importance = clf.feature_importances_\n # make importances relative to max importance\n feature_importance = 100.0 * (feature_importance / feature_importance.max())\n sorted_idx = np.argsort(feature_importance)[-ndisplay:] # only use the top 20 features\n pos = np.arange(sorted_idx.shape[0]) + .5\n \n plt.figure(figsize=(12, 6))\n plt.barh(pos, feature_importance[sorted_idx], align='center')\n plt.yticks(pos, feature_names[sorted_idx])\n plt.xlabel('Relative Importance')\n plt.title('Variable Importance (Top %d)' % ndisplay)\n plt.savefig('feature_importance.png')\n #plt.show()", "def plot_feature_importances(model, train, export = False):\n \n # declaring the number\n n_features = X_train.shape[1]\n \n # setting plot window\n fig, ax = plt.subplots(figsize=(12,9))\n \n plt.barh(range(n_features), model.feature_importances_, align='center')\n plt.yticks(pd.np.arange(n_features), train.columns)\n plt.xlabel(\"Feature importance\")\n plt.ylabel(\"Feature\")\n \n if export == True:\n plt.savefig('Tree_Leaf_50_Feature_Importance.png')", "def get_top_and_bottom_features(feature_importances, feature_names, count=20):\n\n coef_pd = pd.Series(feature_importances, index = feature_names).sort_values(ascending=False)\n imp_coef = coef_pd.head(count)\n least_coef = coef_pd.abs().tail(count)\n \n plt.rcParams['figure.figsize'] = (16.0, 8.0)\n\n plt.subplot(1,2,1)\n plt.tight_layout(pad=0.4,w_pad=0.5, h_pad=1.0)\n plt.title(\"Leastx important Coefficients\")\n least_coef.plot(kind = \"barh\")\n\n plt.subplot(1,2,2)\n plt.tight_layout(pad=0.4,w_pad=0.5, h_pad=1.0)\n plt.title(\"Most important Coefficients\")\n imp_coef.plot(kind = \"barh\")\n\n plt.show()\n return (imp_coef, least_coef)", "def plot_feature_importance(pipeline, top_n_features=100, rank_features=True, orientation='h', width=500, height=None):\n assert isinstance(pipeline, Pipeline), \"Input isn't a Pipeline\"\n \n if height is None:\n height = top_n_features * 10\n \n features = get_selected_features(pipeline)\n importance_values = pipeline[-1].get_feature_importance()\n\n assert len(features) == len(importance_values), \"The number of feature names & importance values doesn't match\"\n \n importances = pd.Series(importance_values, \n index=features)\\\n .nlargest(top_n_features)\\\n .sort_values()\n \n \n if rank_features:\n existing_index = importances.index.to_series().reset_index(drop=True)\n ranked_index = pd.Series(range(1, len(importances) + 1)[::-1])\\\n .astype(str)\\\n 
.str.cat(existing_index, sep='. ')\n \n importances.index = ranked_index\n \n fig = px.bar(importances, orientation=orientation, width=width, height=height)\n fig.update(layout_showlegend=False)\n fig.show()", "def plot_importance(tree, X_tr, top_n=10, figsize=(10,10), ax=None):\n \n import pandas as pd\n import matplotlib as plt\n\n imps = pd.Series(tree.feature_importances_,index=X_tr.columns)\n imps.sort_values(ascending=True).tail(top_n).plot(kind='barh',figsize=figsize, ax=ax)\n return imps", "def plot_feature_importance_tree(model, dataset, title, plt):\n n_features = dataset.shape[1]\n plt.barh(range(n_features), model.feature_importances_, align='center')\n plt.yticks(np.arange(n_features), dataset.columns)\n plt.xlabel(\"Feature importance\")\n plt.ylabel(\"Feature\")\n plt.title(title)", "def graph_feature_importances(model, feature_names, autoscale=True, headroom=0.05, width=10, summarized_columns=None):\n \n if autoscale:\n x_scale = model.feature_importances_.max()+ headroom\n else:\n x_scale = 1\n \n feature_dict=dict(zip(feature_names, model.feature_importances_))\n \n if summarized_columns: \n #some dummy columns need to be summarized\n for col_name in summarized_columns: \n #sum all the features that contain col_name, store in temp sum_value\n sum_value = sum(x for i, x in feature_dict.iteritems() if col_name in i ) \n \n #now remove all keys that are part of col_name\n keys_to_remove = [i for i in feature_dict.keys() if col_name in i ]\n for i in keys_to_remove:\n feature_dict.pop(i)\n #lastly, read the summarized field\n feature_dict[col_name] = sum_value\n \n results = pd.Series(feature_dict.values(), index=feature_dict.keys())\n results.sort(axis=1)\n results.plot(kind=\"barh\", figsize=(width,len(results)/4), xlim=(0,x_scale))", "def plot_feature_importances(name, forest, test_train_data):\n\n X_train, y_train, X_test, y_test = test_train_data[name]\n plt.figure()\n plt.title(name + \": Feature importances\")\n importances = forest.feature_importances_\n indices = np.argsort(importances)[::-1]\n std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)\n plt.barh(\n range(X_test.shape[1]),\n importances[indices],\n color=\"blue\",\n xerr=std[indices],\n align=\"center\",\n )\n plt.yticks(range(X_test.shape[1]), X_test.columns)\n plt.show()", "def print_top10(vectorizer, clf, class_labels):\n feature_names = vectorizer.get_feature_names()\n for i, class_label in enumerate(class_labels):\n top10 = np.argsort(clf.coef_[i])[-15:]\n print(\"%s: %s\" % (class_label,\n \" \".join(feature_names[j] for j in top10)))", "def value_count_top(df, cat_features, top = 10, save_plot = False, path_dir = None ):\n cat_features = list(set(cat_features))\n cols = cat_features\n if len(cols) != 0:\n for i, col in sorted(list(enumerate(cols)), key=lambda x: x[1]):\n fig, ax = plt.subplots()\n fig.set_size_inches(4.5, 5.5)\n fig.set_size_inches(4, 4)\n ax = df[col].value_counts()[:top].plot(kind='barh')\n plt.title(str(\"Distribution of TOP \" +str(top) +\" \"+ col), fontsize=10)\n plt.show(block=False)\n if save_plot == True:\n plt.savefig((str(path_dir) + \"top_\"+str(top)+\"_value_count_ordinal.png\"))\n plt.clf()\n else:\n print(\"No categorial features to plot\")", "def plot_feature_importances(df, n = 15, color = 'blue', threshold = None):\n \n # Sort features with most important at the head\n df = df.sort_values('importance', ascending = False).reset_index(drop = True)\n \n # Normalize the feature importances to add up to one and calculate cumulative 
importance\n df['importance_normalized'] = df['importance'] / df['importance'].sum()\n df['cumulative_importance'] = np.cumsum(df['importance_normalized'])\n \n plt.rcParams['font.size'] = 12\n plt.style.use('fivethirtyeight')\n # Bar plot of n most important features\n df.loc[:n, :].plot.barh(y = 'importance_normalized', \n x = 'feature', color = color, \n edgecolor = 'k', figsize = (12, 8),\n legend = False)\n\n plt.xlabel('Normalized Importance', size = 18); plt.ylabel(''); \n plt.title(f'Top {n} Most Important Features', size = 18)\n plt.gca().invert_yaxis()\n \n \n if threshold:\n # Cumulative importance plot\n plt.figure(figsize = (8, 6))\n plt.plot(list(range(len(df))), df['cumulative_importance'], 'b-')\n plt.xlabel('Number of Features', size = 16); plt.ylabel('Cumulative Importance', size = 16); \n plt.title('Cumulative Feature Importance', size = 18);\n \n # Number of features needed for threshold cumulative importance\n # This is the index (will need to add 1 for the actual number)\n importance_index = np.min(np.where(df['cumulative_importance'] > threshold))\n \n # Add vertical line to plot\n plt.vlines(importance_index + 1, ymin = 0, ymax = 1.05, linestyles = '--', colors = 'red')\n plt.show();\n \n print('{} features required for {:.0f}% of cumulative importance.'.format(importance_index + 1, \n 100 * threshold))\n \n return df", "def plot_feature_importances(df, n = 15, threshold = None):\n \n # Sort features with most important at the head\n df = df.sort_values('importance', ascending = False).reset_index(drop = True)\n \n # Normalize the feature importances to add up to one and calculate cumulative importance\n df['importance_normalized'] = df['importance'] / df['importance'].sum()\n df['cumulative_importance'] = np.cumsum(df['importance_normalized'])\n \n plt.rcParams['font.size'] = 12\n \n # Bar plot of n most important features\n df.loc[:n, :].plot.barh(y = 'importance_normalized', \n x = 'feature', color = 'blue', \n edgecolor = 'k', figsize = (12, 8),\n legend = False)\n\n plt.xlabel('Normalized Importance', size = 18); plt.ylabel(''); \n plt.title(f'Top {n} Most Important Features', size = 18)\n plt.gca().invert_yaxis()\n \n \n if threshold:\n # Cumulative importance plot\n plt.figure(figsize = (8, 6))\n plt.plot(list(range(len(df))), df['cumulative_importance'], 'b-')\n plt.xlabel('Number of Features', size = 16); plt.ylabel('Cumulative Importance', size = 16); \n plt.title('Cumulative Feature Importance', size = 18);\n \n # Number of features needed for threshold cumulative importance\n # This is the index (will need to add 1 for the actual number)\n importance_index = np.min(np.where(df['cumulative_importance'] > threshold))\n \n # Add vertical line to plot\n plt.vlines(importance_index + 1, ymin = 0, ymax = 1.05, linestyles = '--', colors = 'red')\n plt.show();\n \n print('{} features required for {:.0f}% of cumulative importance.'.format(importance_index + 1, \n 100 * threshold))\n \n return df", "def print_top10(vectorizer, clf):\n feature_names = vectorizer.get_feature_names()\n indices=np.argsort(clf.coef_)[0][-10:]\n for i in range(10):\n print(feature_names[indices[i]])", "def fiPlot(rf):\n\tfi = rf.feature_importances_\n\tprint(len(fi))\n\tfi = 100* (fi/fi.max())\n\tsorted_idx = np.argsort(fi)\n\tpos = np.arange(len(fi))\n\tprint(pos)\n\tplt.figure()\n\tplt.barh(pos,fi[sorted_idx],align='center')\n\tplt.savefig('featureImporances.png')", "def display_importances(feature_importance_df_, num_rows):\n cols = feature_importance_df_[[\"feature\", 
\"importance\"]]\n # Take mean and groupby feature because there is a different\n # importance returned given for each fold in the importances df\n # returned from kfold_lightgbm()\n cols = cols.groupby(\"feature\").mean()\n cols = cols.sort_values(by=\"importance\", ascending=False)[:40].index\n # df.feature notation addresses just the 'feature' column\n best_features = feature_importance_df_.loc[feature_importance_df_\n .feature.isin(cols)]\n plt.figure(figsize=(8, 10))\n sns.barplot(x=\"importance\", y=\"feature\",\n data=best_features.sort_values(by=\"importance\",\n ascending=False))\n plt.title('LightGBM Features (avg over folds)') # barplot creates a\n # black line over each bar/category which shows the range of values\n # of importance that that category has in the data, since there are\n # multiple folds for each feature in best_features df\n plt.tight_layout()\n now = datetime.datetime.now()\n filename = 'lgbm_importances' + '_' + str(now.strftime('%Y-%m-%d')) \\\n + '_' + str(num_rows/1000) + 'K_rows.png'\n plt.savefig(filename)", "def plot_top_results (preds_softmax, vocab, title=None, show_max=5, figsize=None):\n figsize = figsize or (5, 2)\n fig, ax = plt.subplots(figsize=figsize)\n \n show_max = show_max or len(preds_softmax)\n idx_sort = preds_softmax.argsort(descending=True)\n \n x = np.arange(show_max)\n if title is not None:\n ax.set_title(title)\n ax.barh(x[::-1], preds_softmax[idx_sort[:show_max]])\n ax.set_xlabel('p')\n ax.set_ylabel('Category')\n ax.set_xlim([0, 1])\n ax.set_yticks(x)\n ax.set_yticklabels(vocab[idx_sort[:show_max]][::-1])\n \n return fig, ax", "def plot_feature_importance(feature_names, feature_importances, n=20):\n feature_names = np.array(feature_names)\n top_nx = np.argsort(feature_importances)[:-n-1:-1]\n feat_import = feature_importances[top_nx] # now sorted\n feat_import = feat_import / feat_import.max()\n feature_names = feature_names[top_nx]\n fig = plt.figure()\n x_ind = np.arange(n)\n plt.barh(x_ind, feat_import, height=.3, align='center')\n plt.ylim(x_ind.min() + .5, x_ind.max() + .5)\n plt.yticks(x_ind, feature_names)\n plt.savefig('/home/ubuntu/ca_bills_project/graphs/feature_importances.png', dpi=300)", "def top_10_features(df):\n df = df.groupby(\"role\").tail(10).reset_index(drop=True)\n df[\"i\"] = df.index.tolist()\n categories = CategoricalDtype(categories=df[\"i\"].tolist(), ordered=True)\n df[\"i\"] = df[\"i\"].astype(categories)\n\n def convert_label(labels):\n return OrderedDict([\n (df[df.i == l[0]].feature.tolist()[0], l[1])\n for l in list(labels.items())\n ])\n\n return (\n ggplot(df, aes(\"i\", \"value\", group=\"category\"))\n + geom_segment(\n aes(x=\"i\", xend=\"i\", y=\"min(value)\",\n yend=\"max(value)\"),\n linetype=\"dashed\",\n size=1,\n color=\"grey\"\n )\n + geom_point(aes(color=\"category\", shape=\"category\"), size=7)\n + scale_x_discrete(labels=convert_label)\n + scale_y_continuous(labels=lambda x: [\"%d%%\" % (v * 100) for v in x])\n + scale_color_brewer(type=\"qual\", palette=7)\n + guides(\n color=guide_legend(title=\"Category\"),\n shape=guide_legend(title=\"Category\")\n )\n + labs(y=\"% Relevance\", x=\"Features\", color=\"category\",\n shape=\"category\")\n + theme_matplotlib()\n + theme(strip_text=element_text(size=18),\n axis_title=element_text(size=18),\n axis_text=element_text(size=16),\n axis_text_x=element_text(size=16),\n legend_position=\"top\",\n legend_text=element_text(size=16),\n legend_title=element_text(size=18, margin={\"b\": 10}),\n legend_title_align=\"center\",\n aspect_ratio=1.4,\n 
panel_spacing_y=0.5,\n panel_spacing_x=2.8,\n figure_size=(14, 9))\n + coord_flip()\n + facet_wrap(\"~ role\", ncol=3, scales=\"free\",\n labeller=as_labeller({\n \"Backend\": \"Backend\",\n \"Frontend\": \"Frontend\",\n \"Mobile\": \"Mobile\"\n })\n )\n )", "def plot_imp_rf(model_rf, X):\n importances = model_rf.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model_rf.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n names = X.columns[indices]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(X.shape[1]):\n print(str(f+1)+'. feature '+str(names[f])+' ('+str(importances[indices[f]])+')')\n # Plot the feature importances of the forest\n plt.figure(figsize=(15, 10))\n plt.title(\"Feature importances\")\n plt.bar(range(X.shape[1]), importances[indices], color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(X.shape[1]), names, rotation=80)\n plt.xlim([-1, X.shape[1]])\n plt.show()", "def plot_global_imp(top_words, top_importances, label_name):\n plt.figure(figsize=(14, 7))\n plt.title(\"most important words for class label: \" + str(label_name), fontsize=18)\n plt.bar(range(len(top_importances)), top_importances, color=\"r\", align=\"center\")\n plt.xticks(range(len(top_importances)), top_words, rotation=60, fontsize=18)\n plt.show()", "def draw_k_main_features_cv(feature_importance_pool, first_k=20):\n name_mean_std_pool = []\n for name, importances in feature_importance_pool.items():\n mean = numpy.mean(importances)\n std = numpy.std(importances, ddof=1)\n name_mean_std_pool.append([name, mean, std])\n\n name_mean_std_pool = sorted(name_mean_std_pool, key=lambda x: -x[1])\n\n name_pool, mean_pool, std_pool = [], [], []\n for name, mean, std in name_mean_std_pool[:first_k]:\n name_pool.append(name)\n mean_pool.append(mean)\n std_pool.append(std)\n\n fig, ax_features = pyplot.subplots(figsize=(10, 10))\n ax_features.bar(name_pool, mean_pool, yerr=std_pool)\n ax_features.set_xticklabels(\n name_pool, rotation_mode='anchor', rotation=45,\n horizontalalignment='right'\n )\n ax_features.set(\n title=\"Feature importances(with stand deviation as error bar)\",\n xlabel='Feature name', ylabel='Importance'\n )\n\n return (fig, ax_features)", "def plot_xgboost_importance(xgboost_model, feature_names, threshold = 5):\n # convert from dictionary to tuples and sort by the\n # importance score in ascending order for plotting purpose\n importance = xgboost_model.booster().get_score(importance_type = 'gain')\n tuples = [(int(k[1:]), importance[k]) for k in importance]\n tuples = sorted(tuples, key = itemgetter(1))\n labels, values = zip(*tuples)\n\n # make importances relative to max importance,\n # and filter out those that have smaller than 5%\n # relative importance (threshold chosen arbitrarily)\n labels, values = np.asarray(labels), np.asarray(values)\n values = np.round(100 * values / np.max(values), 2)\n mask = values > threshold\n labels, values = labels[mask], values[mask]\n feature_labels = feature_names[labels]\n\n ylocs = np.arange(values.shape[0])\n plt.barh(ylocs, values, align = 'center')\n for x, y in zip(values, ylocs):\n plt.text(x + 1, y, x, va = 'center')\n\n plt.ylabel('Features')\n plt.xlabel('Relative Importance Score')\n plt.title('Feature Importance Score')\n plt.xlim([0, 110])\n plt.yticks(ylocs, feature_labels)\n\n # revert the ordering of the importance\n return labels[::-1]", "def feature_importance_plot(algorithm,X_train,y_train,of_type):\r\n if of_type == \"coef\":\r\n 
algorithm.fit(X_train,y_train)\r\n coef = pd.DataFrame(algorithm.coef_.ravel())\r\n coef[\"coef\"] = X_train.columns\r\n plt.figure(figsize=(14,4))\r\n ax1 = sns.barplot(coef[\"coef\"],coef[0],palette=\"jet_r\",\r\n linewidth=2,edgecolor=\"k\"*coef[\"coef\"].nunique())\r\n #ax1.set_facecolor(\"lightgrey\")\r\n ax1.axhline(0,color=\"k\",linewidth=2)\r\n plt.ylabel(\"coefficients\")\r\n plt.xlabel(\"features\")\r\n plt.xticks(rotation='vertical')\r\n plt.title('FEATURE IMPORTANCES')\r\n \r\n elif of_type == \"feat\":\r\n algorithm.fit(X_train,y_train)\r\n coef = pd.DataFrame(algorithm.feature_importances_)\r\n coef[\"feat\"] = X_train.columns\r\n plt.figure(figsize=(14,4))\r\n ax2 = sns.barplot(coef[\"feat\"],coef[0],palette=\"jet_r\",\r\n linewidth=2,edgecolor=\"k\"*coef[\"feat\"].nunique())\r\n #ax2.set_facecolor(\"lightgrey\")\r\n ax2.axhline(0,color=\"k\",linewidth=2)\r\n plt.ylabel(\"coefficients\")\r\n plt.xlabel(\"features\")\r\n plt.xticks(rotation='vertical')\r\n plt.title('FEATURE IMPORTANCES')", "def show_topn(classifier,vectorizer,categories,n):\n feature_names = np.asarray(vectorizer.get_feature_names())\n for i, category in enumerate(categories):\n topn = np.argsort(classifier.coef_[i])[-n:]\n print('{}: {}'.format(category,\", \".join(feature_names[topn])))", "def plot_feature_importances(df, threshold=0.7, table=True):\n # Sort feature according to importance\n df = df.sort_values('importance', ascending=False).reset_index()\n\n # Normalize the feature importances to add up to one\n df['importance_normalized'] = df['importance'] / df['importance'].sum()\n df['cumulative_importance'] = np.cumsum(df['importance_normalized'])\n\n # Make a horizontal bar chart of feature importances\n plt.figure(figsize=(10, 6))\n ax = plt.subplot()\n\n # Need to reverse the index to plot most important on top\n ax.barh(list(reversed(list(df.index[:15]))),\n df['importance_normalized'].head(15),\n align='center', edgecolor='k')\n\n # Set the yticks and labels\n ax.set_yticks(list(reversed(list(df.index[:15]))))\n ax.set_yticklabels(df['feature'].head(15))\n\n # Plot labeling\n plt.xlabel('Normalized Importance')\n plt.title('Feature Importances')\n plt.show()\n\n # Cumulative importance plot\n plt.figure(figsize=(8, 6))\n plt.plot(list(range(len(df))), df['cumulative_importance'], 'r-')\n plt.xlabel('Number of feature')\n plt.ylabel('Cumulative Importance')\n plt.title('Cumulative Feature Importance')\n plt.show()\n\n importance_index = np.min(np.where(df['cumulative_importance'] > threshold))\n print('%d feature required for %0.2f of cumulative importance' % (importance_index + 1, threshold))\n\n if table:\n return df", "def plot_coefficients(classifier, feature_names, top_features=20):\n\n coef = classifier.coef_.ravel()\n top_positive_coefficients = np.argsort(coef)[-top_features:]\n top_negative_coefficients = np.argsort(coef)[:top_features]\n top_coefficients = np.hstack([top_negative_coefficients, top_positive_coefficients])", "def plot_bar_important_features(important_features, title, xlabel, ylabel, fname):\r\n plt.figure(figsize=(20, 21))\r\n plt.barh(important_features.index.astype(str).tolist(), important_features.values.tolist())\r\n plt.title(title)\r\n plt.xlabel(xlabel)\r\n plt.ylabel(ylabel)\r\n plt.savefig(fname, bbox_inches='tight')\r\n plt.close()", "def feature_importance(features, df):\n feature_name = features.columns.values\n X= features\n y = df['Severity'].values\n\n model = RandomForestClassifier(n_estimators = 300, max_features='sqrt', max_depth=60, random_state=42,\n 
min_samples_leaf = 2, bootstrap = True, min_samples_split=5)\n model.fit(X, y)\n results = permutation_importance(model, X, y, scoring='f1')\n importance = results.importances_mean\n std = results.importances_std\n indices = np.argsort(importance)[::-1][:20]\n plt.figure(figsize=(12,12))\n plt.title(\"Feature importances\")\n plt.bar(range(len(indices)), importance[indices], color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(len(indices)), feature_name[indices], rotation='vertical')\n plt.xlim([-1, len(indices)])\n plt.show()", "def feature_importance(df):\n column_list = df[['Temperature(F)', 'Humidity(%)', 'Visibility(mi)',\n 'Wind_Speed(mph)', 'Precipitation(in)', 'Bump', 'Crossing', 'Junction',\n 'Railway', 'Traffic_Signal', 'Civil_Twilight', 'Rush Hour', 'Weekend',\n 'Weather_Condition_Fog', 'Weather_Condition_Other',\n 'Weather_Condition_Rain', 'Weather_Condition_Snow',\n 'Weather_Condition_Thunderstorm', 'Season_Spring',\n 'Season_Summer', 'Season_Winter', 'Side_R']]\n column_names = column_list.rename(columns={'Temperature(F)': 'Temperature(F)', 'Humidity(%)': 'Humidity(%)', \n 'Civil_Twilight': 'During Day',\n 'Rush Hour': 'During Rush Hour',\n 'Wind_Speed(mph)': 'Wind Speed(mph)', 'Traffic_Signal': \"Traffic Signal\",\n 'Side_R': \"Right Side of Road\", 'Wind_Speed(mph)': 'Wind Speed(mph)', \n 'Season_Spring':'Season Spring', 'Season_Summer': 'Season Summer',\n 'Season_Winter': 'Season Winter'\n })\n\n feature_name = column_names.columns.values \n X= column_list\n y = df['Severity'].values\n model = RandomForestClassifier(n_estimators = 50, max_features='sqrt', max_depth=70, random_state=42,\n min_samples_leaf = 4, bootstrap = True, min_samples_split=5)\n model.fit(X, y)\n results = permutation_importance(model, X, y, scoring='f1')\n #Plot Imp \n importance = results.importances_mean\n std = results.importances_std\n indices = np.argsort(importance)[::-1][:12]\n plt.figure(figsize=(10,7))\n plt.title(\"Random Forest Feature Importances\", fontsize=20)\n patches = plt.bar(range(len(indices)), importance[indices], color=\"dimgray\", yerr=std[indices], align=\"center\")\n patches[0].set_fc('r')\n patches[1].set_fc('r')\n patches[3].set_fc('r')\n patches[4].set_fc('r')\n patches[5].set_fc('r')\n patches[6].set_fc('r')\n patches[7].set_fc('r')\n plt.xticks(range(len(indices)), feature_name[indices], rotation='45', fontsize=16, horizontalalignment='right')\n plt.xlim([-1, len(indices)])\n plt.ylabel(\"Importance\", fontsize=17)\n plt.yticks(fontsize=16)\n plt.savefig('../Images/rf_featureimportance.png', transparent=False, bbox_inches='tight', format='png', dpi=200)\n plt.show()" ]
[ "0.72053033", "0.71699697", "0.7103706", "0.689223", "0.6787334", "0.676711", "0.6759439", "0.6745823", "0.6685218", "0.6671483", "0.66482466", "0.656502", "0.6540204", "0.6513533", "0.6497981", "0.64888465", "0.6476057", "0.6410694", "0.63880503", "0.63738155", "0.6222233", "0.6207887", "0.6181231", "0.61630565", "0.61598384", "0.60995626", "0.6089096", "0.6073304", "0.605541", "0.59812355" ]
0.72323865
0
Detects outliers using the Zscore>3 cutoff. Returns a boolean Series where True=outlier
def find_outliers_z(data): zFP = np.abs(stats.zscore(data)) zFP = pd.Series(zFP, index=data.index) idx_outliers = zFP > 3 return idx_outliers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_outlier(points, thresh=3.5):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score > thresh", "def is_outlier(points, thresh=12):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score > thresh", "def filter_outliers(data: pd.Series, std: int=3) -> pd.Series:\n return data[(data - data.mean()).abs() <= (std * data.std())]", "def filterOutlier(data_list,z_score_threshold=3):\n\t# n = len(data_list)\n\t# z_score_threshold = (n-1)/np.sqrt(n)\n\tdata = np.array(data_list)\n\tmedian = np.median(data)\n\tdeviation = np.median([np.abs(x - median) for x in data])\n\tz_scores = [0.675*(x - median)/deviation for x in data]\n\tdata_out = data[np.where(np.abs(z_scores) < z_score_threshold)].tolist()\n\toutput = data_out if len(data_out) > 0 else data_list\n\treturn output", "def isnot_outlier(points, thresh=1.5):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score <= thresh", "def is_outlier(incoming_data):\r\n outlier_df = \\\r\n incoming_data[incoming_data.apply(\r\n lambda x: np.abs(x - x.mean()) / x.std() > 3).all(axis=1)]\r\n return not outlier_df.empty", "def ZscoreOutlier(data, margin=0, axis=0, ddof=0, nan_policy='propagate'):\n scores = ss.zscore(data, axis=axis, ddof=ddof, nan_policy=nan_policy)\n\n lower_range, upper_range = iqr_threshold_method(scores, margin)\n\n anomaly_points = []\n for i in range(len(scores)):\n if scores[i] < lower_range or scores[i] > upper_range:\n anomaly_points.append(data[i])\n\n return anomaly_points", "def is_outlier(points, threshold=3.5):\n # transform into vector\n if len(points.shape) == 1:\n points = points[:,None]\n\n # compute median value \n median = np.median(points, axis=0)\n \n # compute diff sums along the axis\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n # compute MAD\n med_abs_deviation = np.median(diff)\n \n # compute modified Z-score\n # http://www.itl.nist.gov/div898/handbook/eda/section4/eda43.htm#Iglewicz\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n # return a mask for each outlier\n return modified_z_score > threshold", "def filtering_outliers(z, verbose=False, t=np.array([False])):\n N = len(z)\n interval = 60\n ave_z = np.median(z)\n z_60 = abs(z - ave_z)\n z_temp = np.where(z_60 > 0.2, np.nan, z)\n z_temp = np.where(z_60 > 3 * np.std(z), np.nan, z_temp)\n if verbose:\n print(100 * (1 - np.sum(np.isnan(z_temp)) / N), \"% filtered\")\n if t.any():\n t = t[np.logical_not(np.isnan(z_temp))]\n z_resized = z_temp[np.logical_not(np.isnan(z_temp))]\n if t.any():\n return z, z_resized, t\n return z, z_resized", "def remove_outliers(df, std_threshold: float = 3):\n\n df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]\n return df", "def replace_outliers(data, threshold=4):\n zscores = stats.zscore(data)\n mean, std = data.mean(), data.std()\n data.loc[zscores >= threshold] = mean + std * threshold\n 
data.loc[zscores <= -threshold] = mean - std * threshold\n\n return data", "def identify_outliers(x):\n outliers = np.array([])\n\n IQR = iqr(x)\n low_cut = np.percentile(x,25) - 1.5*IQR\n high_cut = np.percentile(x,75) + 1.5*IQR\n\n for sub in x.index:\n if x.loc[sub] < low_cut or x.loc[sub] > high_cut:\n # outliers = np.append(outliers,np.asarray(x == i).nonzero()[0])\n outliers = np.append(outliers,sub)\n\n return outliers", "def filter_outliers(data): \n \n idx_out = find_outliers_IQR(data)\n \n cleaned = data[~idx_out].copy()\n\n # print(f'There were {idx_out.sum()} outliers.')\n \n return cleaned", "def is_outlier(hist, value):\n stdev = np.std(hist, axis=0)\n avg = np.average(hist[-15:], axis=0)\n if any(lf for lf, avg, std in zip(value, avg, stdev) if lf > avg + 3 * std) or \\\n any(lf for lf, avg, std in zip(value, avg, stdev) if lf < avg - 3 * std):\n return True\n return False", "def identify_and_handel_outliers(self):\n col_list = [] # This will hold the column names created for the administration of the modified z-score test\n values_dropped = []\n cont_cols = self.df.select_dtypes(exclude=[\"category\"]).columns # Gets continous columns \n for col in cont_cols:\n#TODO: Add lines to check column len(), if len() == 0, drop drop column, create cont_cols and cat_cols, and drop from there as well. \n df_len = len(self.df)\n top_value = self.df[col].value_counts(normalize=True, ascending=False, dropna=True)\n top_value = top_value.head(1).reset_index().to_numpy()[0] #Gets the top occuring value along with its percentage of occurances\n if top_value[1] > 0.5:#Test if the top occuring value makes up more than 50% of the data\n remaining_col = self.df[col][~self.df[col].isin([top_value[0]])] #Gets all values not within the 50% of single value data\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(remaining_col) #Gets modified z-score for remaining items\n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0) #Fills all missing z-scores\\\n #with zero(because that 50% of data removed would be zero anyways)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n else:\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(self.df[col]) #Gets modified z-score \n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n self.df.drop(columns = col_list, inplace=True)#Removed columns created to test modified z-score\n self.outliers_dropped = values_dropped", "def remove_outliers(self, data, sd_val):\n data = data.dropna()\n data = data[(np.abs(stats.zscore(data)) < float(sd_val)).all(axis=1)]\n return data", "def ModifiedZscoreOutlier(data, margin=0, consistency_correction=1.4826):\n\n median = np.median(data)\n\n deviation_from_med = np.array(data) - median\n\n mad = np.median(np.abs(deviation_from_med))\n scores = deviation_from_med/(consistency_correction*mad)\n\n lower_range, upper_range = iqr_threshold_method(scores, margin)\n\n anomaly_points = []\n for i in range(len(scores)):\n if scores[i] < lower_range or scores[i] > upper_range:\n anomaly_points.append(data[i])\n\n return anomaly_points", "def drop_outliers_z_score(df, z=3):\n n_initial_rows = df.shape[0]\n drop_list = set()\n\n print('-' * 
25)\n print('OUTLIERS DELETION: Z-SCORE METHOD\\n')\n\n for el in df.columns.values:\n drop_list = drop_list | \\\n set(df[el][np.abs(df[el]-df[el].mean()) >=\n (z*df[el].std())].index.values)\n\n drop_list = list(set(drop_list))\n count = len(drop_list)\n df.drop(drop_list, inplace=True)\n\n print('N of deleted rows: {} | % of deleted rows: {}% | '\n 'z-score: {}'.format(count, round(100 * (count / n_initial_rows), 3),\n z))\n return df", "def clip_outliers(df, std_threshold: float = 3):\n df_std = df.std(axis=0, skipna=True)\n df_mean = df.mean(axis=0, skipna=True)\n\n lower = df_mean - (df_std * std_threshold)\n upper = df_mean + (df_std * std_threshold)\n df2 = df.clip(lower=lower, upper=upper, axis=1)\n\n return df2", "def find_outliers_IQR(data): \n \n res = data.describe()\n q1 = res['25%']\n q3 = res['75%']\n thresh = 1.5*(q3-q1)\n idx_outliers =(data < (q1-thresh)) | (data > (q3+thresh))\n return idx_outliers", "def filter_outliers(self, df, outlier):\n return df[~outlier].reset_index(drop=True)", "def rem_outliers(s):\n s_mean = s.mean()\n s_std = s.std()\n s_min = s_mean - 3 * s_std\n s_max = s_mean + 3 * s_std\n return s.loc[(s_min < s.loc[:]) & (s.loc[:] < s_max)].index.to_list()", "def detect_outliers(data, tolerance=2):\n medians = data.rolling(5, center=True).median()\n lowerq = data.rolling(5, center=True).quantile(.75)\n upperq = data.rolling(5, center=True).quantile(.25)\n iqrs = np.abs(upperq - lowerq)\n diffs = np.abs(data - medians)\n outliers = pd.Series(diffs > (tolerance * iqrs))\n return outliers, sum(outliers)", "def flag_outliers_in_col(self, df, col='paciente_idade', threshold=2):\n data = df[col]\n mean = np.mean(data)\n std = np.std(data)\n outlier = []\n for i in data:\n z = (i-mean)/std\n outlier.append(z > threshold)\n outlier = pd.Series(outlier)\n print(f\"Number of outliers: {outlier.sum()}\")\n return outlier", "def detect_outliers(df, n, features):\n outlier_indices = [] \n for col in features: \n Q1 = np.percentile(df[col], 25)\n Q3 = np.percentile(df[col], 75)\n IQR = Q3 - Q1\n outlier_step = 1.5 * IQR \n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index\n outlier_indices.extend(outlier_list_col) \n outlier_indices = Counter(outlier_indices)\n multiple_outliers = list(key for key, value in outlier_indices.items() if value > n) \n return multiple_outliers", "def detect_outlier(df,method='iqr',val=np.nan):\n c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']\n if method=='z_score':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']\n df = df.withColumn(i,when(abs((col(i)-m)/s)>thresh,val).otherwise(col(i)))\n elif method=='iqr':\n for i in c_name:\n q1,q3 = df.approxQuantile(i,[0.25,0.75],0)\n IQR = q3-q1\n lo = q1-(1.5*IQR)\n up = q3+(1.5*IQR)\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n elif method=='std':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']*thresh\n lo = m - s\n up = m + s\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n return df", "def remove_outliers(data):\n upper_boundary = np.quantile(data, 0.992)\n lower_boundary = np.quantile(data, 0.008)\n selection = data[(data > lower_boundary) & (data < upper_boundary)]\n standard_dev = np.std(selection)\n median = np.median(selection)\n data[(median + 4.5 * standard_dev < data) | 
(data < median - 4.5 * standard_dev)] = median\n return data", "def detect_outliers(self, var: ndarray) -> ndarray:\n beta, gamma = self.get_vars(var)\n r = self.data.obs - self.fevar.mapping(beta)\n s = np.sqrt(self.data.obs_se**2 +\n np.sum(self.revar.mapping.mat**2*gamma, axis=1))\n a = norm.ppf(0.5 + 0.5*self.inlier_pct)\n return np.abs(r) > a*s", "def outlier_vars(data, show_plot=False):\n \n outliers = [] \n Q1 = data.quantile(0.25)\n Q3 = data.quantile(0.75)\n IQR = Q3 - Q1\n num_data = data.select_dtypes(include='number')\n result = dict ((((num_data < (Q1 - 1.5 * IQR)) | (num_data > (Q3 + 1.5 * IQR)))==True).any())\n for k,v in result.items():\n if v == True: \n outliers.append(k)\n if show_plot:\n pair_plot = sns.pairplot(data[outliers]);\n print(f'{result},\\n\\n Visualization of outlier columns')\n return pair_plot\n else:\n return data[outliers]", "def remove_outliers(lst):\n slst = sorted(lst)\n three_iqr = 3 * get_IQR(lst)\n low_boundary = float(np.percentile(lst, 25)) - three_iqr\n high_boundary = float(np.percentile(lst, 75)) + three_iqr\n\n return filter(lambda x: x >= low_boundary and x <= high_boundary, slst)" ]
[ "0.7372204", "0.7347039", "0.72379816", "0.72093225", "0.7197352", "0.7143112", "0.7124173", "0.692372", "0.68723196", "0.68039775", "0.67991555", "0.6691045", "0.6686342", "0.66102344", "0.66076815", "0.6597073", "0.65257156", "0.6483273", "0.6468104", "0.645345", "0.64530885", "0.6440808", "0.6433108", "0.6404346", "0.6301337", "0.6299468", "0.62740815", "0.62532884", "0.6249257", "0.6223162" ]
0.819929
0
Determines outliers using the 1.5IQR thresholds.
def find_outliers_IQR(data): res = data.describe() q1 = res['25%'] q3 = res['75%'] thresh = 1.5*(q3-q1) idx_outliers =(data < (q1-thresh)) | (data > (q3+thresh)) return idx_outliers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def identify_outliers(x):\n outliers = np.array([])\n\n IQR = iqr(x)\n low_cut = np.percentile(x,25) - 1.5*IQR\n high_cut = np.percentile(x,75) + 1.5*IQR\n\n for sub in x.index:\n if x.loc[sub] < low_cut or x.loc[sub] > high_cut:\n # outliers = np.append(outliers,np.asarray(x == i).nonzero()[0])\n outliers = np.append(outliers,sub)\n\n return outliers", "def iqr_detector(measures, iqr_proportion=1.5):\n # improt numpy\n import numpy as np\n\n # calculate 25, 50, 75 percentiles of measures\n Q1, Q3 = np.percentile(measures, [25,75])\n\n # calculate interquartile range\n IQR = Q3 - Q1\n\n # calculate upper and lower outlier values\n upper_out = Q3 + IQR * iqr_proportion\n lower_out = Q1 - IQR * iqr_proportion\n\n # get outliers based on > upper_out or < lower_out\n outlier_tf = np.logical_or(measures > upper_out, measures < lower_out)\n\n # get indices of outlier_tf\n outlier_i = [i for i, x in enumerate(outlier_tf) if x]\n\n return outlier_tf, outlier_i", "def iqr_outlier_cutoff(myseries, multiplier=1.5):\n # calculate IQR\n q1 = myseries.quantile(.25)\n q3 = myseries.quantile(.75)\n iqr = q3 - q1\n\n # get outlier cutoff\n cutoff = q3 + iqr*multiplier\n\n return cutoff", "def find_outliers(data, method='iqr'):\n\n if method=='iqr':\n # Finding the interquartile range\n q1 = data.quantile(.25)\n q3 = data.quantile(.75)\n iqr = q3-q1\n\n upper = q3 + iqr*1.5\n lower = q1 - iqr*1.5\n elif method=='std':\n std = data.std()\n lower = data.mean() - 3*std\n upper = data.mean() + 3*std\n else:\n raise ValueError(\"Invalid value for 'method' passed\")\n\n\n return lower, upper", "def outlier_thresholds(dataframe, col_name, low_quantile, up_quantile):\n quartile1 = dataframe[col_name].quantile(low_quantile)\n quartile3 = dataframe[col_name].quantile(up_quantile)\n interquantile_range = quartile3 - quartile1\n up_limit = quartile3 + 1.5 * interquantile_range\n low_limit = quartile1 - 1.5 * interquantile_range\n return low_limit, up_limit", "def find_outlier_range(dfs: pd.Series, iqr_factor=1.5, clip_low=None, clip_high=None):\n quant1 = dfs.quantile(0.25)\n quant3 = dfs.quantile(0.75)\n IQR = quant3 - quant1\n low = quant1 - iqr_factor * IQR\n high = quant3 + iqr_factor * IQR\n\n if clip_low is not None:\n low = max(clip_low, low)\n if clip_high is not None:\n high = min(clip_high, high)\n return (low, high)", "def IQR(data):\n return percentile(data, 75) - percentile(data, 25)", "def filter_outliers(data): \n \n idx_out = find_outliers_IQR(data)\n \n cleaned = data[~idx_out].copy()\n\n # print(f'There were {idx_out.sum()} outliers.')\n \n return cleaned", "def iqr_outliers(res, stats, factor):\n col_name1 = 'Q3 + IQR*' + str(factor)\n std1 = (stats['Q3'] + (stats['Q3'] - stats['Q1'])*factor)\n std1.name = col_name1\n\n col_name2 = 'Q3 - IQR*' + str(factor)\n std2 = (stats['Q3'] - (stats['Q3'] - stats['Q1'])*factor)\n std2.name = col_name2\n\n std2.loc[std2 < 0] = 0\n\n std = pd.concat([std1, std2], axis=1)\n\n data1 = pd.merge(res.reset_index(), std.reset_index(), on=['Site', 'Measurement'])\n data2 = data1[data1['Value'] > data1[col_name1]]\n data3 = data1[data1['Value'] < data1[col_name2]]\n\n data4 = pd.concat([data2, data3])\n\n return data4", "def remove_outliers(data):\n upper_boundary = np.quantile(data, 0.992)\n lower_boundary = np.quantile(data, 0.008)\n selection = data[(data > lower_boundary) & (data < upper_boundary)]\n standard_dev = np.std(selection)\n median = np.median(selection)\n data[(median + 4.5 * standard_dev < data) | (data < median - 4.5 * standard_dev)] = median\n return 
data", "def detect_outlier(df,method='iqr',val=np.nan):\n c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']\n if method=='z_score':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']\n df = df.withColumn(i,when(abs((col(i)-m)/s)>thresh,val).otherwise(col(i)))\n elif method=='iqr':\n for i in c_name:\n q1,q3 = df.approxQuantile(i,[0.25,0.75],0)\n IQR = q3-q1\n lo = q1-(1.5*IQR)\n up = q3+(1.5*IQR)\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n elif method=='std':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']*thresh\n lo = m - s\n up = m + s\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n return df", "def remove_outliers(lst):\n slst = sorted(lst)\n three_iqr = 3 * get_IQR(lst)\n low_boundary = float(np.percentile(lst, 25)) - three_iqr\n high_boundary = float(np.percentile(lst, 75)) + three_iqr\n\n return filter(lambda x: x >= low_boundary and x <= high_boundary, slst)", "def analyze_outliers(self, outliers):\n for outlier in outliers:\n image_path = os.path.join(self._images_out_folder, outlier.sample.class_name, outlier.sample.name)\n\n print('Outlier')\n print(' sample path = ', image_path)\n print(' sample class = ', outlier.sample.class_name)\n print(' detected class = ', outlier.detected_class)\n print(' all classes = ', outlier.all_classes)\n\n img = cv2.imread(image_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n show_image(img, figsize=(20, 20))", "def outlier_hunt(df):\n outlier_indices = []\n\n # iterate over features(columns)\n for col in df.columns.tolist():\n # 1st quartile (25%)\n Q1 = np.percentile(df[col], 1)\n\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col], 99)\n\n # Interquartile rrange (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices.extend(outlier_list_col)\n\n # select observations containing more than 2 outliers\n outlier_indices = Counter(outlier_indices)\n multiple_outliers = list(k for k, v in outlier_indices.items() if v >= 2)\n\n return multiple_outliers", "def detect_outliers(data, tolerance=2):\n medians = data.rolling(5, center=True).median()\n lowerq = data.rolling(5, center=True).quantile(.75)\n upperq = data.rolling(5, center=True).quantile(.25)\n iqrs = np.abs(upperq - lowerq)\n diffs = np.abs(data - medians)\n outliers = pd.Series(diffs > (tolerance * iqrs))\n return outliers, sum(outliers)", "def get_IQR(lst):\n return (float(np.percentile(lst, 75)) - float(np.percentile(lst, 25)))", "def IQR(x):\r\n\r\n x.sort()\r\n # split values into lower and upper portions at the median\r\n odd = len(x) % 2\r\n midpoint = int(len(x) / 2)\r\n if odd:\r\n low_vals = x[:midpoint]\r\n high_vals = x[midpoint + 1:]\r\n else: # if even\r\n low_vals = x[:midpoint]\r\n high_vals = x[midpoint:]\r\n # find the median of the low and high values\r\n min_val = median(low_vals)\r\n max_val = median(high_vals)\r\n return min_val, max_val", "def detect_outliers(df):\n outlier_indices = {}\n # iterate over features(columns)\n for col in df.columns:\n # 1st quartile (25%)\n Q1 = np.percentile(df[col].dropna(), 25)\n # 3rd quartile (75%)\n 
Q3 = np.percentile(df[col].dropna(), 75)\n # Interquartile range (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index.to_list()\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices[col]=outlier_list_col\n if outlier_list_col:\n Box_plots(df[col],col)\n return outlier_indices", "def replace_outliers(data, threshold=4):\n zscores = stats.zscore(data)\n mean, std = data.mean(), data.std()\n data.loc[zscores >= threshold] = mean + std * threshold\n data.loc[zscores <= -threshold] = mean - std * threshold\n\n return data", "def remove_outliers(a, constant=1.5):\n if not isinstance(a, np.ndarray):\n a = np.array(list(a))\n\n upper_quartile = np.percentile(a, 75)\n lower_quartile = np.percentile(a, 25)\n IQR = (upper_quartile - lower_quartile) * constant\n quartile_set = (lower_quartile - IQR, upper_quartile + IQR)\n return [y for y in a.tolist() if y >= quartile_set[0] and y <= quartile_set[1]]", "def iqr(self, arr):\n a = np.asarray(arr)\n self.q1 = stats.scoreatpercentile(a, 25)\n self.q2 = stats.scoreatpercentile(a, 50)\n self.q3 = stats.scoreatpercentile(a, 75)", "def get_outliers(self):\n out = Outliers()\n out.set_data(self.data)\n out.run()\n return out.get_value('outliers')", "def remove_outliers(value, remove_outlier):\n try:\n if len(value) > 0:\n percent = float(remove_outlier)\n value = value.dropna().astype(\"float64\")\n q75, q25 = np.percentile(\n value, [percent, 100 - percent], interpolation=\"linear\"\n )\n iqr = q75 - q25\n value = value[value >= (q25 - 1.5 * iqr)]\n value = value[value <= (q75 + 1.5 * iqr)]\n value.reset_index(drop=True)\n return value\n except:\n raise", "def drop_outliers(data, cols, t=1.5):\n iqr_d = iqr(data, cols, t)\n for col in cols:\n return data[~((data[col]< iqr_d[\"low_b\"][col]) | (data[col]> iqr_d[\"upp_b\"][col]))]", "def ZscoreOutlier(data, margin=0, axis=0, ddof=0, nan_policy='propagate'):\n scores = ss.zscore(data, axis=axis, ddof=ddof, nan_policy=nan_policy)\n\n lower_range, upper_range = iqr_threshold_method(scores, margin)\n\n anomaly_points = []\n for i in range(len(scores)):\n if scores[i] < lower_range or scores[i] > upper_range:\n anomaly_points.append(data[i])\n\n return anomaly_points", "def number_of_outliers(sentiment, lower, upper):\r\n upper_quartile = np.percentile(sentiment, upper)\r\n lower_quartile = np.percentile(sentiment, lower)\r\n lower_outlier = np.count_nonzero(sentiment <= lower_quartile)\r\n higher_outlier = np.count_nonzero(sentiment >= upper_quartile)\r\n total_outlier = lower_outlier + higher_outlier\r\n return total_outlier", "def drop_outliers_quantile(df, upper=0.99, lower=0):\n n_initial_rows = df.shape[0]\n drop_list = set()\n quant_upper = df.quantile(upper)\n quant_lower = df.quantile(lower)\n\n print('-' * 25)\n print('OUTLIERS DELETION: QUANTILE METHOD\\n')\n\n for el in df.columns.values:\n drop_list = drop_list | \\\n set(df[el][df[el] > quant_upper[el]].index.values) | \\\n set(df[el][df[el] < quant_lower[el]].index.values)\n\n drop_list = list(set(drop_list))\n count = len(drop_list)\n df.drop(drop_list, inplace=True)\n\n print('Lower quantile: {} | Upper quantile: {}'.format(lower, upper))\n print('N of deleted rows: {} | % of deleted rows: {}%'.format(\n count, round(100 * (count / n_initial_rows), 3)))\n return df", "def filterOutlier(data_list,z_score_threshold=3):\n\t# n = 
len(data_list)\n\t# z_score_threshold = (n-1)/np.sqrt(n)\n\tdata = np.array(data_list)\n\tmedian = np.median(data)\n\tdeviation = np.median([np.abs(x - median) for x in data])\n\tz_scores = [0.675*(x - median)/deviation for x in data]\n\tdata_out = data[np.where(np.abs(z_scores) < z_score_threshold)].tolist()\n\toutput = data_out if len(data_out) > 0 else data_list\n\treturn output", "def is_outlier(points, thresh=3.5):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score > thresh", "def outlierdetection(data,method):\n import numpy as np\n ##########\n # 0. Input\n data = np.array(data)\n methodname = method['name']\n rule = method['rule']\n try:\n mask = rule['initmask'].copy()\n if not mask:\n mask = np.full_like(data,True,dtype=bool)\n rule['initmask'] = mask.copy()\n except:\n mask = np.full_like(data,True,dtype=bool)\n rule['initmask'] = mask.copy()\n ##########\n # 1. Compute\n if methodname in {'median','sigma'}:\n minp,maxp = rule['minp'],rule['maxp']\n niter = rule['niter']\n for i in range(niter):\n gooddata = data[mask] # good data\n ### median or sigma\n if methodname=='median':\n median = np.median(gooddata)\n minbound = minp*median\n maxbound = maxp*median\n elif methodname=='sigma':\n std = np.std(gooddata)\n median = np.median(gooddata)\n minbound = median - minp*std\n maxbound = median + maxp*std\n ### update mask\n m = np.argwhere((data >= minbound) & (data <= maxbound)).flatten() # good data\n mask = np.full_like(data,False,dtype=bool)\n mask[m] = True\n print('{0} iter {1}'.format(methodname,i))\n elif methodname == 'sn':\n minp = rule['minp']\n noise = rule['noise']\n keepneg = rule['keepneg']\n sn = data / noise\n if keepneg:\n sn = np.abs(sn)\n m = np.argwhere(sn >= minp).flatten()\n mask = np.full_like(data,False,dtype=bool)\n mask[m] = True\n print('{0} complete'.format(methodname))\n elif methodname == 'sigmalocal':\n sigma = rule['sigma']\n noise = rule['noise']\n keepneg = rule['keepneg']\n niter = rule['niter']\n params = rule['params']\n for i in range(niter):\n tmpdata = data[mask]\n tmpmedian = savgol_filter(tmpdata,**params)\n tmpnoise = noise[mask]\n ratio = (tmpdata - tmpmedian)/tmpnoise\n if keepneg:\n ratio = np.abs(ratio)\n m = np.argwhere(ratio > sigma).flatten()\n mask[m] = False\n print('{0} iter {1}'.format(methodname,i))\n else:\n raise ValueError('method {0} does not support'.format(method))\n ##########\n # 2. Update with the initial mask and return\n return mask & rule['initmask']" ]
[ "0.7312946", "0.71711165", "0.7104051", "0.7031879", "0.69975835", "0.69557506", "0.69528335", "0.6874209", "0.6798811", "0.65834653", "0.6569872", "0.6486448", "0.6450941", "0.64337593", "0.64293826", "0.6391283", "0.6367405", "0.636632", "0.6259762", "0.6258333", "0.6254081", "0.62505853", "0.62470305", "0.62425834", "0.62286854", "0.61618924", "0.6156386", "0.6105893", "0.6051069", "0.6030012" ]
0.7957246
0
Filters outliers from given data via the "find_outliers_IQR" function and saves filtered values to a new DataFrame
def filter_outliers(data):
    idx_out = find_outliers_IQR(data)
    cleaned = data[~idx_out].copy()
    # print(f'There were {idx_out.sum()} outliers.')
    return cleaned
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_outliers_IQR(data): \n \n res = data.describe()\n q1 = res['25%']\n q3 = res['75%']\n thresh = 1.5*(q3-q1)\n idx_outliers =(data < (q1-thresh)) | (data > (q3+thresh))\n return idx_outliers", "def filter_outliers(self, df, outlier):\n return df[~outlier].reset_index(drop=True)", "def identify_outliers(x):\n outliers = np.array([])\n\n IQR = iqr(x)\n low_cut = np.percentile(x,25) - 1.5*IQR\n high_cut = np.percentile(x,75) + 1.5*IQR\n\n for sub in x.index:\n if x.loc[sub] < low_cut or x.loc[sub] > high_cut:\n # outliers = np.append(outliers,np.asarray(x == i).nonzero()[0])\n outliers = np.append(outliers,sub)\n\n return outliers", "def remove_outliers(df, var):\n import numpy as np\n \n df = df.copy()\n \n # remove outliers\n Q1 = np.nanquantile(df[var] ,0.25)\n Q3 = np.nanquantile(df[var], 0.75)\n IQR = Q3 - Q1\n \n lower_end = Q1 - 1.5 * IQR \n high_end = Q3 + 1.5 * IQR \n \n df_filtered = df.drop(df[(df[var] < lower_end) | (df[var] > high_end)].index)\n \n return df_filtered", "def iqr_outliers(res, stats, factor):\n col_name1 = 'Q3 + IQR*' + str(factor)\n std1 = (stats['Q3'] + (stats['Q3'] - stats['Q1'])*factor)\n std1.name = col_name1\n\n col_name2 = 'Q3 - IQR*' + str(factor)\n std2 = (stats['Q3'] - (stats['Q3'] - stats['Q1'])*factor)\n std2.name = col_name2\n\n std2.loc[std2 < 0] = 0\n\n std = pd.concat([std1, std2], axis=1)\n\n data1 = pd.merge(res.reset_index(), std.reset_index(), on=['Site', 'Measurement'])\n data2 = data1[data1['Value'] > data1[col_name1]]\n data3 = data1[data1['Value'] < data1[col_name2]]\n\n data4 = pd.concat([data2, data3])\n\n return data4", "def detect_outliers(df):\n outlier_indices = {}\n # iterate over features(columns)\n for col in df.columns:\n # 1st quartile (25%)\n Q1 = np.percentile(df[col].dropna(), 25)\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col].dropna(), 75)\n # Interquartile range (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index.to_list()\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices[col]=outlier_list_col\n if outlier_list_col:\n Box_plots(df[col],col)\n return outlier_indices", "def remove_outliers(data):\n upper_boundary = np.quantile(data, 0.992)\n lower_boundary = np.quantile(data, 0.008)\n selection = data[(data > lower_boundary) & (data < upper_boundary)]\n standard_dev = np.std(selection)\n median = np.median(selection)\n data[(median + 4.5 * standard_dev < data) | (data < median - 4.5 * standard_dev)] = median\n return data", "def outlier_vars(data, show_plot=False):\n \n outliers = [] \n Q1 = data.quantile(0.25)\n Q3 = data.quantile(0.75)\n IQR = Q3 - Q1\n num_data = data.select_dtypes(include='number')\n result = dict ((((num_data < (Q1 - 1.5 * IQR)) | (num_data > (Q3 + 1.5 * IQR)))==True).any())\n for k,v in result.items():\n if v == True: \n outliers.append(k)\n if show_plot:\n pair_plot = sns.pairplot(data[outliers]);\n print(f'{result},\\n\\n Visualization of outlier columns')\n return pair_plot\n else:\n return data[outliers]", "def detect_outlier(df,method='iqr',val=np.nan):\n c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']\n if method=='z_score':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']\n df = df.withColumn(i,when(abs((col(i)-m)/s)>thresh,val).otherwise(col(i)))\n 
elif method=='iqr':\n for i in c_name:\n q1,q3 = df.approxQuantile(i,[0.25,0.75],0)\n IQR = q3-q1\n lo = q1-(1.5*IQR)\n up = q3+(1.5*IQR)\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n elif method=='std':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']*thresh\n lo = m - s\n up = m + s\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n return df", "def drop_outliers(data, cols, t=1.5):\n iqr_d = iqr(data, cols, t)\n for col in cols:\n return data[~((data[col]< iqr_d[\"low_b\"][col]) | (data[col]> iqr_d[\"upp_b\"][col]))]", "def outlier_hunt(df):\n outlier_indices = []\n\n # iterate over features(columns)\n for col in df.columns.tolist():\n # 1st quartile (25%)\n Q1 = np.percentile(df[col], 1)\n\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col], 99)\n\n # Interquartile rrange (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices.extend(outlier_list_col)\n\n # select observations containing more than 2 outliers\n outlier_indices = Counter(outlier_indices)\n multiple_outliers = list(k for k, v in outlier_indices.items() if v >= 2)\n\n return multiple_outliers", "def is_outlier(incoming_data):\r\n outlier_df = \\\r\n incoming_data[incoming_data.apply(\r\n lambda x: np.abs(x - x.mean()) / x.std() > 3).all(axis=1)]\r\n return not outlier_df.empty", "def find_outliers(data, method='iqr'):\n\n if method=='iqr':\n # Finding the interquartile range\n q1 = data.quantile(.25)\n q3 = data.quantile(.75)\n iqr = q3-q1\n\n upper = q3 + iqr*1.5\n lower = q1 - iqr*1.5\n elif method=='std':\n std = data.std()\n lower = data.mean() - 3*std\n upper = data.mean() + 3*std\n else:\n raise ValueError(\"Invalid value for 'method' passed\")\n\n\n return lower, upper", "def remove_outliers(df: pd.DataFrame, numerical_variables: list, strategy: str = 'IQR') -> pd.DataFrame:\n assert strategy in [\n 'IQR', 'z-score'], \"You must choose IQR or z-score strategy\"\n\n df_numerical = df[numerical_variables]\n\n if strategy == \"IQR\":\n Q1 = df_numerical.quantile(0.25)\n Q3 = df_numerical.quantile(0.75)\n IQR = Q3 - Q1\n is_outlier = (df_numerical < (Q1 - 1.5 * IQR)\n ) | (df_numerical > (Q3 + 1.5 * IQR))\n outliers = df_numerical[is_outlier.any(axis=1)]\n\n elif strategy == 'z-score':\n z = np.abs(stats.zscore(df))\n outliers = df_numerical[(z >= 3).all(axis=1)]\n\n return df.drop(outliers.index, axis=0)", "def get_outliers(a_dataframe):\n outliers_list = []\n for category in a_dataframe.dtypes.keys():\n try:\n column = a_dataframe.loc[:, category]\n mean = np.mean(column) # check if category is numeric\n except TypeError:\n pass\n else:\n # print_hist(column, category)\n st_dev = np.std(column)\n limit_hi = mean + 2 * st_dev\n limit_lo = mean - 2 * st_dev\n flag_bad = (column < limit_lo) | (column > limit_hi)\n if category != \"fnlwgt\": # skip 'fnlwgt' var. 
'cos I'll delete it\n outliers_list.append(flag_bad)\n num_outliers = sum(flag_bad)\n print_stats(category, column,\n limit_hi, limit_lo,\n num_outliers\n )\n\n return outliers_list", "def find_outliers_z(data):\n\n zFP = np.abs(stats.zscore(data))\n zFP = pd.Series(zFP, index=data.index)\n idx_outliers = zFP > 3\n return idx_outliers", "def remove_outliers_by_percentile(dataframe, columns, limits =.01, frame_type='spark'):\n\n if frame_type == 'spark':\n import numpy as np\n df = dataframe\n\n def percentile_threshold(ardd, percentile):\n assert percentile > 0 and percentile <= 100, \"percentile should be larger then 0 and smaller or equal to 100\"\n # df.approxQuantile(\"x\", [0.5], 0.25)\n return ardd.sortBy(lambda x: x).zipWithIndex().map(lambda x: (x[1], x[0])) \\\n .lookup(np.ceil(ardd.count() / 100 * percentile - 1))[0]\n\n for column in columns:\n def flatten_column(row):\n return tuple(float(x) for x in row)\n #Compute the percentiles\n lower = percentile_threshold(df.select(column).rdd.flatMap(flatten_column),limits)\n upper = percentile_threshold(df.select(column).rdd.flatMap(flatten_column), 100 - limits)\n\n print('For {column} the lower limit is {lower}'.format(column=column,lower=str(lower)))\n print('For {column} the upper limit is {upper}'.format(column=column,upper=str(upper)))\n\n from pyspark.sql.functions import lit\n #Filter out outliers\n df = df.where(\"{column} < {upper} AND {column} > {lower} \"\\\n .format(column=column,upper=upper,lower=lower))\n return df\n\n\n else:\n import numpy as np\n\n df = None\n if frame_type == 'h2o':\n # convert to pandas\n df = dataframe.as_data_frame()\n elif frame_type == 'pandas':\n df = dataframe\n\n for column in columns:\n ulimit = np.percentile(train_df[column].values, 100 - limits)\n llimit = np.percentile(train_df[column].values, limits)\n df[column] = df[df[column] < ulimit]\n df[column] = df[df[column] > llimit]\n\n if frame_type == 'h2o':\n import h2o\n print('Converting to H2OFrame ...')\n # convert train back to h2o\n df = h2o.H2OFrame(df)\n print('Done.')\n return df\n else:\n return df", "def detect_outliers(data, tolerance=2):\n medians = data.rolling(5, center=True).median()\n lowerq = data.rolling(5, center=True).quantile(.75)\n upperq = data.rolling(5, center=True).quantile(.25)\n iqrs = np.abs(upperq - lowerq)\n diffs = np.abs(data - medians)\n outliers = pd.Series(diffs > (tolerance * iqrs))\n return outliers, sum(outliers)", "def _remove_outliers(df, contamination):\n\n day_roam_distance = df[['day_roam_distance']]\n\n clf = IsolationForest(n_estimators=100, contamination=contamination, random_state=0, n_jobs=-1)\n outliers = clf.fit_predict(day_roam_distance)\n\n inline_data = df.loc[outliers == 1]\n\n return inline_data", "def remove_outliers(y, x, ids):\n # Compute first and third quartiles and the Interquartile range\n q1 = np.percentile(x, 25, axis=0)\n q3 = np.percentile(x, 75, axis=0)\n iqr = q3 - q1\n # Set to True any entry outside the Interquartile range\n mask = (x >= q1 - 1.5 * iqr) & (x <= q3 + 1.5 * iqr)\n\n # Only filter out features with values that are spread over a range bigger than threshold_range\n # i.e. 
if the difference between the minimum value and the maximum value is bigger than threshold_range\n threshold_range = 10\n # Set to False any feature with range bigger than threshold\n col_mask = (x.max(axis=0) - x.min(axis=0)) < threshold_range\n mask = mask | col_mask\n row_mask = mask.all(axis=1) # sets to False rows containing any outliers\n\n return y[row_mask], x[row_mask], ids[row_mask]", "def identify_and_handel_outliers(self):\n col_list = [] # This will hold the column names created for the administration of the modified z-score test\n values_dropped = []\n cont_cols = self.df.select_dtypes(exclude=[\"category\"]).columns # Gets continous columns \n for col in cont_cols:\n#TODO: Add lines to check column len(), if len() == 0, drop drop column, create cont_cols and cat_cols, and drop from there as well. \n df_len = len(self.df)\n top_value = self.df[col].value_counts(normalize=True, ascending=False, dropna=True)\n top_value = top_value.head(1).reset_index().to_numpy()[0] #Gets the top occuring value along with its percentage of occurances\n if top_value[1] > 0.5:#Test if the top occuring value makes up more than 50% of the data\n remaining_col = self.df[col][~self.df[col].isin([top_value[0]])] #Gets all values not within the 50% of single value data\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(remaining_col) #Gets modified z-score for remaining items\n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0) #Fills all missing z-scores\\\n #with zero(because that 50% of data removed would be zero anyways)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n else:\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(self.df[col]) #Gets modified z-score \n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n self.df.drop(columns = col_list, inplace=True)#Removed columns created to test modified z-score\n self.outliers_dropped = values_dropped", "def remove_outliers(df, std_threshold: float = 3):\n\n df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]\n return df", "def fast_outlier_id(data, cols=\"All\", method=\"z-score\",\n threshold_low_freq=0.05):\n\n # ASSERT TESTS\n assert isinstance(data, pd.DataFrame), \"Data must be in pandas Data Frame!\"\n\n if type(cols) == str:\n if cols.lower() == \"all\":\n cols = list(data.columns)\n\n if type(cols) != str:\n assert isinstance(cols, list), \"Columns must be inputted in a list\"\n for i in cols:\n assert i in list(\n data.columns), \"Columns must exist in the inputted data \" \\\n \"dataframe\"\n\n assert method.lower() in [\"z-score\",\n \"interquartile\"], \\\n \"The only permitted values are z-score or interquantile,thank you\"\n\n # Initialize lists containing summary values\n no_nans_list = list()\n col_type_list = list()\n perc_nans_list = list()\n outlier_values_list = list()\n outlier_count_list = list()\n outlier_perc_list = list()\n method_list = list()\n\n # Subsetting the data by the columns selected by the user\n subset = data[cols]\n for i in cols:\n # More lists containing summary values\n no_nans = subset[i].isna().sum()\n no_nans_list.append(no_nans)\n col_type_list.append(subset[i].dtype)\n 
perc_nans_list.append(round(no_nans / len(subset[i]), 2))\n data_no_nans = subset[i][~pd.isna(subset[i])]\n if data_no_nans.dtypes in ['float64', 'int64']:\n if method.lower() == \"z-score\":\n score = np.abs(stats.zscore(data_no_nans))\n data_no_nans = data_no_nans.to_numpy()\n outlier_values = data_no_nans[np.where(score > 2)]\n outlier_count_list.append(len(outlier_values))\n outlier_perc_list.append(\n round(len(outlier_values) / len(data_no_nans), 2))\n outlier_values_list.append(outlier_values)\n method_list.append(\"Z-Score\")\n elif method.lower() == \"interquartile\":\n Q1 = np.quantile(data_no_nans, 0.25)\n Q3 = np.quantile(data_no_nans, 0.75)\n IQR = Q3 - Q1\n score = (data_no_nans < (Q1 - 1.5 * IQR)) | (\n data_no_nans > (Q3 + 1.5 * IQR))\n data_no_nans = data_no_nans.to_numpy()\n outlier_values = data_no_nans[np.where(score > 0)]\n outlier_count_list.append(len(outlier_values))\n outlier_perc_list.append(\n round(len(outlier_values) / len(data_no_nans), 2))\n outlier_values_list.append(outlier_values)\n method_list.append(\"Interquartile\")\n elif data_no_nans.dtype in ['object']:\n score = data_no_nans.value_counts() / len(data_no_nans)\n outlier_values = score[score < threshold_low_freq].index.tolist()\n outlier_count_list.append(\n data_no_nans.value_counts()[score < threshold_low_freq].sum())\n outlier_perc_list.append(\n round(sum(score[score < threshold_low_freq]), 2))\n outlier_values_list.append(outlier_values)\n method_list.append(\"low-freq\")\n summary_dict = {'column_name': cols, 'type': col_type_list,\n 'no_nans': no_nans_list, 'perc_nans': perc_nans_list,\n 'outlier_method': method_list,\n \"no_outliers\": outlier_count_list,\n \"perc_outliers\": outlier_perc_list,\n \"outlier_values\": outlier_values_list}\n summary = pd.DataFrame(summary_dict)\n return (summary)", "def remove_outliers(self, data, sd_val):\n data = data.dropna()\n data = data[(np.abs(stats.zscore(data)) < float(sd_val)).all(axis=1)]\n return data", "def detect_outliers(df, n, features):\n outlier_indices = [] \n for col in features: \n Q1 = np.percentile(df[col], 25)\n Q3 = np.percentile(df[col], 75)\n IQR = Q3 - Q1\n outlier_step = 1.5 * IQR \n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index\n outlier_indices.extend(outlier_list_col) \n outlier_indices = Counter(outlier_indices)\n multiple_outliers = list(key for key, value in outlier_indices.items() if value > n) \n return multiple_outliers", "def outliers(df):\r\n # LocalOutlierFactor.\r\n start_time = time.time()\r\n print('\\n'+ '# '*10+'[Training] Local Outlier Factor Model (LOF):'+ ' #'*10)\r\n clf = LocalOutlierFactor()\r\n y_pred = clf.fit_predict(df.drop(['label', 'label_encoded'], axis=1))\r\n print('> '*2+'Training and prediction time: %.4f seconds.'%(time.time()-start_time))\r\n # Dataframe with various metrics.\r\n metrics = ['fliers', 'Q1', 'Q3', 'IQR', 'min', 'max', 'median', 'LOF_inliers', 'LOF_outliers', 'LOF_outlier_factor']\r\n df_outliers = pd.DataFrame()\r\n df_outliers['Feature'] = metrics\r\n bp = plt.boxplot([df[i] for i in df.drop(['label', 'label_encoded'], axis=1).columns])\r\n for i in range(len(df.drop(['label', 'label_encoded'], axis=1).columns)):\r\n vals = []\r\n # Fliers.\r\n vals.append(len(bp['fliers'][i].get_ydata()))\r\n # Q1.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].quantile(0.25))\r\n # Q3.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].quantile(0.75))\r\n # IQR.\r\n vals.append(df[df.drop(['label', 
'label_encoded'], axis=1).columns[i]].quantile(0.75) - df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].quantile(0.25))\r\n # Min.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].min())\r\n # Max.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].max())\r\n # Median.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].median())\r\n # Local Outlier Factor.\r\n vals.append(y_pred.tolist().count(1)) # Inliers.\r\n vals.append(y_pred.tolist().count(-1)) # Outliers.\r\n vals.append(clf.negative_outlier_factor_)\r\n # Add column and data.\r\n df_outliers[df.columns[i]] = vals\r\n plt.close()\r\n return df_outliers", "def filter_outliers(data: pd.Series, std: int=3) -> pd.Series:\n return data[(data - data.mean()).abs() <= (std * data.std())]", "def remove_rf_outliers(X, y):\n summary = np.percentile(y, [25, 50, 75])\n high_lim = summary[0] - 1.5 * (summary[2] - summary[1])\n low_lim = summary[2] + 1.5 * (summary[2] - summary[1])\n\n data = np.hstack((X, y[None].T))\n\n data = data[~(data[:, -1] >= low_lim)]\n data = data[~(data[:, -1] <= high_lim)]\n\n # remove last instances\n data = data[:-(data.shape[0] % 1000), :]\n\n return data[:, :-1], data[:, -1]", "def _identify_outliers(\n table: pd.DataFrame,\n column_names: list,\n which: list,\n factor: float = 1.5,\n merge: float = \"and\",\n) -> np.ndarray:\n if isinstance(table, dict):\n table = pd.DataFrame(table)\n\n # True if values are good, False if outliers\n indices = np.ones((len(column_names), len(table)), dtype=bool)\n indices[:, table[table.isnull().any(axis=1)].index] = False\n\n for idx, column in enumerate(column_names):\n Q1 = table[column].quantile(0.25)\n Q3 = table[column].quantile(0.75)\n IQR = Q3 - Q1\n if which[idx] == \"above\":\n idx_outlier = table[column] > (Q3 + factor * IQR)\n elif which[idx] == \"below\":\n idx_outlier = table[column] < (Q1 - factor * IQR)\n elif which[idx] == \"both\":\n idx_outlier = (table[column] < (Q1 - factor * IQR)) + (\n table[column] > (Q3 + factor * IQR)\n )\n indices[idx, table[idx_outlier].index] = False\n\n if merge == \"and\":\n indices = np.all(indices, axis=0)\n elif merge == \"or\":\n indices = ~np.any(~indices, axis=0)\n\n return indices", "def replace_outliers(data, threshold=4):\n zscores = stats.zscore(data)\n mean, std = data.mean(), data.std()\n data.loc[zscores >= threshold] = mean + std * threshold\n data.loc[zscores <= -threshold] = mean - std * threshold\n\n return data" ]
[ "0.7899556", "0.75043386", "0.7186214", "0.71685565", "0.7134353", "0.71028703", "0.70568556", "0.7023544", "0.69353133", "0.68823826", "0.6858225", "0.68442225", "0.6770016", "0.675921", "0.669157", "0.6602507", "0.6570492", "0.6569533", "0.6558121", "0.65446496", "0.65201205", "0.65003806", "0.6475474", "0.6467962", "0.6448635", "0.6439705", "0.6421968", "0.638998", "0.63754463", "0.6364178" ]
0.84360087
0
Runs a ttest on two samples from the same independent variable; prints whether or not they are significant; and returns pvalue as a variable called "pvalue."
def ttest_review(sample_1, sample_2, alpha=.05):
    result = stats.ttest_ind(sample_1, sample_2)
    crit_val, p_val = result
    ## Creating interpretation based on p-value results.
    if p_val < .05:
        print(f'The feature is statistically significant with a p-value of {p_val}.')
    else:
        print(f'The feature is not statistically significant with a p-value of {p_val}.')
    return p_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _t_test(_sample_a, _sample_b):\n res = stats.ttest_ind(_sample_a, _sample_b, axis=0, equal_var=equal_var, nan_policy='propagate')\n print('Independent t-test\\nt-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)", "def calculate_t_test(mean1, mean2, var1, var2, n1, n2, alpha):\n # Two Sample T Test (M0 == M1) (Two Tails)\n t = (mean1 - mean2) / sqrt((var1 / n1) + (var2 / n2)) # t statistic calculation for two sample\n df = n1 + n2 - 2 # degree of freedom for two sample t - set\n pval = 1 - stats.t.sf(np.abs(t), df) * 2 # two-sided pvalue = Prob(abs(t)>tt) # p - value\n cv = stats.t.ppf(1 - (alpha / 2), df)\n standart_error = cv * sqrt((var1 / n1) + (var2 / n2))\n confidence_intervals = [abs(mean1 - mean2) - standart_error, abs(mean1 - mean2) + standart_error, standart_error]\n acception = 'HO REJECTED!' if pval < (alpha / 2) else 'HO ACCEPTED!' # left tail\n acception = 'HO REJECTED!' if pval > 1 - (alpha / 2) else 'HO ACCEPTED!' # right tail\n return pval, confidence_intervals, acception", "def eeg_twosample_ttest(array1,array2):\t\n\tfrom scipy.stats import ttest_rel\n\ts1 = array1.shape\n\tp = np.zeros(s1[1])\n\tt = np.zeros(s1[1])\n\tfor i in range(s1[1]):\n\t\ttval,pval = ttest_rel(array1[:,i],array2[:,i])\n\t\tp[i]=pval\n\t\tt[i]=tval\n\t\t\n\treturn t,p", "def p_value(set1, set2):\n\ts, p = stats.ttest_ind(set1, set2)\n\treturn p", "def t_tests(self):\n se = self.se()\n t = self._coef / se\n p = 2 * stats.distributions.t.sf(np.abs(t), self._rdf)\n return (t, p)", "def ttest_two_sided(arr1, arr2, alpha=0.05, verbose=False):\n res = stats.ttest_ind(arr1, arr2)\n if res[1] <= alpha:\n if verbose: print(\n f'P-value = {round(res[1], 3)}, i.e. at alpha={alpha} the samples are significantly DIFFERENT')\n is_significant = True\n else:\n if verbose: print(f'P-value = {round(res[1], 3)}, i.e. 
at alpha={alpha} the samples are from the SAME set')\n is_significant = False\n return res[1], is_significant", "def t_test(sample1, sample2, paired=False, alpha=0.05,\n alternative='two-sided', correction='auto', r=0.707,\n show_graph=True, **kwargs):\n confidence = 1 - alpha\n df_result = pg.ttest(\n sample1,\n sample2,\n paired=paired,\n confidence=confidence,\n alternative=alternative,\n correction=correction,\n r=r\n )\n if show_graph:\n if paired:\n difference = [x - y for x, y in zip(sample1, sample2)]\n Visualization.histogram(difference, **kwargs)\n else:\n Visualization.density_plot(sample1, sample2,\n fig_size=(5, 4), **kwargs)\n return HypothesisTester.define_hypothesis(df_result, 'mean',\n alternative, paired,\n alpha).T", "def ttest(x, mu=0, alpha=0.05, is_bernoulli=False, two_sided=True, return_tuple=False):\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = f'X_bar = {mu}'\n h1 = f'X_bar != {mu}'\n else:\n quant_order = 1 - alpha\n h0 = f'X_bar <= {mu}'\n h1 = f'X_bar > {mu}'\n\n # Input vector as array\n x = np.asarray(x)\n # Sample size\n n = len(x)\n\n # Empirical mean\n x_bar = x.mean()\n # s estimator (variance)\n if is_bernoulli:\n s2 = x_bar * (1 - x_bar)\n else:\n s2 = desc.var(x)\n\n # Degrees of freedom\n df = n - 1\n\n # T statistic\n t = (x_bar - mu) / (math.sqrt(s2 / n))\n if two_sided:\n t = math.fabs(t)\n # p and critical values\n p = 2.0 * (1.0 - scp.t.cdf(t, df=df))\n\n if n > 30:\n cv = scp.norm.ppf(quant_order)\n else:\n cv = scp.t.ppf(quant_order, df=df)\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='One Sample Student test',\n h0=h0, h1=h1,\n alpha=alpha)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ", "def two_tailed_t_test(samples: np.ndarray, H0: float):\n empirical_mean = np.mean(samples, axis=0)\n number_samples = samples.shape[0]\n standard_error = np.std(samples, ddof=1, axis=0) / np.sqrt(number_samples)\n t_value = (empirical_mean - H0) / standard_error\n p_value = 2.0 * (1.0 - t(df=number_samples - 1).cdf(np.abs(t_value)))\n return t_value, p_value", "def test_onesample_two_tailed(self):\n rng = np.random.default_rng(13489132474)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(-5, 2, 100)\n\n ttest = one_sample_ttest(data1, -5)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def _t_test_results(self):\n t, df, p = self.api.m.math_utils.welchs_t_test(\n self.lkgr.values, self.fkbr.values)\n lines = [\n 'LKGR values: %r' % self.lkgr.values,\n 'FKBR values: %r' % self.fkbr.values,\n 't-statistic: %r' % t,\n 'deg. 
of freedom: %r' % df,\n 'p-value: %r' % p,\n 'Confidence score: %r' % (100 * (1 - p))\n ]\n return '\\n'.join(lines)", "def ttest(\n data, dataLabel=None, paired=False, decimals=4,\n textline=False, units=None\n ):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.ttest: data must be a dictionary'\n + ' with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n g = {}\n n = {}\n gmean = {}\n gstd = {}\n\n g[1] = data[k[0]]\n g[2] = data[k[1]]\n n[1] = len(g[1])\n n[2] = len(g[2])\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n # Tb, pb = Stats.bartlett(g1, g2) # do bartletss for equal variance\n equalVar = False\n\n if paired:\n print (len(g[1]), len(g[2]))\n (t, p) = Stats.ttest_rel(g[1], g[2])\n else:\n (t, p) = Stats.ttest_ind(g[1], g[2], equal_var=equalVar)\n gmean[1] = np.mean(g[1])\n gstd[1] = np.std(g[1], ddof=1)\n gmean[2] = np.mean(g[2])\n gstd[2] = np.std(g[2], ddof=1)\n # df = (tstd[k]**2/tN[k] + dstd[k]**2/dN[k])**2 / (( (tstd[k]**2 /\n # tN[k])**2 / (tN[k] - 1) ) + ( (dstd[k]**2 / dN[k])**2 / (tN[k] - 1) ) )\n df = ((gstd[1]**2/n[1] + gstd[2]**2/n[2])**2\n / (((gstd[1]**2 / n[1])**2 / (n[1] - 1)\n + ((gstd[2]**2 / n[2])**2 / (n[1] - 1))))\n )\n if dataLabel is not None:\n testtype = 'Independent'\n if paired:\n testtype = 'Paired'\n n = max([len(l) for l in k])\n print ('\\n%s\\n %s T-test, Welch correction' % (dataLabel, testtype))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (performing test anyway, as requested)')\n # if equalVar:\n # print(u' Variances are equivalent (Bartletts test, p = {:.3f})'.format(pb))\n # else:\n # print(u' Variances are unequal (Bartletts test, p = {:.3f}); not assuming equal variances'.format(pb))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[0].rjust(n), gmean[1], gstd[1],\n len(g[1]), pc=decimals))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[1].rjust(n), gmean[2], gstd[2],\n len(g[2]), pc=decimals))\n print(u' t({:6.2f})={:8.4f} p={:8.6f}\\n'.\n format(df, float(t), float(p)))\n # generate one line of text suitable for pasting into a paper\n if textline:\n if units is not None:\n units = ' ' + units\n else:\n units = ''\n fmtstring = u'{:s}: {:.{pc}f} (SD {:.{pc}f}, N={:d}){:s}; '\n print(u'(', end='')\n for s in range(1, 3):\n print(fmtstring.format(\n k[s-1], gmean[s], gstd[s], len(g[s]), units, \n pc=decimals), end='')\n print(u't{:.2f}={:.3f}, p={:s})\\n'.format(df, float(t), pformat(p)))\n\n return(df, float(t), float(p))", "def ttest_2samp(x1, x2, alpha=0.05, paired=False, is_bernoulli=False, two_sided=True, return_tuple=False):\n x = np.asarray(x1)\n y = np.asarray(x2)\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = 'X1_bar = X2_bar'\n h1 = 'X1_bar != X2_bar'\n else:\n quant_order = 1 - alpha\n h0 = 'X1 <= X2'\n h1 = 'X1 > X2'\n\n # Sample sizes\n n1, n2 = len(x), len(y)\n\n if paired:\n # If samples are paired, we perform a 1-sample student test\n # We compare if the difference is different from 0.\n mean1, mean2 = x.mean(), y.mean()\n d = x - y\n t, cv, p = ttest(d, alpha=alpha, return_tuple=True)\n df 
= len(d)\n else:\n # Else samples are independent\n # Compute means\n mean1, mean2 = x.mean(), y.mean()\n # Compute standard deviations\n if is_bernoulli:\n s1 = mean1 * (1 - mean1)\n s2 = mean2 * (1 - mean2)\n else:\n s1 = desc.var(x)\n s2 = desc.var(y)\n # Compute grouped variance\n sd = np.sqrt(((n1 - 1) * s1 + (n2 - 1) * s2) / (n1 + n2 - 2))\n # Degrees of freedom\n df = n1 + n2 - 2\n # Calculate the t statistic\n t = (mean1 - mean2) / sd\n\n # calculate the critical value\n cv = scp.t.ppf(quant_order, df)\n # calculate the p-value\n if (n1 > 30) & (n2 > 30):\n p = 2.0 * (1.0 - scp.norm.cdf(math.fabs(t)))\n else:\n p = 2.0 * (1.0 - scp.t.cdf(math.fabs(t), df=df))\n\n extra = f\" * E(X1) = {round(mean1, 3)} and E(X2) = {round(mean2, 3)} \\n\"\n extra += \" * Performed test for paired samples \\n\" if paired else ''\n extra += \" * Large sample sizes, t ~ N(0, 1) from CLT\" if (n1 > 30) & (n2 > 30) else ' * Small sample sizes, assumed t ~ T(n-1)'\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='Two Samples Student test',\n h0=h0, h1=h1,\n alpha=alpha,\n extra=extra)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ", "def hypothesis_test_two_means_testvalue(datae,dataf,test_value,alpha):\n \n # Dataset E\n data_e = 1.0*np.array(datae)\n n_e = data_e.shape[0]*data_e.shape[1]\n mean_e = np.array(data_e).mean()\n var_e = np.array(data_e).var(ddof=1)\n df_e = n_e-1\n \n # Dataset F\n data_f = 1.0*np.array(dataf)\n n_f = dataf.shape[0]*dataf.shape[1]\n mean_f = np.array(data_f).mean()\n var_f = np.array(data_f).var(ddof=1)\n df_f = n_f-1\n \n # Sp,t and pvalue\n Sp = np.sqrt((((df_e*var_e) + (df_f*var_f))/(df_e+df_f)))\n t = ((mean_e-mean_f)-test_value)/(Sp*np.sqrt(1/n_e+1/n_f))\n pvalue = 1-scs.t.cdf(t,df_e+df_f,)\n \n # Decision\n if pvalue > alpha:\n decision = 'Fail to Reject H0'\n return t,pvalue,decision\n else:\n decision = 'Reject H0'\n return t,pvalue,decision", "def compare_samples(populations,parametric=False):\n from scipy.stats import mannwhitneyu, ttest_ind, f_oneway, kruskal, ranksums\n from statsmodels.stats.multicomp import pairwise_tukeyhsd\n populations = [np.array(pop) for pop in populations] #obscure line to take out missing values\n populations = [pop[~np.isnan(pop)] for pop in populations]\n\n if len(populations) == 2:\n if parametric:\n stat, p_value = ttest_ind(*populations)\n print(\"P-value t-test: {0:2.10f}\".format(p_value))\n else:\n stat, p_value1 = mannwhitneyu(*populations)\n print(\"P-value MWW: {0:2.10f}\".format(p_value))\n stat, p_value2 = ranksums(*populations)\n print(\"P-value Ranksum: {0:2.10f}\".format(p_value))\n \n if len(populations) > 2:\n if parametric:\n stat, p_value = f_oneway(*populations)\n print(\"P-value anova: {0:2.10f}\".format(p_value))\n else:\n stat, p_value = kruskal(*populations) \n print(\"P-value kruskal: {0:2.10f}\".format(p_value))\n \n if p_value < 0.05:\n flatten_pop = []\n label_pop = []\n for i,pop in enumerate(populations):\n flatten_pop += list(pop)\n label_pop += [\"pop{0}\".format(i)]*len(pop)\n \n res2 = pairwise_tukeyhsd(np.asarray(flatten_pop),label_pop)\n print(\"Printing pair comparisons using Tukey HSD\")\n print(res2)\n res2.plot_simultaneous(comparison_name=None,xlabel='diffs',ylabel='grups')\n \n print((\"Means: \" + \", {}\"*len(populations)).format(*[np.mean(_) for _ in populations]))\n print((\"STDs: \" + \", {}\"*len(populations)).format(*[np.std(_) for _ in populations]))\n \n \n return p_value", "def t_test(result, reference):\n \n # Check that result and 
reference are 1D and that they have the same length\n \n print('\\nChecking that result and reference are 1D and that they have the same length\\n')\n \n if (len(result.shape) == 1) and (len(reference.shape) == 1):\n \n if len(result) == len(reference):\n \n print('Performing t test\\n')\n \n t_stat, p_value = scipy.stats.ttest_ind(result, reference)\n \n print('t test completed successfully!\\n')\n \n print('t statistic: {} // p value: {}'.format(t_stat, p_value))\n \n return t_stat, p_value\n \n else:\n \n print('Result and reference vectors do not have the same length. Please input them so that they have the same length')\n \n else:\n \n print('Result or reference vectors are not 1D. Please reformat them to be 1D')", "def ttest(array1, array2):\n diff = np.mean(array1) - np.mean(array2)\n if diff < c.cart_p60:\n return c.low_score\n if array1.size <= 1 or array2.size <= 1:\n return min(diff, c.single_item_cart_max)\n return 1 - ttest_ind(array1, array2, equal_var=False).pvalue\n # return diff", "def t_test_(x):\n assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))\n\n if (len(x) <= 1) or (not np.all(np.isfinite(x))):\n return 1.0 # Can't say anything about scale => p=1\n\n _, pval = sst.ttest_1samp(x, 0.0)\n if np.isnan(pval):\n # Should only be possible if scale underflowed to zero:\n assert np.var(x, ddof=1) <= 1e-100\n # It is debatable if the condition should be ``np.mean(x) == 0.0`` or\n # ``np.all(x == 0.0)``. Should not matter in practice.\n pval = np.float(np.mean(x) == 0.0)\n assert 0.0 <= pval and pval <= 1.0\n return pval", "def report_ttest_2sample(null_hypothesis, sample1, sample2, paired, alpha=0.05):\n\n if paired:\n t_value, p_value = stats.ttest_rel(sample1, sample2)\n else:\n t_value, p_value = stats.ttest_ind(sample1, sample2)\n print('Test for null hypothesis \"{}\".'.format(null_hypothesis))\n print('Sample 1 mean: {}, Sample 1 SD: {}'.format(np.mean(sample1), np.std(sample1)))\n print('Sample 2 mean: {}, Sample 2 SD: {}'.format(np.mean(sample2), np.std(sample2)))\n print('t({})={}, p={}.'.format(len(sample1)-1, t_value, p_value))\n if p_value < alpha:\n print('Reject null hypothesis.\\n')\n else:\n print('Fail to reject null hypothesis.\\n')", "def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = stdev(a)**2\r\n v2 = stdev(b)**2\r\n n1 = len(a)\r\n n2 = len(b)\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2)/float(df)\r\n t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,min(a),max(a),\r\n name2,n2,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t,prob", "def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one 
sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )", "def ttest():\n # open test results and perform regression analysis\n alphas = []\n betas = []\n iterations = {}\n with open(f\"Results/conclusion2.csv\") as f:\n csv_reader = csv.reader(f, delimiter=',')\n\n for run in csv_reader:\n max, max_i = get_max_run(run)\n if int(run[0]) not in iterations:\n iterations[int(run[0])] = {100 - int(run[1])-1: int(max)}\n else:\n iterations[int(run[0])][100 - int(run[1])-1] = int(max)\n\n for iteration in iterations:\n mono_levels = list(iterations[iteration].keys())\n pop_sizes = [iterations[iteration][i] for i in mono_levels]\n\n regress_result = regress(pop_sizes, mono_levels)\n alphas += [regress_result[1]]\n betas += [regress_result[0]]\n\n # plot scatter and regression line\n avg_alpha = sum(alphas)/len(alphas)\n avg_beta = sum(betas)/len(betas)\n stddev_beta = np.std(betas)\n vis.scatter_mono(iterations, avg_alpha, avg_beta)\n\n # perform t-test\n ttest_result = stats.ttest_ind(betas, [0 for i in betas], equal_var=True)\n t_stat = ttest_result[0]\n p_value = ttest_result[1]\n print(f'Results from t-test:')\n print(f'Avg beta: {avg_beta}, stddev beta: {stddev_beta}.')\n print(f't-stat: {t_stat}, p-value: {p_value}.')", "def test_t_two_sample_switch(self):\r\n sample = array([4.02, 3.88, 3.34, 3.87, 3.18])\r\n x = array([3.02])\r\n self.assertFloatEqual(t_two_sample(x, sample), (-1.5637254, 0.1929248))\r\n self.assertFloatEqual(t_two_sample(sample, x), (1.5637254, 0.1929248))\r\n\r\n # can't do the test if both samples have single item\r\n self.assertEqual(t_two_sample(x, x), (None, None))\r\n\r\n # Test special case if t=0.\r\n self.assertFloatEqual(t_two_sample([2], [1, 2, 3]), (0.0, 1.0))\r\n self.assertFloatEqual(t_two_sample([1, 2, 3], [2]), (0.0, 1.0))", "def test_repeated_two_tailed(self):\n rng = np.random.default_rng(6464584234)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def test_t_paired_2tailed(self):\r\n x, y = self.x, self.y\r\n # check value of t and the probability for 2-tailed\r\n self.assertFloatEqual(t_paired(y, x)[0], 19.7203, 1e-4)\r\n self.assertFloatEqual(t_paired(y, x)[1], 1.301439e-11, 1e-4)", "def calc_indttest_90(varx,vary):\n print('\\n>>> Using calc_ttest function!')\n \n ### Import modules\n import numpy as np\n import scipy.stats as sts\n \n ### 2-independent sample t-test\n stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')\n \n ### Significant at 90% confidence level\n pvalue[np.where(pvalue >= 0.1)] = np.nan\n pvalue[np.where(pvalue < 0.1)] = 1.\n pvalue[np.isnan(pvalue)] = 0.\n \n print('*Completed: Finished calc_ttest function!')\n return stat,pvalue", "def test_one_sample_right_tailed(self):\n rng = 
np.random.default_rng(615419864354)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(12.2, 1, 100)\n\n ttest = one_sample_ttest(data1, 12.2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def ttest(x):\n from ..group.onesample import stat\n t = stat(x.T, id='student', axis=0)\n return np.squeeze(t)", "def t_two_sample(x, y, tails=2):\n assert tails in (1,2), \"invalid: tails must be 1 or 2, found %s\"%str(tails)\n x, y = np.asarray(x), np.asarray(y)\n nx, ny = x.size, y.size\n df = nx + ny - 2\n s_xy = np.sqrt(((nx - 1)*x.var() + (ny - 1)*y.var()) / df)\n t_obs = (x.mean() - y.mean()) / (s_xy * np.sqrt(1./nx + 1./ny))\n p_value = tails * st.t.sf(abs(t_obs), df)\n return TtestResults(t_obs, p_value)", "def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length lists in ttest_rel.'\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = var(a)\r\n v2 = var(b)\r\n n = len(a)\r\n cov = 0\r\n for i in range(len(a)):\r\n cov = cov + (a[i]-x1) * (b[i]-x2)\r\n df = n-1\r\n cov = cov / float(df)\r\n sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))\r\n t = (x1-x2)/sd\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,min(a),max(a),\r\n name2,n,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t, prob" ]
[ "0.7822148", "0.71075654", "0.70718163", "0.7043813", "0.7022971", "0.6847565", "0.6847342", "0.6802037", "0.66970724", "0.667655", "0.66473764", "0.6610097", "0.65943104", "0.65460646", "0.6540486", "0.6539085", "0.65042484", "0.644084", "0.64313716", "0.64187086", "0.6412364", "0.63670486", "0.63501376", "0.628993", "0.62844133", "0.6245853", "0.62265325", "0.61562675", "0.6120938", "0.610828" ]
0.7776446
1
Plotting a figure to visualize parameter coefficients
def plot_param_coef(model, kind='barh', figsize=(10, 5)):
    ## Getting coefficients as a Series
    params = model.params[1:]
    params.sort_values(inplace=True)
    plt.figure(figsize=figsize)  # Used if large number of params
    ax = params.plot(kind=kind)
    ax.axvline()
    ax.set_xlabel('Coefficient')
    ax.set_ylabel('Features')
    ax.set_title('Comparing Feature Coefficients')
    plt.tight_layout()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = [10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + \"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def plot_fitting_coefficients(self):\n from matplotlib import pyplot as plt\n coeff = self.linear_fit[\"coeff\"]\n order = self.linear_fit[\"order\"]\n\n data = {}\n annotations = {}\n for c, o in zip(coeff, order):\n if len(o) == 0:\n continue\n n = len(o)\n if n not in data.keys():\n data[n] = [c]\n annotations[n] = [WulffConstruction.order2string(o)]\n else:\n data[n].append(c)\n annotations[n].append(WulffConstruction.order2string(o))\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n start = 0\n keys = list(data.keys())\n keys.sort()\n for k in keys:\n x = list(range(start, start+len(data[k])))\n ax.bar(x, data[k], label=str(k))\n start += len(data[k]) + 1\n for i in range(len(data[k])):\n ax.annotate(annotations[k][i], xy=(x[i], data[k][i]))\n ax.set_ylabel(\"Fitting coefficient\")\n ax.set_xticklabels([])\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.legend(frameon=False)\n return fig", "def plot_polynomial(self):\n plt.scatter(self.x_values, self.y_values)\n plt.title(f\"Graph of polynomial between {np.floor(min(self.x_values))} and {np.ceil(max(self.x_values))}\")\n plt.xlabel('x-axis')\n plt.ylabel('y-axis')\n plt.show()", "def _plot_model(params, label, range_=None):\n b, a = params\n if range_ is None:\n x = np.linspace(0,1)\n else:\n u, v = range_\n x = np.linspace(u,v)\n y = a*x + b\n return plt.plot(x, y, label=label)", "def plot(self,ax,**kwargs):\n self.XP_Plotter.plot(ax,**kwargs)\n self.lines_theory[0], = ax.plot(self.xx, self.pp_non_rel,'--g',**kwargs)\n self.lines_theory[1], = ax.plot(self.xx, self.pp_rel,'--m',**kwargs)\n self.lines_theory[2], = ax.plot(self.xx_itpl, self.pp_itpl,'-r',**kwargs)", "def plot(self):\n pass", "def equationPlot(self):\n clf()\n x = np.arange(0,9.9,0.1)\n plot(x,1/(10-x))\n xlabel('X')\n ylabel('1/(10-x)')\n savefig('equation.png')", "def draw_parameters_trend(X, Y):\n ridge_weights, _ = generate_parameters(X, Y)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(range(-30,30), ridge_weights) # plot of first dimension 和 alpha 定义域一致\n plt.show()", "def coefplot(model, varnames=True, intercept=False, fit_stats=True, figsize=(7, 12)):\n if intercept is True:\n coefficients = model.params.values\n errors = model.bse\n if varnames is True:\n varnames = model.params.index\n else:\n coefficients = model.params.values[1:]\n errors = model.bse[1:]\n if varnames is True:\n varnames = model.params.index[1:]\n\n tmp_coefs_df = pd.DataFrame({'varnames': varnames, 'coefs': coefficients,'error_bars': errors})\n fig, ax = plt.subplots(figsize=figsize)\n 
ax.errorbar(y=tmp_coefs_df['varnames'], x=tmp_coefs_df['coefs'],\n xerr=tmp_coefs_df['error_bars'], fmt='o',\n color='slategray', label='Estimated point')\n ax.axvline(0, color='tomato', linestyle='--', label='Null Effect')\n ax.set_xlabel(r'$\\hat{\\beta}$')\n fig.tight_layout()\n plt.legend(loc='best')\n\n if fit_stats is True:\n if 'linear_model' in model.__module__.split('.'):\n plt.title(r'R$^{2}$' + \"={0}, f-value={1}, n={2}\".format(round(model.rsquared, 2),\n round(model.f_pvalue, 3),\n model.nobs))\n elif 'discrete_model' in model.__module__.split('.'):\n plt.title(\"Loglikelihood = {0}, p(ll-Rest)={1}, n={2}\".format(round(model.llf, 2),\n round(model.llr_pvalue, 3),\n model.nobs))", "def plot(self, fig=None, ax=None,\n curve=True, control_points=True, frenet_serret=False, axis_off=False, ticks_off=False):\n\n if fig is None:\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$u$ parameter', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('NURBS curve value', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n fig = mpl.pyplot.figure(figsize=(6, 5))\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(azim=-120, elev=30)\n ax.grid(False)\n ax.xaxis.pane.fill = False\n ax.yaxis.pane.fill = False\n ax.zaxis.pane.fill = False\n ax.xaxis.pane.set_edgecolor('k')\n ax.yaxis.pane.set_edgecolor('k')\n ax.zaxis.pane.set_edgecolor('k')\n ax.xaxis.pane._alpha = 0.9\n ax.yaxis.pane._alpha = 0.9\n ax.zaxis.pane._alpha = 0.9\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_zlabel('$z$ axis', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.zaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.zaxis.get_major_ticks(): t.label.set_fontsize(8)\n ax.xaxis.set_rotate_label(False)\n ax.yaxis.set_rotate_label(False)\n ax.zaxis.set_rotate_label(False)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n if axis_off:\n ax.axis('off')\n\n else: raise Exception('The number of dimensions must be 1, 2 or 3')\n\n\n # Add objects to the plot\n if curve: self.plot_curve(fig, ax)\n if 
control_points: self.plot_control_points(fig, ax)\n if frenet_serret: self.plot_frenet_serret(fig, ax)\n\n # Set the scaling of the axes\n self.rescale_plot(fig, ax)\n\n return fig, ax", "def plot(self):\n\t\tself.plotOfTF().plot()", "def figure_10_12_b():\n xs = np.arange(-6,6,0.1)\n plt.plot(xs,sigmoid(xs))\n x=2.5\n plt.scatter(x,sigmoid(x))\n plt.plot(xs,logistic_lower_bound(xs,x))\n plt.show()", "def plot(self,include_rho=True,show=True,nx=129):\n xs = np.linspace(0.0,1.0,nx)\n plt.plot(xs,self.f0(xs),'b-')\n plt.plot([self.rho_max,self.rho_max],[0.0,1.0],'b:')\n plt.plot(xs,self.f1(xs),'r-')\n plt.plot([self.rho_min,self.rho_min],[0.0,1.0],'r:')\n plt.plot([0.0,1.0],[0.0,1.0],'k--')\n if include_rho:\n plt.plot([self.rho,self.rho],[0.0,1.0],'g:')\n plt.xlim(0.0,1.0)\n plt.ylim(0.0,1.0)\n plt.axes().set_aspect(1.0)\n if show:\n plt.show()", "def plotSVMCoefficients(self, **kwargs):\n ser_X = pd.Series(np.repeat(1, len(self.features)))\n ser_X.index = self.features\n new_kwargs = dict(kwargs)\n new_kwargs[\"is_plot\"] = False\n ax = self._plotFeatureBars(ser_X, **new_kwargs)\n ax.set_ylabel(\"Coefficient\")\n self._showPlot(kwargs)", "def visualisation(self):\n plt.plot(self.x, self.y, 'o', label = 'Example data')\n plt.plot(self.x, np.dot(self.w, self.X), label = 'Model')\n plt.xlim([-1,1])\n plt.ylim([-1,1])", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def plot_plateau(x,y,p,n,Vdc):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('Vrf [V]')\n ax.set_ylabel('Current [nA]')\n fig.suptitle('Vdc = '+str(Vdc)+' n = '+str(n), fontsize=24)\n \n plt.plot(x,y,'x',label='Experimental data') \n t = np.linspace(min(x),max(x),1000)\n plt.plot(t,f(t,p[0],p[1],p[2]),label='Fit')\n plt.axhline(y=n*e*frequency*1e9, color='black', linestyle='-')\n\n ax.legend()\n plt.show(block=True)\n plt.pause(0.3)\n plt.close()\n \n return None", "def plot_phase_diagram(param, ax=None, title=None):\n if ax is None:\n ax = plt.gca()\n if title is None:\n title = \"Phase space, {}\".format(param) \n \n ax.set(xlabel='v', ylabel='w', title=title)\n \n # Isocline and flow... 
\n xlimit = (-1.5, 1.5)\n ylimit = (-.6, .9)\n plot_vector_field(ax, param, xlimit, ylimit)\n plot_isocline(ax, **param, vmin=xlimit[0],vmax=xlimit[1])\n \n # Plot the equilibria \n eqnproot = find_roots(**param)\n eqstability = [stability(jacobian_fitznagumo(e[0],e[1], **param)) for e in eqnproot] \n for e,n in zip(eqnproot,eqstability):\n ax.scatter(*e, color=EQUILIBRIUM_COLOR[n])\n \n # Show a small perturbation of the stable equilibria...\n time_span = np.linspace(0, 200, num=1500)\n if n[:6] == 'Stable':\n for perturb in (0.1, 0.6):\n ic = [e[0]+abs(perturb*e[0]),e[1]]\n traj = scipy.integrate.odeint(partial(fitzhugh_nagumo, **param),\n y0=ic,\n t=time_span)\n ax.plot(traj[:,0], traj[:,1])\n\n # Legend\n labels = frozenset(eqstability)\n ax.legend([mpatches.Patch(color=EQUILIBRIUM_COLOR[n]) for n in labels], labels, \n loc='lower right')", "def plot_priors(params):\n prior_dicts = {'ic' : params['ic_prior'], 'ii' : params['ii_prior']}\n pidxs = (pidx for pidx in onp.arange(1,12))\n f = plt.figure(figsize=(12,8))\n for k in prior_dicts:\n for j in prior_dicts[k]:\n plt.subplot(2,3,next(pidxs));\n data = prior_dicts[k][j]\n if \"log\" in j:\n data = onp.exp(data)\n j_title = j.strip('log')\n else:\n j_title = j\n plt.stem(data)\n plt.title(k + ' ' + j_title)\n return f", "def plot(self, x, y, mX = 1, mY = 1):\n x = self.xy[x]\n y = self.xy[y]\n\n # Find pareto front:\n xp, yp = self.pareto_frontier(x,y, mX, mY)\n\n plt.plot(x, y, 'o')\n plt.plot(xp, yp, '-')\n plt.tight_layout()\n plt.show()", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plotCoefficients(model):\n\n coefs = pd.DataFrame(model.coef_, X_train.columns)\n coefs.columns = [\"coef\"]\n coefs[\"abs\"] = coefs.coef.apply(np.abs)\n coefs = coefs.sort_values(by=\"abs\", ascending=False).drop([\"abs\"], axis=1)\n\n plt.figure(figsize=(15, 7))\n plt.title('sorted coefficient values of the model')\n coefs.coef.plot(kind='bar')\n plt.grid(True, axis='y')\n plt.hlines(y=0, xmin=0, xmax=len(coefs), linestyles='dashed');\n plt.draw()", "def plot_model(voi, states, algebraic):\n import pylab\n (legend_states, legend_algebraic, legend_voi, legend_constants) = createLegends()\n pylab.figure(1)\n pylab.plot(voi,vstack((states,algebraic)).T)\n pylab.xlabel(legend_voi)\n# pylab.legend(legend_states + legend_algebraic, loc='best')\n pylab.show()", "def plot_pat_fit(x_data, y_data, z_data, pp, trans='one_ele', fig=400, title='Fitted model', label='model'):\n if z_data is not None:\n plt.figure(fig)\n plt.clf()\n plt.pcolormesh(x_data, y_data, z_data, shading='auto')\n plt.title(title)\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n\n if trans == 'one_ele':\n model = one_ele_pat_model\n yfit = model(x_data, pp)\n plt.plot(x_data, yfit, '-g', label=label)\n yfit_t0 = model(x_data, np.array([pp[0], pp[1], 0]))\n plt.plot(x_data, yfit_t0, '--g')\n elif trans == 'two_ele':\n model = two_ele_pat_model\n ylfit, ymfit, yrfit = model(x_data, pp)\n plt.plot(x_data, ylfit, '-g', label='S-T')\n plt.plot(x_data, ymfit, '-r', label='S-S')\n plt.plot(x_data, yrfit, '-b', label='T-S')\n\n plt.ylim([np.min(y_data), np.max(y_data)])", "def plot_asymptotic_coefficients(filename, fax=None):\n # create plot\n labels = [1, 2, 3]\n # create plot\n if fax is None:\n fig, ax = plt.subplots(1, 1, figsize=(8, 8))\n else:\n fig, ax = fax\n\n coef_file = h5py.File(filename, 'r')\n sigmaP_vals = list(coef_file)\n\n ks = np.arange(1, 26)\n\n for idx, sigmaP in enumerate(sigmaP_vals):\n coefs = coef_file[sigmaP]\n ax.plot(\n ks, coefs,\n 
linewidth=4,\n label=r'$\\sigma_P=%s$' % labels[idx],\n color=colors[-idx - 3])\n\n lgd = ax.legend(\n facecolor='white',\n prop={'size': 25},\n handletextpad=0.4,\n handlelength=1.2,\n labelspacing=0.27,\n columnspacing=0.50)\n lgd.get_frame().set_edgecolor('k')", "def plot(self):\n\t\tself.plotOfSpect()", "def plot():\n pass", "def plot_cf(self, **options):\n n = len(self.hs)\n xs = np.arange(-n//2, n//2)\n hs = np.roll(self.hs, len(self.hs) // 2)\n plt.plot(xs, hs.real, label='real', **options)\n plt.plot(xs, hs.imag, label='imag', **options)\n plt.legend()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def mplot(m, xlabel='', ylabel='', title='', custom=(7,7)):\n lag = ps.lag_spatial(m.w, m.z)\n fit = ps.spreg.OLS(m.z[:, None], lag[:,None])\n\n # Customize plot\n fig = plt.figure(figsize=custom)\n ax = fig.add_subplot(111)\n\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n fig.suptitle(title)\n\n ax.scatter(m.z, lag, s=60, color='k', alpha=.6)\n ax.plot(lag, fit.predy, color='r')\n\n ax.axvline(0, alpha=0.5)\n ax.axhline(0, alpha=0.5)\n\n return fig" ]
[ "0.7194646", "0.7173507", "0.6597603", "0.654072", "0.6468319", "0.64644027", "0.6439089", "0.6413635", "0.637166", "0.6341059", "0.6325652", "0.6312689", "0.63120836", "0.6265934", "0.625251", "0.6251491", "0.62430817", "0.6190655", "0.6189252", "0.61731386", "0.61717844", "0.61697435", "0.61675423", "0.61578983", "0.6146826", "0.6139503", "0.61254305", "0.6118698", "0.61084133", "0.6092691" ]
0.7313041
0
Plots a figure to visualize parameter p-values exceeding the stated alpha.
def plot_p_values(model, kind = 'barh', figsize = (10,5), alpha = .05):
    pv = model.pvalues[1:]
    pv_high = pv[pv > alpha]
    pv_low = pv[pv <= alpha]
    pv_high.sort_values(ascending=False, inplace=True)
    if len(pv_high) > 0:
        plt.figure(figsize=figsize)  # Used if large number of params
        ax = pv_high.plot(kind=kind)
        ax = pv_low.plot(kind=kind)
        ax.axvline()
        plt.suptitle(f'P-Values')
    if len(pv_low) > 0:
        plt.figure(figsize=figsize)  # Used if large number of params
        ax = pv_low.plot(kind=kind)
        ax.axvline()
        plt.suptitle(f'P-Values Below {alpha}')
    ## Not used; keeping just in case
    # else:
    #     print(f'There are no p-values above {alpha}.')
    plt.tight_layout()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def p_plot(data,pv_index=0,alpha=0.05):\n ####if it's a pd.dataframe, rename to col header\n if isinstance(data, pd.DataFrame):\n if isinstance(pv_index, int):\n pv_index = data.columns.get_values()[pv_index]\n data =data.rename(columns ={pv_index: \"p_value\"})\n if not (np.issubdtype(data['p_value'].dtypes, np.number)):\n raise TypeError(\"Please ensure you have specified the column index of numeric p-values.\")\n ###or make a vector a pd.dataframe\n else:\n data = pd.DataFrame({\"p_value\": data})\n \n if (data[\"p_value\"].max()> 1) or (data[\"p_value\"].max()< 0):\n raise ProbabilityError(\"One or more p-values is not between 0 and 1!\")\n \n m = len(data['p_value'])\n\n data = data.sort_values('p_value',ascending=True)\n data['rank'] = np.arange(1,len(data['p_value'])+1)\n data['critical_value'] = data['rank']*alpha/m\n\n fig = plt.clf()\n plt.scatter(data['rank'],data['p_value'],color='black')\n plt.axhline(y=alpha,label='Bonferroni')\n plt.plot(data['rank'],data['critical_value'],label='BH',color='red')\n plt.legend()\n plt.title(\"Bonferroni vs BH\")\n plt.xlabel(\"Rank\")\n plt.ylabel(\"p(k)\")\n return fig", "def plot_p(self, show = False):\n try:\n difference = self.binom_null\n except:\n self.simulate_significance()\n difference = self.binom_null\n\n observed_difference = self.p_treatment - self.p_control\n\n mu, sigma = stats.norm.fit(difference)\n crit_density = stats.norm.pdf(observed_difference, mu, sigma)\n\n x = np.linspace(min(difference), max(difference), self.n_control + self.n_treatment)\n y = stats.norm.pdf(x, mu, sigma)\n\n line_curve = dict(color = 'blue', width = 2)\n\n data = [\n go.Scatter(\n x = x,\n y = y,\n mode = 'lines',\n showlegend = False,\n line = line_curve\n ),\n go.Scatter(\n x = x[x > observed_difference],\n y = y[np.where(x > observed_difference)],\n fill = 'tozeroy',\n showlegend = False,\n line = line_curve\n )\n ]\n\n layout = dict(\n plot_bgcolor = 'white',\n width = 800,\n height = 600,\n title = 'Significance',\n xaxis = dict(\n title = 'Difference in Probabilities',\n showgrid = False,\n zeroline = False,\n showline = True,\n linecolor = 'black'\n ),\n yaxis = dict(\n title = 'Density',\n showgrid = False,\n zeroline = False,\n showline = True,\n linecolor = 'black'\n )\n )\n\n fig = go.Figure(data = data, layout = layout)\n\n fig.add_vline(x = observed_difference,\n line_width = 2,\n line_dash = 'dash',\n line_color = 'black',\n annotation_text = 'P Value {:.4f}'.format(self.p_value),\n annotation_position = 'top right')\n\n if show:\n # Intended to be used in notebooks.\n # .py app files that use this module will handle saving and opening from desktop\n fig.show();\n\n return fig", "def plot_pade_figure(self):\n data_analysis = DatabaseData(dataframe=self.plot_data)\n print (data_analysis.dataframe.columns)\n data_analysis.run_pade_through_R(rscript='birch',get_inits_ev=True)\n data_analysis.create_precisions()\n data_analysis.extract_pade_curve()\n x_eos_kpts, y_eos, xs_err, ys_err, x_pade_kpts, y_pade = \\\n data_analysis.create_pade_bokeh_compat(properties=self.properties)\n print (type(self.properties), self.properties)\n if self.properties == 'B':\n ext = data_analysis.Bp\n print ('HERE AT PROPERTIES', ext, type(ext))\n elif self.properties == 'BP':\n ext = data_analysis.BPp\n elif self.properties == 'E0':\n ext = data_analysis.E0p\n elif self.properties == 'V0':\n ext = data_analysis.V0p\n p = figure(plot_height=400, plot_width=400,tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\",\\\n x_axis_type=\"log\", 
x_axis_label='K-points per atom', title='Pade Extrapolate of {0} is {1}'.format(self.properties, str(ext)) )\n p.xaxis.axis_label = 'K-points per atom'\n p.line(x_pade_kpts, y_pade, color='red')\n p.circle(x_eos_kpts, y_eos,color='blue',size=5, line_alpha=0)\n p.multi_line(xs_err, ys_err, color='black')\n if self.properties == 'B':\n p.yaxis.axis_label = 'Bulk Modulus B (GPa)'\n elif self.properties == 'dB':\n p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative'\n elif self.properties == 'E0':\n p.yaxis.axis_label = 'DFT Energy (eV/atom)'\n elif self.properties == 'V0':\n p.yaxis.axis_label = 'Volume (A^3/atom)'\n\n return p", "def ex1_plot(pace=\"\",delta=\"\",a_range=[.5,1,5]):\n\t# safety\n\tpace = str(pace)\n\tdelta = str(delta)\n\t\n\t# parameters\n\t#a_range = [0.5,2,5] # different values of alpha,beta\n\t#a_range = [x/5 for x in range(1,4)]\n\tb_range = sorted([1.5/a for a in a_range]) # different values of alpha,beta\n\tb_range = [.5,1,1.5]\n\tpace = 10\n\tl = len(a_range)\n\tc = [ ['#FFA13D', '#7DD85F', '#8EBFFF'],\n\t\t ['#FF1C1C', '#0EA03C', '#0B6DDD'],\n\t\t ['#960019', '#155B00', '#0A0AA8']]\n\tX = [i for i in range(T+1)]\n\t\t \n\tfig,axes = plt.subplots(l,1, sharex=True, sharey=True, figsize=(10,15))\n\t\n\tplt.xlabel('Time')\n\tplt.ylabel('Energy')\n\tplt.ylim(0,0.6)\n\t\n\tthreads=[]\n\t# create the data\n\tstep = 0\n\tfor i in range(l):\n\t\talpha = a_range[i]\n\t\tfor j in range(l):\n\t\t\tbeta = 1.5*b_range[j]/alpha\n\t\t\tdelta = beta*pace/T\n\t\t\tthreads+=[mp.Process(target=ex1_create, args=(alpha,beta,pace,delta))]\n\t\t\tthreads[-1].start()\n\t\t\tif(len(threads)>=3):\n\t\t\t\tfor t in threads:\n\t\t\t\t\tplot_avancement(step, l*l)\n\t\t\t\t\tstep+=1\n\t\t\t\t\tt.join()\n\t\t\t\tthreads = []\n\t\n\tfor t in threads:\n\t\tplot_avancement(step, l*l)\n\t\tstep+=1\n\t\tt.join()\n\t\t\n\t# get the data\n\tfor i in range(l):\n\t\talpha = a_range[i]\n\t\tfor j in range(l):\n\t\t\tbeta = 1.5*b_range[j]/alpha\n\t\t\tdelta = beta*pace/T\n\t\t\tY = ex1_get(alpha,beta,pace,delta)\n\t\t\taxes[i].plot(X,Y,label='beta='+str(beta)[:4],color=c[j][0])\n\t\t\t#axes[j,1].plot(X,Y,label='alpha='+str(alpha)[:4],color=c[i][j])\n\t\t\t\n\t\t\t#if i==l-1:\n\t\t\t#\taxes[j,1].set_title('Energy evolution for beta='+str(beta)[:4])\n\t\t\t#\taxes[j,1].legend() \n\n\t\taxes[i].set_title('Energy evolution with simulated annealing for alpha='+str(alpha)[:4])\n\t\taxes[i].legend()\n\t\t\n\t\n\tdest_file = res_path+'ex1_sim_'+seed+'.png'\n\tfig.savefig(dest_file)\n\tprint('\\nEnergy evolution plots saved in '+dest_file)", "def plot_ps(self, show=False, density=True, pcolor=\"r\", mcolor=\"b\", lw=0.6):\n\n if (density):\n \"\"\" also read the local overdeOptimization of spectroscopic surveys for testing non-Gaussianity\nnsity value and plot line colors according to\n the density value, + = red, - = blue; adjust alpha accordingly\n \"\"\"\n if len(self.ds)<self.Nsubs:\n print (\"no density data\")\n return 0\n ads=np.abs(self.ds)\n meands=np.mean(self.ds)\n mads=np.max(ads)\n normds=np.array([ads[i]/mads/1.5 for i in range(len(ads))])\n self.normds=normds\n\n for sub in range(self.Nsubs):\n #print sub\n if not(density):\n self.plt.plot(self.klist, self.pfactor*self.powerspectra[sub])\n else:\n if self.ds[sub]>meands:\n self.plt.plot(self.klist[:-1], self.pfactor*self.powerspectra[sub][1:-1], color=pcolor, alpha=normds[sub], linewidth=lw)\n else:\n self.plt.plot(self.klist[:-1], self.pfactor*self.powerspectra[sub][1:-1], color=mcolor, alpha=normds[sub], linewidth=lw)\n 
#self.plt.xlim(self.klist[1], 0.1)\n #if (self.normalized):\n # self.plt.ylim(0.0,2)\n #else:\n # self.plt.ylim(500, 50000)\n # self.plt.yscale('log')\n\n self.plt.xlabel(r\"$k {\\rm (h/Mpc)}$\")\n if (self.normalized):\n self.plt.ylabel(r\"$P_{\\rm subvolume}(k)/ P_{\\rm avg}(k)$\")\n self.plt.yscale('linear')\n else:\n self.plt.ylabel(r\"$P_{\\rm subvolume}(k)\\; {\\rm (Mpc/h)}^3$\")\n self.plt.yscale('log')\n\n if (show):\n self.plt.show()", "def plot_stability_function(self,bounds=[-20,1]):\n import matplotlib.pyplot as plt\n p,q=self.stability_function()\n xx=np.arange(bounds[0], bounds[1], 0.01)\n yy=p(xx)/q(xx)\n fig, = plt.plot(xx,yy)\n plt.draw()", "def plot_precision_figure(self):\n\n data_analysis = DatabaseData(dataframe=self.plot_data)\n prop_data, energy_data, M, C, pred_energy, pred_property = \\\n data_analysis.create_precision_bokeh_compat(self.prop_data, self.energy_data, properties=self.properties)\n p = figure(plot_height=400, plot_width=400,tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\",\\\n x_axis_type=\"log\", y_axis_type=\"log\", x_axis_label='Energy Convergence (meV/atom)', title='Slope M is {0}'.format(str(M)) )\n p.line(pred_energy, pred_property, color='red')\n p.circle(self.energy_data, self.prop_data, color='blue',size=5, line_alpha=0)\n #p.multi_line(xs_err, ys_err, color='black')\n if self.properties == 'B':\n p.yaxis.axis_label = 'Bulk Modulus B (%)'\n elif self.properties == 'dB':\n p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative (%)'\n elif self.properties == 'Multiple':\n p.yaxis.axis_label = \"V0, B, B' (%)\"\n elif self.properties == 'V0':\n p.yaxis.axis_label = 'Volume (%)'\n\n return p", "def test_alpha_param(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## Instantiate a UMAPVisualizer, provide custom alpha\n umap = UMAPVisualizer(random_state=64, alpha=0.5)\n\n # Test param gets set correctly\n assert umap.alpha == 0.5\n\n # Mock ax and fit the visualizer\n umap.ax = mock.MagicMock(autospec=True)\n umap.fit(X, y)\n\n # Test that alpha was passed to internal matplotlib scatterplot\n _, scatter_kwargs = umap.ax.scatter.call_args\n assert \"alpha\" in scatter_kwargs\n assert scatter_kwargs[\"alpha\"] == 0.5", "def plot_plateau(x,y,p,n,Vdc):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('Vrf [V]')\n ax.set_ylabel('Current [nA]')\n fig.suptitle('Vdc = '+str(Vdc)+' n = '+str(n), fontsize=24)\n \n plt.plot(x,y,'x',label='Experimental data') \n t = np.linspace(min(x),max(x),1000)\n plt.plot(t,f(t,p[0],p[1],p[2]),label='Fit')\n plt.axhline(y=n*e*frequency*1e9, color='black', linestyle='-')\n\n ax.legend()\n plt.show(block=True)\n plt.pause(0.3)\n plt.close()\n \n return None", "def comp_beta_plot(p1=database['K+'], p2=database['pi+'], pmax=0.5):\r\n db = []\r\n mom = np.linspace(0, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n for p in mom:\r\n b1 = beta(p, m1)\r\n b2 = beta(p, m2)\r\n db.append(abs(b1-b2))\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(mom, db, 'b')\r\n ax.set_xlim(0, pmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n ax.set_ylabel(r'$\\Delta\\beta$', fontsize=20)\r\n title = f'{p1.name} to {p2.name} '\r\n title += r'$\\Delta\\beta$ dependancy on particle momenta'\r\n ax.set_title(title, fontsize=20)\r\n ax.text(0.30, 0.10, r'Maximum $\\Delta\\beta$ at p={0:.3f} GeV'.format(mom[np.argmax(db)]),\r\n transform=ax.transAxes, 
fontsize=20)\r\n plt.show()\r\n return", "def plot_parameter_evolution(analyses, pdf=False):\n ncs = np.arange(11, 15)\n genes = set(analyses.gene)\n constructs = set(analyses.construct)\n long_labels = {'bac': 'bac', 'no_pr': 'no pr', 'no_sh': 'no sh'}\n gene_long = {'hb': 'hunchback', 'kn': 'knirps', 'sn': 'snail'}\n y_label = {'j': 'Normalized flux $j$',\n 'rho': 'Site occupation density $\\\\rho$', 'tau': 'Residence time $\\\\tau$ (s)', 'alpha_comb': 'Initiation rate $\\\\alpha$ (pol/min)'}\n\n # Add extra jiggle to be able to distinguish overlapping data points\n x_jiggle = 0.04\n x_shifts = np.array([-1, 0, 1]) * x_jiggle\n\n # Plot parameters\n capsize = 0\n markersize = 4\n lw = 1 # line width\n\n for gene in genes:\n grouped_data = analyses.groupby(by=['gene', 'construct', 'nc'])\n all_means = grouped_data.mean()\n all_stds = grouped_data.std(ddof=1)\n all_ps = analyses.groupby(by=['gene', 'nc']).first()\n\n for quantity in ['j', 'rho', 'tau', 'alpha_comb']:\n ymaxs = {'j': 0.36, 'rho': 0.27, 'tau': 103, 'alpha_comb': 12}\n num = 12\n set_figure_size(num=num, rows=1, page_width_frac=0.5, clear=True, height_factor=0.7)\n fig, ax = plt.subplots(1, 1, num=num, clear=True)\n avg_data, std_data = {}, {}\n for construct in constructs:\n if quantity in ['rho', 'j']:\n avg_data[construct] = all_means.loc[(\n gene, construct, slice(None)), quantity].values\n std_data[construct] = all_stds.loc[(\n gene, construct, slice(None)), quantity].values\n\n elif quantity in ['tau', 'alpha_comb']:\n avg_data[construct] = all_means.loc[(\n gene, construct, slice(None)), quantity].values\n std_data[construct] = np.sqrt(\n all_means.loc[(gene, construct, slice(None)), quantity + 'V'].values)\n\n # Prepare a marker generator and plot the data with errorbars\n marker_gen = itertools.cycle(markers_additivity)\n for i, construct in enumerate(constructs):\n m = next(marker_gen)\n plt.errorbar(\n ncs + x_shifts[i], avg_data[construct],\n yerr=std_data[construct],\n fmt='-' + m, color=colors_additivity[construct],\n capsize=capsize, label=long_labels[construct],\n markersize=markersize, lw=lw)\n\n # Adjust plot\n plt.xlabel('Nuclear cycle')\n plt.ylabel(y_label[quantity])\n plt.ylim(ymin=0, ymax=ymaxs[quantity])\n\n plt.xticks(ncs)\n plt.title(gene_long[gene])\n\n plt.tight_layout()\n plt.show()\n\n # Save figure\n figname = 'additivity_' + quantity + '_' + gene\n figpath = os.path.join(figures_folder, figname)\n fig.savefig(figpath + '.png', pad_inches=0, bbox_inches='tight')\n if pdf:\n fig.savefig(figpath + '.pdf', pad_inches=0, bbox_inches='tight')", "def do_alpha_plot(uvals,vectors,wvectors,names,tag=None, fig_exten='.png',\n dolevels=False,log=True,outdir='SingleFigs/',\n vparams_dict=None, prefix='',truth=None,latexnames=None,\n logspline=True, others=None):\n import os\n import math\n \n if tag is not None:\n outdir=tag+outdir\n if not os.path.isdir(outdir):\n os.makedirs(outdir) \n \n if log:\n logfile=outdir+'limits.dat'\n logfile=open(logfile,'w')\n \n if dolevels:\n results=np.zeros([len(uvals),9]) # holds mean and error info for each parameter\n prior_results=np.zeros([len(uvals),9]) # does the same with alpha priors\n \n for i,vals in enumerate(uvals):\n if len(vals) == 1:\n continue\n if len(vals) < 4:\n kind = 'linear'\n else:\n kind = 'cubic'\n # does the for alpha\n plt.figure()\n lw=3\n \n\n # Convert vals?\n if vparams_dict is not None:\n # Check\n assert vparams_dict[names[i]]['n'] == len(vals)\n vals = np.linspace(vparams_dict[names[i]]['min'], \n vparams_dict[names[i]]['max'],\n 
len(vals))\n \n # get raw ylimits\n # removes zeroes, could lead to strange behaviour in theory\n ymax=np.max(vectors[i])\n temp=np.where((vectors[i] > 0.) & (np.isfinite(vectors[i])) )\n \n # set to integers and get range\n ymax=math.ceil(ymax)\n ymin=0.\n \n x,y=ac.interpolate_points(vals[temp],vectors[i][temp],logspline)\n \n norm=np.sum(y)*(x[1]-x[0]) # integral y dx ~ sum y delta x\n norm=np.abs(norm)\n y /= norm\n vectors[i][temp] /= norm\n plt.plot(x,y,label='Uniform',color='blue',linewidth=lw,linestyle='-')\n plt.plot(vals[temp],vectors[i][temp],color='blue',linestyle='',marker='s')\n \n \n # weighted plotting\n if wvectors is not None:\n wx,wy=ac.interpolate_points(vals[temp],wvectors[i][temp],logspline)\n wnorm=np.sum(wy)*(x[1]-x[0])\n wnorm = np.abs(wnorm)\n \n wvectors[i][temp] /= wnorm\n wy /= wnorm\n plt.plot(x,wy,label='Gauss',color='orange',linewidth=lw,linestyle='--')\n \n ax=plt.gca()\n ax.xaxis.set_ticks_position('both')\n #ax.Xaxis.set_ticks_position('both')\n if wvectors is not None:\n ymax=np.max([np.max(wy),np.max(y)])\n else:\n ymax=np.max(y)\n \n #ymax=(np.ceil(ymax*5.))/5.\n \n \n if dolevels==True:# and i != 1:\n limvals=np.array([0.15866])\n labels=['68%']\n styles=['-']\n upper=np.max(vectors[i])\n \n besty=np.max(y)\n imax=np.argmax(y)\n xmax=x[imax]\n results[i,0]=xmax\n string=names[i]+\" & {0:4.2f}\".format(xmax)\n for iav,av in enumerate(limvals):\n # need to integrate from min to some point\n # gets cumulative distribution\n # sets intervals according to highest likelihood\n v0,v1,ik1,ik2=ac.extract_limits(x,y,av,method=1)\n \n v0=0.15\n v1=1.85\n ik1=np.where(x>-0.15)[0][-1]\n ik2=np.where(x<-1.85)[0][0]\n \n string += \" & $_{\"\n string += \"{0:4.2f}\".format(v0-xmax)\n string += \"}^{+\"\n string += \"{0:4.2f}\".format(v1-xmax)\n string += \"}$ \"\n results[i,2*iav+1]=v0-xmax\n results[i,2*iav+2]=v1-xmax\n \n hl=0.03\n doff=(x[-1]-x[0])/100.\n ybar=(av+ymax)/2.\n xbar=(v0+v1)/2.\n \n # need to separate the plots\n if wvectors is not None:\n if ik1 != 0:\n #if iav==3 and i==4:\n # ybar -= 0.8\n plt.plot([x[ik1],x[ik1]],[ymax,y[ik1]],color='blue',linestyle=styles[iav],alpha=0.5)\n if i==1:\n t=plt.text(x[ik1]+doff*0.5,(ymax)+(-3.6+iav)*0.2*ymax,labels[iav],rotation=90,fontsize=12)\n t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='white',pad=-1))\n if ik2 != wy.size-1:\n plt.plot([x[ik2],x[ik2]],[ymax,y[ik2]],color='blue',linestyle=styles[iav],alpha=0.5)\n if i != 1:\n t=plt.text(x[ik2]-doff*3,(ymax)+(-3.6+iav)*0.2*ymax,labels[iav],rotation=90,fontsize=12)\n t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='white',pad=-1))\n else:\n plt.plot([x[ik1],x[ik1]],[0,y[ik1]],color='red',linestyle=styles[iav])\n plt.plot([x[ik2],x[ik2]],[0,y[ik2]],color='red',linestyle=styles[iav])\n Dx=x[-1]-x[0]\n if Dx < 0.:\n plt.text(x[ik1],y[ik1]+ymax*0.05,labels[iav],color='red',rotation=90)\n plt.text(x[ik2]+0.02*Dx,y[ik2]+ymax*0.05,labels[iav],color='red',rotation=90)\n else:\n plt.text(x[ik1]-0.02*Dx,y[ik1]+ymax*0.05,labels[iav],color='red',rotation=90)\n plt.text(x[ik2],y[ik2]+ymax*0.05,labels[iav],color='red',rotation=90)\n #print(\"For parameter \",i,\" CI \",iav, \" is \",x[ik1],\" to \",x[ik2])\n string += \" & \"\n \n #could just ignore the weightings \n if wvectors is not None:\n plt.plot(vals[temp],wvectors[i][temp],color='orange',linestyle='',marker='o')\n if dolevels==True:\n limvals=np.array([0.0015,0.025,0.05,0.16])\n labels=['99.7%','95%','90%','68%']\n styles=['--',':','-.','-']\n upper=np.max(wvectors[i])\n \n besty=np.max(wy)\n 
imax=np.argmax(wy)\n xmax=x[imax]\n prior_results[i,0]=xmax\n string+=\" {0:4.2f}\".format(xmax)\n for iav,av in enumerate(limvals):\n \n # sets intervals according to highest likelihood\n v0,v1,ik1,ik2=ac.extract_limits(x,wy,av,method=1)\n \n string += \" & $_{\"\n string += \"{0:4.2f}\".format(v0-xmax)\n string += \"}^{+\"\n string += \"{0:4.2f}\".format(v1-xmax)\n string += \"}$ \"\n prior_results[i,2*iav+1]=v0-xmax\n prior_results[i,2*iav+2]=v1-xmax\n \n # version 2\n hl=0.03\n \n doff=(x[-1]-x[0])/100.\n if i==1:\n doff=0.001\n ybar=(av+ymin)/2.\n xbar=(v0+v1)/2.\n if ik1 != 0:\n plt.plot([x[ik1],x[ik1]],[ymin,wy[ik1]],color='orange',linestyle=styles[iav])\n if i ==1:\n t=plt.text(x[ik1]+doff*0.5,wy[ik1]/2.2,labels[iav],rotation=90,fontsize=12)\n t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='white',pad=-1))\n \n if ik2 != wy.size-1:\n \n plt.plot([x[ik2],x[ik2]],[ymin,wy[ik2]],color='orange',linestyle=styles[iav])\n if i != 1:\n t=plt.text(x[ik2]-doff*3,wy[ik2]/2.2,labels[iav],rotation=90,fontsize=12)\n t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='white',pad=-1))\n other_styles=[\":\",\"--\",\"-.\"]\n # plot any other plots\n if others is not None:\n if others[i] is not None:\n for io,data in enumerate(others[i]):\n x,y=ac.interpolate_points(vals,data,logspline)\n norm=np.sum(y)*(x[1]-x[0]) # integral y dx ~ sum y delta x\n norm=np.abs(norm)\n y /= norm\n plt.plot(x,y,color='grey',linewidth=1,linestyle=other_styles[io % 3])\n if dolevels:\n string += \"\\\\\\\\\"\n if log:\n logfile.write(string+'\\n')\n else:\n print(string)\n #plt.ylim(0.,ymax)\n plt.gca().set_ylim(bottom=0)\n if truth is not None:\n plt.plot([truth[i],truth[i]],plt.gca().get_ylim(),color='black',linestyle=':')\n Dx=x[-1]-x[0]\n plt.text(truth[i]+0.01*Dx,ymax*0.4,'simulated truth',rotation=90)\n \n if latexnames is not None:\n plt.xlabel(latexnames[i])\n plt.ylabel('$p($'+latexnames[i]+'$)$')\n else:\n plt.xlabel(names[i])\n plt.ylabel('p('+names[i]+')')\n if i==4 and wvectors is not None:\n plt.legend(loc='upper left',title='Prior on $\\\\alpha$')\n \n plt.tight_layout()\n plt.savefig(os.path.join(outdir, prefix+names[i]+fig_exten), dpi=300)\n plt.close()\n if log:\n logfile.close()\n if dolevels:\n return results,prior_results\n else:\n return", "def plot_data(data, param_choice, args):\n \n ### set general plot properties\n \n savebase = '/usr/users/iff_th2/duman/Cells_in_LAMMPS/POVRAY/'\n #downlim = -1\n #uplim = sim.lx/4.\n num_ticks = 5\n ax_len = 1.0 # Length of one subplot square box\n ax_b = 0.0 # Beginning/offset of the subplot in the box\n ax_sep = 0.0 # Separation length between two subplots\n total_subplots_in_x = 1 # Total number of subplots \n fig = plt.figure()\n subp = misc_tools.Subplots(fig, ax_len, ax_sep, ax_b, total_subplots_in_x) \n ax0 = subp.addSubplot()\n \n name = ''\n pname = ''\n if param_choice == 'areak': \n name = 'AREAK'\n pname = name + '_eps_' + str(args.eps) + '_fp_' + str(args.fp) + \\\n '_kappa_' + str(args.kappa)\n xlab = '$\\kappa_A$'\n tit = '$\\epsilon=$' + str(args.eps) + ',$f_m=$' + str(args.fp) + \\\n ',$\\kappa=$' + str(args.kappa)\n elif param_choice == 'eps':\n name = 'EPS'\n pname = name + '_fp_' + str(args.fp) + '_areak_' + str(args.areak) + \\\n '_kappa_' + str(args.kappa)\n xlab = '$\\epsilon$'\n tit = '$f_m=$' + str(args.fp) + ',$\\kappa_A=$' + str(args.areak) + \\\n ',$\\kappa=$' + str(args.kappa) \n elif param_choice == 'fp':\n name = 'FP'\n pname = name + '_eps_' + str(args.eps) + '_areak_' + str(args.areak) + \\\n '_kappa_' + 
str(args.kappa)\n xlab = '$f_{m}$'\n tit = '$\\epsilon=$' + str(args.eps) + ',$\\kappa_A=$' + str(args.areak) + \\\n ',$\\kappa=$' + str(args.kappa) \n elif param_choice == 'kappa':\n name = 'KAPPA'\n pname = name + '_eps_' + str(args.eps) + '_fp_' + str(args.fp) + \\\n '_areak_' + str(args.areak)\n xlab = '$\\kappa$'\n tit = '$\\epsilon=$' + str(args.eps) + ',$f_m=$' + str(args.fp) + \\\n ',$\\kappa_A=$' + str(args.areak) \n base = savebase + name + '/'\n os.system(\"mkdir -p \" + base) \n \n ### plot \n\n subp = misc_tools.Subplots(fig, ax_len, ax_sep, ax_b, total_subplots_in_x) \n ax0 = subp.addSubplot()\n \n x = data.keys()\n y = [1 for j in range(len(data.keys()))]\n print x\n ax0.scatter(x, y)\n ax0.set_xscale('log')\n ax0.set_yscale('log')\n \n for j, p in enumerate(data.keys()):\n \n fname = data[p] \n \n if os.path.exists(fname):\n arr_hand = read_png(fname)\n \n zoom=0.099\n imagebox = OffsetImage(arr_hand, zoom=zoom)\n\n xy = [x[j], y[j]] # coordinates to position this image\n\n ab = AnnotationBbox(imagebox, xy,\n xybox=(0., -0.),\n xycoords='data',\n boxcoords=\"offset points\",frameon=1,pad=.1) \n \n ax0.add_artist(ab)\n \n ### title\n \n ax0.set_title(tit, fontsize=30)\n \n ### labels\n \n ax0.set_xlabel(xlab, fontsize=30)\n #ax0.set_ylabel(\"$F_{s}(q,\\\\Delta t)$\", fontsize=40)\n\n ### limits\n\n #ax0.set_xlim((-1, 15))\n ax0.set_ylim((0.9999, 1.0001))\n \n ax0.grid(1, color='#cccccc', linestyle='--')\n ax0.set_frame_on(False)\n ax0.get_xaxis().tick_bottom()\n ax0.axes.get_yaxis().set_visible(False)\n xmin, xmax = ax0.get_xaxis().get_view_interval()\n ymin, ymax = ax0.get_yaxis().get_view_interval()\n ax0.add_artist(Line2D((xmin, xmax), (ymin, ymin), color='black', linewidth=2)) \n ### ticks\n \n #ax0.xaxis.set_ticks(np.linspace(0, 15, num_ticks, endpoint=True))\n #ax0.yaxis.set_ticks(np.linspace(0, uplim, num_ticks, endpoint=True))\n plt.setp(ax0.get_yticklabels(),visible=False) \n ax0.tick_params(axis='both', which='major', labelsize=30)\n \n ### legend\n\n# ax0.legend(bbox_to_anchor=(1.005, 0.,0.65, 1.), loc=2, borderaxespad=0., \\\n# prop={'size': 20}, mode=\"expand\", frameon=False)\n \n ### save \n \n savepath = base + \"images_per_\" + pname + \".pdf\"\n print savepath\n plt.savefig(savepath, dpi=300, bbox_inches='tight', pad_inches=0.08) \n fig.clf() \n \n return", "def plot_parameter(self, parm):\n # If user wants to plot density, make sure it exists\n if parm == 'density' and 'density' not in self.ds.data_vars:\n self.insert_density()\n \n if parm == 'theta' and 'theta' not in self.ds.data_vars:\n self.insert_potential_density()\n \n if parm == 'N' and 'N' not in self.ds.data_vars:\n self.insert_buoyancy_frequency()\n \n # Use xarray to plot this parameter\n self.ds[parm].plot(y=self.ztsp[0])\n if plt.ylim()[0] <= 0:\n plt.gca().invert_yaxis()\n plt.tight_layout()\n \n plt.show()", "def plot_ppplot(obj1,sheet1,variable1,obj2,sheet2,variable2,title,opath):\n p1 = np.percentile(obj1.me[sheet1][variable1],range(0,101,1))\n p2 = np.percentile(obj2.me[sheet2][variable2],range(0,101,1))\n p1c = np.cumsum(np.array(p1))/np.cumsum(np.array(p1)).max()\n p2c = np.cumsum(np.array(p2))/np.cumsum(np.array(p2)).max()\n fig = plt.figure(figsize=(8,8),dpi=120)\n plt.scatter(p1c,p2c,color='#566c73',s=30)\n plt.plot([0,1],[0,1],color='red',alpha=0.3)\n plt.xlim(0,1)\n plt.ylim(0,1)\n plt.grid()\n plt.xlabel(sheet1+'_'+variable1)\n plt.ylabel(sheet2+'_'+variable2)\n plt.title(title)\n plt.savefig(opath+'.png')\n plt.close()", "def plot_parameter_visualisation_1d_a_b(parameters_dict, 
nr_components, ab, colors, prec_wrt_L=False, plot_out=None):\n\n\n plot = {'data': [], 'layout': {}}\n\n\n # component weights\n weights_bg = [ v[0] for k,v in sorted(parameters_dict.iteritems()) if 'weight_bg_' in k]\n weights_contact = [ v[0] for k,v in sorted(parameters_dict.iteritems()) if 'weight_contact_' in k]\n\n #component mu\n means = [v[ab] for k,v in sorted(parameters_dict.iteritems()) if 'mu_' in k]\n\n #component sd\n sd = []\n for component in range(nr_components):\n try:\n if prec_wrt_L:\n sd.append(np.sqrt(1.0/(parameters_dict['prec_'+str(component)][ab] * 142) )) #in case precision is spec depending on L=142\n else:\n sd.append(np.sqrt(1.0/parameters_dict['prec_'+str(component)][ab]))\n except ZeroDivisionError as e:\n print(e)\n sd.append(0) #in case prec is zero bc optimizer tries strange values\n\n\n ### add components\n for component in range(nr_components):\n gaussian_component_density = get_coordinates_for_1d_gaussian(-1, 1, means[component], sd[component])\n plot['data'].append(\n go.Scatter(x=gaussian_component_density[0],\n y=gaussian_component_density[1],\n mode='lines',\n name='component ' + str(component) + ' for ' + AB[ab],\n line=dict(dash='dot',\n color=colors[component]),\n showlegend=False\n )\n )\n\n ### add mixture if there are more than one component\n if (nr_components > 1):\n gaussian_mixture_x_contact, gaussian_mixture_y_contact =get_coordinates_for_1d_gaussian_mixture(-1, 1,\n weights_contact,\n means,\n sd)\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_contact,\n y=gaussian_mixture_y_contact,\n mode='lines',\n name='mixture (contact) for ' + AB[ab],\n line=dict(color='rgb(50,205,50)',\n width = 3),\n showlegend=False\n )\n )\n\n if (nr_components > 1):\n gaussian_mixture_x_bg, gaussian_mixture_y_bg = get_coordinates_for_1d_gaussian_mixture(-1, 1,\n weights_bg,\n means,\n sd)\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_bg,\n y=gaussian_mixture_y_bg,\n mode='lines',\n name='mixture (bg) for ' + AB[ab],\n line=dict(color='rgb(50,50,205 )',\n width = 3),\n showlegend=False\n )\n )\n\n\n plot['layout'].update({'title': 'Coupling prior as a gaussian mixture'})\n plot['layout'].update({'xaxis1': {'title': \"coupling values\"}})\n plot['layout'].update({'yaxis1': {'title': \"density\"}})\n plot['layout']['font'] = {'size': 18}\n\n if plot_out is not None:\n plotly_plot(plot, filename=plot_out, auto_open=False)\n else:\n return plot", "def _plot_at_fixed_alpha_and_age(ax, index1, index2, alpha_fe=0.0, age=13.0):\n\n age=float(age)\n\n alpha_fe_ind=get_numpy_indices_for_params(alpha_fe=alpha_fe)\n age_ind=get_numpy_indices_for_params(age=age)\n\n #The colours are a function of Alpha Enhancements so N=4\n N=4\n\n c=cm(1.0*alpha_fe_ind/N)\n\n\n \"\"\"\n #Useful for seeing how the model parameters move\n solar_alpha_fe_ind=get_numpy_indices_for_params(alpha_fe=0.0)\n ten_Gyr_age_ind=get_numpy_indices_for_params(age=10.0)\n solar_metallicity_ind=get_numpy_indices_for_params(Z=0.0)\n \"\"\"\n \n \n\n\n ax.plot(index1[alpha_fe_ind, :, age_ind], index2[alpha_fe_ind, :, age_ind], label=r\"$\\alpha$/Fe={}, age={}\".format(alpha_fe, age), linewidth=3.0, zorder=10, c=c)\n ax.scatter(index1[alpha_fe_ind, :, age_ind], index2[alpha_fe_ind, :, age_ind], marker=\"o\", s=np.linspace(50, 300, 6), facecolors=\"w\", linewidth=3.0, zorder=10)", "def plot_figure(param1, param2):\n return 0", "def plot_some(*arr, **kwargs):\n title_list = kwargs.pop('title_list',None)\n pmin = kwargs.pop('pmin',0)\n pmax = kwargs.pop('pmax',100)\n cmap = 
kwargs.pop('cmap','magma')\n imshow_kwargs = kwargs\n return _plot_some(arr=arr, title_list=title_list, pmin=pmin, pmax=pmax, cmap=cmap, **imshow_kwargs)", "def plot_live(X, y, evaluator, param_name, param_range, scale='log', ylim=(0,1), ylabel='score'):\n # Plot interactively\n plt.ion()\n plt.ylabel(ylabel)\n plt.xlabel(param_name)\n \n # Make the scale look nice\n plt.xscale(scale)\n plt.xlim(param_range[0],param_range[-1])\n plt.ylim(ylim)\n \n # Start from empty plot, then fill it\n series = {}\n lines = {}\n xvals = []\n for i in param_range:\n scores = evaluator(X, y, i) \n if i == param_range[0]: # initialize series\n for k in scores.keys():\n lines[k], = plt.plot(xvals, [], marker = '.', label = k)\n series[k] = []\n xvals.append(i)\n for k in scores.keys(): # append new data\n series[k].append(scores[k])\n lines[k].set_data(xvals, series[k])\n # refresh plot\n plt.legend(loc='best')\n plt.margins(0.1)\n display.display(plt.gcf())\n display.clear_output(wait=True)", "def visualize(title, particles):\n\n plt.figure(figsize=(10,10))\n plt.title(\"Best configuration for \" + str(len(particles)) + \" particles\", size=25)\n plt.xlabel(\"xcoordinate\", size=18)\n plt.ylabel(\"ycoordinate\", size=18)\n\n plt.xticks(size=13)\n plt.yticks(size=13)\n\n circle = plt.Circle((0, 0), 1)\n circle.set_edgecolor(\"red\")\n circle.set_facecolor(\"none\")\n fig = plt.gcf()\n ax = fig.gca()\n\n ax.add_artist(circle)\n plt.xlim(-1.1,1.1)\n plt.ylim(-1.1,1.1)\n\n # draw all the particles\n for particle in particles:\n plt.scatter(particle.x, particle.y)\n\n fig.savefig(title)", "def plot_plasma(self):\n x = self.geom.x\n fig, axes = plt.subplots(1, 2, figsize=(8, 3),\n constrained_layout=True)\n # plot densities\n ax = axes[0]\n ax.plot(x, self.ne, 'b-')\n ax.plot(x, self.ni, 'r-')\n ax.legend(['E', 'Ion'])\n ax.set_xlabel('Position (m)')\n ax.set_ylabel('Density (m^-3)')\n # plot temperature\n ax = axes[1]\n ax.plot(x, self.Te, 'b-')\n ax.plot(x, self.Ti, 'r-')\n ax.legend(['Te', 'Ti'])\n ax.set_xlabel('Position (m)')\n ax.set_ylabel('Temperature (eV)')\n plt.show()", "def show(im,fig= None): #X\n im = im.copy()\n if len(im.shape)==1 or im.shape[1]==1:\n im = X2patch(im)\n im[im<=DEAD]=-0.5\n if fig is None:\n plt.figure()\n fig = plt.imshow(hsv_to_rgb(im+0.5))\n fig.set_data(hsv_to_rgb(im+0.5))\n plt.draw()\n plt.pause(0.001)\n return fig", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def ppk_plot(data: (List[int], List[float], pd.Series, np.array),\n upper_control_limit: (int, float), lower_control_limit: (int, float),\n threshold_percent: float = 0.001,\n ax: Axis = None):\n\n data = coerce(data)\n mean = data.mean()\n std = data.std()\n\n if ax is None:\n fig, ax = plt.subplots()\n\n ax.hist(data, density=True, label='data', alpha=0.3)\n x = np.linspace(mean - 4 * std, mean + 4 * std, 100)\n pdf = stats.norm.pdf(x, mean, std)\n ax.plot(x, pdf, label='normal fit', alpha=0.7)\n\n bottom, top = ax.get_ylim()\n\n ax.axvline(mean, linestyle='--')\n ax.text(mean, top * 1.01, s='$\\mu$', ha='center')\n\n ax.axvline(mean + std, alpha=0.6, linestyle='--')\n ax.text(mean + std, top * 1.01, s='$\\sigma$', ha='center')\n\n ax.axvline(mean - std, alpha=0.6, linestyle='--')\n ax.text(mean - std, top * 1.01, s='$-\\sigma$', ha='center')\n\n ax.axvline(mean + 2 * std, alpha=0.4, linestyle='--')\n ax.text(mean + 2 * std, top * 1.01, s='$2\\sigma$', ha='center')\n\n ax.axvline(mean - 2 * std, alpha=0.4, linestyle='--')\n ax.text(mean - 2 * std, top * 1.01, s='-$2\\sigma$', 
ha='center')\n\n ax.axvline(mean + 3 * std, alpha=0.2, linestyle='--')\n ax.text(mean + 3 * std, top * 1.01, s='$3\\sigma$', ha='center')\n\n ax.axvline(mean - 3 * std, alpha=0.2, linestyle='--')\n ax.text(mean - 3 * std, top * 1.01, s='-$3\\sigma$', ha='center')\n\n ax.fill_between(x, pdf, where=x < lower_control_limit, facecolor='red', alpha=0.5)\n ax.fill_between(x, pdf, where=x > upper_control_limit, facecolor='red', alpha=0.5)\n\n lower_percent = 100.0 * stats.norm.cdf(lower_control_limit, mean, std)\n lower_percent_text = f'{lower_percent:.02f}% < LCL' if lower_percent > threshold_percent else None\n\n higher_percent = 100.0 - 100.0 * stats.norm.cdf(upper_control_limit, mean, std)\n higher_percent_text = f'{higher_percent:.02f}% > UCL' if higher_percent > threshold_percent else None\n\n left, right = ax.get_xlim()\n bottom, top = ax.get_ylim()\n cpk = calc_ppk(data, upper_control_limit=upper_control_limit, lower_control_limit=lower_control_limit)\n\n lower_sigma_level = (mean - lower_control_limit) / std\n if lower_sigma_level < 6.0:\n ax.axvline(lower_control_limit, color='red', alpha=0.25, label='limits')\n ax.text(lower_control_limit, top * 0.95, s=f'$-{lower_sigma_level:.01f}\\sigma$', ha='center')\n else:\n ax.text(left, top * 0.95, s=f'limit > $-6\\sigma$', ha='left')\n\n upper_sigma_level = (upper_control_limit - mean) / std\n if upper_sigma_level < 6.0:\n ax.axvline(upper_control_limit, color='red', alpha=0.25)\n ax.text(upper_control_limit, top * 0.95, s=f'${upper_sigma_level:.01f}\\sigma$', ha='center')\n else:\n ax.text(right, top * 0.95, s=f'limit > $6\\sigma$', ha='right')\n\n strings = [f'Ppk = {cpk:.02f}']\n\n strings.append(f'$\\mu = {mean:.3g}$')\n strings.append(f'$\\sigma = {std:.3g}$')\n\n if lower_percent_text:\n strings.append(lower_percent_text)\n if higher_percent_text:\n strings.append(higher_percent_text)\n\n props = dict(boxstyle='round', facecolor='white', alpha=0.75, edgecolor='grey')\n ax.text(right - (right - left) * 0.05, 0.85 * top, '\\n'.join(strings), bbox=props, ha='right', va='top')\n\n ax.legend(loc='lower right')", "def plot_power(self, show = False):\n if self.p_treatment - self.p_control < 0:\n thresh = 1 - self.alpha\n else:\n thresh = self.alpha\n\n try:\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n except:\n self.simulate_power()\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n\n sample_null = self.norm_null.rvs(size = self.n_control)\n sample_alt = self.norm_alt.rvs(size = self.n_treatment)\n\n lowest_x = min(min(sample_null), min(sample_alt))\n highest_x = max(max(sample_null), max(sample_alt))\n\n x = np.linspace(lowest_x, highest_x, self.n_control + self.n_treatment)\n\n y_null = self.norm_null.pdf(x)\n y_alt = self.norm_alt.pdf(x)\n\n # Set line parameters for visual styling\n line_null = dict(color = 'blue', width = 2)\n line_alt = dict(color = 'orange', width = 2)\n\n # Plot the null and alt distributions\n data = [\n go.Scatter(\n x = x,\n y = y_null,\n mode = 'lines',\n name = 'Null',\n line = line_null\n ),\n go.Scatter(\n x = x,\n y = y_alt,\n mode = 'lines',\n name = 'alt',\n line = line_alt\n ),\n # Shade P under null distribution\n go.Scatter(\n x = x[x > p_crit],\n y = y_null[np.where(x > p_crit)],\n fill = 'tozeroy',\n showlegend = False,\n line = line_null\n ),\n # Shade beta under alt distribution\n go.Scatter(\n x = x[x < p_crit],\n y = y_alt[np.where(x < p_crit)],\n fill = 'tozeroy',\n showlegend = False,\n line = line_alt\n )\n ]\n\n # Apply axis 
configurations to the plot\n layout = dict(\n yaxis = dict(\n showgrid = False,\n title = 'Probability Density',\n showline = True,\n linecolor = 'black',\n zeroline = False\n ),\n xaxis = dict(\n showgrid = False,\n title = 'Sample Mean Differences (Probabilities)',\n showline = True,\n linecolor = 'black',\n zeroline = False\n ),\n plot_bgcolor = 'white',\n width = 800,\n height = 600,\n title = 'Power'\n )\n\n fig = go.Figure(data = data, layout = layout)\n\n # Mark p_crit with a dashed vertical line\n fig.add_vline(x = p_crit,\n line_width = 2,\n line_dash = 'dash',\n line_color = 'black',\n annotation_text = 'P Crit (Power {:.2f})'.format(self.power),\n annotation_position = 'top right')\n\n if show:\n fig.show()\n\n return fig", "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = [10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + \"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def plot_param(self,param,blocking=True):\n import matplotlib.pyplot as plt\n from matplotlib import cm\n from matplotlib.ticker import LinearLocator\n import numpy as np\n\n fig, ax = plt.subplots()\n\n sweep_param=self.x_param\n\n df_original=self.get_params()\n\n if isinstance(param,str):\n\n param=[param]\n\n if len(param)==1:\n\n y = []\n\n for i in range(len(sweep_param)):\n\n self._set_params(sweep_param(i))\n\n df=self.export_all()\n\n y.append(df[param[0]])\n\n p=ax.scatter([_+1 for _ in range(len(y))],y)\n\n p.set_clip_on(False)\n\n ax.set_ylabel(param[0])\n\n elif len(param)>1:\n\n y=[]\n p=[]\n\n for j in range(len(param)):\n\n y.append([])\n\n for i in range(len(sweep_param)):\n\n self._set_params(sweep_param(i))\n\n df=self.export_all()\n\n y[j].append(df[param[j]])\n\n p.append(ax.scatter([_+1 for _ in range(len(y[j]))],y[j],label=param[j]))\n\n p[j].set_clip_on(False)\n\n ax.set_ylabel(\", \".join(param))\n\n ax.legend()\n\n ax.grid(linestyle='--',linewidth=0.5, color='grey')\n\n ax.autoscale(enable=True, tight=True)\n\n self.x_param.populate_plot_axis(ax)\n\n self._set_params(df_original)\n\n plt.show()\n\n return fig" ]
[ "0.657726", "0.6151264", "0.59895754", "0.59583163", "0.59405845", "0.5909575", "0.5869844", "0.5831743", "0.58210385", "0.5804914", "0.5795052", "0.57885706", "0.5769044", "0.5761594", "0.57519627", "0.5731968", "0.5731957", "0.5722585", "0.5717964", "0.5657798", "0.5656237", "0.5646385", "0.56427354", "0.5642202", "0.5642202", "0.5642202", "0.56151843", "0.5613194", "0.5612536", "0.56061316" ]
0.67259836
0
Evaluates the performance of a model on training data
def eval_perf_train(model, X_train=None, y_train=None):
    # if X_train != None and y_train != None:
    y_hat_train = model.predict(X_train)
    train_mae = metrics.mean_absolute_error(y_train, y_hat_train)
    train_mse = metrics.mean_squared_error(y_train, y_hat_train)
    train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))
    train_r = metrics.r2_score(y_train, y_hat_train)
    print('Evaluating Performance on Training Data:\n')
    print(f'Train Mean Absolute Error: {train_mae:,.2f}')
    print(f'Train Mean Squared Error: {train_mse:,.2f}\n')
    print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')
    print(f'Train R-Square Value: {round(train_r,2)}')
    # if X_test != None and y_test != None:
    #     y_hat_test = model.predict(X_test)
    #     test_mae = metrics.mean_absolute_error(y_test, y_hat_test)
    #     test_mse = metrics.mean_squared_error(y_test, y_hat_test)
    #     test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))
    #     test_r = metrics.r2_score(y_test, y_hat_test)
    #     print('Evaluating Performance on Testing Data:\n')
    #     print(f'Test Mean Absolute Error: {test_mae:,.2f}')
    #     print(f'Test Mean Squared Error: {test_mse:,.2f}\n')
    #     print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')
    #     print(f'Test R-Square Value: {round(test_r,2)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def evaluate_performance(data_loader, model):\n acc = mx.metric.Accuracy()\n\n for idx, (data, label) in enumerate(data_loader):\n data = data.as_in_context(model.ctx)\n label = label.as_in_context(model.ctx)\n pred = model(data)\n pred = mx.nd.argmax(pred, axis=1)\n acc.update(label, pred)\n return acc.get()", "def evaluate_model(sess, model, data_set):\n total_cost = 0.0\n total_r_cost = 0.0\n total_kl_cost = 0.0\n for batch in range(data_set.num_batches):\n unused_orig_x, x, s = data_set.get_batch(batch)\n feed = {model.input_data: x, model.sequence_lengths: s}\n (cost, r_cost,\n kl_cost) = sess.run([model.cost, model.r_cost, model.kl_cost], feed)\n total_cost += cost\n total_r_cost += r_cost\n total_kl_cost += kl_cost\n\n total_cost /= 
(data_set.num_batches)\n total_r_cost /= (data_set.num_batches)\n total_kl_cost /= (data_set.num_batches)\n return (total_cost, total_r_cost, total_kl_cost)", "def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def run(self, data, training=False):\n # Set mode\n if training:\n self._model.train()\n else:\n self._model.eval()\n # Compute\n return self._model(data)", "def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 0 is loss.", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)", "def train_model(mdl, X_train, Y_train, X_val, Y_val, epochs, batch_size):\n mdl.compile(loss='mean_squared_error',\n optimizer='adam')\n\n mdl.fit(X_train,\n Y_train,\n batch_size=batch_size,\n nb_epoch=epochs,\n verbose=1)\n\n # evaluate the model\n score_eval = mdl.evaluate(X_val, Y_val, verbose=0)\n loss = score_eval\n\n print(\"[Evaluation]%s: %.2f%%\" % (mdl.metrics_names, loss))\n\n return mdl, loss", "def evaluate_model(model, X_train, y_train, X_test, y_test):\n model = model\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n\n report = classificationreport(y_test, y_pred, target_names= [\"0\", \"1\"], output_dict=True)\n\n return report", "def evaluate(model, val_data, epoch):\n print('validating')\n\n # 设置为评估模式 \n model.eval() \n\n val_loss = []\n with torch.no_grad():\n DEVICE = config.DEVICE\n\n val_dataloader = DataLoader(dataset=val_data,\n batch_size=config.batch_size,\n shuffle=True,\n pin_memory=True, drop_last=True,\n collate_fn=collate_fn)\n\n for batch, data in enumerate(tqdm(val_dataloader)):\n\n x, y, x_len, y_len, oov, len_oovs = data\n\n if config.is_cuda:\n x = x.to(DEVICE)\n y = y.to(DEVICE)\n x_len = x_len.to(DEVICE)\n len_oovs = len_oovs.to(DEVICE)\n\n loss = model(x, x_len, y, len_oovs, batch=batch, \n num_batches=len(val_dataloader),\n teacher_forcing=True)\n\n val_loss.append(loss.item())\n\n return np.mean(val_loss)", "def evaluate(model, data):\n n_targets = 0\n n_correct_predictions = 0\n\n # Set the model on evaluatio mode.\n model.eval()\n\n # Create progress bar.\n progress_bar = tqdm.tqdm(total=len(data),\n unit='batch',\n desc='[evaluate] batch accuracy: 0.000',\n 
leave=False)\n\n # Loop through validation batches.\n for inputs, targets in data:\n\n # Send data to GPU if CUDA is enabled.\n if next(model.parameters()).is_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Feed forward.\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n\n # Choose the class with maximum probability.\n _, predictions = torch.max(outputs, 1)\n\n accuracy = (predictions == targets).sum().item() / len(targets)\n progress_bar.update(1)\n progress_bar.set_description(\n '[evaluate] batch accuracy: {accuracy:.3f}'.format(\n accuracy=accuracy))\n\n # Accumulate targets and correct predictions count.\n n_targets += len(targets)\n n_correct_predictions += (predictions == targets).sum().item()\n\n # Close progress bar.\n progress_bar.close()\n\n return n_correct_predictions / n_targets", "def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, model_dir, restore_dir=None):\n # reload weights from restore_dir if specified\n if restore_dir is not None:\n model = BertForSequenceTagging.from_pretrained(tagger_model_dir)\n \n best_val_f1 = 0.0\n patience_counter = 0\n\n for epoch in range(1, params.epoch_num + 1):\n # Run one epoch\n logging.info(\"Epoch {}/{}\".format(epoch, params.epoch_num))\n\n # Compute number of batches in one epoch\n params.train_steps = params.train_size // params.batch_size\n params.val_steps = params.val_size // params.batch_size\n\n # data iterator for training\n train_data_iterator = data_loader.data_iterator(train_data, shuffle=True)\n\n # Train for one epoch on training set\n train_epoch(model, train_data_iterator, optimizer, scheduler, params)\n\n # data iterator for evaluation\n # train_data_iterator = data_loader.data_iterator(train_data, shuffle=False)\n val_data_iterator = data_loader.data_iterator(val_data, shuffle=False)\n\n # Evaluate for one epoch on training set and validation set\n # params.eval_steps = params.train_steps\n # train_metrics = evaluate(model, train_data_iterator, params, mark='Train') # callback train f1\n params.eval_steps = params.val_steps\n val_metrics = evaluate(model, val_data_iterator, params, mark='Val')\n \n val_f1 = val_metrics['f1']\n improve_f1 = val_f1 - best_val_f1\n if improve_f1 > 1e-5: \n logging.info(\"- Found new best F1\")\n best_val_f1 = val_f1\n model.save_pretrained(model_dir)\n if improve_f1 < params.patience:\n patience_counter += 1\n else:\n patience_counter = 0\n else:\n patience_counter += 1\n\n # Early stopping and logging best f1\n if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == params.epoch_num:\n logging.info(\"Best val f1: {:05.2f}\".format(best_val_f1))\n break", "def evaluate_model(model, X_test, Y_test, category_names): \n # predict on the X_test\n y_pred = model.predict(X_test)\n \n # build classification report on every column\n performances = []\n for i in range(len(category_names)):\n performances.append([f1_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n precision_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n recall_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro')])\n # build dataframe\n performances = pd.DataFrame(performances, columns=['f1 score', 'precision', 'recall'],\n index = category_names) \n return performances", "def evaluate(args, dev_dataset, model):\n\n if args.dynamic_batching:\n dev_sampler = CustomBatchSampler(dev_dataset, args.dev_batch_size)\n dev_dataloader = DataLoader(\n dev_dataset,\n batch_sampler=dev_sampler,\n 
num_workers=0,\n collate_fn=dynamic_padding_collate_fn\n )\n else:\n dev_sampler = SequentialSampler(dev_dataset)\n dev_dataloader = DataLoader(dev_dataset, sampler=dev_sampler,\n batch_size=args.dev_batch_size, num_workers=0)\n\n model.eval()\n loss_fn = nn.CrossEntropyLoss(ignore_index=0)\n iterator = tqdm(dev_dataloader, desc=\"Evaluation\", smoothing=0.05)\n loss_cum = None\n num_batch = 0\n for step, batch_cpu in enumerate(iterator):\n num_batch += 1\n\n batch = tuple(t.to(args.device) for t in batch_cpu)\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n with torch.no_grad():\n outputs = model(**inputs)\n\n # Calculate loss of just the question part\n q_mask = (inputs['token_type_ids'] == 2)\n masked_labels = inputs['input_ids'].masked_fill(~q_mask, 0)\n shift_labels = masked_labels[..., 1:].contiguous()\n\n lm_logits = outputs[0]\n shift_logits = lm_logits[..., : -1, :].contiguous()\n loss = loss_fn(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1))\n\n if loss_cum is None:\n loss_cum = loss\n else:\n loss_cum += loss\n\n model.train()\n\n return loss_cum.item() / num_batch", "def evaluate(model, iterations, use_cuda=False):\n\n logger.debug(\"Allocating input and target tensors on GPU : %r\", use_cuda)\n\n # create the instance of data loader\n data_loader = DataLoaderMnist(cuda=use_cuda, seed=1, shuffle=False, train_batch_size=64, test_batch_size=100)\n\n model.eval()\n total = 0\n correct = 0\n current_iterations = 0\n\n with torch.no_grad():\n for inputs, labels in data_loader.test_loader:\n inputs, labels = inputs.to(data_loader.device), labels.to(data_loader.device)\n output = model(inputs)\n current_iterations += 1\n _, predicted = torch.max(output.data, dim=1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if iterations is not None:\n if current_iterations >= iterations:\n break\n\n accuracy = correct / total\n return accuracy", "def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc = Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n 
self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n rmse_per_night.append(compute_rmse_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. 
'\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))", "def evaluate_model_performance():\n\n config = load_config()\n data_processor = DataProcessor()\n df_test = data_processor.create_user_click_sequence(\n start_date=config[\"test_split_date\"]\n )\n df_test[\"truths\"] = df_test[\"merchant_seq\"].apply(lambda x: list(set(x)))\n truth_dict = dict(zip(df_test[\"user_id\"], df_test[\"truths\"]))\n\n # get model\n print(\"model training...\")\n model = Merchant2VecModel()\n model.train()\n\n # compute mAP@k\n k = model.num_rec\n all_truths, all_preds = [], []\n for user_id, user_merchants in truth_dict.items():\n this_pred = model.generate_predictions(\n user_id=user_id, eval_date=config[\"test_split_date\"]\n )\n all_truths.append(user_merchants)\n all_preds.append(this_pred)\n score = mapk(all_truths, all_preds, k)\n print(\"mAP@{} for current model: {:.4f}\".format(k, score))", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def evaluate_model(model_name, y_true, y_pred):\n\n # Calculate performance metrics\n rmse_eval = evaluate_rmse(y_true, y_pred)\n mae_eval = evaluate_mae(y_true, y_pred) \n r2_eval = evaluate_r2(y_true, y_pred)\n\n # Print results\n print_evaluation(model_name, mae_eval, rmse_eval, r2_eval)", "def evaluate(model, optimizer, loss_function, loader, device, labels, log_every_n=10):\n\n model.eval()\n\n batch_wise_true_labels = []\n batch_wise_predictions = []\n\n loss_history = []\n running_loss = 0.\n running_loss_history = []\n\n with torch.no_grad(): # Disable gradient computation - required only during training\n for i, batch in tqdm(enumerate(loader)):\n\n logits = model(batch[0].to(device), batch[1]).squeeze()\n loss = loss_function(logits, batch[2].to(device))\n 
loss_history.append(loss.item())\n\n running_loss += (loss_history[-1] - running_loss) / (i + 1) # Compute rolling average\n\n running_loss_history.append(running_loss)\n\n predictions = torch.sigmoid(logits)\n\n batch_wise_true_labels.append(batch[2].view(-1).tolist())\n batch_wise_predictions.append(predictions.view(-1).tolist())\n\n # flatten the list of predictions using itertools\n all_true_labels = list(chain.from_iterable(batch_wise_true_labels))\n all_predictions = list(chain.from_iterable(batch_wise_predictions))\n all_predictions = [1 if p > 0.5 else 0 for p in all_predictions]\n\n\n print(\"Evaluation Loss: \", running_loss)\n # Now we can generate a classification report\n print(\"Classification report after epoch:\")\n print(f1_score(all_true_labels, all_predictions, average='micro'))\n print(classification_report(all_true_labels, all_predictions, labels=labels))\n\n return loss_history, running_loss_history", "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def evaluate(cfg: DictConfig):\n\n # suppress TensorFlow and DALI warnings\n suppress_warnings()\n\n if cfg.USE_MULTI_GPUS.VALUE:\n # change number of visible gpus for evaluation\n set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)\n # update batch size according to available gpus\n data_generator.update_batch_size(cfg)\n\n if cfg.OPTIMIZATION.AMP:\n print(\"Enabling Automatic Mixed Precision(AMP) training\")\n policy = mixed_precision.Policy('mixed_float16')\n mixed_precision.set_global_policy(policy)\n\n if cfg.OPTIMIZATION.XLA:\n print(\"Enabling Automatic Mixed Precision(XLA) training\")\n tf.config.optimizer.set_jit(True)\n\n # create model\n strategy = None\n if cfg.USE_MULTI_GPUS.VALUE:\n # multi gpu training using tensorflow mirrored strategy\n strategy = tf.distribute.MirroredStrategy(\n cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()\n )\n print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))\n with strategy.scope():\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n else:\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n\n model.compile(\n optimizer=optimizer,\n loss=unet3p_hybrid_loss,\n metrics=[dice_coef],\n )\n\n # weights model path\n checkpoint_path = join_paths(\n cfg.WORK_DIR,\n cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,\n f\"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5\"\n )\n\n assert 
os.path.exists(checkpoint_path), \\\n f\"Model weight's file does not exist at \\n{checkpoint_path}\"\n\n # TODO: verify without augment it produces same results\n # load model weights\n model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)\n model.summary()\n\n # data generators\n val_generator = data_generator.get_data_generator(cfg, \"VAL\", strategy)\n validation_steps = data_generator.get_iterations(cfg, mode=\"VAL\")\n\n # evaluation metric\n evaluation_metric = \"dice_coef\"\n if len(model.outputs) > 1:\n evaluation_metric = f\"{model.output_names[0]}_dice_coef\"\n\n result = model.evaluate(\n x=val_generator,\n steps=validation_steps,\n workers=cfg.DATALOADER_WORKERS,\n return_dict=True,\n )\n\n # return computed loss, validation accuracy, and it's metric name\n return result, evaluation_metric" ]
[ "0.73358834", "0.73080355", "0.72717404", "0.72445047", "0.72132945", "0.72110033", "0.71732163", "0.7143858", "0.71342355", "0.70897985", "0.7084085", "0.7028087", "0.70188946", "0.7007979", "0.6981331", "0.69766915", "0.69762504", "0.6974851", "0.6973674", "0.6969121", "0.69660336", "0.69486946", "0.69434327", "0.69365007", "0.6923862", "0.6918139", "0.69146544", "0.6901599", "0.6892579", "0.68862015" ]
0.7601281
0
Evaluate the performance of a given model on the testing data
def eval_perf_test(model, X_test, y_test): y_hat_test = model.predict(X_test) test_mae = metrics.mean_absolute_error(y_test, y_hat_test) test_mse = metrics.mean_squared_error(y_test, y_hat_test) test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test)) test_r = metrics.r2_score(y_test, y_hat_test) print('Evaluating Performance on Testing Data:\n') print(f'Test Mean Absolute Error: {test_mae:,.2f}') print(f'Test Mean Squared Error: {test_mse:,.2f}\n') print(f'Test Root Mean Squared Error: {test_rmse:,.2f}') print(f'Test R-Square Value: {round(test_r,2)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != None:\n\n y_hat_train = model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def evaluate_model(model, X_train, y_train, X_test, y_test):\n model = model\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n\n report = classificationreport(y_test, y_pred, target_names= [\"0\", \"1\"], output_dict=True)\n\n return report", "def evaluate_model(model, X_test, Y_test): \n #Make predictions with the model\n Y_pred = model.predict(X_test)\n #convert numpy output to dataframe and add columns\n Y_pred_df = pd.DataFrame(Y_pred)\n Y_pred_df.columns = Y_test.columns\n 
#Convert predictions and correct y values to float for faciliate comparison\n Y_pred_df = Y_pred_df.astype('float64')\n Y_test = Y_test.astype('float64')\n print_score(Y_test, Y_pred_df, 'weighted avg')", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def evaluate_model(model, X_test, Y_test, category_names): \n # predict on the X_test\n y_pred = model.predict(X_test)\n \n # build classification report on every column\n performances = []\n for i in range(len(category_names)):\n performances.append([f1_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n precision_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n recall_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro')])\n # build dataframe\n performances = pd.DataFrame(performances, columns=['f1 score', 'precision', 'recall'],\n index = category_names) \n return performances", "def evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n rmse_per_night.append(compute_rmse_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n 
mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. '\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))", "def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)", "def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)", "def evaluate_model_performance():\n\n config = load_config()\n data_processor = DataProcessor()\n df_test = data_processor.create_user_click_sequence(\n start_date=config[\"test_split_date\"]\n )\n df_test[\"truths\"] = df_test[\"merchant_seq\"].apply(lambda x: list(set(x)))\n truth_dict = dict(zip(df_test[\"user_id\"], df_test[\"truths\"]))\n\n # get model\n print(\"model training...\")\n model = Merchant2VecModel()\n model.train()\n\n # compute mAP@k\n k = model.num_rec\n all_truths, all_preds = [], []\n for user_id, user_merchants in truth_dict.items():\n this_pred = model.generate_predictions(\n user_id=user_id, eval_date=config[\"test_split_date\"]\n )\n all_truths.append(user_merchants)\n all_preds.append(this_pred)\n score = mapk(all_truths, all_preds, k)\n print(\"mAP@{} for current model: {:.4f}\".format(k, score))", "def evaluate_model(model_name, y_true, y_pred):\n\n # Calculate performance metrics\n rmse_eval = evaluate_rmse(y_true, y_pred)\n mae_eval = evaluate_mae(y_true, y_pred) \n r2_eval = evaluate_r2(y_true, y_pred)\n\n # Print results\n print_evaluation(model_name, mae_eval, rmse_eval, r2_eval)", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def test_evaluate_model(sequential_model, model_data):\n _, _, _, _, x_test, y_test = model_data\n compile_model(sequential_model)\n output = evaluate_model(sequential_model, x_test, y_test, 64)\n assert len(output) == 2", "def evaluate_model(model, X_test, Y_test, category_names):\n\n y_pred = model.predict(X_test)\n Y_test_as_array = np.array(Y_test)\n for i in range(len(category_names)):\n print(\"{} accuracy {} precision {} recall {} f1 {}\".format(\n category_names[i],\n (y_pred[:, i] == Y_test_as_array[:, i]).mean(), # accuracy\n precision_score(Y_test_as_array[:, 
i], y_pred[:, i], average=None), # precision\n recall_score(Y_test_as_array[:, i], y_pred[:, i], average=None), # recall\n f1_score(Y_test_as_array[:, i], y_pred[:, i], average=None) # f1\n ))\n print(\"mean accuracy {}\".format((y_pred == Y_test_as_array).mean().mean()))", "def evaluate_model(model, testset):\n\n # Sort data by top level label to ease inspection\n testset = testset.sort_using_layer(-1, reverse=True)\n\n # Feed the samples to the model to obtain each layers' activations\n v = testset.get_layer(0)\n hs = model.transform(v)[1:]\n\n # Read model weights\n ws = [params['w'] for params in model.parameters]\n del params\n\n # Take the (hidden) labels from the data set\n ls = testset.get_layers()[1:]\n\n # In each layer, reorder and invert neurons to match best with the labels\n for i in range(len(ls)):\n hs[i], ws[i] = align_with_labels(ls[i], hs[i], ws[i])\n del i\n\n # Measure correlations, etcetera\n metrics = compare(ls, hs)\n\n # Simply return a dict with all used variables\n return locals()", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def evaluate_model(model, model_name, X_train, Y_train, X_test, ground_truth):\n\tprint(\"\t\tModel [\" + model_name + \"]\")\n\tmodel.fit(X_train, Y_train)\n\tY_pred = model.predict(X_test).astype(int)\n\tregression = np.sqrt(metrics.mean_squared_error(ground_truth, Y_pred))\n\treturn regression", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = pd.DataFrame(data = model.predict(X_test), columns = category_names)\n\n precision, recall, f1_score = [], [], []\n\n for category in category_names:\n scores = classification_report(Y_test[category], y_pred[category])\n precision.append([x for x in scores.strip().split(\"avg / total\")[1].strip().split(\" \") \n if len(x) > 0][:3][0])\n recall.append([x for x in scores.strip().split(\"avg / total\")[1].strip().split(\" \") \n if len(x) > 0][:3][1])\n \n model_metric = pd.concat([\n pd.DataFrame(data = [precision, recall], index = [\"precision\", \"recall\"], \n columns = category_names),\n (Y_test.reset_index() == y_pred.reset_index()).mean()[1:].to_frame(\"accuracy\").T\n ])\n\n for col in model_metric.columns:\n model_metric[col] = model_metric[col].astype(float)\n\n return model_metric", "def assess_model(model, test_data, label):\n return model.score(test_data,label)", "def eval_model(self, model):\n evaluation = model.evaluate(x=self.xt_test, y=self.yt_test)\n print(\"loss : \" + str(round(evaluation[0]*100, 2)) + \"%\")\n print(\"accuracy: \" + str(round(evaluation[1]*100, 2)) + \"%\")", "def test(self):\r\n \r\n args = self.args\r\n model 
= self.model\r\n dataset = self.dataset\r\n \r\n dataset.set_split('test')\r\n batch_generator = generate_nmt_batches(dataset, \r\n batch_size=len(dataset), \r\n device=args.device)\r\n\r\n acc_sum = 0.0\r\n model.eval()\r\n \r\n for batch_index, batch_dict in enumerate(batch_generator):\r\n # step 1. compute the output\r\n if isinstance(model,NMTModelWithMLTM):\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_mltm_vector'],\r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n else:\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n\r\n acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)\r\n acc_sum += acc_t\r\n \r\n return acc_sum / (batch_index+1)", "def evaluate(model_class, params, matches, valid_index, test_index,\n seasons_train, seasons_valid, seasons_test, eval_functions):\n model = model_class(**params)\n output = {}\n start = time()\n predictions = model.fit_predict(matches, seasons_train, seasons_valid, seasons_test)\n output['train_time'] = np.round((time() - start) / 60., 4)\n # TODO: Monitor train results\n for eval_set, index in zip(('valid', 'test'), (valid_index, test_index)):\n for eval_fun in eval_functions:\n output['{}_{}'.format(eval_set, eval_fun.__name__)] = np.round(eval_fun(predictions[index],\n matches['FTR'][index]), 4)\n output['{}_size'.format(eval_set)] = index.sum()\n output['model'] = model_class.__name__\n return {**params, **output}", "def evaluate_model(model, X_test, y_test, category_names):\n # Predict for test set\n y_pred = model.predict(X_test)\n \n print(\"**** Scores for each category *****\\n\")\n for i in range(36):\n print(\"Scores for '{}':\".format(category_names[i]))\n print(classification_report(y_test.values[:,i], y_pred[:,i]))", "def evaluate_model(model, X_test, y_test, category_names):\n y_pred = model.predict(X_test)\n labels = np.unique(y_pred)\n print(labels)\n #print out score for each class and mean scores, including precision, recall, f1 score\n print(classification_report(y_test.values, y_pred, target_names=category_names.values))", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n Y_pred = pd.DataFrame(Y_pred, columns=category_names)\n \n # calculate summary stats on test data\n results = pd.DataFrame()\n for column_name in Y_pred.columns:\n col_report = classification_report(y_true=Y_test[[column_name]], y_pred=Y_pred[[column_name]], output_dict=True)\n accuracy = col_report['accuracy']\n precision = col_report['macro avg']['precision']\n recall = col_report['macro avg']['recall']\n results[column_name] = [accuracy, precision, recall]\n results.index = ['accuracy', 'precision', 'recall']\n results.mean(axis=1) \n \n # save results to local csv file\n model_name = type(model.best_params_['clf']).__name__\n avg_accuracy = results.mean(axis=1)['accuracy']\n avg_precision = results.mean(axis=1)['precision']\n avg_recall = results.mean(axis=1)['recall']\n params = model.best_params_\n stored_results = pd.DataFrame({'Model': [model_name], 'Accuracy': [avg_accuracy], 'Precision': [avg_precision], \n 'Recall': [avg_recall], 'Parameters': [params]})\n\n add_header = not os.path.isfile('models/model_results.csv')\n with open('models/model_results.csv', 'a') as f:\n stored_results.to_csv(f, header=add_header, index=False)", "def evaluate_model(model, X_test, Y_test, category_names): \n \n Y_pred = model.predict(X_test)\n print(classification_report(Y_test, Y_pred))\n 
display_results(Y_test, Y_pred)" ]
[ "0.7771298", "0.76623577", "0.7547008", "0.75343114", "0.7407108", "0.73708004", "0.7349949", "0.7340081", "0.7324706", "0.73219764", "0.73095006", "0.72968715", "0.72346336", "0.7234285", "0.719979", "0.7182023", "0.71800107", "0.71700156", "0.71667916", "0.7146786", "0.71438515", "0.7129115", "0.7119242", "0.7078164", "0.70738935", "0.70642376", "0.7064223", "0.7057235", "0.70547694", "0.70474195" ]
0.7736204
1
Evaluates the performance of a model on training data
def eval_perf_total(model, X_train, y_train, X_test, y_test): y_hat_train = model.predict(X_train) y_hat_test = model.predict(X_test) train_mae = metrics.mean_absolute_error(y_train, y_hat_train) train_mse = metrics.mean_squared_error(y_train, y_hat_train) train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train)) train_r = metrics.r2_score(y_train, y_hat_train) print('Evaluating Performance on Training Data:\n') print(f' Train Mean Absolute Error: {train_mae:,.2f}') print(f' Train Mean Squared Error: {train_mse:,.2f}\n') print(f'Train Root Mean Squared Error: {train_rmse:,.2f}') print(f'Train R-Square Value: {round(train_r,2)}') print('\n'+'---'*25+'\n') test_mae = metrics.mean_absolute_error(y_test, y_hat_test) test_mse = metrics.mean_squared_error(y_test, y_hat_test) test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test)) test_r = metrics.r2_score(y_test, y_hat_test) print('Evaluating Performance on Testing Data:\n') print(f' Test Mean Absolute Error: {test_mae:,.2f}') print(f' Test Mean Squared Error: {test_mse:,.2f}\n') print(f'Test Root Mean Squared Error: {test_rmse:,.2f}') print(f'Test R-Square Value: {round(test_r,2)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != None:\n\n y_hat_train = model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def evaluate_performance(data_loader, model):\n acc = mx.metric.Accuracy()\n\n for idx, (data, label) in enumerate(data_loader):\n data = data.as_in_context(model.ctx)\n label = label.as_in_context(model.ctx)\n pred = model(data)\n pred = mx.nd.argmax(pred, axis=1)\n acc.update(label, pred)\n return acc.get()", "def evaluate_model(sess, model, data_set):\n total_cost = 0.0\n total_r_cost = 0.0\n total_kl_cost = 0.0\n for batch in range(data_set.num_batches):\n unused_orig_x, x, s = data_set.get_batch(batch)\n feed = {model.input_data: x, model.sequence_lengths: s}\n (cost, r_cost,\n kl_cost) = sess.run([model.cost, model.r_cost, model.kl_cost], feed)\n total_cost += cost\n 
total_r_cost += r_cost\n total_kl_cost += kl_cost\n\n total_cost /= (data_set.num_batches)\n total_r_cost /= (data_set.num_batches)\n total_kl_cost /= (data_set.num_batches)\n return (total_cost, total_r_cost, total_kl_cost)", "def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def run(self, data, training=False):\n # Set mode\n if training:\n self._model.train()\n else:\n self._model.eval()\n # Compute\n return self._model(data)", "def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 0 is loss.", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)", "def train_model(mdl, X_train, Y_train, X_val, Y_val, epochs, batch_size):\n mdl.compile(loss='mean_squared_error',\n optimizer='adam')\n\n mdl.fit(X_train,\n Y_train,\n batch_size=batch_size,\n nb_epoch=epochs,\n verbose=1)\n\n # evaluate the model\n score_eval = mdl.evaluate(X_val, Y_val, verbose=0)\n loss = score_eval\n\n print(\"[Evaluation]%s: %.2f%%\" % (mdl.metrics_names, loss))\n\n return mdl, loss", "def evaluate_model(model, X_train, y_train, X_test, y_test):\n model = model\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n\n report = classificationreport(y_test, y_pred, target_names= [\"0\", \"1\"], output_dict=True)\n\n return report", "def evaluate(model, val_data, epoch):\n print('validating')\n\n # 设置为评估模式 \n model.eval() \n\n val_loss = []\n with torch.no_grad():\n DEVICE = config.DEVICE\n\n val_dataloader = DataLoader(dataset=val_data,\n batch_size=config.batch_size,\n shuffle=True,\n pin_memory=True, drop_last=True,\n collate_fn=collate_fn)\n\n for batch, data in enumerate(tqdm(val_dataloader)):\n\n x, y, x_len, y_len, oov, len_oovs = data\n\n if config.is_cuda:\n x = x.to(DEVICE)\n y = y.to(DEVICE)\n x_len = x_len.to(DEVICE)\n len_oovs = len_oovs.to(DEVICE)\n\n loss = model(x, x_len, y, len_oovs, batch=batch, \n num_batches=len(val_dataloader),\n teacher_forcing=True)\n\n val_loss.append(loss.item())\n\n return np.mean(val_loss)", "def evaluate(model, data):\n n_targets = 0\n n_correct_predictions = 0\n\n # Set the model on evaluatio mode.\n model.eval()\n\n # Create progress bar.\n progress_bar = 
tqdm.tqdm(total=len(data),\n unit='batch',\n desc='[evaluate] batch accuracy: 0.000',\n leave=False)\n\n # Loop through validation batches.\n for inputs, targets in data:\n\n # Send data to GPU if CUDA is enabled.\n if next(model.parameters()).is_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Feed forward.\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n\n # Choose the class with maximum probability.\n _, predictions = torch.max(outputs, 1)\n\n accuracy = (predictions == targets).sum().item() / len(targets)\n progress_bar.update(1)\n progress_bar.set_description(\n '[evaluate] batch accuracy: {accuracy:.3f}'.format(\n accuracy=accuracy))\n\n # Accumulate targets and correct predictions count.\n n_targets += len(targets)\n n_correct_predictions += (predictions == targets).sum().item()\n\n # Close progress bar.\n progress_bar.close()\n\n return n_correct_predictions / n_targets", "def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, model_dir, restore_dir=None):\n # reload weights from restore_dir if specified\n if restore_dir is not None:\n model = BertForSequenceTagging.from_pretrained(tagger_model_dir)\n \n best_val_f1 = 0.0\n patience_counter = 0\n\n for epoch in range(1, params.epoch_num + 1):\n # Run one epoch\n logging.info(\"Epoch {}/{}\".format(epoch, params.epoch_num))\n\n # Compute number of batches in one epoch\n params.train_steps = params.train_size // params.batch_size\n params.val_steps = params.val_size // params.batch_size\n\n # data iterator for training\n train_data_iterator = data_loader.data_iterator(train_data, shuffle=True)\n\n # Train for one epoch on training set\n train_epoch(model, train_data_iterator, optimizer, scheduler, params)\n\n # data iterator for evaluation\n # train_data_iterator = data_loader.data_iterator(train_data, shuffle=False)\n val_data_iterator = data_loader.data_iterator(val_data, shuffle=False)\n\n # Evaluate for one epoch on training set and validation set\n # params.eval_steps = params.train_steps\n # train_metrics = evaluate(model, train_data_iterator, params, mark='Train') # callback train f1\n params.eval_steps = params.val_steps\n val_metrics = evaluate(model, val_data_iterator, params, mark='Val')\n \n val_f1 = val_metrics['f1']\n improve_f1 = val_f1 - best_val_f1\n if improve_f1 > 1e-5: \n logging.info(\"- Found new best F1\")\n best_val_f1 = val_f1\n model.save_pretrained(model_dir)\n if improve_f1 < params.patience:\n patience_counter += 1\n else:\n patience_counter = 0\n else:\n patience_counter += 1\n\n # Early stopping and logging best f1\n if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == params.epoch_num:\n logging.info(\"Best val f1: {:05.2f}\".format(best_val_f1))\n break", "def evaluate_model(model, X_test, Y_test, category_names): \n # predict on the X_test\n y_pred = model.predict(X_test)\n \n # build classification report on every column\n performances = []\n for i in range(len(category_names)):\n performances.append([f1_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n precision_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n recall_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro')])\n # build dataframe\n performances = pd.DataFrame(performances, columns=['f1 score', 'precision', 'recall'],\n index = category_names) \n return performances", "def evaluate(args, dev_dataset, model):\n\n if args.dynamic_batching:\n dev_sampler = CustomBatchSampler(dev_dataset, 
args.dev_batch_size)\n dev_dataloader = DataLoader(\n dev_dataset,\n batch_sampler=dev_sampler,\n num_workers=0,\n collate_fn=dynamic_padding_collate_fn\n )\n else:\n dev_sampler = SequentialSampler(dev_dataset)\n dev_dataloader = DataLoader(dev_dataset, sampler=dev_sampler,\n batch_size=args.dev_batch_size, num_workers=0)\n\n model.eval()\n loss_fn = nn.CrossEntropyLoss(ignore_index=0)\n iterator = tqdm(dev_dataloader, desc=\"Evaluation\", smoothing=0.05)\n loss_cum = None\n num_batch = 0\n for step, batch_cpu in enumerate(iterator):\n num_batch += 1\n\n batch = tuple(t.to(args.device) for t in batch_cpu)\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n with torch.no_grad():\n outputs = model(**inputs)\n\n # Calculate loss of just the question part\n q_mask = (inputs['token_type_ids'] == 2)\n masked_labels = inputs['input_ids'].masked_fill(~q_mask, 0)\n shift_labels = masked_labels[..., 1:].contiguous()\n\n lm_logits = outputs[0]\n shift_logits = lm_logits[..., : -1, :].contiguous()\n loss = loss_fn(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1))\n\n if loss_cum is None:\n loss_cum = loss\n else:\n loss_cum += loss\n\n model.train()\n\n return loss_cum.item() / num_batch", "def evaluate(model, iterations, use_cuda=False):\n\n logger.debug(\"Allocating input and target tensors on GPU : %r\", use_cuda)\n\n # create the instance of data loader\n data_loader = DataLoaderMnist(cuda=use_cuda, seed=1, shuffle=False, train_batch_size=64, test_batch_size=100)\n\n model.eval()\n total = 0\n correct = 0\n current_iterations = 0\n\n with torch.no_grad():\n for inputs, labels in data_loader.test_loader:\n inputs, labels = inputs.to(data_loader.device), labels.to(data_loader.device)\n output = model(inputs)\n current_iterations += 1\n _, predicted = torch.max(output.data, dim=1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if iterations is not None:\n if current_iterations >= iterations:\n break\n\n accuracy = correct / total\n return accuracy", "def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc = Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / 
(self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n rmse_per_night.append(compute_rmse_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. 
'\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))", "def evaluate_model_performance():\n\n config = load_config()\n data_processor = DataProcessor()\n df_test = data_processor.create_user_click_sequence(\n start_date=config[\"test_split_date\"]\n )\n df_test[\"truths\"] = df_test[\"merchant_seq\"].apply(lambda x: list(set(x)))\n truth_dict = dict(zip(df_test[\"user_id\"], df_test[\"truths\"]))\n\n # get model\n print(\"model training...\")\n model = Merchant2VecModel()\n model.train()\n\n # compute mAP@k\n k = model.num_rec\n all_truths, all_preds = [], []\n for user_id, user_merchants in truth_dict.items():\n this_pred = model.generate_predictions(\n user_id=user_id, eval_date=config[\"test_split_date\"]\n )\n all_truths.append(user_merchants)\n all_preds.append(this_pred)\n score = mapk(all_truths, all_preds, k)\n print(\"mAP@{} for current model: {:.4f}\".format(k, score))", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def evaluate_model(model_name, y_true, y_pred):\n\n # Calculate performance metrics\n rmse_eval = evaluate_rmse(y_true, y_pred)\n mae_eval = evaluate_mae(y_true, y_pred) \n r2_eval = evaluate_r2(y_true, y_pred)\n\n # Print results\n print_evaluation(model_name, mae_eval, rmse_eval, r2_eval)", "def evaluate(model, optimizer, loss_function, loader, device, labels, log_every_n=10):\n\n model.eval()\n\n batch_wise_true_labels = []\n batch_wise_predictions = []\n\n loss_history = []\n running_loss = 0.\n running_loss_history = []\n\n with torch.no_grad(): # Disable gradient computation - required only during training\n for i, batch in tqdm(enumerate(loader)):\n\n logits = model(batch[0].to(device), batch[1]).squeeze()\n loss = loss_function(logits, batch[2].to(device))\n 
loss_history.append(loss.item())\n\n running_loss += (loss_history[-1] - running_loss) / (i + 1) # Compute rolling average\n\n running_loss_history.append(running_loss)\n\n predictions = torch.sigmoid(logits)\n\n batch_wise_true_labels.append(batch[2].view(-1).tolist())\n batch_wise_predictions.append(predictions.view(-1).tolist())\n\n # flatten the list of predictions using itertools\n all_true_labels = list(chain.from_iterable(batch_wise_true_labels))\n all_predictions = list(chain.from_iterable(batch_wise_predictions))\n all_predictions = [1 if p > 0.5 else 0 for p in all_predictions]\n\n\n print(\"Evaluation Loss: \", running_loss)\n # Now we can generate a classification report\n print(\"Classification report after epoch:\")\n print(f1_score(all_true_labels, all_predictions, average='micro'))\n print(classification_report(all_true_labels, all_predictions, labels=labels))\n\n return loss_history, running_loss_history", "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def evaluate(cfg: DictConfig):\n\n # suppress TensorFlow and DALI warnings\n suppress_warnings()\n\n if cfg.USE_MULTI_GPUS.VALUE:\n # change number of visible gpus for evaluation\n set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)\n # update batch size according to available gpus\n data_generator.update_batch_size(cfg)\n\n if cfg.OPTIMIZATION.AMP:\n print(\"Enabling Automatic Mixed Precision(AMP) training\")\n policy = mixed_precision.Policy('mixed_float16')\n mixed_precision.set_global_policy(policy)\n\n if cfg.OPTIMIZATION.XLA:\n print(\"Enabling Automatic Mixed Precision(XLA) training\")\n tf.config.optimizer.set_jit(True)\n\n # create model\n strategy = None\n if cfg.USE_MULTI_GPUS.VALUE:\n # multi gpu training using tensorflow mirrored strategy\n strategy = tf.distribute.MirroredStrategy(\n cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()\n )\n print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))\n with strategy.scope():\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n else:\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n\n model.compile(\n optimizer=optimizer,\n loss=unet3p_hybrid_loss,\n metrics=[dice_coef],\n )\n\n # weights model path\n checkpoint_path = join_paths(\n cfg.WORK_DIR,\n cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,\n f\"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5\"\n )\n\n assert 
os.path.exists(checkpoint_path), \\\n f\"Model weight's file does not exist at \\n{checkpoint_path}\"\n\n # TODO: verify without augment it produces same results\n # load model weights\n model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)\n model.summary()\n\n # data generators\n val_generator = data_generator.get_data_generator(cfg, \"VAL\", strategy)\n validation_steps = data_generator.get_iterations(cfg, mode=\"VAL\")\n\n # evaluation metric\n evaluation_metric = \"dice_coef\"\n if len(model.outputs) > 1:\n evaluation_metric = f\"{model.output_names[0]}_dice_coef\"\n\n result = model.evaluate(\n x=val_generator,\n steps=validation_steps,\n workers=cfg.DATALOADER_WORKERS,\n return_dict=True,\n )\n\n # return computed loss, validation accuracy, and it's metric name\n return result, evaluation_metric" ]
[ "0.7601281", "0.73080355", "0.72717404", "0.72445047", "0.72132945", "0.72110033", "0.71732163", "0.7143858", "0.71342355", "0.70897985", "0.7084085", "0.7028087", "0.70188946", "0.7007979", "0.6981331", "0.69766915", "0.69762504", "0.6974851", "0.6973674", "0.6969121", "0.69660336", "0.69486946", "0.69434327", "0.69365007", "0.6923862", "0.6918139", "0.69146544", "0.6901599", "0.6892579", "0.68862015" ]
0.73358834
1
Get parameter constraints. Returns dict or sequence of dict Equality and inequality constraints. See scipy.optimize.minimize
def get_constraints(self): return ({'type': 'ineq', 'fun': lambda x: x[1] - x[2]}, {'type': 'ineq', 'fun': lambda x: x[3] - x[4]})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getConstraints(self, nStates, nParams):\n # currently untested and unused\n raise NotImplementedError(\n \"constraints have not been implemented for this Experiment\")", "def objective_constraints(self, variables, mask, load, generation, reservations=None):\n constraint_list = []\n constraint_list += [cvx.NonPos(-variables['regu_c'])]\n constraint_list += [cvx.NonPos(-variables['regd_c'])]\n constraint_list += [cvx.NonPos(-variables['regu_d'])]\n constraint_list += [cvx.NonPos(-variables['regd_d'])]\n # p = opt_vars['dis'] - opt_vars['ch']\n # constraint_list += [cvx.NonPos(opt_vars['regd_d'] - cvx.pos(p))]\n # constraint_list += [cvx.NonPos(opt_vars['regu_c'] - cvx.neg(p))]\n if self.combined_market:\n constraint_list += [cvx.Zero(variables['regd_d'] + variables['regd_c'] - variables['regu_d'] - variables['regu_c'])]\n\n return constraint_list", "def constraints(self) -> Tuple[NDArray, NDArray]:", "def init_constraint_list(self):\n constraints = []\n for row, equ_val, rhs_val in \\\n zip(self.matrix, self.equ_vec, self.rhs_vec):\n\n constraints.append({'type': self.get_eq_type(equ_val),\n 'fun': lambda x: rhs_val - np.dot(row, x)})\n\n bounds = Bounds(self.low_bounds, self.upper_bounds)\n\n return constraints, bounds", "def getConstraint(self):\n return self.gk, self.g_mink, self.g_maxk", "def constraints(self):\n constraints = np.concatenate( (np.ravel(self.noise_var_constraint), \n self.kern.constraints), axis=0)\n return constraints", "def constraints(self):\n return self._constraints", "def constraints(self):\n return self._constraints", "def optimization_bounds(self, topology):\n bounds_low = np.zeros(self.number_of_parameters())\n bounds_up = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in self.parameters.items():\n bounds_low[pkey] = parameter.bound_low(topology)\n bounds_up[pkey] = parameter.bound_up(topology)\n\n return bounds_low, bounds_up", "def get_model_parameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), rho=(0.0 ,inf))\n return params", "def get_constraints(self, scaled=True, use_indices=True):\n return self._get_variables_of_type('constraint', scaled, use_indices)", "def objective_constraints(self, variables, mask, reservations, mpc_ene=None):\n constraint_list = []\n ice_gen = variables['ice_gen']\n on_ice = variables['on_ice']\n\n constraint_list += [cvx.NonPos(cvx.multiply(self.p_min, on_ice) - ice_gen)]\n constraint_list += [cvx.NonPos(ice_gen - cvx.multiply(self.rated_power*self.n, on_ice))]\n\n return constraint_list", "def constraints(self) -> Tuple[NDArray, NDArray]:\n symm = not self._asym\n k = 3 + self._asym\n a = np.zeros((5, k))\n b = np.zeros(5)\n # omega\n a[0, 0] = 1.0\n # alpha >0 or alpha+gamma>0\n # alpha<1 or alpha+0.5*gamma<1\n if symm:\n a[1, 1] = 1.0\n a[2, 1] = -1.0\n else:\n a[1, 1:3] = 1.0\n a[2, 1:3] = [-1, -0.5]\n b[2] = -1.0\n # theta\n a[3, k - 1] = 1.0\n a[4, k - 1] = -1.0\n b[4] = -1.0\n\n return a, b", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def get_constraints(self):\n return self.constraints", "def remaining_constraints(self):\r\n \r\n def iec1(state,decision,nodes):\r\n return decision['E:L']+decision['E:R_1']<=nodes['E'].get_preds_value(state)\r\n def iec2(state,decision,nodes):\r\n return decision['R_1:L']<=nodes['R_1'].get_preds_value(state)\r\n def iec3(state,decision,nodes):\r\n return 
decision['G:R_1']>=-(nodes['R_1'].get_preds_value(state)) \r\n def iec4(state,decision,nodes):\r\n return decision['G:L']>=0.0\r\n def iec5(state,decision,nodes):\r\n return decision['E:L']>=0.0\r\n def iec6(state,decision,nodes):\r\n return decision['E:R_1']>=0.0\r\n def iec7(state,decision,nodes):\r\n return decision['R_1:L']>=0.0\r\n\r\n Inequality_Constraints=[iec1,iec2,iec3,iec4,iec5,iec6,iec7]\r\n \r\n return Inequality_Constraints", "def constraints(self):\n ans = self.execute(self.commands.get_constraints(self.db.name, self.name))\n return [Constraint(*tup) for tup in ans]", "def get_constraints(self):\n\n return vertcat(*self.g), self.g_min, self.g_max", "def _ingest_constraints(param_dict):\n bounds = param_dict['constraints']['bounds']\n fixed = param_dict['constraints']['fixed']\n tied = param_dict['constraints']['tied']\n\n # bounds are tuples stored as strings so the user\n # can read and edit the file using a text editor.\n # They need to be converted back to python tuples.\n for name in bounds:\n bound = literal_eval(bounds[name])\n bounds[name] = (bound[0], bound[1])\n\n # TODO: re-do this when implementing ties\n # YAML returns different data types depending\n # on the model type. They need to be properly\n # converted.\n for name in fixed:\n if isinstance(fixed[name], str):\n fixed[name] = literal_eval(fixed[name])\n tied[name] = literal_eval(tied[name])\n\n return bounds, fixed, tied", "def get_constraints(self, X_v, U_v, X_last_p, U_last_p):\n\n constraints = [\n # Boundary conditions:\n X_v[0:2, 0] == self.x_init[0:2],\n X_v[2:4, 0] == self.x_init[2:4],\n X_v[4, 0] == self.x_init[4],\n X_v[5, 0] == self.x_init[5],\n\n X_v[:, -1] == self.x_final,\n\n # State constraints:\n cvx.abs(X_v[4, :]) <= self.t_max,\n cvx.abs(X_v[5, :]) <= self.w_max,\n X_v[1, :] >= 0,\n\n # Control constraints:\n cvx.abs(U_v[0, :]) <= self.max_gimbal,\n U_v[1, :] >= self.T_min,\n U_v[1, :] <= self.T_max,\n ]\n return constraints", "def optimization_parameters(self, topology):\n parameters = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in self.parameters.items():\n parameters[pkey] = parameter.start_value(topology)\n\n return parameters", "def get_basicConstraints(self):\n\n return self.get_POW().getBasicConstraints()", "def constraints(self, x):\n pass", "def constraints(self) -> constraints.QuantumCircuitConstraints:\n return self._constraints", "def build_param_bindings(self, params: list_of(str)) -> list:\n constraints = []\n \n for var_name in params:\n \n def param_binding(vn):\n return lambda vd = { vn : vn } : \"%s == self.%s\" % (vd[vn], vn)\n \n constraints.append( ({var_name}, param_binding(var_name)) )\n \n return constraints", "def get_constraints(self, prev_layer):\n constraints = []\n if self.activation is not None:\n constraints += self.activation.get_constraints(self, prev_layer)\n else:\n # for linear activations\n current_constraints = []\n for channel_indx in range(self.n_in_channels):\n upper_bound, _ = prev_layer.get_bounds(channel_indx)\n critical_prob = prev_layer.get_critical_neurons(channel_indx)\n if critical_prob is None:\n keep_upper_bound = 0\n else:\n keep_upper_bound = cp.multiply(1 - critical_prob, upper_bound)\n\n current_constraints += [\n self.layer_input[channel_indx]\n == prev_layer.get_computation_layer(channel_indx) - keep_upper_bound\n ]\n constraints += self.create_constraint(\n f\"{self.name}_linear\", current_constraints\n )\n if prev_layer.compute_critical_neurons:\n constraints += self.create_constraint(\n 
f\"neuron_importance_bounds_{prev_layer.name}\",\n [prev_layer.neuron_importance >= 0, prev_layer.neuron_importance <= 1],\n )\n return constraints", "def lagrange_multiplier(variables, objective, *constraints) -> Dict[str, sp.Rational]:\n lmbs = sp.symbols('lmb:{}'.format(len(constraints)))\n\n L = objective - sum(l * g for l, g in zip(lmbs, constraints))\n grad_L = [sp.diff(L, wrt) for wrt in (*variables, *lmbs)] # Vector calc time\n candidates = sp.nsolve(grad_L, (*variables, *lmbs), [25 for i in (*variables, *lmbs)], dict=True) # Stationary points of L\n\n # Find the optimal stationary point\n optimum = max(candidates, key=lambda assignment: objective.subs(assignment))\n return {var: optimum[var] for var in optimum if var in variables}", "def lp(mode, objective, constraints):\n if mode.lower() == 'max':\n mode = LpMaximize\n elif mode.lower() == 'min':\n mode = LpMinimize\n prob = LpProblem(\"\", mode)\n prob += objective\n for c in constraints:\n prob += c\n prob.solve()\n return prob, prob.objective.value(), dict((v.name, v.value()) for v in prob.variables())", "def sum_parameter_constraint_to_dict(\n parameter_constraint: SumConstraint,\n) -> Dict[str, Any]:\n return {\n \"__type\": parameter_constraint.__class__.__name__,\n \"parameter_names\": parameter_constraint._parameter_names,\n \"is_upper_bound\": parameter_constraint._is_upper_bound,\n # SumParameterConstraint constructor takes in absolute value of\n # the bound and transforms it based on the is_upper_bound value\n \"bound\": abs(parameter_constraint._bound),\n }", "def get_constraint_list(self):\n constraints = []\n for i in xrange(self.num_repeats):\n # Using start_index, start each domain at the correct index when flattening out points in COBYLA.\n constraints.extend(self._domain.get_constraint_list(start_index=self.dim * i))\n return constraints" ]
[ "0.6881272", "0.66086096", "0.6579088", "0.6528492", "0.6473818", "0.64624715", "0.6349276", "0.6349276", "0.6345814", "0.6336405", "0.6333328", "0.62987316", "0.6298038", "0.6296775", "0.6277697", "0.6087487", "0.6079092", "0.60544395", "0.6052682", "0.6023962", "0.60235626", "0.60151345", "0.59993863", "0.59848464", "0.59798956", "0.5966571", "0.5958894", "0.59157676", "0.59121025", "0.5888489" ]
0.715775
0
convert to a geodataframe Uses the same parameters as array_to_dataframe
def raster_to_geodataframe(*a, **kw) -> gpd.GeoDataFrame: kw["geo"] = True return raster_to_dataframe(*a, **kw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeGeoDf(self, arr: dict):\n geometry_points = [Point(x, y) for x, y in zip(arr[\"X\"], arr[\"Y\"])]\n elevetions = arr[\"Z\"]\n df = gpd.GeoDataFrame(columns=[\"elevation\", \"geometry\"])\n df['elevation'] = elevetions\n df['geometry'] = geometry_points\n df = df.set_geometry(\"geometry\")\n df.set_crs(self.output_epsg, inplace=True)\n return df", "def convert_to_geopandas(df):\n df['geometry'] = [Point(xy) for xy in zip(df.latitude, df.longitude)]\n crs = {'init': 'epsg:4326'}\n df = gpd.GeoDataFrame(df, crs=crs, geometry=df['geometry'])\n\n return df", "def to_geopandas(self):\n import geopandas\n import shapely\n\n return geopandas.GeoSeries(self.apply(lambda x: shapely.wkb.loads(x) if x is not None else None), crs=self.crs)", "def list_to_gdf (lis):\r\n gdf = gpd.GeoDataFrame(lis)\r\n # rename the column \r\n gdf.rename(columns ={0:\"geometry\"},inplace=True)\r\n # define crs to dataframe\r\n gdf.crs = {'init' :'epsg:{}'.format(4326)} \r\n gdf = gdf.to_crs(epsg = 4326)\r\n \r\n return gdf", "def from_shapely(data):\n\n from spatialpandas import GeoDataFrame, GeoSeries\n from shapely.geometry.base import BaseGeometry\n\n if not data:\n pass\n elif all(isinstance(d, BaseGeometry) for d in data):\n data = GeoSeries(data).to_frame()\n elif all(isinstance(d, dict) and 'geometry' in d and isinstance(d['geometry'], BaseGeometry)\n for d in data):\n new_data = {col: [] for col in data[0]}\n for d in data:\n for col, val in d.items():\n new_data[col].append(val if isscalar(val) or isinstance(val, BaseGeometry) else np.asarray(val))\n new_data['geometry'] = GeoSeries(new_data['geometry'])\n data = GeoDataFrame(new_data)\n return data", "def gdf(self) -> gpd.GeoDataFrame:\n return self.just_geometry_gdf.join(self.df)", "def to_frame(self):\n return gpd.GeoDataFrame(\n data=self.tree_ids,\n geometry=self.to_geom(),\n crs=self.crs,\n columns=['id'],\n )", "def gdf(self) -> gpd.GeoDataFrame:\n path = str(get_path(\"geojson/FeatureCollection02.json\"))\n df = gpd.read_file(path)\n return df", "def df_from_postgis(engine, query, params, geocolumn, epsg):\n data = geopandas.GeoDataFrame.from_postgis(\n query,\n engine,\n geom_col=geocolumn,\n crs={'init': 'epsg:{}'.format(epsg)},\n params=params)\n return data", "def raster_to_dataframe(\n in_raster: np.array,\n transform,\n h3_resolution: int,\n nodata_value=None,\n axis_order: str = \"yx\",\n compact: bool = True,\n geo: bool = False,\n) -> typing.Union[gpd.GeoDataFrame, pd.DataFrame]:\n\n df = arrow_raster.raster_to_dataframe(\n in_raster, transform, h3_resolution, nodata_value=nodata_value, axis_order=axis_order, compact=compact\n ).to_pandas()\n\n if geo:\n return cells_dataframe_to_geodataframe(df)\n else:\n return df", "def to_frame(self):\n return gpd.GeoDataFrame(\n data=range(0, self.nleaves),\n geometry=self.to_geom(),\n crs=self.crs,\n columns=['id'],\n )", "def array_to_df (a_array,b_as_column='') :\n if b_as_column == '' :\n loc_result = __pd.DataFrame(data=a_array)\n else :\n loc_result = __pd.DataFrame(data=a_array,columns=b_as_column)\n return loc_result", "def from_geopandas(cls, data):\n\n import geopandas as gpd\n import shapely.wkb\n if not isinstance(data, gpd.GeoSeries):\n raise TypeError(f\"data must be {gpd.GeoSeries}, got {type(data)}\")\n\n if data.crs is not None:\n crs = data.crs.to_authority() or data.crs.source_crs.to_authority()\n crs = crs[0] + ':' + crs[1]\n else:\n crs = None\n\n def f(x):\n if x is None:\n return x\n return shapely.wkb.dumps(x)\n\n return cls(data.apply(f), crs=crs)", "def 
build_person_travel_geodataframe(person, from_epsg=None, to_epsg=None):\n df = pd.DataFrame()\n for leg in person.legs:\n if (leg.start_location.loc is None) or (leg.end_location.loc is None):\n raise AttributeError('To create a geopandas.DataFrame you need specific locations. Make sure Legs have'\n 'loc attribute defined with a shapely.Point or s2sphere.CellId.')\n _leg_dict = leg.__dict__.copy()\n _leg_dict['geometry'] = utils.get_linestring(leg.start_location.loc, leg.end_location.loc)\n coords = list(_leg_dict['geometry'].coords)\n _leg_dict['start_location'] = coords[0]\n _leg_dict['end_location'] = coords[-1]\n df = df.append(pd.Series(_leg_dict), ignore_index=True)\n\n df['pid'] = person.pid\n df = GeoDataFrame(df, geometry='geometry')\n if from_epsg:\n df.crs = from_epsg\n if to_epsg:\n df = df.to_crs(to_epsg)\n\n return df", "def _prepare_geocode_result(results):\n # Prepare the data for the DataFrame as a dict of lists\n d = defaultdict(list)\n index = []\n\n for i, s in iteritems(results):\n address, loc = s\n\n # loc is lat, lon and we want lon, lat\n if loc is None:\n p = Point()\n else:\n p = Point(loc[1], loc[0])\n\n if address is None:\n address = np.nan\n\n d['geometry'].append(p)\n d['address'].append(address)\n index.append(i)\n\n df = gpd.GeoDataFrame(d, index=index)\n df.crs = from_epsg(4326)\n\n return df", "def to_geometry(self, to_crs=None):\n from geopandas import GeoDataFrame\n from shapely.geometry import Polygon\n out = GeoDataFrame()\n geoms = []\n ii = []\n jj = []\n xx = self.corner_grid.x0 + np.arange(self.nx+1) * self.dx\n yy = self.corner_grid.y0 + np.arange(self.ny+1) * self.dy\n for j, (y0, y1) in enumerate(zip(yy[:-1], yy[1:])):\n for i, (x0, x1) in enumerate(zip(xx[:-1], xx[1:])):\n coords = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]\n geoms.append(Polygon(coords))\n jj.append(j)\n ii.append(i)\n out['j'] = jj\n out['i'] = ii\n out['geometry'] = geoms\n out.crs = self.proj.srs\n\n if check_crs(to_crs):\n transform_geopandas(out, to_crs=to_crs, inplace=True)\n return out", "def convert_GeoPandas_to_Bokeh_format(gdf):\r\n gdf_new = gdf.drop('geometry', axis=1).copy()\r\n gdf_new['x'] = gdf.apply(getGeometryCoords, \r\n geom='geometry', \r\n coord_type='x', \r\n shape_type='polygon', \r\n axis=1)\r\n \r\n gdf_new['y'] = gdf.apply(getGeometryCoords, \r\n geom='geometry', \r\n coord_type='y', \r\n shape_type='polygon', \r\n axis=1)\r\n \r\n return ColumnDataSource(gdf_new)", "def from_geopandas(cls, ga):\n return super(PointArray, cls).from_geopandas(ga)", "def to_spatialpandas(data, xdim, ydim, columns=[], geom='point'):\n from spatialpandas import GeoSeries, GeoDataFrame\n from spatialpandas.geometry import (\n Point, Line, Polygon, Ring, LineArray, PolygonArray, PointArray,\n MultiLineArray, MultiPolygonArray, MultiPointArray, RingArray\n )\n from ...element import Polygons\n poly = any(Polygons._hole_key in d for d in data) or geom == 'Polygon'\n if poly:\n geom_type = Polygon\n single_array, multi_array = PolygonArray, MultiPolygonArray\n elif geom == 'Line':\n geom_type = Line\n single_array, multi_array = LineArray, MultiLineArray\n elif geom == 'Ring':\n geom_type = Ring\n single_array, multi_array = RingArray, MultiLineArray\n else:\n geom_type = Point\n single_array, multi_array = PointArray, MultiPointArray\n\n array_type = None\n hole_arrays, geom_arrays = [], []\n for geom in data:\n geom = dict(geom)\n if xdim not in geom or ydim not in geom:\n raise ValueError('Could not find geometry dimensions')\n xs, ys = geom.pop(xdim), 
geom.pop(ydim)\n xscalar, yscalar = isscalar(xs), isscalar(ys)\n if xscalar and yscalar:\n xs, ys = np.array([xs]), np.array([ys])\n elif xscalar:\n xs = np.full_like(ys, xs)\n elif yscalar:\n ys = np.full_like(xs, ys)\n geom_array = np.column_stack([xs, ys])\n\n if geom_type in (Polygon, Ring):\n geom_array = ensure_ring(geom_array)\n\n splits = np.where(np.isnan(geom_array[:, :2].astype('float')).sum(axis=1))[0]\n split_geoms = np.split(geom_array, splits+1) if len(splits) else [geom_array]\n split_holes = geom.pop(Polygons._hole_key, None)\n if split_holes is not None:\n if len(split_holes) != len(split_geoms):\n raise DataError('Polygons with holes containing multi-geometries '\n 'must declare a list of holes for each geometry.',\n SpatialPandasInterface)\n else:\n split_holes = [[ensure_ring(np.asarray(h)) for h in hs] for hs in split_holes]\n\n geom_arrays.append(split_geoms)\n hole_arrays.append(split_holes)\n if geom_type is Point:\n if len(splits) > 1 or any(len(g) > 1 for g in split_geoms):\n array_type = multi_array\n elif array_type is None:\n array_type = single_array\n elif len(splits):\n array_type = multi_array\n elif array_type is None:\n array_type = single_array\n\n converted = defaultdict(list)\n for geom, arrays, holes in zip(data, geom_arrays, hole_arrays):\n parts = []\n for i, g in enumerate(arrays):\n if i != (len(arrays)-1):\n g = g[:-1]\n if len(g) < (3 if poly else 2) and geom_type is not Point:\n continue\n if poly:\n parts.append([])\n subparts = parts[-1]\n else:\n subparts = parts\n subparts.append(g[:, :2])\n if poly and holes is not None:\n subparts += [np.array(h) for h in holes[i]]\n\n for c, v in geom.items():\n converted[c].append(v)\n\n if array_type is PointArray:\n parts = parts[0].flatten()\n elif array_type is MultiPointArray:\n parts = np.concatenate([sp.flatten() for sp in parts])\n elif array_type is multi_array:\n parts = [[ssp.flatten() for ssp in sp] if poly else sp.flatten() for sp in parts]\n else:\n parts = [np.asarray(sp).flatten() for sp in parts[0]] if poly else parts[0].flatten()\n converted['geometry'].append(parts)\n\n if converted:\n geometries = converted['geometry']\n if array_type is PointArray:\n geometries = np.concatenate(geometries)\n geom_array = array_type(geometries)\n if poly:\n geom_array = geom_array.oriented()\n converted['geometry'] = GeoSeries(geom_array)\n else:\n converted['geometry'] = GeoSeries(single_array([]))\n return GeoDataFrame(converted, columns=['geometry']+columns)", "def vectorize(self, connectivity=8):\n data = self._obj.values\n data_isnan = True if self.nodata is None else np.isnan(self.nodata)\n mask = ~np.isnan(data) if data_isnan else data != self.nodata\n feats_gen = features.shapes(\n data,\n mask=mask,\n transform=self.transform,\n connectivity=connectivity,\n )\n feats = [\n {\"geometry\": geom, \"properties\": {\"value\": idx}}\n for geom, idx in list(feats_gen)\n ]\n if len(feats) == 0: # return empty GeoDataFrame\n return gpd.GeoDataFrame()\n gdf = gpd.GeoDataFrame.from_features(feats, crs=self.crs)\n gdf.index = gdf.index.astype(self._obj.dtype)\n return gdf", "def vectorize(df):\n\tt = calc_affine(df)\n\ta = df.values\n\t# zeros an nan are left open space, means mask = True!\n\tmaske = (df != 0).fillna(True)\n\tgdf = gpd.GeoDataFrame()\n\tgeoms = []\n\tvalue = []\n\tfor s,v in rasterio.features.shapes(a,transform=t,mask=maske.values):\n\t\tgeoms.append(shape(s))\n\t\tvalue.append(v)\n\tgdf['geometry'] = geoms\n\tgdf = gdf.set_geometry('geometry')\n\tgdf['val']=value\n\treturn gdf", "def 
process_dataframe(df):\n\n if isinstance(df, pd.DataFrame):\n df2 = df.copy()\n required_columns = {'name', 'wkt', 'lower_limit', 'upper_limit'}\n if not required_columns <= set(df2.columns):\n raise ValueError(\"DataFrame must contain columns 'name', 'wkt', 'lower_limit', 'upper_limit'.\")\n if not 'geometry' in list(df2.columns):\n logger.info(\"Converting WKT representation of geometry to geometry objects.\")\n df2['geometry'] = df2.wkt.apply(shapely.wkt.loads)\n gdf = geopandas.GeoDataFrame(df2, geometry=df2.geometry)\n elif isinstance(df, geopandas.GeoDataFrame):\n df2 = df.copy()\n required_columns = {'name', 'lower_limit', 'upper_limit'}\n if not required_columns <= set(df2.columns):\n raise ValueError(\"GeoDataFrame must contain columns 'name', 'lower_limit', 'upper_limit'.\")\n if not 'wkt' in list(df2.columns):\n logger.info(\"Converting geometry objects to their WKT representations.\")\n df2['wkt'] = df2.geometry.apply(lambda g: g.wkt)\n gdf = df2\n else:\n raise ValueError(\"df must be a DataFrame or GeoDataFrame!\")\n\n return gdf", "def create_airports(data):\n \n airport_cities = pd.DataFrame(data)\n geometry = [Point(xy) for xy in zip(airport_cities.lon, airport_cities.lat)]\n airport_cities = airport_cities.drop(['lon', 'lat'], axis=1)\n crs = {'init': 'epsg:4326'}\n geo_airport_cities = gpd.GeoDataFrame(airport_cities, crs=crs, geometry=geometry)\n return geo_airport_cities", "def reach_points_as_dataframe(self) -> pd.DataFrame:\n df_pt = pd.DataFrame([pt.as_dictionary for pt in self._reach_points])\n df_pt.spatial.set_geometry('SHAPE')\n return df_pt", "def to_df(self, columns=None) -> pd.DataFrame:\n return create_dataframe(\n data=super().to_list(),\n columns=columns,\n default_columns=DEFAULT_COLUMNS,\n logger_description=\"Geographies\",\n )", "def __geo_interface__(self):\r\n if HASARCPY:\r\n template = {\r\n \"type\": \"FeatureCollection\",\r\n \"features\": []\r\n }\r\n geom_type = self.geometry_type\r\n if geom_type.lower() == \"point\":\r\n geom_type = \"Point\"\r\n elif geom_type.lower() == \"polyline\":\r\n geom_type = \"LineString\"\r\n elif geom_type.lower() == \"polygon\":\r\n geom_type = \"Polygon\"\r\n df_copy = self.copy(deep=True)\r\n df_copy['geom_json'] = self.geometry.JSON\r\n df_copy['SHAPE'] = df_copy['geom_json']\r\n del df_copy['geom_json']\r\n for index, row in df_copy.iterrows():\r\n geom = row['SHAPE']\r\n del row['SHAPE']\r\n template['features'].append(\r\n {\"type\" : geom_type,\r\n \"geometry\" : pd.io.json.loads(geom),\r\n \"attributes\":row}\r\n )\r\n return pd.io.json.dumps(template)", "def create_demo_location_history() -> geopandas.GeoDataFrame:\n np.random.seed(123)\n\n time = pd.date_range(start=datetime.fromtimestamp(1624241116), end=datetime.now(), freq=\"1min\").values\n\n center_point = (-36.875990410695394, 174.76398830024274)\n lat = np.random.normal(loc=center_point[0], scale=0.01, size=len(time))\n lon = np.random.normal(loc=center_point[1], scale=0.01, size=len(time))\n\n geometry = [Point(lon, lat) for lon, lat in zip(lon, lat)]\n return geopandas.GeoDataFrame(pd.DataFrame(dict(time=time, lat=lat, lon=lon)), geometry=geometry)", "def create_geodata(x):\n list_len = len(x)\n pilot_log = pd.concat(x[i][['time','Cn0DbHz','svid','geometry']] for i in range(list_len))\n \n return pilot_log", "def df_with_hexid_to_gdf(df, hexcolname='_id'):\n df_geometry=hexlist_to_geodataframe(df[hexcolname].to_list())\n #Creando el geodataframe\n gdf=gpd.GeoDataFrame(df, geometry=df_geometry['geometry'])\n gdf.crs = 'EPSG:4326'\n return gdf", 
"def gpx_to_dataframe( gpxs ):\n\n track_columns = [\"longitude\",\n \"latitude\",\n \"altitude\",\n \"course\",\n \"computed_speed\",\n \"reported_speed\",\n \"satellites\",\n \"source\",\n \"geoid_height\",\n \"symbol\",\n \"gpx_fix_type\",\n \"hdop\",\n \"vdop\",\n \"pdop\"]\n\n # our GPX data source doesn't populate much for waypoints, so we don't\n # bother creating useless columns.\n waypoint_columns = [\"name\",\n \"longitude\",\n \"latitude\",\n \"altitude\",\n \"source\"]\n\n tracks_df = pd.DataFrame( [], columns=track_columns )\n waypoints_df = pd.DataFrame( [], columns=waypoint_columns )\n\n # help the user in a common use case by creating the list for them.\n if type( gpxs ) != list:\n gpxs = [gpxs]\n\n # walk through each GPX object creating new DataFrames and appending them\n # to the existing DataFrames.\n for gpx in gpxs:\n # XXX: assumes we only have a single track with a single segment in it.\n\n track_data = []\n times = []\n for (point_index, point) in enumerate( gpx.tracks[0].segments[0].points ):\n track_data.append( [point.longitude,\n point.latitude,\n point.elevation,\n point.course,\n gpx.tracks[0].segments[0].get_speed( point_index ),\n point.speed,\n point.satellites,\n point.source,\n point.geoid_height,\n point.symbol,\n point.type_of_gpx_fix,\n point.horizontal_dilution,\n point.vertical_dilution,\n point.position_dilution] )\n times.append( pd.Timestamp( point.time ) )\n\n # convert this track into a data frame and store it.\n tracks_df = pd.concat( [tracks_df,\n pd.DataFrame( track_data,\n index=times,\n columns=track_columns )] )\n\n waypoint_data = []\n times = []\n for (point_index, point) in enumerate( gpx.waypoints ):\n waypoint_data.append( [point.name,\n point.longitude,\n point.latitude,\n point.elevation,\n point.source] )\n times.append( pd.Timestamp( point.time ) )\n\n # convert this track into a data frame and store it.\n waypoints_df = pd.concat( [waypoints_df,\n pd.DataFrame( waypoint_data,\n index=times,\n columns=waypoint_columns )] )\n\n # work around gpxpy's speed computation for first points in a track.\n null_indices = tracks_df[\"computed_speed\"].isnull()\n tracks_df.loc[null_indices, \"computed_speed\"] = 0.0\n\n # explicitly label our times as UTC as that's what is stored in GPX.\n tracks_df.tz_localize( \"UTC\", copy=False )\n waypoints_df.tz_localize( \"UTC\", copy=False )\n\n return (tracks_df, waypoints_df)" ]
[ "0.7414189", "0.7339243", "0.7252249", "0.6799631", "0.6643421", "0.6636774", "0.6576207", "0.64738333", "0.64714026", "0.6319929", "0.63041216", "0.62951463", "0.6272487", "0.62259114", "0.6190221", "0.61478746", "0.61438227", "0.61257994", "0.60967374", "0.6092033", "0.609072", "0.6038176", "0.6035514", "0.60322404", "0.6015464", "0.59607446", "0.5915695", "0.5906894", "0.5885226", "0.588295" ]
0.7731929
0
GIVEN correct item id WHEN /gs/api/v1/54590 is called THEN it returns status 200
def test_correctitemid_status200(self): config = self.__load_config() url = f"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590" r = requests.get(url) self.assertEqual(r.status_code, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_correctitemid_correctresponsebody(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590\"\r\n r = requests.get(url)\r\n\r\n expected = {\r\n 'itemId': 54590,\r\n 'name': 'Sharpened Twilight Scale',\r\n 'gearScore': 310\r\n }\r\n\r\n self.assertEqual(r.json(), expected)", "def taco_test_put_error_requires_id(self):\n body = '{ \"id\": 400, \"name\": \"item_new\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '501'))", "def test_itemidnotvalid_return4042(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/asdfg\"\r\n r = requests.get(url)\r\n\r\n self.assertEqual(r.status_code, 404)", "def test_update_item_incorrect_id(test_client, item):\n\n response = test_client.put(BAD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_request_for_a_bucket_has_integer_id(self):\n with self.client:\n response = self.client.get(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_itemidnotindb_returnerr002(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590123\"\r\n r = requests.get(url)\r\n\r\n expected = {\r\n \"ErrorCode\": \"E001\",\r\n \"ErrorMessage\": \"Item ID not in database.\"\r\n }\r\n\r\n self.assertEqual(r.json(), expected)", "def find_by_id(self, _id: int) -> tuple:\n item = self.model.find_by_id(_id)\n if item:\n return {'item': check_json(item)}, 200\n else:\n return {'error': {'message': 'Item not found'}}, 400", "def test_get_single_good_item(test_client):\n\n response = test_client.get(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 200\n assert len(data['items']) == 1\n assert data['items'][0]['id'] == 3", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. 
You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_delete_item_incorrect_id(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def delete_item(id):\n return '', 201", "def test_delete_item_wrong_id(self):\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def item(item_id):\n kwargs = {k: parse(v) for k, v in request.args.to_dict().items()}\n\n try:\n trading = Trading(**kwargs)\n except ConnectionError as err:\n result = str(err)\n status = 500\n else:\n response = trading.get_item(item_id)\n result = response['Item']\n status = 200\n\n return jsonify(status, objects=result)", "def test_get_bucketlist_item_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n get_item = self.client.get('/bucketlistitems/1/items/1', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)", "def test_response_200_on_get(self):\n pass", "def item_handler(id):\n if request.method == 'PUT':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to update a item\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n item = updateItem(id, name, picture, description)\n return jsonify(item=item.serialize)\n elif request.method == 'DELETE':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to remove a item\n item = deleteItem(id)\n return jsonify(item=item.serialize)", "def test_get_single_bad_item(test_client):\n\n response = test_client.get(BAD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def item_retrieve(id):\n item = getItem(id)\n if item is None:\n return jsonify({}), 204\n else:\n return jsonify(item=item.serialize)", "def get(self, _id):\n endpoint = URL_MAPPING + \"/{}\".format(_id)\n response = self.client.get_json(endpoint)\n 
response.success = response.status_code == 200\n return response", "def retrieve(short_id):\n try:\n url = Url.get(short_id)\n\n url.update(actions=[\n url.hits.set(url.hits + 1),\n url.lastHit.set(datetime.utcnow())\n ])\n\n return jsonify({\n \"statusCode\": 301,\n \"location\": url.longURL\n })\n\n except:\n return jsonify({\"Error\", \"No Such ID\"})", "def test_detail_by_id(self):\n responses.add(\n responses.Response(\n method='GET',\n url=('https://connection.keboola.com/v2/storage/buckets/'\n 'in.c-ga'),\n json=detail_response\n )\n )\n bucket_id = 'in.c-ga'\n bucket_detail = self.buckets.detail(bucket_id)\n assert bucket_detail['id'] == 'in.c-ga'", "def deleteItem(request, itemid):\n try:\n item = ItemSerializer(Item.objects.get(id=itemid))\n Item.objects.get(id=itemid).delete()\n return Response(item.data)\n\n except Item.DoesNotExist:\n fail = {\n \"item\":\"item does not exist\"\n }\n return JsonResponse(fail)", "def test_get_car_invalid_id():\n response = client.get(\"/11111\")\n assert response.status_code == STATUS_NOT_FOUND", "def harvest(function, itemID):\n\n try:\n payload = build_api_call(function, itemID)\n response = json_api_call(payload)\n except Exception as e:\n return None\n\n return response", "def test_get_item_by_id(self):\n response = self.client.get('/api/v1/category/1',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 200)", "def test_item():\n # 404 reponses\n response = app.test_client().get('/v1/resources/menu?')\n assert response.status_code == 404\n\n response = app.test_client().get('/v1/resources/menu?itype=pizza')\n assert response.status_code == 404\n\n response = app.test_client().get('/v1/resources/menu?itype=pizza&item=pepperoni')\n assert response.status_code == 404\n\n response = app.test_client().get('/v1/resources/menu?item=pepperoni')\n assert response.status_code == 404\n\n # 204 responses\n response = app.test_client().get('/v1/resources/menu?item=p&itype=pizza&size=small')\n assert response.status_code == 204\n\n response = app.test_client().get('/v1/resources/menu?item=pepperoni&itype=p&size=small')\n assert response.status_code == 204\n\n response = app.test_client().get('/v1/resources/menu?item=pepperoni&itype=pizza&size=s')\n assert response.status_code == 204\n\n response = app.test_client().get('/v1/resources/menu?itype=topping&item=b')\n assert response.status_code == 204\n\n # 200 responses\n response = app.test_client().get('/v1/resources/menu?itype=topping&item=beef')\n assert response.status_code == 200\n assert response.content_type == 'application/json'\n assert isinstance((response.json)['price'], int)\n\n response = app.test_client().get('/v1/resources/menu?itype=pizza&item=pepperoni&size=small')\n assert response.status_code == 200\n assert response.content_type == 'application/json'\n assert isinstance((response.json)['price'], int)", "def test_api_random_id(api_client, single_brew_number):\n response = api_client.get(path='/breweries' + '/' + str(single_brew_number))\n assert response.json()['id'] == single_brew_number", "def _get(self, table, _id):\n data = {\"Key\": _id}\n return self._response_handler(table, \"get_item\", data)", "def item(self, item_id):\n response = self._request(V2_ENDPOINTS['ITEMS'] + item_id)\n return response", "def test_items_400(client):\n\n rv = client.get(\"/items\")\n assert 401 == rv.status_code" ]
[ "0.6747325", "0.64232856", "0.62856835", "0.62728584", "0.626374", "0.6185258", "0.61106116", "0.6109352", "0.6107731", "0.6023801", "0.6018421", "0.59478205", "0.59417444", "0.5801451", "0.57989514", "0.57970667", "0.5776777", "0.57645893", "0.5755658", "0.57544607", "0.5748873", "0.5747595", "0.569508", "0.5682856", "0.5667939", "0.56448376", "0.5586524", "0.55862284", "0.5576344", "0.55737776" ]
0.7938036
0
GIVEN correct item id WHEN /gs/api/v1/54590 is called THEN it returns correct message body
def test_correctitemid_correctresponsebody(self): config = self.__load_config() url = f"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590" r = requests.get(url) expected = { 'itemId': 54590, 'name': 'Sharpened Twilight Scale', 'gearScore': 310 } self.assertEqual(r.json(), expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_correctitemid_status200(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590\"\r\n r = requests.get(url)\r\n self.assertEqual(r.status_code, 200)", "def get_item_detail(item_id):\n pass", "def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError", "def find_by_id(self, _id: int) -> tuple:\n item = self.model.find_by_id(_id)\n if item:\n return {'item': check_json(item)}, 200\n else:\n return {'error': {'message': 'Item not found'}}, 400", "def test_delete_item_wrong_id(self):\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. 
You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def delete_item(id):\n return '', 201", "def test_itemidnotindb_returnerr002(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590123\"\r\n r = requests.get(url)\r\n\r\n expected = {\r\n \"ErrorCode\": \"E001\",\r\n \"ErrorMessage\": \"Item ID not in database.\"\r\n }\r\n\r\n self.assertEqual(r.json(), expected)", "def item(self, item_id):\n response = self._request(V2_ENDPOINTS['ITEMS'] + item_id)\n return response", "def item(item_id):\n kwargs = {k: parse(v) for k, v in request.args.to_dict().items()}\n\n try:\n trading = Trading(**kwargs)\n except ConnectionError as err:\n result = str(err)\n status = 500\n else:\n response = trading.get_item(item_id)\n result = response['Item']\n status = 200\n\n return jsonify(status, objects=result)", "def message():\n # Retrieve JSON parameters data.\n data = request.get_json() or {}\n data.update(dict(request.values))\n msg = data.get(\"msg\")\n if not msg:\n raise abort(400, \"missing 'msg' data\")\n\n # Deffer the message as a task.\n result = tasks.process_message.delay(msg, delta=10)\n task_id = result.task_id\n if not task_id or result.failed():\n raise abort(400, \"task failed\")\n # Then check and return ID.\n return {\n \"task_id\": result.id\n }", "def item_retrieve(id):\n item = getItem(id)\n if item is None:\n return jsonify({}), 204\n else:\n return jsonify(item=item.serialize)", "def GetMessageWithId(service, user_id, msg_id, format):\r\n try:\r\n message = service.users().messages().get(userId=user_id,\r\n id=msg_id,\r\n format=format).execute()\r\n msg_str = str(base64.urlsafe_b64decode(message[\"raw\"].encode(\"utf-8\")))\r\n return msg_str\r\n except errors.HttpError as error:\r\n print(\"An error occurred: %s\" % error)", "def _get(self, table, _id):\n data = {\"Key\": _id}\n return self._response_handler(table, \"get_item\", data)", "def test_update_item_incorrect_id(test_client, item):\n\n response = test_client.put(BAD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def get_item(id):\n return jsonify(id=id, name='name', number=123)", "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def item_handler(id):\n if request.method == 'PUT':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to update a item\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n item = updateItem(id, name, picture, description)\n return jsonify(item=item.serialize)\n elif request.method == 'DELETE':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to remove a item\n item = deleteItem(id)\n return jsonify(item=item.serialize)", "def get(self, item_name, item_id):\n item = {}\n try:\n item = self.glpi.get(item_name, item_id)\n except Exception as e:\n item = \"{ \\\"error_message\\\": \\\"%s\\\" }\" % e\n\n return item", "def get(self, item_id: int):\n\n try:\n\n controller = self.controller()\n schema = 
self.schema()\n raw_data = controller.read(id=item_id)\n data = {'item': schema.dump(raw_data)}\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())", "def test_request_for_a_bucket_has_integer_id(self):\n with self.client:\n response = self.client.get(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_get_bucketlist_item_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n get_item = self.client.get('/bucketlistitems/1/items/1', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)", "def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj", "def process_message(self, message):\n\n if 'id' in message:\n logger.debug(\"Processing message {0}: {1!r}\",\n message['id'], message['method'])\n else:\n logger.debug(\"Processing method {0!r}\", message['method'])\n\n response = self.get_response(message.get('id', None),\n self.registry,\n message['method'],\n *message['params'])\n return response", "def test_drugs_id_get(self):\n pass", "def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)", "def get_item_id_sold_last():\n\n # your code", "def item_json(item_id):\n try:\n item = session.query(Item).filter_by(id=item_id).one()\n return jsonify(item=item.serialize)\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def get_item(item_id):\n return Item.query.filter_by(id=item_id).first()", "def get_item(self, item_id):\n if self._database:\n try:\n return self._database.retrieve(item_id)\n except PyragargaError:\n pass\n # TODO: Retry if it times out \n details_page = self._build_tree(\n self._session.get(KG_URL + DETAILS_SCRIPT,\n params={'id': item_id, 'filelist':1}\n ).content)\n item = self._parse_details_page(details_page, item_id)\n if self._database:\n self._database.store(item)\n self.logger.info('Received details for item %d' % item.kg_id)\n return item" ]
[ "0.6074377", "0.579045", "0.57845813", "0.56190115", "0.5607293", "0.56032187", "0.55939", "0.5583104", "0.5578272", "0.55464506", "0.55355704", "0.55175406", "0.5496165", "0.54874635", "0.54852206", "0.5482914", "0.54752815", "0.5474251", "0.5472206", "0.5456713", "0.54462916", "0.53827053", "0.5371634", "0.53472906", "0.5337633", "0.53214186", "0.53211623", "0.52930295", "0.52899325", "0.5281488" ]
0.67007536
0
GIVEN item id not in database WHEN /gs/api/v1/54590123 is called THEN it returns message body with error 002
def test_itemidnotindb_returnerr002(self): config = self.__load_config() url = f"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590123" r = requests.get(url) expected = { "ErrorCode": "E001", "ErrorMessage": "Item ID not in database." } self.assertEqual(r.json(), expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_correctitemid_status200(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590\"\r\n r = requests.get(url)\r\n self.assertEqual(r.status_code, 200)", "def test_update_item_incorrect_id(test_client, item):\n\n response = test_client.put(BAD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_itemidnotvalid_return4042(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/asdfg\"\r\n r = requests.get(url)\r\n\r\n self.assertEqual(r.status_code, 404)", "def test_correctitemid_correctresponsebody(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590\"\r\n r = requests.get(url)\r\n\r\n expected = {\r\n 'itemId': 54590,\r\n 'name': 'Sharpened Twilight Scale',\r\n 'gearScore': 310\r\n }\r\n\r\n self.assertEqual(r.json(), expected)", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def _no_items_found(service, account_id):\n logger.info(f'No {service} for account: {account_id}')\n return {\n \"statusCode\": 422,\n \"body\": f'No {service} found for account: {account_id}'\n }", "def test_delete_item_wrong_id(self):\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. 
You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_get_single_bad_item(test_client):\n\n response = test_client.get(BAD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def taco_test_put_error_requires_id(self):\n body = '{ \"id\": 400, \"name\": \"item_new\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '501'))", "def test_request_for_a_bucket_has_integer_id(self):\n with self.client:\n response = self.client.get(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_AlgorithmsIdHandler_GET_MalformedRequest(self):\n searchedId='xyz' + ' ' + '1'\n response = self.testapp.get('/algorithms/' + searchedId, expect_errors=True)\n self.assertEqual(400, response.status_int, msg='Wrong answer code')\n self.assertEqual('application/json', response.content_type)\n self.assertIn('Malformed Data', response.normal_body.decode(encoding='UTF-8'))", "def test_items_400(client):\n\n rv = client.get(\"/items\")\n assert 401 == rv.status_code", "def test_delete_item_incorrect_id(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def get_items_not_in_stock_by_box(uuid: str):\n try: \n return get_items_not_in_stock_by_box_dao(uuid), 200\n except:\n return \"An error ocurred\", 404", "async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)", "def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)", "def test_get_sdb_id_missing_id(self, mget):\n data = [\n {\n \"id\": \"5f0-99-414-bc-e5909c\",\n \"name\": \"Disco Events\",\n \"path\": \"app/disco-events/\",\n \"category_id\": \"b07-42d0-e6-9-0a47c03\"\n }\n ]\n mget.return_value = self._mock_response(content=json.dumps(data))\n with self.assertRaises(CerberusClientException):\n self.client.get_sdb_id('not_found')", "def send_error_missing_id(message, obj_type):\n return make_response(jsonify({\"validation_error\": {\n \"error\": 'Missing id',\n \"object_type\": obj_type,\n \"description\": message\n }}), 400)", "def test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"[email protected]\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(\"[email protected]\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = 
BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def test_get_sdb_id_invalid_response(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_sdb_id('some_id')", "def handle_api_error(e):\n return f\"Failed to call Giphy API: {e}\", 500", "def bad_request():\n return HttpError(400)", "def test_get_item_not_found(self):\n resp = self.app.get('/items/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def resp400(msg):\n app.logger.error(msg)\n return Resp({'message':msg, 'success':False}, status=400)", "def test_get_car_invalid_id():\n response = client.get(\"/11111\")\n assert response.status_code == STATUS_NOT_FOUND", "def test_api_get_bucketlist_by_id_not_exist(self):\n res = self.client().get(f\"/bucketlist/99\")\n self.assertEqual(res.status_code, 404)", "def test_get_posts_missing_ids(client):\n response = client.simulate_get('/page/get_records')\n assert response.status_code == 400", "def bad_request(message):\n return error_response(400, message)", "def test_get_non_existent_book_by_id(self):\n response = self.client().get('/api/v1/products/0')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"That book does not exist\")\n self.assertEqual(response.status_code, 404)", "def test_retrieve_with_bad_id(self):\n resp = self.api_client.get('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'Not found')" ]
[ "0.67247957", "0.644694", "0.6350344", "0.6243379", "0.62398946", "0.6226191", "0.60893637", "0.6078208", "0.60741085", "0.593693", "0.58945924", "0.5894188", "0.58937216", "0.5800409", "0.57709134", "0.5752169", "0.5729775", "0.56522334", "0.56265795", "0.5623816", "0.56119204", "0.5585401", "0.55677956", "0.5553875", "0.5549608", "0.55438024", "0.5538165", "0.5526159", "0.5520593", "0.5505948" ]
0.7131409
0
GIVEN item id not valid WHEN /gs/api/v1/asdfg is called THEN it returns message 404
def test_itemidnotvalid_return4042(self): config = self.__load_config() url = f"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/asdfg" r = requests.get(url) self.assertEqual(r.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_item_not_found(self):\n resp = self.app.get('/items/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_get_single_bad_item(test_client):\n\n response = test_client.get(BAD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)", "def test_get_car_invalid_id():\n response = client.get(\"/11111\")\n assert response.status_code == STATUS_NOT_FOUND", "def test_get_single_movie_incorrect_id(client):\n resp = client.get(f\"/api/movies/{30}/\")\n assert resp.status_code == 404", "def test_correctitemid_status200(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590\"\r\n r = requests.get(url)\r\n self.assertEqual(r.status_code, 200)", "def taco_test_put_error_requires_id(self):\n body = '{ \"id\": 400, \"name\": \"item_new\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '501'))", "def test_update_item_incorrect_id(test_client, item):\n\n response = test_client.put(BAD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_itemidnotindb_returnerr002(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590123\"\r\n r = requests.get(url)\r\n\r\n expected = {\r\n \"ErrorCode\": \"E001\",\r\n \"ErrorMessage\": \"Item ID not in database.\"\r\n }\r\n\r\n self.assertEqual(r.json(), expected)", "def test_retrieve_with_bad_id(self):\n resp = self.api_client.get('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'Not found')", "def test_delete_item_incorrect_id(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_get_non_existent_book_by_id(self):\n response = self.client().get('/api/v1/products/0')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"That book does not exist\")\n self.assertEqual(response.status_code, 404)", "def test_get_restaurant_by_id_not_number(self):\n resp = self.test_client.get(self.API_BASE + '/hello', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 400)", "def test_AlgorithmsIdHandler_GET_MalformedRequest(self):\n searchedId='xyz' + ' ' + '1'\n response = self.testapp.get('/algorithms/' + searchedId, expect_errors=True)\n self.assertEqual(400, response.status_int, msg='Wrong answer code')\n self.assertEqual('application/json', response.content_type)\n self.assertIn('Malformed Data', response.normal_body.decode(encoding='UTF-8'))", "def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)", "def badRequest(message):\r\n raise Http404(message)", "def test_get_interest_by_id_not_found(self):\n id = '1234'\n url = 
reverse('xds_api:interest-list', args=(id,))\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_api_get_bucketlist_by_id_not_exist(self):\n res = self.client().get(f\"/bucketlist/99\")\n self.assertEqual(res.status_code, 404)", "def _taco_test_post_param_new_404(self):\n body = '{ \"id\": 500, \"name\": \"item5\", \"content\": \"qwerwqer5\" }'\n env = self.get_env('POST', '/item/5', body=body)\n result = webapi_start(env, lambda status, response_headers: self.assertEqual(status, '404'))[0]", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def _no_items_found(service, account_id):\n logger.info(f'No {service} for account: {account_id}')\n return {\n \"statusCode\": 422,\n \"body\": f'No {service} found for account: {account_id}'\n }", "def test_get_event_type_by_id_not_found(self):\n\t\tevent_type = EventType.objects.get(name=\"asd\")\n\t\trequest = self.client.get('/api/event_type/esper/' + str(event_type.id + 1), follow=True)\n\t\tself.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)", "def test_items_400(client):\n\n rv = client.get(\"/items\")\n assert 401 == rv.status_code", "def test_api_404(self):\n r = requests.get('{server}/api/0.1/sam'.format(\n server=self.get_server_url()),\n headers={'accept': 'application/json'})\n self.assertEquals(404, r.status_code)\n self.assertIn('error', r.json())", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.htsv.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_find_pairs_error_404_wrong_api(flask_app, db):\n tmp_api = 'api/v1/finddd-pairssss'\n url = '{api}?Sender_No=923367790512'.format(api=tmp_api)\n rslt = flask_app.get(url)\n print(rslt.data)\n assert rslt.status_code == 404", "def not_found():\n return HttpError(404)", "def test_ProductsDataViewSet_with_get_request_Invalid_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(-1))\n\n # Checking the response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json()['detail'], 'Not found.')" ]
[ "0.7137308", "0.70888245", "0.7062536", "0.6974903", "0.69301313", "0.68988854", "0.68076444", "0.6800001", "0.6726583", "0.66637677", "0.6646029", "0.6623722", "0.66190183", "0.661218", "0.6599899", "0.65989226", "0.64434445", "0.64308655", "0.64130604", "0.6410084", "0.6410084", "0.6410084", "0.64098483", "0.64018416", "0.6393212", "0.6380881", "0.63672215", "0.635555", "0.6348094", "0.6343229" ]
0.78823984
0
count the number of occurrences of a pattern in a file
def parse_file_count(path, args): try: fisier = open(path, 'r') except IOError: print("Nu am putut deschide fisierul :", path) return n_found = 0 pattern = args.pattern for line in fisier: if args.ignore_case: line = line.lower() pattern = pattern.lower() n_found += line.count(pattern) fisier.close() return n_found
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def count():", "def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))", "def PatternCount(text, pattern):\n\n count = 0\n for i in range(0, len(text)-len(pattern)+1):\n if text[i:i+len(pattern)] == pattern:\n count += 1\n return count", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def support_count(pattern, D):\n support_count = 0\n tmp_p = set(pattern)\n for transaction in D:\n if tmp_p <= set(transaction):\n support_count += 1\n return support_count", "def pattern_count(DNA, pattern, start=0, end=0, mutation_thresh=0):\n if start < 0 or start >= len(DNA):\n raise ValueError(\"The starting position should be between 0 and the size \" + \\\n \"of the DNA\")\n\n k = len(pattern)\n count = 0\n end = len(DNA) - k + 1 if end == 0 else end\n\n for i in range(0, end):\n if hamming_distance(DNA[i:i+k], pattern) <= mutation_thresh:\n count += 1\n\n return count", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def get_count(self):\n\n return len(self._pattern)", "def test_ababab():\n assert part_01.count_for('ababab', 2) == 0\n assert part_01.count_for('ababab', 3) == 1", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def count(seq):\n\treturn sum(1 for x in seq)", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def __len__(self) -> int:\n n_fuzzy_patterns = sum(len(p[\"patterns\"]) for p in self.fuzzy_patterns.values())\n n_regex_patterns = sum(len(p[\"patterns\"]) for p in self.regex_patterns.values())\n return n_fuzzy_patterns + n_regex_patterns", "def num_patterns(self):\n return len(self._pattern_reg)", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def countAtom (dico_count, PDB_parsed, debug = 0):\n count = 0\n \n for atom in PDB_parsed : \n residue = tool.transformAA(atom[\"resName\"])\n if debug : print residue\n \n if residue in dico_count : \n atom_Name = atom[\"name\"]\n if atom_Name in dico_count[residue] : \n count = count + 1\n return count", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def count(args):\n path = os.path.abspath(args.path)\n total = 0\n\n if args.recursive:\n if os.path.exists(args.path):\n for item in os.listdir(path):\n little_path = os.path.join(path, item)\n if 
os.path.isfile(little_path):\n total += parse_file_count(little_path, args)\n else:\n total += count(little_path)\n else:\n print(\"EROARE: <\" + args.path +\n \"> invalid, nu putem ajunge acolo\")\n else:\n if os.path.isfile(args.path):\n total += parse_file_count(args.path, args)\n else:\n print(\"EROARE: <\" + args.pattern +\n \"> invalid, nu este fisier\")\n return total", "def test_abcccd():\n assert part_01.count_for('abcccd', 2) == 0\n assert part_01.count_for('abcccd', 3) == 1", "def __len__(self: TokenMatcher) -> int:\n return len(self._patterns)", "def test_abbcde():\n assert part_01.count_for('abbcde', 2) == 1\n assert part_01.count_for('abbcde', 3) == 0", "def npatterns(self):\n return len(self.patterns)", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def utr5_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.five_prime_utr_sequence.upper()))", "def utr3_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.three_prime_utr_sequence.upper()))", "def _count_sequence(sequence, regex=None):\n # type: (pyfaidx.Sequence, Pattern[str]) -> int\n\n if regex is None:\n count = len(sequence)\n else:\n count = sum((1 for _ in regex.finditer(str(sequence))))\n\n return count", "def get_pattern_count(left, coins):\r\n if len(coins) == 0:\r\n return 1\r\n # Get next coin\r\n coin = coins[0]\r\n # See how many could go into left\r\n most = left // coin\r\n # Loop through possible\r\n count = 0\r\n for i in range(0, most + 1):\r\n remaining = left - i * coin\r\n count += get_pattern_count(remaining, coins[1:])\r\n\r\n return count" ]
[ "0.7307175", "0.70045376", "0.6915059", "0.68551326", "0.6829665", "0.6796761", "0.66697985", "0.6597165", "0.65943575", "0.6564186", "0.6545753", "0.64330035", "0.6394136", "0.6371323", "0.63517815", "0.63134825", "0.62883955", "0.62866604", "0.6211942", "0.6205408", "0.61837375", "0.6128623", "0.6126631", "0.61052406", "0.6090737", "0.60725677", "0.60703516", "0.606997", "0.60651267", "0.6034135" ]
0.7055671
1
Report if src and dest are different. Arguments
def check(src, perm, dest, cmds, comp, verbose=False): if comp == Cmp.differ: ansiprint(f"The file '{src}' differs from '{dest}'.", fg=Color.red, i=True) elif comp == Cmp.nodest: ansiprint( f"The destination file '{dest}' does not exist", fg=Color.black, bg=Color.red, ) elif comp == Cmp.nosrc: ansiprint( f"The source file '{src}' does not exist.", fg=Color.black, bg=Color.red ) elif comp == Cmp.same and verbose: ansiprint(f"The files '{src}' and '{dest}' are the same.", fg=Color.green)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_equals_with_different_sources(self):\n measurement_1 = Measurement(self.metric(), sources=[{\"source_uuid\": SOURCE_ID}])\n measurement_2 = Measurement(self.metric())\n self.assertFalse(measurement_1.equals(measurement_2))", "def compare(src, dest):\n xsrc, xdest = os.path.exists(src), os.path.exists(dest)\n if not xsrc:\n return Cmp.nosrc\n if not xdest:\n return Cmp.nodest\n with open(src, \"rb\") as s:\n csrc = sha256(s.read()).digest()\n if xdest:\n with open(dest, \"rb\") as d:\n cdest = sha256(d.read()).digest()\n else:\n cdest = b\"\"\n if csrc == cdest:\n return Cmp.same\n return Cmp.differ", "def _source_filename_field_is_not_equal_target(self):\n if self.source == self.target:\n # print(f\"{self}\")\n raise SourceEqualsTargetError(\"source must not equal target.\")\n return True", "def skip_source_dest_check(self):\n return self._skip_source_dest_check", "def do_compare(self, str_arg):\n arg = validateString(str_arg)\n source, target = arg.split(' ', 1)\n if os.path.isfile(source):\n # Mar 27 @swang: if target file doesn't exist, copy source file to setup directory for later test\n # 2015-08-27: decided to go to fail path\n if not os.path.isfile(target):\n # copy(source, target)\n self.resultFlag = False\n raise ValueError('COMPARE FAILED: target file not found.')\n # if not self.__compareImage(source, target):\n if not filecmp.cmp(source, target):\n printLog(self.threadName + 'COMPARE FAILED: source file and target file DIFFER!', logging.WARNING)\n self.resultFlag = False\n else:\n self.resultFlag = False\n raise ValueError('COMPARE FAILED: source file not found.')", "def skip_source_dest_check(self, skip_source_dest_check):\n self._skip_source_dest_check = skip_source_dest_check", "def check(src, dst):\n walker = Walker()\n walker.check(src, dst)\n return", "def match_stat(dest_path, source_path):\n return shutil.copystat(source_path, dest_path)", "def assert_source_space_equal(src1, src2, msg=\"SourceSpace Dimension objects \"\n \"unequal\"):\n msg = \"%s:\" % msg\n assert_array_equal(src1.vertno[0], src2.vertno[0], \"%s unequal lh vertno \"\n \"(%r vs %r)\" % (msg, src1.vertno[0], src2.vertno[0]))\n assert_array_equal(src1.vertno[1], src2.vertno[1], \"%s unequal rh vertno \"\n \"(%r vs %r)\" % (msg, src1.vertno[1], src2.vertno[1]))\n assert_equal(src1.subject, src2.subject, \"%s unequal subject (%r vs %r\"\n \")\" % (msg, src1.subject, src2.subject))\n assert_equal(src1.src, src2.src, \"%s unequal names (%r vs %r\"\n \")\" % (msg, src1.src, src2.src))\n assert_equal(src1.subjects_dir, src2.subjects_dir, \"%s unequal names (%r \"\n \"vs %r)\" % (msg, src1.subjects_dir, src2.subjects_dir))", "def test_copy_sources(self):\n metric_copy = copy_metric(self.metric, self.DATA_MODEL)\n self.assertEqual(\"Source\", first(metric_copy[\"sources\"].values())[\"name\"])", "def validate(source_media_info: Metadata,\n dest_media_info: Metadata) -> None:\n src_duration = max(source_media_info[VIDEO_DURATION],\n source_media_info[AUDIO_DURATION])\n dst_duration = min(dest_media_info[VIDEO_DURATION],\n dest_media_info[AUDIO_DURATION])\n if dst_duration < DURATION_DELTA * src_duration:\n # Check whether result duration corresponds to source duration\n # (damaged source files may be processed successfully but result\n # is shorter)\n raise TranscodeError(f\"incomplete file: {dst_duration}\")", "def check_diff(src, dst):\n result = _subprocess(['git', '--no-pager', 'log', '--graph', '--abbrev-commit', '--pretty=oneline',\n '--no-merges', \"--\", f\"{src}\", f\"^{dst}\"])\n\n if 
result:\n print(f\"Warning: the following commits are present on {dst} but not on {src}: \\n{result}\")\n if args.force:\n print(f\"Warning: they will be overwritten on {dst} and discarded.\")\n else:\n print(f\"Warning: run with --force to overwrite and discard these commits from {dst}\")\n exit(1)", "def calc_diff(src, dest, temp, filename, inc_backup=None, dbg=False):\n src_str = os.sep.join([src, filename])\n dest_str = os.sep.join([dest, filename])\n if inc_backup is not None:\n inc_backup = set(inc_backup)\n\n val = str(filename).split(\".\")\n\n try:\n diff_str = \"\".join([temp, os.sep, val[0], \".diff\"])\n diff_list = [\"/usr/bin/diff\", \"-s\", src_str, dest_str]\n\n call_rslt = subp.run(diff_list, stdout=subp.PIPE, stderr=subp.PIPE)\n init_rslt = call_rslt.stdout.decode(\"UTF-8\")\n # print(str(call_rslt.returncode) + os.linesep + init_rslt)\n if call_rslt.returncode == 0 and init_rslt.endswith(\"identical\" + os.linesep):\n base_str = \"Excluding Identical File: \" + filename\n dbc.print_helper(base_str, dbg=dbg)\n elif call_rslt.returncode == 1 and init_rslt is not None and\\\n not init_rslt.endswith(\"identical\" + os.linesep):\n file_ptr = open(diff_str, \"w\")\n file_ptr.write(init_rslt)\n file_ptr.close()\n\n if inc_backup is not None and filename in inc_backup:\n temp_filename = calc_filename(os.sep.join([temp, filename]), include_time=True,\n dbg=dbg)\n sh.copy(src_str, temp_filename)\n\n base_str = \" \".join([\"Diff\", diff_str, \"success\"])\n dbc.print_helper(base_str, dbg=dbg)\n else:\n dbc.error_helper(\"Diff Error:\", call_rslt.stderr, filename, dbg=dbg)\n except:\n dbc.error_helper(\"Diff Exception: \", stderr=None, post=filename, dbg=dbg)", "def __init__(self, src, dest):\n self.src = src\n self.dest = dest", "def copy_file_check(self):\n pass", "def testDetermineDest(self):\n self.cc.determine_dest('cdl', '/bobsBestDirectory')\n\n dir = os.path.abspath('/bobsBestDirectory')\n filename = os.path.join(dir, 'uniqueId.cdl')\n\n self.assertEqual(\n filename,\n self.cc.file_out\n )", "def check(self, src, dst, map=True):\n if map:\n map = mapping.create_mapping(src)\n else:\n map = src\n if map != dst:\n dumper.dumpDoc(map)\n print \"---- vs ----\"\n dumper.dumpDoc(dst)\n self.assertEqual(map, dst)\n self.assertEqual(dst, map) # do the vice versa test too\n return map", "def verify_destinations(**kwargs):\n if \"mapd_db\" in kwargs[\"destinations\"]:\n valid_destination_set = True\n if kwargs[\"dest_db_server\"] is None:\n # If dest_server is not set for mapd_db, then exit\n logging.error(\n '\"dest_server\" is required when destination = \"mapd_db\"'\n )\n if \"file_json\" in kwargs[\"destinations\"]:\n valid_destination_set = True\n if kwargs[\"output_file_json\"] is None:\n # If output_file_json is not set for file_json, then exit\n logging.error(\n '\"output_file_json\" is required when destination = \"file_json\"'\n )\n if \"output\" in kwargs[\"destinations\"]:\n valid_destination_set = True\n if \"jenkins_bench\" in kwargs[\"destinations\"]:\n valid_destination_set = True\n if kwargs[\"output_file_jenkins\"] is None:\n # If output_file_jenkins is not set for jenkins_bench, then exit\n logging.error(\n '\"output_file_jenkins\" is required '\n + 'when destination = \"jenkins_bench\"'\n )\n if not valid_destination_set:\n return False\n else:\n return True", "def diff(src, perm, dest, cmds, comp, verbose=False):\n if comp != Cmp.differ:\n return\n with open(src) as s, open(dest) as d:\n srcl, destl = list(s), list(d)\n out = unified_diff(destl, srcl, dest, 
src)\n colordiff(out)", "def verifyFile(source, destination):\n\tsourceHash = hashlib.sha256(open(source, 'rb').read()).digest()\n\tdestinationHash = hashlib.sha256(open(destination, 'rb').read()).digest()\n\n\tif sourceHash == destinationHash:\n\t\treturn (True, str(sourceHash))\n\n\treturn False", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def determine_should_sync(\n self, src_file: Optional[FileStats], dest_file: Optional[FileStats]\n ) -> bool:\n if dest_file:\n dest_file.operation_name = \"delete\"\n LOGGER.debug(\n \"syncing: (None) -> %s (remove), file does not \"\n \"exist at source (%s) and delete mode enabled\",\n dest_file.src if dest_file else None,\n dest_file.dest if dest_file else None,\n )\n return True", "def check_diff_as_arg(self):\n if self.args.diff is True:\n if (\n self.args.pre_snapfile is not None\n and os.path.isfile(self.args.pre_snapfile)\n ) and (\n self.args.post_snapfile is not None\n and os.path.isfile(self.args.post_snapfile)\n ):\n comp = Comparator()\n comp.compare_diff(self.args.pre_snapfile, self.args.post_snapfile, None)\n sys.exit(1)", "def run_copy(self, src, dst):\n pass", "def testDetermineDestNoFileIn(self):\n # Reset the members list\n cdl_convert.ColorCollection.reset_members()\n\n # Create a few Collections\n cdl_convert.ColorCollection()\n cdl_convert.ColorCollection()\n cdl_convert.ColorCollection()\n\n # The 4th one will be the one we use\n self.node = cdl_convert.ColorCollection()\n\n # But we'll create a 5th.\n cdl_convert.ColorCollection()\n\n self.node.type = 'ccc'\n\n self.node.determine_dest('./converted/')\n\n self.assertEqual(\n './converted/color_collection_003.ccc',\n self.node.file_out\n )", "def test_diff(self):\n _ff_source = FlatfileDataset(_filename=os.path.join(Test_Resource_Dir, \"csv_source.csv\"),\n _has_header=True, _delimiter=\";\", _csv_dialect=\"excel-tab\",\n _quoting=\"MINIMAL\", _quotechar='\"')\n _dataset_source = _ff_source.load()\n _ff_dest = FlatfileDataset(_filename=os.path.join(Test_Resource_Dir, \"csv_dest_orig.csv\"),\n _has_header=True, _delimiter=\";\", _csv_dialect=\"excel-tab\",\n _quoting=\"MINIMAL\", _quotechar='\"')\n _dataset_dest = _ff_dest.load()\n # print(str(_dataset_dest))\n _missing_left, _missing_right, _difference, _sorted = compare(_dataset_source, _dataset_dest, [0], True)\n self.assertEqual(_missing_left,\n [[9, 7, ['7844', 'TURNER', 'SALESMAN', '7698', '1981-09-08 00:00:00', '1500', '', '30']],\n [12, 12, ['7999', 'BORJESSON', 'HACKER', '7839', '2013-01-01', '99999', '', '10']]],\n 'Missing left differs')\n self.assertEqual(_missing_right,\n [[6, 6, ['7782', 'CLARK', 'MANAGER', '7839', '1981-06-09 00:00:00', '2450', '', '10']],\n [7, 6, ['7788', 'SCOTT', 'ANALYST', '7566', '1982-12-09 00:00:00', '3000', '', '20']]],\n 'Missing right differs')\n\n self.assertEqual(_difference,\n [\n [0, 0, ['7369', 'SMITH', 'CLERK', '7902', '1980-12-17 00:00:00', '800', '', '20'],\n ['7369', 'SMITH', 'CLERK', '7902', '1980-12-17 00:00:00', '700', '', '20']],\n [1, 1, ['7499', 'ALLEN', 'SALE;SMAN', '7698', '1981-02-20 00:00:00', '1600', '300', '30'],\n ['7499', 'ALLEN', 'SALESMAN', '7698', '1981-02-20 00:00:00', '1600', '300', '30']],\n [8, 6, ['7839', 'KING', 'PRESIDENT ', '', '1981-11-17 00:00:00', '5000', '', '10'],\n ['7839', 'KING', 'PRESIDENT', '', '1981-11-17 00:00:00', '4500', '', '10']],\n [9, 8, ['7876', 'ADAMS', 'CLERK', '7788', '1983-01-12 00:00:00', '1100,5', '', '20'],\n ['7876', 'ADAMS', 
'CLERK', '7788', '1983-01-12 00:00:00', '1100', '', '20']]\n ], 'Difference differs')", "def check_connect(self, src, dest, scope):\n\n if self.get_source(dest) is not None:\n scope.raise_exception(\"'%s' is already connected to source '%s'\" % (dest, self.get_source(dest)),\n RuntimeError)\n\n destexpr = ConnectedExprEvaluator(dest, scope, getter='get_wrapped_attr',\n is_dest=True)\n srcexpr = ConnectedExprEvaluator(src, scope, getter='get_wrapped_attr')\n\n srccomps = srcexpr.get_referenced_compnames()\n destcomps = destexpr.get_referenced_compnames()\n\n if destcomps and destcomps.pop() in srccomps:\n raise RuntimeError(\"'%s' and '%s' refer to the same component.\" % (src, dest))\n return srcexpr, destexpr", "def check_duplicate(fp1, fp2):\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False", "def is_duplicate(self, event):\n # only checking remote and expected remote for the endpoint. We don't care about names,\n # interfaces/tunnels, or pctags for dup stale suppression\n return (self.remote == event.remote and self.expected_remote == event.expected_remote)" ]
[ "0.654689", "0.6510692", "0.61557376", "0.61256444", "0.60510474", "0.6012479", "0.59540343", "0.5925164", "0.590129", "0.588346", "0.58549356", "0.5794634", "0.57847124", "0.5747154", "0.57156837", "0.57082486", "0.5705017", "0.5698565", "0.5669838", "0.5648875", "0.56427705", "0.56427705", "0.5635313", "0.5620452", "0.5570606", "0.55360484", "0.5531758", "0.55248195", "0.5484599", "0.54747397" ]
0.6719928
0
Print the difference between src and dest. Arguments
def diff(src, perm, dest, cmds, comp, verbose=False): if comp != Cmp.differ: return with open(src) as s, open(dest) as d: srcl, destl = list(s), list(d) out = unified_diff(destl, srcl, dest, src) colordiff(out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_dry_run_copy_info(source, dest):\n\n def shorten_home(path):\n expanded_home = os.path.expanduser(\"~\")\n path = str(path)\n if path.startswith(expanded_home):\n return path.replace(expanded_home, \"~\")\n return path\n\n def truncate_middle(path: str, acceptable_len: int):\n \"\"\"Middle truncate a string\n https://www.xormedia.com/string-truncate-middle-with-ellipsis/\n \"\"\"\n if len(path) <= acceptable_len:\n return path\n # half of the size, minus the 3 .'s\n n_2 = int(acceptable_len / 2 - 3)\n # whatever's left\n n_1 = int(acceptable_len - n_2 - 3)\n return f\"{path[:n_1]}...{path[-n_2:]}\"\n\n trimmed_source = shorten_home(source)\n trimmed_dest = shorten_home(dest)\n longest_allowed_path_len = 87\n if len(trimmed_source) + len(trimmed_dest) > longest_allowed_path_len:\n trimmed_source = truncate_middle(trimmed_source, longest_allowed_path_len)\n trimmed_dest = truncate_middle(trimmed_dest, longest_allowed_path_len)\n print(\n Fore.YELLOW + Style.BRIGHT + trimmed_source + Style.NORMAL,\n \"->\",\n Style.BRIGHT + trimmed_dest + Style.RESET_ALL,\n )", "def pprint_transfer(src, dest):\n len_src = len(src)\n len_dest = len(dest)\n\n # Find common prefix\n pfx_length = 0\n i = 0\n j = 0\n while (i < len_src and j < len_dest and src[i] == dest[j]):\n if src[i] == os.path.sep:\n pfx_length = i + 1\n i += 1\n j += 1\n\n # Find common suffix\n sfx_length = 0\n i = len_src - 1\n j = len_dest - 1\n while (i > 0 and j > 0 and src[i] == dest[j]):\n if src[i] == os.path.sep:\n sfx_length = len_src - i\n i -= 1\n j -= 1\n\n src_midlen = len_src - pfx_length - sfx_length\n dest_midlen = len_dest - pfx_length - sfx_length\n\n pfx = src[:pfx_length]\n sfx = dest[len_dest - sfx_length:]\n src_mid = src [pfx_length:pfx_length + src_midlen ]\n dest_mid = dest[pfx_length:pfx_length + dest_midlen]\n\n if pfx == os.path.sep:\n # The common prefix is / ,\n # avoid print /{etc => tmp}/foo, and\n # print {/etc => /tmp}/foo\n pfx = \"\"\n src_mid = os.path.sep + src_mid\n dest_mid = os.path.sep + dest_mid\n\n if not pfx and not sfx:\n return \"%s => %s\" % (src, dest)\n\n res = \"%s{%s => %s}%s\" % (pfx, src_mid, dest_mid, sfx)\n return res", "def PrintDiffs(message, lhs, rhs):\n dif = set(lhs).difference(rhs)\n if dif:\n print message, ', '.join(dif)", "def printDistance(self, src, dst):\n self.src = src\n self.dst = dst\n \n dist = self.getDistance(src, dst)\n print (\"The distance between %s and %s is %.2f meters\\n\" % (src, dst, float(dist)))", "def calc_diff(src, dest, temp, filename, inc_backup=None, dbg=False):\n src_str = os.sep.join([src, filename])\n dest_str = os.sep.join([dest, filename])\n if inc_backup is not None:\n inc_backup = set(inc_backup)\n\n val = str(filename).split(\".\")\n\n try:\n diff_str = \"\".join([temp, os.sep, val[0], \".diff\"])\n diff_list = [\"/usr/bin/diff\", \"-s\", src_str, dest_str]\n\n call_rslt = subp.run(diff_list, stdout=subp.PIPE, stderr=subp.PIPE)\n init_rslt = call_rslt.stdout.decode(\"UTF-8\")\n # print(str(call_rslt.returncode) + os.linesep + init_rslt)\n if call_rslt.returncode == 0 and init_rslt.endswith(\"identical\" + os.linesep):\n base_str = \"Excluding Identical File: \" + filename\n dbc.print_helper(base_str, dbg=dbg)\n elif call_rslt.returncode == 1 and init_rslt is not None and\\\n not init_rslt.endswith(\"identical\" + os.linesep):\n file_ptr = open(diff_str, \"w\")\n file_ptr.write(init_rslt)\n file_ptr.close()\n\n if inc_backup is not None and filename in inc_backup:\n temp_filename = calc_filename(os.sep.join([temp, filename]), 
include_time=True,\n dbg=dbg)\n sh.copy(src_str, temp_filename)\n\n base_str = \" \".join([\"Diff\", diff_str, \"success\"])\n dbc.print_helper(base_str, dbg=dbg)\n else:\n dbc.error_helper(\"Diff Error:\", call_rslt.stderr, filename, dbg=dbg)\n except:\n dbc.error_helper(\"Diff Exception: \", stderr=None, post=filename, dbg=dbg)", "def hexdump_diff( source1, source2, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, before=2, after=2, address_base=None ):\n for line in hexdump_diff_iter( source1, source2, start, end, length, major_len, minor_len, colour, before, after, address_base ):\n print( line )", "def print_comparison(name, dates, times, orig_data, comp_data):\n\n # Output comparison of data\n print(' ORIGINAL COMPUTED')\n print(f' DATE TIME {name.upper():>9} {name.upper():>9} DIFFERENCE')\n print('------- ------ --------- --------- ----------')\n zip_data = zip(dates, times, orig_data, comp_data)\n for date, time, orig, comp in zip_data:\n diff = orig - comp\n print(f'{date} {time:>6} {orig:9.6f} {comp:9.6f} {diff:10.6f}')", "def print_diff(ip, common, diff1, diff2):\n logging.info('IP: %s', ip)\n if common:\n common = [' {0}'.format(elem) for elem in common]\n logging.info('\\n'.join(common))\n if diff1:\n diff = ['+ {0}'.format(elem) for elem in diff1]\n logging.info('\\n'.join(diff))\n if diff2:\n diff = ['- {0}'.format(elem) for elem in diff2]\n logging.info('\\n'.join(diff))", "def print_comparison(name, dates, times, original_data, computed_data):\n \n # Output comparison of data\n print(' ORIGINAL COMPUTED')\n print(f' DATE TIME {name.upper():>9} {name.upper():>9} DIFFERENCE')\n print('------- ------ --------- --------- ----------')\n zip_data = zip(dates, times, original_data, computed_data)\n for date, time, orig, comp in zip_data:\n diff = orig - comp\n print(f'{date} {time:>6} {orig:9.6f} {comp:9.6f} {diff:10.6f}')", "def diff(args):\n local = set(args.cache)\n remote = set(args.remote_cache)\n here = local.difference(remote)\n for item in sorted(here):\n sys.stdout.write('< {}'.format(item) + '\\n')\n there = remote.difference(local)\n for item in sorted(there):\n sys.stdout.write('> {}'.format(item) + '\\n')\n return", "def print_unidiff(self):\n\n color_stdout(\"\\nTest failed! 
Result content mismatch:\\n\", schema='error')\n with open(self.result, \"r\") as result:\n with open(self.reject, \"r\") as reject:\n result_time = time.ctime(os.stat(self.result).st_mtime)\n reject_time = time.ctime(os.stat(self.reject).st_mtime)\n diff = difflib.unified_diff(result.readlines(),\n reject.readlines(),\n self.result,\n self.reject,\n result_time,\n reject_time)\n\n color_stdout.writeout_unidiff(diff)", "def test_ndiff(self):\n print \"\\n\"\n for d in ndiff(a, b): print d", "def test_printdiff(self):\n\n # Testing different string input options\n assert printdiff(self.data(\"arange.fits\"), self.data(\"blank.fits\")) is None\n assert (\n printdiff(self.data(\"arange.fits\"), self.data(\"blank.fits\"), ext=0) is None\n )\n assert (\n printdiff(\n self.data(\"o4sp040b0_raw.fits\"),\n self.data(\"o4sp040b0_raw.fits\"),\n extname=\"sci\",\n )\n is None\n )\n\n # This may seem weird, but check printdiff to see, need to test\n # incorrect second file\n with pytest.raises(OSError):\n printdiff(\"o4sp040b0_raw.fits\", \"fakefile.fits\", extname=\"sci\")\n\n # Test HDU object inputs\n with fits.open(self.data(\"stddata.fits\"), mode=\"readonly\") as in1:\n with fits.open(self.data(\"checksum.fits\"), mode=\"readonly\") as in2:\n assert printdiff(in1[0], in2[0]) is None\n\n with pytest.raises(ValueError):\n printdiff(in1[0], in2[0], ext=0)\n\n assert printdiff(in1, in2) is None\n\n with pytest.raises(NotImplementedError):\n printdiff(in1, in2, 0)", "def _unidiff_output(expected, actual):\n\n expected=expected.splitlines(1)\n actual=actual.splitlines(1)\n\n diff=difflib.unified_diff(expected, actual)\n\n return ''.join(diff)", "def check(src, perm, dest, cmds, comp, verbose=False):\n if comp == Cmp.differ:\n ansiprint(f\"The file '{src}' differs from '{dest}'.\", fg=Color.red, i=True)\n elif comp == Cmp.nodest:\n ansiprint(\n f\"The destination file '{dest}' does not exist\",\n fg=Color.black,\n bg=Color.red,\n )\n elif comp == Cmp.nosrc:\n ansiprint(\n f\"The source file '{src}' does not exist.\", fg=Color.black, bg=Color.red\n )\n elif comp == Cmp.same and verbose:\n ansiprint(f\"The files '{src}' and '{dest}' are the same.\", fg=Color.green)", "def test_printdiff(self):\n\n # Testing different string input options\n assert printdiff(self.data('arange.fits'),\n self.data('blank.fits')) is None\n assert printdiff(self.data('arange.fits'),\n self.data('blank.fits'), ext=0) is None\n assert printdiff(self.data('o4sp040b0_raw.fits'),\n self.data('o4sp040b0_raw.fits'),\n extname='sci') is None\n\n # This may seem weird, but check printdiff to see, need to test\n # incorrect second file\n with pytest.raises(IOError):\n printdiff('o4sp040b0_raw.fits', 'fakefile.fits', extname='sci')\n\n # Test HDU object inputs\n with fits.open(self.data('stddata.fits'), mode='readonly') as in1:\n with fits.open(self.data('checksum.fits'), mode='readonly') as in2:\n\n assert printdiff(in1[0], in2[0]) is None\n\n with pytest.raises(ValueError):\n printdiff(in1[0], in2[0], ext=0)\n\n assert printdiff(in1, in2) is None\n\n with pytest.raises(NotImplementedError):\n printdiff(in1, in2, 0)", "def do_diff(sourcelist):\n for source in sourcelist:\n dc = filecmp.dircmp('output-pandoc/'+source, 'output-panzer/'+source)\n if dc.right_only or dc.left_only or dc.diff_files:\n print(pretty_title(source))\n if dc.right_only:\n print('* only in output-panzer/%s:' % source)\n for line in pretty_list(dc.right_only):\n print(' ' + line)\n if dc.left_only:\n print('* only in output-pandoc/%s:' % source)\n for line in 
pretty_list(dc.left_only):\n print(' ' + line)\n if dc.diff_files:\n print('* differing:')\n for line in pretty_list(dc.diff_files):\n print(' ' + line)", "def compare(src, dest):\n xsrc, xdest = os.path.exists(src), os.path.exists(dest)\n if not xsrc:\n return Cmp.nosrc\n if not xdest:\n return Cmp.nodest\n with open(src, \"rb\") as s:\n csrc = sha256(s.read()).digest()\n if xdest:\n with open(dest, \"rb\") as d:\n cdest = sha256(d.read()).digest()\n else:\n cdest = b\"\"\n if csrc == cdest:\n return Cmp.same\n return Cmp.differ", "def print_path(window, source, dest):\n path = []\n curr_node = dest\n while curr_node.prev:\n path.append(curr_node)\n curr_node = curr_node.prev\n path.append(source)\n path = path[::-1] # reverse the path to display source->dest and not dest->source\n for node in path:\n if not node.is_colored:\n block = get_block_from_node(node)\n block.draw(window, PATH_COLOR)", "def w_print_diff(self, message, d1, d2, expectedResult=None):\n print(\"Message: '%s'\" % message)\n print(\"Message length: %d\" % len(message))\n if expectedResult:\n print(\"%-48s (expected)\" % self._format(expectedResult))\n print(\"%-48s (Std. lib. MD5)\" % self._format_hex(d1))\n print(\"%-48s (Pure Python MD5)\" % self._format_hex(d2))\n print()", "def compare_output(file1, file2):\n output = subprocess.getoutput(f\"diff -u -b {file1} {file2} | sed -n '12d;/^[-+]/p'\")\n\n if not output.strip():\n name = file1.rsplit('/', 1)[-1]\n print('Equivalent:', name)\n else:\n print(output)", "def check_diff(src, dst):\n result = _subprocess(['git', '--no-pager', 'log', '--graph', '--abbrev-commit', '--pretty=oneline',\n '--no-merges', \"--\", f\"{src}\", f\"^{dst}\"])\n\n if result:\n print(f\"Warning: the following commits are present on {dst} but not on {src}: \\n{result}\")\n if args.force:\n print(f\"Warning: they will be overwritten on {dst} and discarded.\")\n else:\n print(f\"Warning: run with --force to overwrite and discard these commits from {dst}\")\n exit(1)", "def show_diff(seqm):\n output= []\n for opcode, a0, a1, b0, b1 in seqm.get_opcodes():\n if opcode == 'equal':\n output.append(seqm.a[a0:a1])\n elif opcode == 'insert':\n output.append(\"{+\" + seqm.b[b0:b1] + \"+}\")\n elif opcode == 'delete':\n output.append(\"{-\" + seqm.a[a0:a1] + \"-}\")\n elif opcode == 'replace':\n output.append(\"<del>\" + seqm.a[a0:a1] + \"</del><ins>\" + seqm.b[b0:b1] + \"</ins>\")\n else:\n raise RuntimeError(\"unexpected opcode\")\n return ''.join(output)", "def printUsage():\r\n print \"usage: rsync.py [options] source target\"\r\n print \"\"\"\r\n -q, --quiet decrease verbosity\r\n -r, --recursive recurse into directories\r\n -R, --relative use relative path names\r\n -u, --update update only (don't overwrite newer files)\r\n -t, --times preserve times\r\n -n, --dry-run show what would have been transferred\r\n --existing only update files that already exist\r\n --delete delete files that don't exist on the sending side\r\n --delete-excluded also delete excluded files on the receiving side\r\n -I, --ignore-times don't exclude files that match length and time\r\n --size-only only use file size when determining if a file should\r\n be transferred\r\n --modify-window=NUM timestamp window (seconds) for file match (default=2)\r\n --existing only update existing target files or folders\r\n -C, --cvs-exclude auto ignore files in the same way CVS does\r\n --exclude=PATTERN exclude files matching PATTERN\r\n --exclude-from=FILE exclude patterns listed in FILE\r\n --include=PATTERN don't exclude files matching 
PATTERN\r\n --include-from=FILE don't exclude patterns listed in FILE\r\n --version print version number\r\n -h, --help show this help screen\r\n\r\nSee http://www.vdesmedt.com/~vds2212/rsync.html for informations and updates.\r\nSend an email to [email protected] for comments and bug reports.\"\"\"", "def reverse_difference():", "def test_ddiff_v2(self):\n print \"\\n\"\n for d in ddiff_v2(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def display_diff(dta1, dta2, all_data=False):\n if not isinstance(dta1, Dta) or not isinstance(dta2, Dta):\n raise TypeError(\"objects to be compared must be Dta\")\n \n typlist_converters = {\n 'Dta115': {\n 'Dta117': lambda i: i if i <= 244 else 65530 + (251 - i)\n }\n }\n \n different = False\n \n # Python class types <-> dta version\n # ----------------------------------\n dta1_type, dta2_type = dta1.__class__.__name__, dta2.__class__.__name__\n if not dta1_type == dta2_type:\n different = True\n print(\" class types differ:\")\n print(\" {} vs {}\".format(dta1_type, dta2_type))\n \n # data set descriptors\n # --------------------\n if not dta1._ds_format == dta2._ds_format:\n different = True\n print(\" formats differ:\")\n print(\" {} vs {}\".format(dta1._ds_format, dta2._ds_format))\n \n if not dta1._data_label == dta2._data_label:\n different = True\n print(\" data labels differ:\")\n print(\" {} vs {}\".format(dta1._data_label, dta2._data_label))\n \n # time stamp\n # ----------\n stamp1 = dta1._time_stamp.split()\n stamp2 = dta2._time_stamp.split()\n stamp1[0] = int(stamp1[0]) #day\n stamp2[0] = int(stamp2[0])\n stamp1[2] = int(stamp1[2]) #year\n stamp2[2] = int(stamp2[2])\n stamp1 = stamp1[:-1] + [int(x) for x in stamp1[-1].split(':')] # hr & min\n stamp2 = stamp2[:-1] + [int(x) for x in stamp2[-1].split(':')]\n if not stamp1 == stamp2:\n different = True\n print(\" time stamps differ:\")\n print(\" {} vs {}\".format(dta1._time_stamp, dta2._time_stamp))\n \n # number of variables and observations\n # ------------------------------------\n if not dta1._nvar == dta2._nvar:\n different = True\n print(\" # of vars differs:\")\n print(\" {} vs {}\".format(dta1._nvar, dta2._nvar))\n print(\" > comparison now limited to vars 0 .. min(nvar1, nvar2)\")\n \n if not dta1._nobs == dta2._nobs:\n different = True\n print(\" # of obs differs:\")\n print(\" {} vs {}\".format(dta1._nobs, dta2._nobs))\n print(\" > comparison now limited to obs 0 .. min(nobs1, nobs2)\")\n \n nvar = min(dta1._nvar, dta2._nvar)\n nobs = min(dta1._nobs, dta2._nobs)\n \n # descriptors\n # -----------\n \n # typlist\n # If dta versions are the same, can make direct comparison. 
If versions\n # are different, a direct comparison doesn't mean much if data types\n # are encoded differently, so convert one before comparing.\n if dta1_type == dta2_type:\n diff = [i for i in range(nvar) if dta1._typlist[i] != dta2._typlist[i]]\n else:\n s = sorted(((dta1_type, dta1), (dta2_type, dta2)))\n (older_type, older_dta), (newer_type, newer_dta) = s\n converter = typlist_converters[older_type][newer_type]\n diff = [i for i in range(nvar) \n if converter(older_dta._typlist[i]) != newer_dta._typlist[i]]\n if diff != []:\n different = True\n print(\" Stata data types differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # varlist\n diff = [i for i in range(nvar) if dta1._varlist[i] != dta2._varlist[i]]\n if diff != []:\n different = True\n print(\" variable names differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # srtlist\n diff = [i for i in range(nvar) if dta1._srtlist[i] != dta2._srtlist[i]]\n if diff != []:\n different = True\n print(\" sort lists differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # fmtlist\n diff = [i for i in range(nvar) if dta1._fmtlist[i] != dta2._fmtlist[i]]\n if diff != []:\n different = True\n print(\" display formats differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # lbllist\n diff = [i for i in range(nvar) if dta1._lbllist[i] != dta2._lbllist[i]]\n if diff != []:\n different = True\n msg = \" attached value labels differ in {} places\".format(len(diff))\n print(msg)\n print(\" first difference in position {}\".format(diff[0]))\n \n # vlblist\n diff = [i for i in range(nvar) if dta1._vlblist[i] != dta2._vlblist[i]]\n if diff != []:\n different = True\n print(\" variable labels differ in {} places\".format(len(diff)))\n print(\" first difference in position {}\".format(diff[0]))\n \n # characteristics\n # ---------------\n keys1 = set(dta1._chrdict.keys())\n keys2 = set(dta2._chrdict.keys())\n diff = keys1 - keys2\n if diff != set():\n different = True\n print(\" charataristic keys in #1 but not in #2:\")\n print(\" \", str(diff))\n \n diff = keys2 - keys1\n if diff != set():\n different = True\n print(\" charataristic keys in #2 but not in #1:\")\n print(\" \", str(diff))\n \n diff = [k for k in keys1.intersection(keys2) \n if dta1._chrdict[k] != dta2._chrdict[k]]\n if diff != []:\n different = True\n print(\" charataristic keys with different value:\")\n print(\" \", str(diff))\n \n # defined value labels\n # --------------------\n keys1 = set(dta1._vallabs.keys())\n keys2 = set(dta2._vallabs.keys())\n diff = keys1 - keys2\n if diff != set():\n different = True\n print(\" value labels defined in #1 but not in #2:\")\n print(\" \", str(diff))\n \n diff = keys2 - keys1\n if diff != set():\n different = True\n print(\" value labels defined in #2 but not in #1:\")\n print(\" \", str(diff))\n \n diff = [k for k in keys1.intersection(keys2)\n if dta1._vallabs[k] != dta2._vallabs[k]]\n if diff != []:\n different = True\n print(\" value labels with same name but different mapping:\")\n print(\" \", str(diff))\n \n # data values\n # -----------\n if all_data:\n diff = sum([0] + [1 for i in range(nobs) for j in range(nvar)\n if dta1._varvals[i][j] != dta2._varvals[i][j]])\n if diff != 0:\n different = True\n print(\" data values differ in \" + str(diff) + \" places\")\n else:\n for i in range(nobs):\n for j in range(nvar):\n if 
dta1._varvals[i][j] != dta2._varvals[i][j]:\n different = True\n print(\"\".join(\n (\" data values differ\\n \",\n \"first difference in position {},{}\".format(i,j))))\n break\n else:\n continue # executed if the loop ended normally (no break)\n break # executed if 'continue' was skipped (break)\n # trick from http://stackoverflow.com/questions/653509 \n # to exit from nested for loops\n\n if not different:\n print(\" no difference found\")", "def subtract(a, b):\n print(\"SUBTRACTING %d - %d\" % (a, b))\n return a - b", "def findDist(digraph, src, dest):\n for i in digraph.edges[src]:\n if i[0]==str(dest):\n result=i[1][0]\n return result", "def DumpDiff(blocks, line1, line2):\n for offset1, offset2, size in blocks:\n print offset1, offset2, size\n print offset1, size, \": \", line1[offset1:offset1+size]\n print offset2, size, \": \", line2[offset2:offset2+size]" ]
[ "0.6709384", "0.63324916", "0.63003516", "0.62832433", "0.62829316", "0.62491095", "0.6149095", "0.5964433", "0.59323263", "0.58849573", "0.58178675", "0.5776394", "0.5690763", "0.56890917", "0.56674516", "0.5649746", "0.56477267", "0.564664", "0.562467", "0.5609834", "0.5607146", "0.55838025", "0.5579707", "0.5515245", "0.54941374", "0.5466949", "0.5452457", "0.54497296", "0.5433974", "0.542789" ]
0.6461151
1
Parse an install file list. The install file list should have the name 'filelist.<user>' or 'filelist.<hostname>.<user>', where the hostname is without the domain. Both are tried, in the order given above.
def parsefilelist(verbose): user = pwd.getpwuid(os.getuid()).pw_name hostname = os.environ["HOST"].split(".")[0] filenames = [f"filelist.{user}", f"filelist.{hostname}.{user}"] installs = [] for filename in filenames: try: with open(filename, "r") as infile: for ln in infile: if ln.startswith("#") or ln.isspace(): continue try: src, perm, dest, *cmds = ln.strip().split() except ValueError: ansiprint(f"Invalid line in {filename}: '{ln}'", fg=Color.red) continue installs.append((src, int(perm, base=8), dest, cmds)) except FileNotFoundError: if verbose: ansiprint( f"Command file '{filename}' not found, skipping.", fg=Color.cyan ) return installs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_filelist(self):\n if not os.path.exists(self.filelist):\n print \"couldn't find \",self.filelist\n return\n\n f = open( self.filelist, 'r' )\n flist = f.readlines()\n self.larlitefilelist = []\n for f in flist:\n if \".root\" in f:\n self.larlitefilelist.append( f.strip() )", "def load_installed_file_list(self):\n listpath = os.path.join(self._build_root, 'src', 'gromacs', 'installed-headers.txt')\n with open(listpath, 'r') as installedfp:\n for line in installedfp:\n path = line.strip()\n if not os.path.isabs(path):\n self._reporter.input_error(\n \"installed file not specified with absolute path: {0}\"\n .format(path))\n continue\n relpath = self._get_rel_path(path)\n if relpath not in self._files:\n self._reporter.input_error(\n \"installed file not in source tree: {0}\".format(path))\n continue\n self._files[relpath].set_installed()", "def parseInputFileList (self) :\n filelist = []\n try :\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"#\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: cfg file \" , self.cfgName , \" not found\"\n return\n\n #return filelist", "def parseInputFileList (self):\n filelist = []\n try:\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"@@@\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: label cfg file \" , self.cfgName , \" not found\"\n return", "def parse_nested_files(self):\n # type: () -> Tuple[InstallReqFileSet, InstallReqFileSet]\n nested_cfiles = ordered_set.OrderedSet()\n nested_rfiles = ordered_set.OrderedSet()\n parser = pip.req.req_file.build_parser()\n defaults = parser.get_default_values()\n defaults.index_url = None\n with io.open(str(self.filename), 'r') as f:\n for line in f:\n if line.startswith('#'):\n continue\n args_str, options_str = pip.req.req_file.break_args_options(\n line)\n opts, _ = parser.parse_args(shlex.split(options_str), defaults)\n if opts.requirements:\n filename = self.filename.parent / opts.requirements[0]\n nested_rfiles.add(self.__class__(str(filename)))\n elif opts.constraints:\n filename = self.filename.parent / opts.constraints[0]\n nested_cfiles.add(self.__class__(str(filename)))\n return nested_cfiles, nested_rfiles", "def parse_update(self, file):\n\n self.new_hashes = []\n self.old_hashes = []\n parsed = self.parse_header(file.readline())\n if parsed:\n (type, version) = parsed\n self.log.debug(\"Received list type: %s, version: %s\" % (type, version))\n pattern = re.compile(HASH_REGEX)\n for line in file:\n m = pattern.search(line)\n if m:\n if m.group(1) == \"+\":\n self.new_hashes.append(m.group(2))\n elif m.group(1) == \"-\":\n self.old_hashes.append(m.group(2))\n\n self._version = int(version)\n else:\n raise SafeBrowsingUpdateError(\"Received bad/empty list, no changes made\")", "def parse_ftp_list_line(ftp_list_line):\n return FTPListDataParser().parse_line(ftp_list_line)", "def load_url_list(url_list_file):\n url_list = []\n with open(url_list_file, 'r') as f:\n for eachline in f:\n eachline = eachline.rstrip('\\n')\n parts = eachline.split('\\t')\n domain, script_url = parts\n url_list.append((domain, script_url))\n\n return url_list", "def parse_file_list(self, file_path=None, file_name_id='Producer Granule ID', url_id='Online Access URLs'):\n\n # read in and maintain the raw csv file as df\n df = pd.read_csv(file_path)\n\n # record the number of files\n self.file_num = df.__len__()\n\n # initiate the data frame\n self.file_list = 
pd.DataFrame()\n self.file_list['download_dir'] = np.NaN\n self.file_list['file_name'] = df[file_name_id]\n self.file_list['online_url'] = df[url_id]\n self.file_list['status'] = 0\n self.file_list['year'] = 0\n self.file_list['day'] = 0\n self.file_list = self.file_list.reset_index(drop=True)\n\n # clean up the variables for a file list downloaded from Reverb\n # extract http urls from the file list\n print(\"Extracting http urls from the file list...\")\n self.file_list['online_url'] = self.file_list['online_url'].str.rstrip(\"\\'\").str.split(',').str[1]\n self.file_list['year'] = self.file_list['online_url'].str.split('/', expand=True).iloc[:, 7]\n self.file_list['day'] = self.file_list['online_url'].str.split('/', expand=True).iloc[:, 8]\n self.file_list['download_dir'] = self.download_dir + self.file_list['year'] + '/' + self.file_list['day'] + '/'", "def parse_paths(self):\n self.soup = BeautifulSoup(open(self.get_path('install')))\n for spec in list(self.specs.keys()):\n spec_file = self.find_specs_path(spec)\n if spec_file:\n # If spec file exists\n self.specs[spec] = path_format(spec_file)\n else:\n # If specs are held inside install.xml\n self.specs[spec] = self.install", "def parse_line(self, ftp_list_line):\n buf = ftp_list_line\n\n if len(buf) < 2: # an empty name in EPLF, with no info, could be 2 chars\n return None\n\n c = buf[0]\n if c == '+':\n return self._parse_EPLF(buf)\n\n elif c in 'bcdlps-':\n return self._parse_unix_style(buf)\n\n i = buf.find(';')\n if i > 0:\n return self._parse_multinet(buf, i)\n\n if c in '0123456789':\n return self._parse_msdos(buf)\n\n return None", "def parsePackages(self, packages_list) -> None:\n\t\tif self.package_manager == \"apt\":\n\t\t\tfor package in packages_list:\n\t\t\t\tpackage = package.strip().split(\" \")\n\t\t\t\tname = package[0].split(\"/\")[0]\n\t\t\t\tversion = package[1]\n\t\t\t\tarchitecture = package[2]\n\t\t\t\tself.installed_packages.add(Package(name=name, version=version, architecture=architecture))\n\t\telse:\n\t\t\tlogger.error(\"Package manager parser not supported.\")\n\t\t\traise ValueError(\"Package manager unsupported\")\n\t\tlogger.info(\"Packages parsed successfully\")", "def validate_list(parser, listname):\r\n if (valid_ogfile(listname) and valid_resfile(listname)):\r\n return listname\r\n else:\r\n parser.error(\"Filename error: %s\" % listname)", "def list_files_in_drs_manifest(hostname, auth, infile: str) -> bool:\n return _listfiles(hostname, auth, infile)", "def parse_files(files):\n ans = []\n if files:\n for f in files:\n split = f.split(\"=\")\n if len(split) != 2:\n raise Exception(\"invalid file specification '%s'\" % f)\n ans.append((split[0], split[1]))\n return ans", "def parse_optional_file_list_from_args(args_list: Any, append_error_func: Callable[[str], None]) -> List[str]:\n results = [] # type: List[str]\n if args_list is None:\n # No arguments\n pass\n elif isinstance(args_list, List):\n for c in args_list:\n if not os.path.exists(c):\n append_error_func(\"Given path %s does not exist!\" % c)\n results = list(args_list)\n else:\n append_error_func(\"Argument was not a list?\")\n return results", "def install_list(self, deplist):\n for dep in deplist:\n alldeps = list(self.dependency_dict.keys()) + [\"all\"]\n if dep not in alldeps:\n logger.error(f'\"{dep}\" is not a recognized dependency')\n logger.error(f\"possible dependencies are {alldeps}\")\n sys.exit(1)\n self.check_all()\n if deplist == (\"all\",):\n deplist = [\n d\n for d in self.dependencies\n if 
self.dependency_dict[d][\"required\"]\n ]\n install_list = [\n dep\n for dep in deplist\n if not self.dependency_dict[dep][\"installed\"]\n ]\n if len(install_list):\n if not self.bin_path_exists:\n logger.error(\n f\"Installation directory {self.install_path} does not\"\n \" exist.\"\n )\n sys.exit(1)\n if not self.install_path_writable:\n logger.error(\n f\"Installation directory {self.install_path} is not\"\n \" writable.\"\n )\n sys.exit(1)\n for dep in install_list:\n self.install(dep)", "def first_import(file, list):\n\n list.append(file)\n print(\"Path added to list\")", "def add_list(self, files):\n if files:\n if not list:\n self.set_list(files)\n else:\n self.playlist.extend(files)", "def initFileList(self,extension):\r\n self.listExec.Clear()\r\n for fname in os.listdir(\"data\"):\r\n #print 'testing file ' , fname\r\n \r\n if extension in fname :\r\n #print fname\r\n self.listExec.Append(fname)\r\n self.Refresh()", "def blosxom_file_list_handler(args):\n request = args[\"request\"]\n\n data = request.getData()\n config = request.getConfiguration()\n\n if data['bl_type'] == 'dir':\n filelist = tools.Walk(request, data['root_datadir'], int(config['depth']))\n elif data['bl_type'] == 'file':\n filelist = [data['root_datadir']]\n else:\n filelist = []\n\n entrylist = []\n for ourfile in filelist:\n entry = FileEntry(request, ourfile, data['root_datadir'])\n entrylist.append((entry._mtime, entry))\n\n # this sorts entries by mtime in reverse order. entries that have\n # no mtime get sorted to the top.\n entrylist.sort()\n entrylist.reverse()\n entrylist = [x[1] for x in entrylist]\n \n # Match dates with files if applicable\n if data['pi_yr']:\n month = (data['pi_mo'] in tools.month2num.keys() and tools.month2num[data['pi_mo']] or data['pi_mo'])\n matchstr = \"^\" + data[\"pi_yr\"] + month + data[\"pi_da\"]\n valid_list = [x for x in entrylist if re.match(matchstr, x['fulltime'])]\n else:\n valid_list = entrylist\n\n return valid_list", "def _filter_mrpack_files(file_list: List[MrpackFile], mrpack_install_options: MrpackInstallOptions) -> List[MrpackFile]:\n filtered_list: List[MrpackFile] = []\n for file in file_list:\n if \"env\" not in file:\n filtered_list.append(file)\n continue\n\n if file[\"env\"][\"client\"] == \"required\":\n filtered_list.append(file)\n if file[\"env\"][\"client\"] == \"optional\" and file[\"path\"] in mrpack_install_options.get(\"optionalFiles\", []):\n filtered_list.append(file)\n\n return filtered_list", "def ftp_LIST(self, line):\n # - If no argument, fall back on cwd as default.\n # - Some older FTP clients erroneously issue /bin/ls-like LIST\n # formats in which case we fall back on cwd as default.\n if not line or line.lower() in ('-a', '-l', '-al', '-la'):\n line = self.fs.cwd\n path = self.fs.ftp2fs(line)\n line = self.fs.ftpnorm(line)\n try:\n data = self.fs.get_list_dir(path)\n except OSError, err:\n why = _strerror(err)\n self.log('FAIL LIST \"%s\". %s.' %(line, why))\n self.respond('550 %s.' %why)\n else:\n self.push_dtp_data(data, log='OK LIST \"%s\". Transfer starting.' 
%line)", "def preprocessFileList( filelist ):\n dirs = []\n if filelist is not None:\n for afile in filelist:\n with open(afile, \"r\") as f:\n tmp_dirs = f.read().split('\\n')\n dirs.extend(tmp_dirs)\n\n removeComments( dirs )\n return dirs", "def import_blog_list(list_file_path=\"tumblr_todo_list.txt\"):\n logging.debug(\"import_blog_list() list_file_path: \"+repr(list_file_path))\n # Make sure list file folder exists\n list_file_folder = os.path.dirname(list_file_path)\n if list_file_folder:\n if not os.path.exists(list_file_folder):\n os.makedirs(list_file_folder)\n # Create new empty list file if no list file exists\n if not os.path.exists(list_file_path):\n logging.debug(\"import_blog_list() Blog list file not found, creating it.\")\n new_file = open(list_file_path, \"w\")\n new_file.write('# Add one URL per line, comments start with a #, nothing but username on a line that isnt a comment\\n\\n')\n new_file.close()\n return []\n # Read each line from the list file and process it\n blog_urls = []\n list_file = open(list_file_path, \"rU\")\n line_counter = 0\n for line in list_file:\n line_counter += 1\n # Strip empty and comment lines\n if line[0] in [\"#\", \"\\r\", \"\\n\"]:\n continue\n else:\n cleaned_url = clean_list_line(line)\n if cleaned_url:\n blog_urls.append(cleaned_url+u\"\")\n else:\n logging.error(\"import_blog_list(): Cleaning line \"+repr(line_counter)+\" : \"+repr(line)+\"Failed!\")\n blog_urls = uniquify(blog_urls)\n logging.debug(\"import_blog_list() blog_urls: \"+repr(blog_urls))\n return blog_urls", "def set_files(self, file_list):\n\tif file_list==None: return []\n\timport types\n\tisString = isinstance(file_list, types.StringTypes) \n\tisList = isinstance(file_list, list) \n\tassert isString or isList, \"You should provide a list of files as list or as CVS string!\"\n\tif isList: return file_list\n\tif isString :\n\t import re\n\t file_list_converted = re.sub(r'\\s', '', file_list).split(',') #remove all whitespaces\n\t return file_list_converted", "def loadFileList(self):\r\n try:\r\n data = open(self.filelist_file, 'rb')\r\n except IOError:\r\n '''print \"No SRTM cached file list. Creating new one!\"'''\r\n if self.offline == 0:\r\n self.createFileList()\r\n return\r\n try:\r\n self.filelist = pickle.load(data)\r\n except:\r\n '''print \"Unknown error loading cached SRTM file list. 
Creating new one!\"'''\r\n if self.offline == 0:\r\n self.createFileList()", "def parse_req_file(req_file, verbatim=False):\n req_list = []\n requirements = req_file.readlines()\n for requirement in requirements:\n requirement_no_comments = requirement.split(\"#\")[0].strip()\n\n # if matching requirement line (Thing==1.2.3), update dict, continue\n req_match = re.match(\n r\"\\s*(?P<package>[^\\s\\[\\]]+)(?P<extras>\\[\\S+\\])?==(?P<version>\\S+)\",\n requirement_no_comments,\n )\n req_ignore = requirement.strip().endswith(\" # norot\")\n\n if req_match:\n req_list.append(\n (req_match.group(\"package\"), req_match.group(\"version\"), req_ignore)\n )\n elif requirement_no_comments.startswith(\"-r\"):\n try:\n base_dir = os.path.dirname(os.path.abspath(req_file.name))\n except AttributeError:\n print(\n \"Recursive requirements are not supported in URL based \" \"lookups\"\n )\n continue\n\n # replace the -r and ensure there are no leading spaces\n file_name = requirement_no_comments.replace(\"-r\", \"\").strip()\n new_path = os.path.join(base_dir, file_name)\n try:\n if verbatim:\n req_list.append((None, requirement, req_ignore))\n req_list.extend(parse_req_file(open(new_path), verbatim=verbatim))\n except IOError:\n print(\"Failed to import {}\".format(file_name))\n elif verbatim:\n req_list.append((None, requirement, req_ignore))\n return req_list", "def process_list(entity_list):\n LOGGING.info('Processing returned entities...')\n\n folder_list = []\n file_list = []\n\n for entity in entity_list:\n entity_parts = os.path.split(urlparse(entity).path)\n\n if entity_parts[1] == '':\n folder_list.append(entity)\n\n else:\n if any(entity_parts[1].endswith(ext) for ext in ALLOWED_EXTENSIONS):\n file_list.append(entity)\n\n return folder_list, file_list", "def _listfiles(hostname, auth, infile: str) -> bool:\n object_list = Manifest.load(Path(infile))\n if object_list is None:\n return False\n\n try:\n auth.get_access_token()\n except Gen3AuthError:\n logger.critical(f\"Unable to authenticate your credentials with {hostname}\")\n return False\n except requests.exceptions.RequestException as ex:\n logger.critical(\n f\"Unable to authenticate your credentials with {hostname}: {str(ex)}\"\n )\n return False\n\n DownloadManager(\n hostname=hostname, auth=auth, download_list=object_list, show_progress=True\n )\n\n for x in object_list:\n print(x.pprint())\n\n return True" ]
[ "0.62899894", "0.6053552", "0.58387065", "0.56976694", "0.55428076", "0.543702", "0.5347889", "0.53217775", "0.53147715", "0.5311632", "0.52500206", "0.5237415", "0.5211338", "0.52101344", "0.51486844", "0.51301765", "0.51095843", "0.50503826", "0.50044495", "0.49863774", "0.49506676", "0.49495998", "0.49169612", "0.4882201", "0.4861794", "0.4836828", "0.48304632", "0.48062825", "0.48049375", "0.47964862" ]
0.7423649
0
This function returns the price per kWh at APP
def abbott_elec(): per_kwh = 0.08 # [$/kWh] return per_kwh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_kwh_price(supplier_with_transaction):\n\n supplier_item = supplier_with_transaction.get('supplier_detail')\n total_kwh_price = 0\n if supplier_item.get('has_time_based_kwh') and supplier_item.get('time_price'):\n # start to compute as complex\n for rec in supplier_item.get('time_price'):\n if rec.get('hour_from') and rec.get('hour_to'):\n if rec.get('hour_from') > rec.get('hour_to'):\n duration = (rec.get('hour_to') - rec.get('hour_from')) * 60\n else:\n duration = (rec.get('hour_to') - (24 - rec.get('hour_from'))) * 60\n else:\n duration = 0\n total_kwh_price += duration * rec.get('kwh_price', 0)\n else:\n # start to calculate the simple version for kwh price\n total_kwh_price = 24 * supplier_item.get('kwh_price', 0)\n return total_kwh_price", "def solar_ppa():\n per_kwh = 0.196 # [$/kWh]\n\n return per_kwh", "def wind_ppa():\n per_kwh = 0.0384 # [$/kWh]\n\n return per_kwh", "def get_price(hours):\n price = round(hours * 5, 2)\n print(\"Total Price is $\", price)", "def get_price():\n return uniform(1.0, 350.0)", "def get_price():\n \n #Teacher's code. Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def get_price(self, request, pk):\n return Response('20$')", "def get_price(self, request, pk):\n return Response('20$')", "def get_price(self, spot, t = 0, k = 1):\n if k == 0:\n return self.fv * np.exp(- spot * (self.maturity - t))\n else:\n return self.fv / np.power(1 + spot / k, (self.maturity - t) * k)", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def calc_price(self):\n price = self.price\n action = self.action\n mortage = 5 # here set mortage multiplier \n\n if action == 'RESIDENTIAL_SALE':\n return price * 12 * mortage\n\n\n if price >= 10000:\n return price * 0.7\n elif price < 10000 & price >= 5000:\n return price * 0.55\n elif price < 5000 & price >= 2800:\n return price * 0.475\n else:\n return price * 0.4", "def BuyingPrice(self):\n return self.buying_rice", "def get_price(self):\r\n return self.price", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def getFactor(currency):", "def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)", "def get_stock_price(stock):\n pass", "def main(price, service, vat):\n service = (price * 10)/100\n if service < 50:\n service = 50\n elif service > 1000:\n service = 1000\n price += service\n vat = (price * 7)/100\n price += vat\n print(\"%.2f\" % (price))", "def option_price(self, K, payoff='Call'):\n return self.IV.price_from_vol(self.smile_func(K), self.f, K, self.T_expiry, payoff=payoff)", "def midprice(bid, ask):\n midprice = (bid + ask) / 2.0\n return midprice", "def check_price(self):\n return self.day*self.price", "def get_price(self):\n return self.price", "def get_price(self):\n return self.price", "def get_price(self):\n return self.price", "def 
best_price(self):\n # TODO rename this to \"display_lowest_price\" or something...\n price = self.lowest_price\n if price:\n return Decimal.quantize(price.per_kg, settings.DECIMAL_CENTS) # round to cents", "def findVWSP(self):\n num=0\n den=0\n ban=False\n for el in self.TL:\n if datetime.fromtimestamp(el.TS) > (datetime.now()-timedelta(minutes = 15)):\n ban=True\n num+=el.Price * el.NoSh\n den+= el.NoSh \n if ban:\n if den!=0:\n return num/den\n else:\n raise BaseException(\"Oops! the vwsp cannot be computed.\")\n else:\n return 0", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://api.kraken.com/0/public/Ticker\"\n requestUrl = uri + \"?pair=\" + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"result\"][pair][\"c\"]\n return currentPrice", "def get_risk_per_unit(price, sl_price):\n return abs(price - sl_price)" ]
[ "0.71423", "0.69435227", "0.6919334", "0.66548496", "0.663056", "0.6410288", "0.6407813", "0.63824", "0.6294701", "0.6294701", "0.6252796", "0.62478536", "0.6247632", "0.6194834", "0.6162884", "0.6131623", "0.61082387", "0.6062231", "0.6058075", "0.60528606", "0.6048763", "0.60449", "0.60140383", "0.6007634", "0.6007634", "0.6007634", "0.59926355", "0.59923315", "0.5988139", "0.5956997" ]
0.7098736
1
This function converts a mass flow rate of steam in klb/hr to energy in kWh. Known values are currently hard-coded.
def to_kwh(m): cp = 4243.5 # specific heat of water [J/ kg K] dT = 179 # change in steam temperature [deg C] h_in = 196 # inlet enthalpy [BTU/lb] h_out = 1368 # outlet enthalpy [BTU/lb] # times 0.29307107 to convert from BTU/hr to kilowatts kwh = (m * (h_out - h_in)) * 0.29307107 return kwh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mel2hz(mel):\n return 700 * np.exp(mel / 1127) - 700", "def mel2hz(mel):\n return 700 * (numpy.exp(mel/1127.0)-1)", "def convert_H_kJmol(en_H):\n return en_H/kJmol_H", "def convert_kJmol_H(en_kJmol):\n return en_kJmol*kJmol_H", "def mel2hz(mel):\n return 700. * (10**(mel/2595.0)-1)", "def mel2hz(mel):\n\treturn 700 * (10 ** (mel / 2595.0) - 1)", "def mel2hz(mel):\r\n return 700*(10**(mel/2595.0)-1)", "def mel2hz(mel):\n return 700*(10**(mel/2595.0)-1)", "def mel2hz(mel):\n return 700*(10**(mel/2595.0)-1)", "def ergToKkms(x, toErg=False, nu_or_lambda='nu'):\n # To W=Joule/s => Joule = 1e7 erg\n factor = 1\n #print value\n if nu_or_lambda == 'lambda':\n x = const.c / x\n # Conversion between erg/s/cm2/sr = 2k(CGS) nu^3/c(cm)^3 K km/s\n # k(CGS) is Boltzsmanns constant in units of the CGS, nu the frequency of\n # the measusrement\n # c(cm) is the speed of light in cm.\n # => to make the units fit we have to multiply by 1*km in cm -> 1e5\n # i.e. const.km_in_cm\n # converts from K - > ergs\n conversionFactor = (2 * const.k_CGS * x ** 3 * const.km_in_cm /\n (const.c_in_cm **3))\n factor = factor / conversionFactor\n if toErg == False:\n return factor\n if toErg == True:\n return 1 / factor", "def fahrenheitToKelvin(fahrenheit:float, ndigits = 2)->float:\n return round(((float(fahrenheit) - 32) * 5 / 9) + 273.5, ndigits)", "def khm_to_mph(speed_in_kph):\r\n # for wind speed, when I find a way to measure\r\n speed_in_mph = speed_in_kph * 0.621371\r\n return speed_in_mph", "def kelvin_to_fahrenheit(kelvin_temp):\n\n\treturn math.floor(9/5 * (kelvin_temp - 273) + 32)", "def k_Wa92(wind_second_moment, temp_C):\n\n U2 = wind_second_moment\n\n Sc = schmidt_number(temp_C)\n k = (0.31 * U2) * (660 / Sc) ** 0.5\n\n return k", "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh", "def KEtoSpeed(KE, mass):\n return 299792458*(1-(KE/mass+1)**-2)**.5", "def hz2mel(hz):\r\n return 2595 * np.log10(1+hz/700.0)", "def _mps_to_kph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"km/h\"\n self.value = ((self.value * 360) / 100).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def convert_kcalmol_H(en_kcalmol):\n return en_kcalmol*kcalmol_H", "def convert_H_kcalmol(en_H):\n return en_H/kcalmol_H", "def harmonicOscillator_heatCapacity(T, freq):\n x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n return x * x * exp_x / one_minus_exp_x / one_minus_exp_x", "def hz2mel(hz):\n return 2595 * np.log10(1+hz/700.)", "def convert_kJmol_eV(en_kJmol):\n return en_kJmol*kJmol_eV", "def convertCelsiusToKelv(C):\n if isinstance(C, str) == True:\n raise ValueError(\"Celsius cannot be a string value\")\n if isinstance(C,complex) == True:\n raise ValueError(\"Celsius cannot be a complex value\")\n if isinstance(C,int) == True:\n raise ValueError(\"Celsius should be a float value, example: 40.00\")\n \n \n K = C + 273.15\n return K", "def hz2mel(hz):\n\treturn 2595 * numpy.log10(1 + hz / 700.0)", "def KH_timescale(M_gas, r_o, rho_halo, v_dwarf,\n T_halo, T_dwarf, n_halo=None, mu_halo=0.61,mu_dwarf=1.31,\n gamma=5.0/3.0):\n if n_halo is not None:\n rho_halo = n_halo *cgs.mp * mu_halo\n\n# M_rate = np.pi * r_o**2 * rho_halo * v_dwarf\n cs_dwarf = np.sqrt(gamma * cgs.kb * T_dwarf / (cgs.mp*mu_dwarf)) \n cs_halo = np.sqrt(gamma * cgs.kb * T_halo / (cgs.mp*mu_halo ))\n\n M_rate = np.pi * r_o**2 * rho_halo * v_dwarf * 
(cs_dwarf/cs_halo)\n\n return M_gas / M_rate", "def convertToKg(lbs):\n return lbs*0.45359237", "def hz2mel(hz):\n return 1127 * np.log(1 + hz / 700)", "def _hz_to_mel(freq: float, mel_scale: str = \"htk\") -> float:\n if mel_scale not in ['slaney', 'htk']:\n raise ValueError('mel_scale should be one of \"htk\" or \"slaney\".')\n\n if mel_scale == \"htk\":\n return 2595.0 * math.log10(1.0 + (freq / 700.0))\n\n # Fill in the linear part\n f_min = 0.0\n f_sp = 200.0 / 3\n\n mels = (freq - f_min) / f_sp\n min_log_hz = 1000.0\n min_log_mel = (min_log_hz - f_min) / f_sp\n logstep = math.log(6.4) / 27.0\n\n if freq >= min_log_hz:\n mels = min_log_mel + math.log(freq / min_log_hz) / logstep\n return mels", "def convert_eV_kJmol(en_eV):\n return en_eV/kJmol_eV" ]
[ "0.65070075", "0.647109", "0.6463585", "0.64307433", "0.6367391", "0.6315835", "0.6300439", "0.62821877", "0.62821877", "0.62517065", "0.62285256", "0.6186239", "0.61541593", "0.615211", "0.6126979", "0.612365", "0.612063", "0.61140716", "0.61104786", "0.6107376", "0.6066106", "0.6057892", "0.60486555", "0.6044647", "0.60414606", "0.6031954", "0.6002921", "0.5995037", "0.59849435", "0.59789854" ]
0.725068
0
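A quick numeric check of the conversion described in the row above; the 10 klb/hr flow is an assumed example value, and the enthalpies and unit factor are the hard-coded values from the snippet:

```python
# Worked check of the steam-to-energy conversion above.
# Assumed input: 10 klb/hr of steam; enthalpies are the hard-coded values
# from the snippet (h_in = 196 BTU/lb, h_out = 1368 BTU/lb).
m_klb_per_hr = 10
delta_h_btu_per_lb = 1368 - 196                    # enthalpy rise = 1172 BTU/lb
kbtu_per_hr = m_klb_per_hr * delta_h_btu_per_lb    # klb/hr * BTU/lb -> kBTU/hr
kw = kbtu_per_hr * 0.29307107                      # 1 kBTU/hr = 0.29307107 kW
print(round(kw, 1))                                # ~3434.8 kW, i.e. ~3434.8 kWh per hour of flow
```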
This function returns the price per kWh of electricity produced by solar power at the UIUC solar farm.
def solar_ppa(): per_kwh = 0.196 # [$/kWh] return per_kwh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh", "def compute_kwh_price(supplier_with_transaction):\n\n supplier_item = supplier_with_transaction.get('supplier_detail')\n total_kwh_price = 0\n if supplier_item.get('has_time_based_kwh') and supplier_item.get('time_price'):\n # start to compute as complex\n for rec in supplier_item.get('time_price'):\n if rec.get('hour_from') and rec.get('hour_to'):\n if rec.get('hour_from') > rec.get('hour_to'):\n duration = (rec.get('hour_to') - rec.get('hour_from')) * 60\n else:\n duration = (rec.get('hour_to') - (24 - rec.get('hour_from'))) * 60\n else:\n duration = 0\n total_kwh_price += duration * rec.get('kwh_price', 0)\n else:\n # start to calculate the simple version for kwh price\n total_kwh_price = 24 * supplier_item.get('kwh_price', 0)\n return total_kwh_price", "def wind_ppa():\n per_kwh = 0.0384 # [$/kWh]\n\n return per_kwh", "def get_price():\n return uniform(1.0, 350.0)", "def get_kwh(self):\n\n svc = \"urn:micasaverde-com:serviceId:EnergyMetering1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"KWH\")", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def findAShin(self):\n #return reduce(lambda x, y: x*y, [self.DoS[key].get_price() for key in self.DoS] )\n a = array([self.DoS[key].get_Price() for key in self.DoS])\n return a.prod()**(1.0/len(a))", "def _dynamic_price(self):\n adjust = PriceAdjustmentCalc(self)\n signals.satchmo_price_query.send(self, adjustment=adjust,\n slug=self.product.slug, discountable=self.product.is_discountable)\n return adjust.final_price()", "def CalculateTimeFrameElectricEneregyCost(self, kwh:float, dollarsPerKiloWattHour = 0.1149):\n\t\t\n\t\treturn kwh * dollarsPerKiloWattHour", "def calc_boiler_const(Q_load_Wh, thermal_efficiency):\n Q_fuel_Wh = Q_load_Wh / thermal_efficiency\n Q_losses_Wh = Q_fuel_Wh - Q_load_Wh\n\n return Q_fuel_Wh, Q_losses_Wh", "def energy_yield(self):\n return self['kwh_per_kw']", "def assigned_service_kW(self):\n return self.service_weight*self.fleet_rating", "def desired_price(self):\n return self._desired_price", "def CalculateElectricEneregyCost(self, dollarsPerKiloWattHour = 0.1149):\n\t\telectricKWHs = self.building_hvac.GetElectricKilowattHours()\n\t\t# get the cost per kwh\n\t\treturn electricKWHs * dollarsPerKiloWattHour", "def GetKelvin(self):\n return self.GetCelcius() + 273.15", "def energyK(k):\r\n C1 = 9.7846113e-07\r\n C2 = 12.263868e0 \r\n E = (-1.0 + np.sqrt(1.0 + 4.0 * C1 * C2**2 * k**2))/(2.0 * C1)\r\n return E", "def BuyingPrice(self):\n return self.buying_rice", "def calculate_ttw_energy(self) -> None:\n\n self.energy = self.ecm.motive_energy_per_km(\n driving_mass=self[\"driving mass\"],\n rr_coef=self[\"rolling resistance coefficient\"],\n drag_coef=self[\"aerodynamic drag coefficient\"],\n frontal_area=self[\"frontal area\"],\n electric_motor_power=self[\"electric power\"],\n engine_power=self[\"power\"],\n recuperation_efficiency=self[\"recuperation efficiency\"],\n aux_power=self[\"auxiliary power demand\"],\n battery_charge_eff=self[\"battery charge efficiency\"],\n battery_discharge_eff=self[\"battery discharge efficiency\"],\n 
fuel_cell_system_efficiency=self[\"fuel cell system efficiency\"],\n )\n\n self.energy = self.energy.assign_coords(\n {\n \"powertrain\": self.array.powertrain,\n \"year\": self.array.year,\n \"size\": self.array.coords[\"size\"],\n \"value\": self.array.coords[\"value\"],\n }\n )\n\n if self.energy_consumption:\n self.override_ttw_energy()\n\n distance = self.energy.sel(parameter=\"velocity\").sum(dim=\"second\") / 1000\n\n self[\"engine efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"engine efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n _o = lambda x: np.where((x == 0) | (x == np.nan), 1, x)\n\n if self.engine_efficiency is not None:\n print(\"Engine efficiency is being overridden.\")\n for key, val in self.engine_efficiency.items():\n pwt, size, year = key\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"transmission efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"transmission efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n if self.transmission_efficiency is not None:\n print(\"Transmission efficiency is being overridden.\")\n for key, val in self.transmission_efficiency.items():\n pwt, size, year = key\n\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"TtW energy\"] = (\n self.energy.sel(\n parameter=[\"motive energy\", \"auxiliary energy\", \"recuperated energy\"]\n ).sum(dim=[\"second\", \"parameter\"])\n / distance\n ).T\n\n 
self[\"TtW energy, combustion mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] > 0\n )\n self[\"TtW energy, electric mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] == 0\n )\n\n self[\"auxiliary energy\"] = (\n self.energy.sel(parameter=\"auxiliary energy\").sum(dim=\"second\") / distance\n ).T", "def Kepsilon(self):\n kE = 2 + 0.1024 / self.r + (0.1124 + 0.1265 * radians(self.sweep25W) + 0.1766 * radians(self.sweep25W)**2) / \\\n (self.r**2)\n kE0 = 2 + 0.1024 / self.r + 0.1124 / (self.r**2)\n return kE / kE0", "def get_price(self):\r\n return self.price", "def get_risk_per_unit(price, sl_price):\n return abs(price - sl_price)", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * (linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def buildEnergyUIValue(self, kwh):\n\n if kwh < 0.01:\n uiValue = '{:.4f} kWh'.format(kwh)\n elif kwh < 1:\n uiValue = '{:.3f} kWh'.format(kwh)\n elif kwh < 10:\n uiValue = '{:.2f} kWh'.format(kwh)\n else:\n uiValue = '{:.1f} kWh'.format(kwh)\n\n return uiValue", "def Keldysh_Rate(Uion,Z,E):\n\tans = np.sqrt(6.0*np.pi)/4.0\n\tans *= Uion * np.sqrt(E/(Uion**1.5))\n\tans *= np.exp(-(4.0/3.0)*np.sqrt(2.0)*(Uion**1.5)/E)\n\treturn ans", "def comptcptotalquantum(self) :\n\t\ttry :\n\t\t\treturn self._comptcptotalquantum\n\t\texcept Exception as e:\n\t\t\traise e", "def standard_init_price(self):\n # If a system can't use something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tu and _good.name not in 'fuel':\n base_price = 0\n else:\n base_price = _good.plt + (self.planet.tech_level * _good.pi)\n # if good is highly requested, increase the price\n if self.planet.status in [_good.dps]:\n base_price = base_price + (base_price * 0.5)\n # large system: high production decreases prices\n base_price = (base_price * (100 - self.planet.system_size)) / 
100\n\n # price can't be negative\n if base_price < 0:\n base_price = 0\n\n return int(base_price)", "def calc_h_sen(dry_bulb_C):\n\n h_kJ_kg = dry_bulb_C * CPA_kJ_kgC\n\n return h_kJ_kg", "def get_price(self, spot, t = 0, k = 1):\n if k == 0:\n return self.fv * np.exp(- spot * (self.maturity - t))\n else:\n return self.fv / np.power(1 + spot / k, (self.maturity - t) * k)", "def get_price():\n \n #Teacher's code. Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)" ]
[ "0.70530295", "0.6877747", "0.6414504", "0.6401159", "0.63494396", "0.63399446", "0.6335698", "0.6288253", "0.62364966", "0.62210023", "0.6209003", "0.6197808", "0.61296284", "0.61269224", "0.60681945", "0.60333014", "0.6009466", "0.6003833", "0.59978616", "0.5968576", "0.5966878", "0.5950441", "0.5950213", "0.59490836", "0.592957", "0.5924351", "0.59194547", "0.5908669", "0.589719", "0.5871358" ]
0.7044015
1
Returns the layout specified as the class attribute default_layout. Override this method to provide more complex behavior.
def get_layout(self): return self._layout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def layoutDefault(self): # real signature unknown; restored from __doc__\n pass", "def layout(self) -> str:\n return self._layout", "def layout(self):\n return self._layout_manager", "def currentLayout( self ):\n return self._current_layout_name", "def getLayout(self, *args):\n return _libsbml.LayoutModelPlugin_getLayout(self, *args)", "def app_layout(self):\n return self.pt_app.layout", "def getDefaultLevel():\n return _libsbml.LayoutExtension_getDefaultLevel()", "def get_current_layout(self):\n # ported from the widgets/src/LayoutIndicator.c code\n\n self._engine.start_listen(Xkl.EngineListenModes.TRACK_KEYBOARD_STATE)\n state = self._engine.get_current_state()\n cur_group = state.group\n num_groups = self._engine.get_num_groups()\n\n # BUG?: if the last layout in the list is activated and removed,\n # state.group may be equal to n_groups\n if cur_group >= num_groups:\n cur_group = num_groups - 1\n\n # pylint: disable=unsubscriptable-object\n layout = self._rec.layouts[cur_group]\n try:\n # pylint: disable=unsubscriptable-object\n variant = self._rec.variants[cur_group]\n except IndexError:\n # X server may have forgotten to add the \"\" variant for its default layout\n variant = \"\"\n\n self._engine.stop_listen(Xkl.EngineListenModes.TRACK_KEYBOARD_STATE)\n\n return join_layout_variant(layout, variant)", "def LayoutExtension_getDefaultLevel():\n return _libsbml.LayoutExtension_getDefaultLevel()", "def defaultLayout():\n return ['OverlayDisplayToolBar',\n 'OrthoToolBar',\n 'OverlayListPanel',\n 'LocationPanel']", "def LayoutExtension_getDefaultVersion():\n return _libsbml.LayoutExtension_getDefaultVersion()", "def get_graph_layout(self, graph_id, layout_id):\n\n\t\tresponse = self._make_request(\"GET\", '/api/v1/graphs/%s/layouts/%s' % (graph_id, layout_id)).json()\n\t\treturn None if 'id' not in response else response", "def getDefaultVersion():\n return _libsbml.LayoutExtension_getDefaultVersion()", "def initDefaults(self):\n return _libsbml.Layout_initDefaults(self)", "def _getWidgetLayout(self, section, widget):\n if not self.parser.has_option(section, \"layout\"):\n self.logger.debug(\"No layout section: %s\" % widget)\n return None\n # This is a parent widget\n layout = self.parser.get(section, \"layout\")\n layout = layout.strip()\n if layout.lower().startswith('v'):\n self.logger.debug(\"Layout value: 'Vertical'\")\n widgetLayout = qt.QVBoxLayout(widget)\n elif layout.lower().startswith('h'):\n self.logger.debug(\"Layout value: 'Horizontal'\")\n widgetLayout = qt.QHBoxLayout(widget)\n elif layout.lower().startswith('f'):\n self.logger.debug(\"Layout value: 'Form'\")\n widgetLayout = qt.QFormLayout(widget)\n else:\n raise Exception(\"Layout value is unrecognized:\", layout)\n self.logger.debug(\"widget layout: %s\" % widgetLayout)\n return widgetLayout", "def createLayout(self):\n return _libsbml.LayoutModelPlugin_createLayout(self)", "def getDefaultPackageVersion():\n return _libsbml.LayoutExtension_getDefaultPackageVersion()", "def LayoutExtension_getDefaultPackageVersion():\n return _libsbml.LayoutExtension_getDefaultPackageVersion()", "def retrieve_graph(self):\n\n g = self.g\n\n if 'grid' in g['name']:\n my_layout = g.layout(\"grid\")\n else:\n my_layout = g.layout(\"kk\")\n\n return g, my_layout", "def use(self, layout):\n self._wid.setLayout(layout)\n return layout", "def layout(self):\n pass", "def get_layout_validator(self):\n if self._layout_validator is None:\n self._compute_layout_validator()\n return self._layout_validator", "def get_default(self):\n\n\t\treturn 
self.__default", "def _get_layout(panels, layout=None):\n if layout is None:\n layout = (-1, 2)\n if len(layout) != 2:\n raise ValueError(\"layout should have two elements\")\n if layout[0] < 0 and layout[1] < 0:\n raise ValueError(\"At least one dimension of layout must be positive\")\n if layout[0] < 0:\n layout = (int(np.ceil(panels / layout[1])), layout[1])\n if layout[1] < 0:\n layout = (layout[0], int(np.ceil(panels / layout[0])))\n if panels > layout[0] * layout[1]:\n raise ValueError(f\"layout {layout[0]}x{layout[1]} must be larger than {panels}\")\n return layout", "def test_create_default(self):\n layout = Layout()\n self.assertEqual(layout.width, 24)\n self.assertEqual(layout.height, 5)", "def getSourceLayout(self):\n #\n # TODO: Implement this for your convenience. Example:\n #\n # return [('src/yourClientBinary', 'yourClientBinary')]\n #\n # For more extensive clients:\n #\n # return [('src/executableFile', 'executableFile'),\n # ('peerlists/alllists/most_recent', 'data/peerlist'),\n # ('po/english.po', 'data/translationfile')]\n #\n # Note that for each entry in getBinaryLayout that is not a directory, exactly one entry must be present in getSourceLayout.\n # Also note that each entry in getSourceLayout corresponds to exactly one entry in getBinaryLayout.\n # This means, in particular, that if self.getBinaryLayout() == None then also self.getSourceLayout() == None.\n #\n # If your sources compile nicely in-place, be sure to fill this in, anyway. Something like:\n #\n # return [('yourClientBinary', 'yourClientBinary')]\n #\n return None", "def layout_method_mapper(self):\n return {\n \"kamada_kawai_layout\": kamada_kawai_layout,\n \"fruchterman_reingold_layout\": fruchterman_reingold_layout,\n \"spectral_layout\": spectral_layout,\n }", "def create_layout( self ):", "def _generate_layout(self):\n\n pass", "def get_class_layout(cls):\n\n return get_class_definitions(cls)" ]
[ "0.7747452", "0.69015557", "0.6825233", "0.66689813", "0.66203535", "0.6539448", "0.6419217", "0.6396516", "0.63342154", "0.63152045", "0.62654394", "0.62495625", "0.62386113", "0.6141546", "0.6068759", "0.6058207", "0.602838", "0.59690404", "0.5852301", "0.5848287", "0.58481044", "0.5796081", "0.5790843", "0.576477", "0.57427895", "0.5706422", "0.5677734", "0.5674117", "0.56509644", "0.5609573" ]
0.73576134
1
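The row above describes a simple template-method pattern; the sketch below is a hypothetical illustration (the class names and the "two_column" default are invented) of a class-level default_layout returned by an overridable get_layout():

```python
# Minimal sketch of the pattern described above: get_layout() returns the
# class attribute default_layout unless a subclass overrides the method.
class BaseView:
    default_layout = "two_column"   # hypothetical default

    def get_layout(self):
        return self.default_layout


class CampaignView(BaseView):
    def get_layout(self):
        # override to provide more complex behavior
        return "hero_banner" if self.has_active_campaign() else super().get_layout()

    def has_active_campaign(self):
        return False  # placeholder
```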
FindRoutes determines the shortest paths to visit the input stops and returns the driving directions, information about the visited stops, and the route paths, including travel time and distance. The tool is capable of finding routes that visit several input stops in a sequence you predetermine or in the sequence that minimizes overall travel. You can group the input stops into different routes using the RouteName field, and the tool will output one route for each group of stops, allowing you to generate routes for many vehicles in a single solve operation. When using FindRoutes to route multiple vehicles, you need to assign stops to routes before solving. If you need a tool that determines the best way to divide stops among different vehicles, and then route the vehicles, use the SolveVehicleRoutingProblem tool instead.
def find_routes( stops, measurement_units = """Minutes""", analysis_region = None, reorder_stops_to_find_optimal_routes = False, preserve_terminal_stops = """Preserve First""", return_to_start = False, use_time_windows = False, time_of_day = None, time_zone_for_time_of_day = """Geographically Local""", uturn_at_junctions = """Allowed Only at Intersections and Dead Ends""", point_barriers = None, line_barriers = None, polygon_barriers = None, use_hierarchy = True, restrictions = None, attribute_parameter_values = None, route_shape = """True Shape""", route_line_simplification_tolerance = None, populate_route_edges = False, populate_directions = True, directions_language = """en""", directions_distance_units = """Miles""", directions_style_name = """NA Desktop""", travel_mode = """Custom""", impedance = """Drive Time""", gis = None): kwargs = locals() if stops is None: stops = default_stops if point_barriers is None: point_barriers = default_point_barriers if line_barriers is None: line_barriers = default_line_barriers if polygon_barriers is None: polygon_barriers = default_polygon_barriers if restrictions is None: restrictions = default_restrictions if attribute_parameter_values is None: attribute_parameter_values = default_attributes if route_line_simplification_tolerance is None: route_line_simplification_tolerance = default_tolerance param_db = { "stops": (FeatureSet, "Stops"), "measurement_units": (str, "Measurement_Units"), "analysis_region": (str, "Analysis_Region"), "reorder_stops_to_find_optimal_routes": (bool, "Reorder_Stops_to_Find_Optimal_Routes"), "preserve_terminal_stops": (str, "Preserve_Terminal_Stops"), "return_to_start": (bool, "Return_to_Start"), "use_time_windows": (bool, "Use_Time_Windows"), "time_of_day": (datetime, "Time_of_Day"), "time_zone_for_time_of_day": (str, "Time_Zone_for_Time_of_Day"), "uturn_at_junctions": (str, "UTurn_at_Junctions"), "point_barriers": (FeatureSet, "Point_Barriers"), "line_barriers": (FeatureSet, "Line_Barriers"), "polygon_barriers": (FeatureSet, "Polygon_Barriers"), "use_hierarchy": (bool, "Use_Hierarchy"), "restrictions": (str, "Restrictions"), "attribute_parameter_values": (FeatureSet, "Attribute_Parameter_Values"), "route_shape": (str, "Route_Shape"), "route_line_simplification_tolerance": (LinearUnit, "Route_Line_Simplification_Tolerance"), "populate_route_edges": (bool, "Populate_Route_Edges"), "populate_directions": (bool, "Populate_Directions"), "directions_language": (str, "Directions_Language"), "directions_distance_units": (str, "Directions_Distance_Units"), "directions_style_name": (str, "Directions_Style_Name"), "travel_mode": (str, "Travel_Mode"), "impedance": (str, "Impedance"), "solve_succeeded": (bool, "Solve Succeeded"), "output_routes": (FeatureSet, "Output Routes"), "output_route_edges": (FeatureSet, "Output Route Edges"), "output_directions": (FeatureSet, "Output Directions"), "output_stops": (FeatureSet, "Output Stops"), } return_values = [ {"name": "solve_succeeded", "display_name": "Solve Succeeded", "type": bool}, {"name": "output_routes", "display_name": "Output Routes", "type": FeatureSet}, {"name": "output_route_edges", "display_name": "Output Route Edges", "type": FeatureSet}, {"name": "output_directions", "display_name": "Output Directions", "type": FeatureSet}, {"name": "output_stops", "display_name": "Output Stops", "type": FeatureSet}, ] if gis is None: gis = arcgis.env.active_gis url = gis.properties.helperServices.asyncRoute.url return _execute_gp_tool(gis, "FindRoutes", kwargs, param_db, return_values, 
_use_async, url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops", "def test_parse_routes(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n problem = problem_builder.build(riders, vehicles, depots)\n model = model_builder.build(problem)\n solution = model.solve()\n routes = Router._parse_routes(problem, solution)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def test_route(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n router = Router(\n problem_builder=problem_builder,\n optimization_model_builder=model_builder\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = 
parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n routes = router.route(riders, vehicles, depots)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate", "def get_routes(solution, routing, manager):\n # Get vehicle routes and store them in a two dimensional array whose\n # i,j entry is the jth location visited by vehicle i along its route.\n routes = []\n for route_nbr in range(routing.vehicles()):\n index = routing.Start(route_nbr)\n route = [manager.IndexToNode(index)]\n while not routing.IsEnd(index):\n index = solution.Value(routing.NextVar(index))\n route.append(manager.IndexToNode(index))\n routes.append(route)\n return routes", "def print_routes(num_vehicles, locations, routing, assignment):\n total_dist = 0\n\n for vehicle_id in range(num_vehicles):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {0}:\\n'.format(vehicle_id)\n route_dist = 0\n\n while not routing.IsEnd(index):\n node = routing.IndexToNode(index)\n next_node = routing.IndexToNode(\n assignment.Value(routing.NextVar(index)))\n route_dist += haversine(\n locations[node],\n locations[next_node])\n plan_output += ' {node} -> '.format(\n node=node)\n index = assignment.Value(routing.NextVar(index))\n\n node = routing.IndexToNode(index)\n total_dist += route_dist\n plan_output += ' {node}\\n'.format(\n node=node)\n plan_output += 'Distance of route {0}: {dist}\\n'.format(\n vehicle_id,\n dist=route_dist)\n print(plan_output)\n print('Total distance of all routes: {dist}'.format(dist=total_dist))", "def stops_on_routes():\n routes = ['15', '46A', '14', '41B', '39A', '65', '40D', '11', '31', '27', '67', '79', '42', '66A', '33B', '140', '44', '83A', '27B', '38', '16C', '747', '41C', '39', '25', '239', 
'43', '70', '13', '150', '145', '77A', '184', '84', '61', '83', '40', '66', '15A', '123', '17A', '16', '14C', '9', '4', '37', '32', '33', '49', '56A', '151', '25A', '45A', '54A', '47', '18', '7', '17', '102', '120', '65B', '41', '122', '29A', '76', '68', '59', '25B', '69', '27A', '66B', '38B', '7D', '75', '15B', '84A', '63', '84X', '33X', '68A', '1', '76A', '7B', '270', '236', '130', '238', '220', '44B', '40B', '26', '32B', '8', '41A', '53', '67X', '104', '32A', '79A', '114', '185', '66X', '31B', '32X', '51X', '51D', '41X', '142', '111', '69X', '27X', '116', '46E', '161', '118', '25X', '38A', '33A', '31A']\n routes_and_stops={}\n for route in routes:\n routes_and_stops[route]=[] #new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route=extract_bus_route(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops", "def get_routes():\n # get from cache if it exists\n routes = cache.get(\"routes\")\n if routes:\n return routes\n\n trips_url = \"https://data.edmonton.ca/api/views/ctwr-tvrd/rows.json?accessType=DOWNLOAD\"\n bus_heading_url = \"https://data.edmonton.ca/resource/atvz-ppyb.json\"\n\n trips_response = requests.get(trips_url)\n bus_heading_response = requests.get(bus_heading_url)\n\n if trips_response.status_code == 200 and bus_heading_response.status_code == 200:\n trips = trips_response.json()\n headings = bus_heading_response.json()\n\n bus_to_headings = {}\n trip_to_bus = {}\n\n for heading in headings:\n if \"route_long_name\" in heading:\n bus_to_headings[heading[\"route_id\"]] = heading[\"route_long_name\"]\n\n for item in trips[\"data\"]:\n trip_id = item[-4]\n bus_number = item[-6]\n if bus_number in bus_to_headings:\n bus_heading = bus_to_headings[bus_number]\n trip_to_bus[trip_id] = [bus_number, bus_heading]\n \n # store the routes in the cache for five minutes\n cache.set(\"routes\", trip_to_bus, timeout=5*60) \n return trip_to_bus", "def solve(\n self,\n initial_routes=None,\n solver=\"cbc\",\n cspy=False,\n exact=True,\n pricing_strategy=\"PrunePaths\",\n ):\n if cspy:\n self.G.graph[\"subproblem\"] = \"cspy\"\n else:\n self.G.graph[\"subproblem\"] = \"lp\"\n print(self.G.graph[\"name\"], self.G.graph[\"subproblem\"])\n print(\"===========\")\n prob = VehicleRoutingProblem(\n self.G,\n duration=self.max_duration,\n load_capacity=self.max_load,\n drop_penalty=self.penalty,\n pickup_delivery=self.activate_pickup_delivery,\n distribution_collection=self.activate_distribution_collection,\n time_windows=self.activate_time_windows,\n )\n prob.solve(\n initial_routes=initial_routes,\n cspy=cspy,\n exact=exact,\n pricing_strategy=pricing_strategy,\n solver=solver,\n )\n self.best_value, self.best_routes = prob.best_value, prob._best_routes_as_graphs\n self.best_routes_nodes = prob.best_routes", "def routes(self) -> pulumi.Output[Sequence['outputs.RouteTableRoute']]:\n return pulumi.get(self, \"routes\")", "def buildStopsDict(self):\n \n if len(self.nodesDict) == 0:\n raise Exception('Nodes dictionary is empty!')\n if len(self.linksDict) == 0:\n raise Exception('Links dictionary is empty!')\n \n self.stopsByRoute = dict()\n self.stopsByNode = dict()\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n \n tempStops = \"temp_stops\"\n tempStopsSp = \"temp_stops_sp\"\n \n # Delete temp_stops and temp_stops_sp feature classes if they exist.\n if arcpy.Exists(tempStops):\n 
arcpy.Delete_management(tempStops)\n if arcpy.Exists(tempStopsSp):\n arcpy.Delete_management(tempStopsSp)\n arcpy.CopyFeatures_management(PublicTransit.RTD_PATH + PublicTransit.RTD_STOPS,\n tempStops)\n \n # Project temp_stops to CA state plane and add XY.\n install_dir = arcpy.GetInstallInfo()['InstallDir']\n out_coordinate_system = os.path.join(install_dir, PublicTransit.NAD_83_DIRECTORY)\n arcpy.Project_management(tempStops, tempStopsSp, out_coordinate_system,\n \"NAD_1983_To_WGS_1984_1\")\n arcpy.AddXY_management(tempStopsSp)\n \n # Create a search cursor to traverse all stops.\n stops = arcpy.SearchCursor(tempStopsSp, \"\", \"\",\n \"CPT_STOPPOINTID; SCH_STOPPOINTSEQNO; \" +\n \"SCH_ROUTEID; SCH_PATTERNID; ROUTE_PATTERN; \" +\n \"SourceOID; POINT_X; POINT_Y\",\n \"ROUTE_PATTERN A; SCH_STOPPOINTSEQNO A\")\n numStops = int(arcpy.GetCount_management(tempStopsSp).getOutput(0))\n print \"Found %d stops\" % numStops\n \n p = index.Property()\n p.overwrite = True\n self.spIndex = index.Index(PublicTransit.SPATIAL_INDEX_FILE,properties=p)\n \n # For each stop determine the nearest network node.\n scount = 0\n icount = 0\n for s in stops:\n # only create stops for routes which exist in RTD\n if not s.ROUTE_PATTERN in self.transitRoutes:\n continue\n scount += 1\n st = TransitStop(s.CPT_STOPPOINTID, s.SCH_ROUTEID, s.SCH_PATTERNID,\n s.ROUTE_PATTERN, s.SourceOID, s.SCH_STOPPOINTSEQNO)\n # If the stop's linkId is in the links dictionary use the link from\n # and to node (these should all be bus routes since MTC's route\n # traversal FC was created for buses only at this time).\n if s.SourceOID in self.linksDict:\n link = self.linksDict[s.SourceOID]\n # Determine which node is nearest and snap to it.\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.fromNode.x,\n link.fromNode.y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.toNode.x,\n link.toNode.y):\n st.tanaNode = link.fromNode.nodeId\n else:\n st.tanaNode = link.toNode.nodeId\n st.inRegion = True\n \n # The stop's link is not in linksDict. These are either stops \n # outside the region or non-bus routes for which there are no\n # route traversal edges. 
Do a link lookup from the Roadways\n # feature class.\n else:\n arcpy.env.workspace = PublicTransit.RTD_PATH\n roadwaysSearch = arcpy.SearchCursor(PublicTransit.ROADWAYS_FC,\n \"LinkId = \" + str(s.SourceOID),\n \"\", \"\", \"F_JNCTID; T_JNCTID\", \"\")\n for r in roadwaysSearch:\n fromNode = self.__getIdHash(r.F_JNCTID)\n toNode = self.__getIdHash(r.T_JNCTID)\n if fromNode in self.nodesDict and toNode in self.nodesDict:\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[fromNode].x,\n self.nodesDict[fromNode].y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[toNode].x,\n self.nodesDict[toNode].y):\n st.tanaNode = fromNode\n else:\n st.tanaNode = toNode\n st.inRegion = True\n else:\n st.inRegion = False\n \n # Add the stop to stopsByRoute and stopsByNode dictionaries\n if s.ROUTE_PATTERN in self.stopsByRoute:\n self.stopsByRoute[s.ROUTE_PATTERN].append(st)\n else:\n self.stopsByRoute[s.ROUTE_PATTERN] = [st]\n if (st.tanaNode in self.stopsByNode):\n self.stopsByNode[st.tanaNode].append(st)\n else:\n self.stopsByNode[st.tanaNode] = [st]\n # add the stop node to the spatial index\n if st.tanaNode in self.nodesDict:\n icount += 1\n self.spIndex.insert(st.stopPointId,\n (self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y,\n self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y))\n del stops", "def print_stops_for_route(route_id: str) -> None:\n mbta = MBTA(config.CT_MBTA_API_KEY)\n try:\n stops = mbta.get_stops_for_route(route_id)\n except MBTAEmptyResult:\n print(f\"Route '{route_id}' returned no results\")\n return\n title_text = f\"Stops for '{route_id}'\"\n print(f\"{title_text:=^80}\")\n for stop in stops:\n print(f\"ID: {stop['id']}, NAME: {stop['attributes']['name']}\")\n return", "def lookup_routes(self, daddr):\n outroutes = []\n binary_of_dest = self.ip_to_binary(daddr)\n best_cidr = float('-inf')\n\n for r in self.routes:\n # convert network and netmask to binary for longest prefix matching\n binary_of_network = self.ip_to_binary(r[MESG][NTWK])\n cidr_of_netmask = self.ip_to_binary(r[MESG][NMSK]).count('1')\n # use subnet mask to get the prefix\n dst = binary_of_dest[:cidr_of_netmask]\n ntk = binary_of_network[:cidr_of_netmask]\n # matching prefixes?\n if dst == ntk:\n # found better match. clear and start over with just this route\n if best_cidr < cidr_of_netmask:\n best_cidr = cidr_of_netmask\n outroutes.clear()\n outroutes.append(r)\n # 1 to 1 match, add route to list\n if best_cidr == cidr_of_netmask:\n outroutes.append(r)\n\n return outroutes", "def best_routes(self) -> Sequence['outputs.GetRouterStatusBestRouteResult']:\n return pulumi.get(self, \"best_routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GatewayApiRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes_with_criteria(self, src, target, criteria):\n\n # BFS\n routes = []\n q = deque() # <- [ ... 
] <-\n stops = 0\n distance = 0 # not true for this app, but it works out in the conditional check\n q.append((src, stops, distance, [src]))\n\n while q:\n # this city, stops to this city, distance to this city, route to this city\n city, stops, distance, route = q.popleft()\n if target == city and distance: # no self-loops!\n r = list(route)\n routes.append(r)\n for dest, cost in self.G[city].items():\n if criteria(stops + 1, distance + cost):\n new_route = list(route)\n new_route.append(dest)\n q.append((dest, stops + 1, distance + cost, new_route))\n return routes", "def findRoute(self, returnNonSelection=False):\n \n # pick the start and end GPS points # TODO: sort GPS Points first\n start_point = self.gps_points[0]\n end_point = self.gps_points[-1]\n \n start_node = self.getNearestNode(start_point)\n end_node = self.getNearestNode(end_point)\n \n # the start and endnodes returnes by the index are not in the graph, \n # therefore we need to look them up ....\n \n start_node = self.node_counter__node.get(start_node.getAttributes().get(\"nodecounter\"))\n end_node = self.node_counter__node.get(end_node.getAttributes().get(\"nodecounter\"))\n \n self.routfinder = RouteFinder(self.G)\n label_list = self.routefinder.findroutes(start_node, end_node)\n\n label_scores = []\n \n \n \n # let us loop through the label list \n for label in label_list:\n number_of_points = 0\n # we sum up the number of points and relate them to the length of the route\n print label\n \n for edge in label.getEdges():\n\n edge_id = edge.getAttributes().get(self.shapeFileUniqueId)\n number_of_points = number_of_points + self.edge_id__count.get(edge_id, 0)\n print \" \", number_of_points\n #we add the scores to a dict\n \n if number_of_points > 1:\n label_scores.append((label, number_of_points/label.getLength()))\n \n # print label_scores\n \n # and extract the maximum score\n score = 0\n selected = None\n \n for ls in label_scores:\n if ls[1] > score:\n selected = ls[0]\n score = ls[1]\n \n if returnNonSelection:\n pass\n else:\n return selected", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def bicycle_route(\n self,\n origin: List,\n destination: List,\n via: Optional[List[Tuple]] = None,\n origin_place_options: Optional[PlaceOptions] = None,\n destination_place_options: Optional[PlaceOptions] = None,\n via_place_options: Optional[PlaceOptions] = None,\n destination_waypoint_options: Optional[WayPointOptions] = None,\n via_waypoint_options: Optional[WayPointOptions] = None,\n departure_time: Optional[datetime] = None,\n routing_mode: str = \"fast\",\n alternatives: int = 0,\n units: str = \"metric\",\n lang: str = \"en-US\",\n return_results: Optional[List] = None,\n spans: Optional[List] = None,\n avoid_features: Optional[List[str]] = None,\n avoid_areas: Optional[List[AvoidBoundingBox]] = None,\n exclude: Optional[List[str]] = None,\n ) -> RoutingResponse: # noqa E501\n resp = self.routing_api.route(\n transport_mode=\"bicycle\",\n origin=origin,\n destination=destination,\n via=via,\n origin_place_options=origin_place_options,\n destination_place_options=destination_place_options,\n via_place_options=via_place_options,\n destination_waypoint_options=destination_waypoint_options,\n via_waypoint_options=via_waypoint_options,\n departure_time=departure_time,\n 
routing_mode=routing_mode,\n alternatives=alternatives,\n units=units,\n lang=lang,\n return_results=return_results,\n spans=spans,\n avoid_features=avoid_features,\n avoid_areas=avoid_areas,\n exclude=exclude,\n )\n return RoutingResponse.new(resp.json())", "def create_route(stop_list):\n delivery_route = []\n stop_list = stop_list\n current_location = 0\n shortest_distance = sys.maxsize\n shortest_trip = None\n\n while len(stop_list) != 1:\n # calculate possible next trips from current location\n possible = determine_next_stop(current_location, stop_list)\n\n for key, value in possible.items():\n if value < shortest_distance:\n shortest_trip = key\n shortest_distance = value\n\n # adds the shortest next stop to delivery route\n delivery_route.append(shortest_trip[1])\n\n # makes the next shortest stop the current location\n current_location = shortest_trip[1]\n\n # removes current location from stop list\n stop_list.remove(shortest_trip[1])\n\n # resets shortest_distance variable\n shortest_distance = sys.maxsize\n\n # adds last stop to delivery route\n delivery_route.append(stop_list[0])\n\n return delivery_route", "def lookup_routes(self, daddr):\n outroutes = []\n for entry in self.routes:\n for varat in entry[\"varats\"]:\n ip = varat[\"network\"].split(\".\")\n netmask = varat[\"netmask\"].split(\".\")\n\n mask_bit = \"\".join([ format(int(quad), \"08b\") for quad in netmask ])\n num_ones = mask_bit.count(\"1\")\n ip_bin = \"\".join([ format(int(quad), \"08b\") for quad in ip ])\n ip_start = ip_bin[:num_ones]\n daddr_bin = \"\".join([ format(int(quad), \"08b\") for quad in daddr.split(\".\") ])\n if daddr_bin.startswith(ip_start):\n outroutes.append({\"peer\": entry[\"peer\"], \"us\": entry[\"us\"], \"ghoti\": num_ones, \"msg\": varat})\n\n #print(\"outroutessssssssssssssssssssss\", outroutes)\n return outroutes", "def gtfs_routes(gtfs, output_f):\n\n\t# Load up the stop times so we can find which are the best routes.\n\t#TODO\n\tstop_times_file = [x for x in gtfs.namelist() if 'stop_times' in x][0]\n\n\tstoptimes_c = csv.reader((gtfs.open(stop_times_file, 'r')))\n\theader = stoptimes_c.next()\n\ttrip_id_col = header.index('trip_id')\n\tarrtime_col = header.index('arrival_time')\n\tdeptime_col = header.index('departure_time')\n\tstopseq_col = header.index('stop_sequence')\n\ttrip_times = {}\n\tfor row in stoptimes_c:\n\t\tif row[trip_id_col] not in trip_times:\n\t\t\t# earliest seq, latest seq, earliest seq dep time, latest seq dep time\n\t\t\ttrip_times[row[trip_id_col]] = [None, None, None, None]\n\n\t\tarrtime = time_as_timedelta(row[arrtime_col])\n\t\tdeptime = time_as_timedelta(row[deptime_col])\n\t\tif arrtime is None or deptime is None:\n\t\t\t# bad data, skip!\n\t\t\tcontinue\n\t\tseq = int(row[stopseq_col])\n\n\t\t# Find if this is an earlier item in the sequence\n\t\tif trip_times[row[trip_id_col]][0] is None or trip_times[row[trip_id_col]][0] > seq:\n\t\t\ttrip_times[row[trip_id_col]][0] = seq\n\t\t\ttrip_times[row[trip_id_col]][2] = deptime\n\n\t\t# Find if this is an later item in the sequence\n\t\tif trip_times[row[trip_id_col]][1] is None or trip_times[row[trip_id_col]][1] < seq:\n\t\t\ttrip_times[row[trip_id_col]][1] = seq\n\t\t\ttrip_times[row[trip_id_col]][3] = arrtime\n\n\t# Load the shapes into a map that we can lookup.\n\t# We should do all the geometry processing here so that we only have to do\n\t# this once-off.\n\t#TODO\n\tshapes_file = [x for x in gtfs.namelist() if 'shapes' in x][0]\n\tshapes_c = csv.reader(swallow_windows_unicode(gtfs.open(shapes_file, 
'r')))\n\n\theader = shapes_c.next()\n\tshape_id_col = header.index('shape_id')\n\tshape_lat_col = header.index('shape_pt_lat')\n\tshape_lng_col = header.index('shape_pt_lon')\n\tshape_seq_col = header.index('shape_pt_sequence')\n\tshape_dist_col = header.index('shape_dist_traveled') if 'shape_dist_traveled' in header else None\n\n\tshapes = {}\n\tshape_lengths = {}\n\tfor row in shapes_c:\n\t\tif row[shape_id_col] not in shapes:\n\t\t\tshapes[row[shape_id_col]] = {}\n\n\t\tshapes[row[shape_id_col]][int(row[shape_seq_col])] = (Decimal(row[shape_lng_col]), Decimal(row[shape_lat_col]))\n\n\t\t# Calculate length according to GTFS\n\t\t# This could also be calculated by the geometry, but we trust GTFS, right...\n\t\tif shape_dist_col is not None and row[shape_dist_col]:\n\t\t\tlength = Decimal(row[shape_dist_col])\n\t\t\tif row[shape_id_col] not in shape_lengths or shape_lengths[row[shape_id_col]] < length:\n\t\t\t\tshape_lengths[row[shape_id_col]] = length\n\n\t# translate the shapes into a LineString for use by the GeoJSON module\n\tfor shape_id in shapes.iterkeys():\n\t\tshape_keys = shapes[shape_id].keys()\n\t\tshape_keys.sort()\n\t\tshape = []\n\t\tfor ordinal in shape_keys:\n\t\t\tshape.append(shapes[shape_id][ordinal])\n\n\t\tshapes[shape_id] = shape\n\n\t# Make a matching dict between routes and shapes\n\ttrips = {}\n\ttrips_ref = {}\n\troute_time = {}\n\n\t#TODO\n\ttrips_file = [x for x in gtfs.namelist() if 'trips' in x][0]\n\n\ttrips_c = csv.reader(swallow_windows_unicode(gtfs.open(trips_file, 'r')))\n\theader = trips_c.next()\n\troute_id_col = header.index('route_id')\n\tshape_id_col = header.index('shape_id')\n\ttrip_id_col = header.index('trip_id')\n\tfor row in trips_c:\n\t\t# reference count the shapes\n\t\tif row[route_id_col] not in trips_ref:\n\t\t\t# route is unknown, create dict\n\t\t\ttrips_ref[row[route_id_col]] = {}\n\t\t\troute_time[row[route_id_col]] = trip_times[row[trip_id_col]]\n\n\t\tif row[shape_id_col] not in trips_ref[row[route_id_col]]:\n\t\t\t# shape is unknown, create counter\n\t\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] = 0\n\n\t\t# increment counter\n\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] += 1\n\n\t# now we're done, iterate through the reference-counters and find the best\n\t# shape\n\tfor route_id, candidate_shapes in trips_ref.iteritems():\n\t\tpopular_shape, popular_shape_refs = None, 0\n\t\tfor shape_id, refs in candidate_shapes.iteritems():\n\t\t\tif refs > popular_shape_refs:\n\t\t\t\tpopular_shape, popular_shape_refs = shape_id, refs\n\n\t\t# now we should have the route's shape\n\t\tassert popular_shape is not None, 'Couldn\\'t find a shape for route %r' % route_id\n\t\ttrips[route_id] = popular_shape\n\n\t# Cleanup unused variables\n\tdel trip_times\n\n\t# lets setup our output file\n\toutput_layer = geojson.FeatureCollection([])\n\t# assume WGS84 CRS\n\toutput_layer.crs = geojson.crs.Named('urn:ogc:def:crs:OGC:1.3:CRS84')\n\n\t# now we have all the shapes available, translate the routes\n\t#TODO\n\troutes_file = [x for x in gtfs.namelist() if 'routes' in x][0]\n\n\troutes_c = csv.reader(swallow_windows_unicode(gtfs.open(routes_file, 'r')))\n\theader = routes_c.next()\n\troute_id_col = header.index('route_id')\n\n\tfor row in routes_c:\n\t\t# make dict of other properties\n\t\tprops = dict()\n\t\tfor i, h in enumerate(header):\n\t\t\tif row[i] != '':\n\t\t\t\tprops[h] = row[i]\n\n\t\tif row[route_id_col] not in trips:\n\t\t\t# Route has no trips!\n\t\t\tprint \"Warning: route has no trips, skipping: %r\" % 
(row,)\n\t\t\tcontinue\n\n\t\tprops['shape_id'] = trips[row[route_id_col]]\n\t\tprops['shape_refs'] = trips_ref[row[route_id_col]][props['shape_id']]\n\t\tif shape_dist_col is not None and len(shape_lengths) > 0:\n\t\t\tprops['shape_length'] = shape_lengths[props['shape_id']]\n\t\tprops['duration_sec'] = (route_time[row[route_id_col]][3] - route_time[row[route_id_col]][2]).total_seconds()\n\n\t\toutput_layer.features.append(geojson.Feature(\n\t\t\tgeometry=geojson.LineString(\n\t\t\t\tcoordinates=shapes[trips[row[route_id_col]]]\n\t\t\t),\n\t\t\tproperties=props,\n\t\t\tid=row[route_id_col]\n\t\t))\n\n\t# now flush the GeoJSON layer to a file.\n\tgeojson.dump(output_layer, output_f, cls=DecimalEncoder)", "def routes(self) -> pulumi.Output[Sequence['outputs.VirtualHubRouteTableRoute']]:\n return pulumi.get(self, \"routes\")", "def count_routes_max_stops(self, src, dest, max_stops):\n\n criteria = lambda stops, distance: stops <= max_stops # inconsistent max, per test cases\n return len(self.routes_with_criteria(src, dest, criteria))", "def route(self, is_check_lanes=True):\n print 'route'\n # TODO: if too mant vtypes, better go through id_modes\n exectime_start = time.clock()\n\n net = self.get_scenario().net\n edges = net.edges\n vtypes = self.parent.vtypes\n\n ids_edges = []\n ids_trip = []\n costs = []\n for id_vtype in self.get_vtypes():\n id_mode = vtypes.ids_mode[id_vtype]\n\n # no routing for pedestrians\n if id_mode != net.modes.get_id_mode('pedestrian'):\n weights = edges.get_times(id_mode=id_mode,\n speed_max=vtypes.speeds_max[id_vtype],\n is_check_lanes=is_check_lanes)\n\n ids_trip_vtype = self.get_trips_for_vtype(id_vtype)\n # print ' id_vtype,id_mode',id_vtype,id_mode#,ids_trip_vtype\n # print ' weights',weights\n ids_edge_depart = self.ids_edge_depart[ids_trip_vtype]\n ids_edge_arrival = self.ids_edge_arrival[ids_trip_vtype]\n\n for id_trip, id_edge_depart, id_edge_arrival in zip(ids_trip_vtype, ids_edge_depart, ids_edge_arrival):\n cost, route = routing.get_mincostroute_edge2edge(id_edge_depart,\n id_edge_arrival,\n edges=edges,\n weights=weights)\n if len(route) > 0:\n ids_edges.append(route)\n ids_trip.append(id_trip)\n costs.append(cost)\n\n ids_route = self.routes.get_value().add_rows(ids_trip=ids_trip,\n ids_edges=ids_edges,\n costs=costs,\n )\n self.add_routes(ids_trip, ids_route)\n print ' exectime', time.clock()-exectime_start\n return ids_trip, ids_route", "def possible_routes(srcLat, srcLon, destLat, destLon, searchPreference, dateTime):\n\n dateTime = dateTime.split(\",\")\n\n routes = Db().get_best_route(srcLat, srcLon, destLat, destLon)\n try:\n best_routes = get_three_best_routes(routes, searchPreference, dateTime)\n except IndexError:\n best_routes = \"No Journey Found\"\n\n # Get the address for map display purposes\n try:\n for i in range(len(best_routes)):\n #address is a dataframe, hency the use of .loc\n address = Db().get_single_address(best_routes[i][2]).loc[0,\"Address\"]\n best_routes[i].append(address)\n except IndexError:\n # In case the source is outside Dublin\n best_routes = \"No Journey Found\"\n\n return json.dumps(best_routes, ensure_ascii=False)", "def get_solution(data, manager, routing, solution):\n route_list = {}\n max_route_distance = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n route_list[vehicle_id] = []\n route_distance = 0\n while not routing.IsEnd(index):\n route_list[vehicle_id].append(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n 
route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n route_list[vehicle_id].append(manager.IndexToNode(index))\n #_set_route_distance(vehicle_id, route_distance, route_list)\n return route_list" ]
[ "0.6546947", "0.6405214", "0.6124267", "0.58605796", "0.5816492", "0.5779385", "0.57356185", "0.5716994", "0.56505096", "0.5648405", "0.5640563", "0.56386757", "0.5620506", "0.55997664", "0.55781066", "0.5576032", "0.5576032", "0.5568899", "0.55527645", "0.5546889", "0.5546889", "0.55176437", "0.551729", "0.5501877", "0.54965717", "0.54648733", "0.54459864", "0.5440342", "0.5430563", "0.5423542" ]
0.6941076
0
Perform one weighted dba iteration and return the new average
def _dba_iteration(tseries, ftvector, avg, weights, fs): # the number of time series in the set n = len(tseries) # length of the time series ntime = avg.shape[0] # features of avg avg_ft = ExtraFeatures(avg,fs) scaler.fit(ftvector) # number of dimensions (useful for MTS) # array containing the new weighted average sequence new_avg = np.zeros((ntime),dtype=np.float64) # array of sum of weights sum_weights = np.zeros((ntime),dtype=np.float64) # loop the time series for s in range(n): series = tseries[s] ft = ftvector[s] dist = np.linalg.norm(scaler.transform(avg_ft.reshape(1,-1))-scaler.transform(ft.reshape(1,-1))) new_avg = new_avg + dist*weights[s]*series sum_weights = sum_weights + dist*weights[s] new_avg = new_avg/sum_weights return new_avg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def average_weights(self):\n for feat, weight in self.weights.items():\n total = self._totals[feat]\n total += (self.i - self._tstamps[feat]) * weight\n averaged = total / float(self.i)\n self.weights[feat] = averaged\n return None", "def calculate_average(precisions, weights):\n tmp_res = 1\n for id, item in enumerate(precisions):\n tmp_res = tmp_res*np.power(item, weights[id])\n tmp_res = np.power(tmp_res, np.sum(weights))\n return tmp_res", "def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg", "def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg", "def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg", "def calculate_weighted_results():\n pass", "def calculate_average(precisions, weights):\r\n tmp_res = 1\r\n for id, item in enumerate(precisions):\r\n tmp_res = tmp_res*np.power(item, weights[id])\r\n tmp_res = np.power(tmp_res, np.sum(weights))\r\n return tmp_res", "def weighted_average(model_df,cols):\n #wa = model_df[cols].apply(lambda x: (x[0]*x[1]).sum()/x[0].sum())\n wa = (model_df[cols[0]]*model_df[cols[1]]).sum()/model_df[cols[0]].sum()\n return wa", "def update_average(self,result):\n a = 1/self.iters\n b = 1 - a\n self.average = a * result + b * self.average\n self.iters += 1", "def weighted_average(items, weights):\n assert len(items) > 0\n assert len(items) == len(weights)\n # declare total as the return value which is a decimal\n total = 0.0\n # for all pairs from two lists\n for i in range(len(items)):\n \t# we increment the total for the product of both value\n \ttotal += items[i] * weights[i]\n # we return the total divided by sum of weights\n return total / sum(weights)", "def weighted_average(listofvalues):\n total = 0\n weights = 0\n for [w, v] in listofvalues:\n total += w*v\n weights += w\n return total/weights", "def _weightedAverage(list_):\n\n\t\taccum = [0, 0]\n\n\t\tfor point, weight in list_:\n\n\t\t\taccum[0] += point[0] * weight\n\t\t\taccum[1] += point[1] * weight\n\n\t\ttotalWeight = sum([weight for point, weight in list_])\n\n\n\t\tif totalWeight == 0:\n\t\t\t\n\t\t\treturn (0, 0)\n\n\n\t\taccum[0] /= float(totalWeight)\n\t\taccum[1] /= float(totalWeight)\n\n\t\treturn (accum[0], accum[1])", "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def set_weights_iter_average(self):\n self.nn.set_param_values(np.average(self.w_after_iter, axis=0))", "def calcAverage(dat):\n return sum(dat)/len(dat)", "def weighted_average(array, weights):\n assert len(array) == len(weights)\n return sum([x * w for x, w in zip(array, weights)]) / sum(weights)", "def _evaluate(self, estimator, generator):\n return np.mean([np.mean(np.power(estimator.estimate(A, b) - x, 2))\n for A, x, b in[generator.generate()\n for _ in range(self.repetitions)]])", "def update_weights(self, alpha, ind):\n inside = -alpha * self.labels * self.predictions[ind, :]\n new_weights = self.weights * np.exp(inside)\n self.weights = new_weights / np.sum(new_weights)", "def average_over_interval(raw_rate, weight_function, intervals):\n\n def averaging_function(t):\n return raw_rate(t) * weight_function(t)\n\n 
results = np.zeros(len(intervals), dtype=np.float)\n\n for interval_idx in range(len(intervals)):\n start = intervals.start[interval_idx]\n finish = intervals.finish[interval_idx]\n results[interval_idx] = quad(averaging_function, start, finish)[0]\n\n return results", "def _avg(value1, value2, weight):\r\n if value1 is None:\r\n return value2\r\n if value2 is None:\r\n return value1\r\n return value2 * weight + value1 * (1 - weight)", "def _avg(value1, value2, weight):\r\n if value1 is None:\r\n return value2\r\n if value2 is None:\r\n return value1\r\n return value2 * weight + value1 * (1 - weight)", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def average(self):\n return self.summation() / self.count()", "def update(\n self,\n value: float,\n weight: Optional[float] = None\n ):\n\n if weight is not None and not self.weighted:\n raise ValueError('Cannot pass a weight to an unweighted averager.')\n\n self.n += 1\n\n if self.has_alpha:\n step_size = self.alpha\n elif self.weighted:\n\n if weight is None:\n raise ValueError('The averager is weighted, so non-None values must be passed for weight.')\n\n self.cumulative_weight += weight\n step_size = weight / self.cumulative_weight\n\n else:\n step_size = 1 / self.n\n\n self.average = self.average + step_size * (value - self.average)", "def numba_ewma(X, alpha, state=None, adjust=True, ignore_na=True, minp=1):\n\n N = len(X)\n if N == 0:\n output = np.empty(N, dtype=float64)\n output_state = state\n else:\n # np.put(X, np.isinf(X), np.nan)\n X[np.isinf(X)] = np.nan\n\n if state is None:\n old_wt = 1.\n drop_first = False\n else:\n X = np.array([state[0]] + list(X))\n old_wt = state[1]\n N += 1\n drop_first = True\n\n minp = max(minp, 1)\n old_wt_factor = 1. - alpha\n new_wt = 1. 
if adjust else alpha\n output = np.empty(N, dtype=float64)\n\n weighted_avg = X[0]\n is_observation = (weighted_avg == weighted_avg)\n nobs = int(is_observation)\n output[0] = weighted_avg if (nobs >= minp) else np.nan\n\n for i in range(1, N):\n cur = X[i]\n is_observation = (cur == cur)\n nobs += int(is_observation)\n if weighted_avg == weighted_avg:\n\n if is_observation or (not ignore_na):\n\n old_wt *= old_wt_factor\n if is_observation:\n\n # avoid numerical errors on constant series\n if weighted_avg != cur:\n weighted_avg = ((old_wt * weighted_avg) + (new_wt * cur)) / (old_wt + new_wt)\n if adjust:\n old_wt += new_wt\n else:\n old_wt = 1.\n elif is_observation:\n weighted_avg = cur\n\n output[i] = weighted_avg if (nobs >= minp) else np.nan\n\n output = output[1:] if drop_first else output\n output_state = np.array([weighted_avg, old_wt])\n return output, output_state", "def avg():\n\n # call sum method to add up the values in the collection & div by the num of items\n # call len method to compute the # of vals in collection which is divided by sum total \n mean = sum(inlist) / len(inlist)\n return mean \n\n # alternate method would be calling the reduce method with lamda \n # return reduce(lambda a, b: a + b, inlist) / len(inlist)", "def compute_average(self, error=None):\n\n nbjobs = len(self)\n if not nbjobs:\n return\n max_xsec = max(one.xsec for one in self)\n min_xsec = min(one.xsec for one in self)\n self.axsec = sum([one.axsec for one in self]) / nbjobs\n self.xsec = sum([one.xsec for one in self]) /nbjobs\n self.xerrc = sum([one.xerrc for one in self]) /nbjobs\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self])) /nbjobs\n if error:\n self.xerrc = error\n self.xerru = error\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = 0#sum([one.nw for one in self])\n self.maxit = 0#len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = sum([one.luminosity for one in self])\n self.ysec_iter = []\n self.yerr_iter = []\n self.th_maxwgt = 0.0\n self.th_nunwgt = 0 \n for result in self:\n self.ysec_iter+=result.ysec_iter\n self.yerr_iter+=result.yerr_iter\n self.yasec_iter += result.yasec_iter\n self.eff_iter += result.eff_iter\n self.maxwgt_iter += result.maxwgt_iter\n\n #check full consistency\n onefail = False\n for one in list(self):\n if one.xsec < (self.xsec - 25* one.xerru):\n if not onefail:\n logger.debug('multi run are inconsistent: %s < %s - 25* %s: assign error %s', one.xsec, self.xsec, one.xerru, error if error else max_xsec-min_xsec)\n onefail = True\n self.remove(one)\n if onefail:\n if error:\n return self.compute_average(error)\n else:\n return self.compute_average((max_xsec-min_xsec)/2.)", "def test_multiple_equal_weights(self):\n df = self.df.copy()\n weights = np.array([1.0 / len(df.index)] * len(df.index))\n out = nan_weighted_mean(df.values, weights=weights)\n self.assertTrue(\n np.allclose(out, np.average(df.values, weights=weights, axis=0))\n )", "def calcweighted(store):\n nobs = store['yvec'].shape[0]\n store['Upper'].put(-store['rho'], range(0, nobs - 1), range(1, nobs))\n store['Upper'].matvec(store['yvec'], store['yvectil'])\n for i in xrange(store['xmat'].shape[1]):\n store['Upper'].matvec(store['xmat'][:, i], store['xmattil'][:, i])", "def weighted_avg(x, weights):\n return weights.unsqueeze(1).bmm(x).squeeze(1)" ]
[ "0.6735685", "0.65043914", "0.6491874", "0.6491874", "0.6491874", "0.6479693", "0.64793587", "0.6329713", "0.6180607", "0.61702657", "0.6145819", "0.61137694", "0.6075552", "0.59591585", "0.59500974", "0.5940664", "0.5910114", "0.58602995", "0.58529896", "0.5830189", "0.5830189", "0.5820239", "0.5818662", "0.58175457", "0.5817323", "0.57940257", "0.5794021", "0.5790272", "0.5789682", "0.5788627" ]
0.7245355
0
team_str is either '<b>TEA</b>' or 'TEA'; the team code is the FD code. Returns the standard 3-letter code.
def ParseTeam(team_str): fd_team_code = team_str[3:-4] if '<b>' in team_str else team_str return { 'SA': 'SAS', 'NO': 'NOP', 'GS': 'GSW', 'NY': 'NYK', 'BKN': 'BRK', 'CHA': 'CHO', }.get(fd_team_code, fd_team_code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_team(team):\n if team == \"left\":\n return \"0\"\n elif team == \"right\":\n return \"1\"\n elif team == \"spec\":\n return \"-1\"", "def get_team_alliance(event: str, match: int, team: int) -> typing.Optional[str]:\n \n if team in get_match_alliances(event, match)['red']:\n return 'red'\n elif team in get_match_alliances(event, match)['blue']:\n return 'blue'\n else:\n return None", "def _translateTeam(self, db, column, optteam):\n db_filename = self.registryValue('dbLocation')\n \n if not os.path.exists(db_filename):\n self.log.error(\"ERROR: I could not find: %s\" % db_filename)\n return\n \n conn = sqlite3.connect(db_filename)\n cursor = conn.cursor()\n query = \"select %s from nfl where %s='%s'\" % (db, column, optteam)\n cursor.execute(query)\n row = cursor.fetchone()\n\n cursor.close() \n\n return (str(row[0]))", "def get_team_id(team_name):\n\n team_name = team_name.lower()\n endpoint = \"/teams\"\n response = api.nhl_api(endpoint)\n\n if not response:\n raise ConnectionError(\"An invalid response was returned from the NHL Teams API.\")\n\n teams_json = response.json()\n teams = teams_json[\"teams\"]\n\n team_id = None\n for team in teams:\n if team[\"name\"].lower() == team_name:\n team_id = team[\"id\"]\n break\n\n if not team_id:\n raise ValueError(\"{} is not a valid NHL team. Check your configuraiton file!\".format(team_name))\n\n return team_id", "def set_team(self, team):\n self.team = team.upper()", "def _get_team_class(self):\n return '_e4y4wr'", "def _translateTeam(self, db, column, optteam):\n db_filename = self.registryValue('dbLocation')\n with sqlite3.connect(db_filename) as conn:\n cursor = conn.cursor()\n query = \"select %s from mlb where %s='%s'\" % (db, column, optteam)\n #self.log.info(query)\n cursor.execute(query)\n row = cursor.fetchone()\n \n return (str(row[0]))", "def get_team_slug_name(project_name, role):\n sanitized_role = pluralized(role).replace(\"Project \", \"\")\n team_name = f\"CT: {project_name} {sanitized_role}\"\n team_slug = slugified(team_name)\n return team_slug, team_name", "def team_name(self):\n return 'Team Name'", "def get_team_name_from_message(message):\n return message.split(GlobalConstants.GOAL_KEYWORD)[1].strip()", "def get_info_from_api(team_name):\n if \"-\" in team_name:\n team_name = team_name.replace(\"-\", \"+\")\n if \"brighton\" in team_name: # some teams has different names than in sofa-score\n team_name = \"brighton\"\n if \"leicester\" in team_name:\n team_name = \"leicester\"\n if \"norwich\" in team_name:\n team_name = \"norwich\"\n if \"mallorca\" in team_name:\n team_name = \"mallorca\"\n if \"parma\" in team_name:\n team_name = \"parma+calcio\"\n if \"bayern\" in team_name:\n team_name = \"bayern\"\n if \"koln\" in team_name:\n team_name = \"fc+koln\"\n if \"union+berlin\" in team_name:\n team_name = \"union+berlin\"\n if \"fsv+mainz\" in team_name:\n team_name = \"mainz\"\n if \"hoffenheim\" in team_name:\n team_name = \"hoffenheim\"\n if \"mgladbach\" in team_name:\n team_name = \"borussia+monchengladbach\"\n if \"schalke\" in team_name:\n team_name = \"schalke\"\n if \"leverkusen\" in team_name:\n team_name = \"leverkusen\"\n if \"paderborn\" in team_name:\n team_name = \"paderborn\"\n print(team_name)\n response = requests.get(cfg.API_URL + team_name)\n team_data = json.loads(response.text)\n return team_data['teams'][0]", "def teamname(record):\n\n tname = \"\"\n if record and record.name:\n tname = \"%s \" % record.name.strip()\n return tname", "def league_id(self):\n if self.league_string == 
NBA_STRING:\n return NBA_GAME_ID_PREFIX\n elif self.league_string == WNBA_STRING:\n return WNBA_GAME_ID_PREFIX\n elif self.league_string == G_LEAGUE_STRING:\n return G_LEAGUE_GAME_ID_PREFIX", "def remove_long_names(self, team_str):\n long_names = {'Western Bulldogs': 'Bulldogs', 'West Coast': 'Eagles',\n 'St Kilda': 'Saints', 'North Melbourne' : 'North',\n 'Port Adelaide': 'Port', 'Gold Coast' : 'Suns'}\n \n for key in long_names:\n team_str = team_str.replace(key, long_names[key])\n \n return team_str", "def chn_season_type_recognize(str):\n season_type = None\n if '第一季度' in str:\n season_type = 'Q1'\n elif '第二季度' in str:\n season_type = 'Q2'\n elif '半年度' in str:\n season_type = 'Q2'\n elif '第三季度' in str:\n season_type = 'Q3'\n elif '年度报告' in str:\n season_type = 'FY'\n\n return season_type", "def update_team(self, team) -> None:\n if isinstance(team, Teambuilder):\n self._team = team\n elif isinstance(team, str):\n self._team = ConstantTeambuilder(team)\n else:\n raise TypeError(\n \"Team must be a showdown team string or a Teambuilder object.\"\n )", "def CODE(string):\n return ord(string[0])", "def getOpposition(self, team):\n if team.lower() == self.homeTeam['name']:\n return self.awayTeam['name']\n elif team.lower() == self.awayTeam['name']:\n return self.homeTeam['name']\n else:\n return None", "def schoolNameFromPassCode(pass_code):\n try:\n return Team.objects.get(pass_code=pass_code).organization.name\n except Team.DoesNotExist:\n return None", "def fa_finder(league_no, team_name):\n ros_proj_b_list = BatterProjection.objects.all()\n ros_proj_p_list = PitcherProjection.objects.all()\n player_comp = {}\n pitching_fa_list = yahoo_players(league_no, \"P\")\n batting_fa_list = yahoo_players(LEAGUE_NO, \"B\")\n avail_pitching_fas = rate_avail_players(pitching_fa_list, ros_proj_p_list)\n yahoo_team = get_single_yahoo_team(league_no, team_name)\n team_pitching_values = rate_team(yahoo_team, ros_proj_p_list)\n avail_batting_fas = rate_avail_players(batting_fa_list, ros_proj_b_list)\n team_batting_values = rate_team(yahoo_team, ros_proj_b_list)\n\n player_comp['Team Name'] = yahoo_team['team_name']\n player_comp['Pitching FAs'] = avail_pitching_fas\n player_comp['Pitching Team'] = team_pitching_values\n player_comp['Batting FAs'] = avail_batting_fas\n player_comp['Batting Team'] = team_batting_values\n\n return player_comp", "def change_team():\n\n # Use team in global scope\n global team\n\n # Change the value of team in global: team\n team = 'newEclerx'", "def check_valid_team(words, teams):\n\n # <= 1 since if we only get \"jets\" it doesn't mean anything, \n # same as only mentioning us in a comment.\n if len(words) <= 1:\n return None, []\n\n # this is either a legit single team name and stat, or a two name team that \n # isn't valid since there is no stat.\n if len(words) == 2:\n if words[0] in teams:\n return words[0], words[1:]\n else:\n return None, []\n\n # possible we have a two name team or a one name team with a stat request.\n if len(words) >= 3:\n # check two name first since checking single name first can fail on \"maple leafs\"\n full_name = words[0] + words[1]\n if full_name in teams:\n return full_name, words[2:]\n elif words[0] in teams:\n return words[0], words[1:]\n\n return None, words", "def _two_factor_code(self):\n code = ''\n while not code:\n code = input('Enter 2FA code: ')\n return code", "def ra_code(string):\n code_pattern = 'ra{0,1}[efgk]s{0,1}\\d{2}[a-z][0-9a-z]{0,1}'\n code = re.search(code_pattern, string.lower())\n if not code:\n print \"No code 
found\"\n return\n c = code.group()\n if c[:1] == 'rk':\n code = 'raks' + c[2:]\n elif c[:1] == 're':\n code = 'raes' + c[2:]\n elif c[:1] == 'rg':\n code = 'rags' + c[2:]\n elif c[:1] == 'rf':\n code = 'rafs' + c[2:]\n else:\n code = c\n return code", "def code_to_name(code):\n upper_code = code.upper()\n if upper_code in code_dict:\n return code_dict[upper_code]\n else:\n return code", "def yield_team(self) -> str: # pragma: no cover", "def get_offense_team_id(self):\n pass", "def get_team(self):\n if self.team:\n return self.team\n return None", "def _select_market_code(code):\n code = str(code)\n if code[0] in ['5', '6', '9'] or code[:3] in [\"009\", \"126\", \"110\", \"201\", \"202\", \"203\", \"204\"]:\n return 1\n return 0", "def team_name(self, team_name):\n self.team_id = self.get_team_id(team_name)\n LOG.debug(\"Mattermost team id: %s\", self.team_id)" ]
[ "0.6921741", "0.5983766", "0.573982", "0.5738846", "0.56040615", "0.5561792", "0.54862833", "0.54771405", "0.54294246", "0.52948797", "0.5249644", "0.51467144", "0.51280034", "0.5080651", "0.505249", "0.503452", "0.5026184", "0.5024583", "0.5010263", "0.49874726", "0.49641785", "0.4945749", "0.49256748", "0.4919928", "0.49117097", "0.49111804", "0.48937872", "0.4893068", "0.4860153", "0.4847134" ]
0.7903837
0
Encode to a binary vector a list of literals
def binary_encode(self, literals): arr = np.zeros(len(self.encoder), dtype='bool') for p in literals: assert isinstance(p, Literal) arr[self.encoder[p]] = True return arr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_vector_of_t(value: list):\n return encode_u32(len(value)) + bytes([i for j in value for i in j])", "def vectorize_doc_list(docList):\n vecList = bc.encode(docList)\n return vecList", "def encode_list(L):\n return \"&\".join([\"%s=%s\" % (index, element) for index, element in enumerate(L)])", "def _encode_list(source: list) -> bytes:\n result_data = b\"l\"\n\n for item in source:\n result_data += encode(item)\n\n return result_data + b\"e\"", "def mk_bitvecs(self):\n self.bitvec = ''.join([f'{b:#010b}'[2:] for b in self.code ][::-1])\n self.bitvec_data = ''.join([f'{b:#010b}'[2:] for b in self.input][::-1])\n\n # Pad with some zeros to catch the last instructions.\n self.bitvec = '0'*64 + self.bitvec", "def encode_list(value: list, inner_encoder: typing.Callable) -> bytes:\n return encode_vector_of_t(list(map(inner_encoder, value)))", "def pack_varint_list(data: List[int]) -> bytes:\n result = b\"\"\n for value in data:\n result += pack_varint(value)\n return result", "def _bytes_list_feature(values):\n def norm2bytes(value):\n return value.encode() if isinstance(value, str) and six.PY3 else value\n \n return tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[norm2bytes(values)]))", "def _bytes_list_feature(values):\n def norm2bytes(value):\n return value.encode() if isinstance(value, str) and six.PY3 else value\n\n return tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[norm2bytes(values)]))", "def test_serialize_list():\n assert bytes([\n *UnsignedInt.to_bytes(3), # Number of values\n *String.to_bytes(\"Hello, world!\"),\n *String.to_bytes(\"This is the middle value.\"),\n *String.to_bytes(\"Goodbye, world!\")\n ]) == bytes(List(String).to_bytes([\n \"Hello, world!\",\n \"This is the middle value.\",\n \"Goodbye, world!\",\n ]))", "def _reg_encode_utf16_list(self, xlist):\n t = '' \n for x in xlist: \n t += self._reg_encode_utf16(x + u'\\u0000') # null term \n t += self._reg_encode_utf16(u'\\u0000') # end of list (double null) \n return t", "def encode(input_: list):\n global n_bytes\n block = bytearray()\n\n for tup in input_:\n arr = np.array(tup[0], dtype=tup[1]).tobytes()\n n_bytes += len(arr)\n block += arr\n\n return block", "def encode(self, seq):", "def encode_map(value: list) -> bytes:\n raise NotImplementedError()", "def binary_encode(x: int) -> List[int]:\n return [x >> i & 1 for i in range(10)]", "def binary_encode(x: int) -> List[int]:\n return [x >> i & 1 for i in range(10)]", "def encode_lists(field_lists):\n assert isinstance(field_lists, list)\n assert len(field_lists) == GAME_COLS\n\n bits = []\n len_bits = []\n for col in field_lists:\n bits.extend(col)\n free_len = GAME_ROWS - len(col)\n bits.extend([0] * free_len)\n len_bits.extend(int_to_bits(free_len, bits=BITS_IN_LEN))\n bits.extend(len_bits)\n return bits_to_int(bits)", "def vec_to_bytes(val):\n return [int(x*255) for x in val]", "def _bytes_list_feature(values):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))", "def encode_base32_from_list(list_of_int: List[int]) -> str:\n data = BytesIO()\n for i in list_of_int:\n buf = b\"\"\n while True:\n towrite = i & 0x7f\n i >>= 7\n if i:\n buf += bytes((towrite | 0x80,))\n else:\n buf += bytes((towrite,))\n break\n data.write(buf)\n data.seek(0)\n return b32encode(data.read()).decode().replace('=', '')", "def create_bitvector(bitvector, *bits):\n if not bitvector in bitvectors:\n bitvectors[bitvector] = []\n \n if bits:\n bitvectors[bitvector].extend(bits)", "def encoder(list_of_str, key):\n tokenized = 
self.tokenizer.encode_commands(list_of_str)\n hidden = self.tokenizer.tokenize(tokenized)\n hidden = hidden.permute(1, 0, 2).reshape(hidden.size(1), -1) # correct for bididrectional\n return hidden", "def pack(self, v):\n\n raise NotImplementedError()", "def writeVector(q):\n return str(len(q))+'\\t'+' '.join(str(v) for v in q)", "def encode_raw(objs):\n return RawWire().encode(objs)", "def _bitlist_to_string(self, data):\n result = []\n pos = 0\n c = 0\n while pos < len(data):\n c += data[pos] << (7 - (pos % 8))\n if pos % 8 == 7:\n result.append(c)\n c = 0\n pos += 1\n return bytes(result)", "def bytes_feature(values):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))", "def bytes_feature(values):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))", "def encode_vecs(self, vecs, lstm):\n initial_state = lstm.initial_state()\n states = initial_state.transduce(vecs)\n return states", "def string_vector(self):\n pass" ]
[ "0.7348028", "0.6782181", "0.6696202", "0.66823965", "0.66077155", "0.6551032", "0.65074164", "0.63473594", "0.6289376", "0.62325335", "0.61880744", "0.6120585", "0.6110631", "0.6085503", "0.6076761", "0.6076761", "0.594411", "0.5888492", "0.58595353", "0.58571666", "0.58304", "0.5747892", "0.5721629", "0.5717727", "0.5689079", "0.5685413", "0.5677148", "0.5671446", "0.566748", "0.5647977" ]
0.70335406
1
This function reads a segment of the datafile (corresponding to a cycle) and generates a dataframe with columns 'Potential' and 'Current'
def read_cycle(data): current = [] potential = [] for i in data[3:]: current.append(float(i.split("\t")[4])) potential.append(float(i.split("\t")[3])) zipped_list = list(zip(potential, current)) dataframe = pd.DataFrame(zipped_list, columns=['Potential', 'Current']) return dataframe
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_frame(dict_cycle, number):\n list1, list2 = (list(dict_cycle.get('cycle_'+str(number)).items()))\n zipped_list = list(zip(list1[1], list2[1]))\n data = pd.DataFrame(zipped_list, columns=['Potential', 'Current'])\n return data", "def load_sep_cycles(file_name, database_name, datatype):\n #df_single = pd.read_excel(file_name,1)\n (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col, char_cap_col, charge_or_discharge) = col_variables(datatype)\n\n while '/' in file_name:\n file_name = file_name.split('/', maxsplit = 1)[1]\n name = file_name.split('.')[0] + 'Raw'\n \n #name = file_name.split('.')[0] + 'Raw'\n df_single = dbfs.get_file_from_database(name, database_name)\n gb = df_single.groupby(by=[cycle_ind_col])\n cycle_dict = dict(iter(gb))\n battname = file_name.split('.')[0]\n for i in range(1, len(cycle_dict)+1):\n cycle_dict[i]['Battery_Label'] = battname\n for i in range(1, len(cycle_dict)+1):\n dbfs.update_database_newtable(cycle_dict[i], battname+'-'+'Cycle'+ str(i), database_name)\n print('All data separated into cycles and saved in database.')\n return cycle_dict", "def read_file(file):\n dict_of_df = {}\n h_val = 0\n l_val = 0\n n_cycle = 0\n #a = []\n with open(file, 'rt') as f_val:\n print(file + ' Opened')\n for line in f_val:\n record = 0\n if not (h_val and l_val):\n if line.startswith('SCANRATE'):\n scan_rate = float(line.split()[2])\n h_val = 1\n if line.startswith('STEPSIZE'):\n step_size = float(line.split()[2])\n l_val = 1\n if line.startswith('CURVE'):\n n_cycle += 1\n if n_cycle > 1:\n number = n_cycle - 1\n data = read_cycle(a_val)\n key_name = 'cycle_' + str(number)\n #key_name = number\n dict_of_df[key_name] = copy.deepcopy(data)\n a_val = []\n if n_cycle:\n a_val.append(line)\n return dict_of_df, number", "def load_segment(self):\n \n data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)\n \n # Check cycle length against 5 minute duration minimum\n cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()\n self.data = data\n \n diff = data.index.to_series().diff()[1:2]\n s_freq = 1000000/diff[0].microseconds\n\n self.metadata['file_info']['start_time'] = str(data.index[0])\n self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs}\n self.s_freq = s_freq\n\n print('EEG successfully imported.')", "def find_inflections(study, record, sensor, segment, range):\n\n # check if the inflections have already been found\n path = [study, 'analyzed', 'inflections', 'all_times', str(range), record, segment]\n pathJoined = os.path.join(*path)\n file = os.path.join(pathJoined, sensor + \".csv\")\n\n if os.path.isfile(file):\n print('file found, not recalculated.')\n return\n\n print('finding inflections to build : ' + file)\n\n # retrieve the timestamped measurements for the study - record - sensor - segment\n format_type = 'truncate'\n source = os.path.join(study, 'formatted', format_type, record, segment, sensor + '.csv')\n print('source = ' + source)\n df = pd.read_csv(source)\n\n for colName in df.columns:\n\n # remove extra columns because the dataframe will be saved\n if 'Unnamed' in str(colName):\n del df[colName]\n\n # save the timestamps as a list\n elif 'Minutes' in str(colName):\n timeMinutes = list(df[colName])\n\n # find the measurement\n elif 'meas' in colName:\n\n # add new columns to the dataframe to save the new variables\n newColNames = ['inflectionDecision', 'inflectionLocation', 'polyfitCoefficients', 'polyfitEquation', 'polyfitSolution', 
'derivativeEquation', 'derivativeSolution']\n colNameSplit = colName.split('_')\n print('colNameSplit[0] = ' + colNameSplit[0])\n\n for suffix in newColNames:\n label = str(colNameSplit[0] + '_' + suffix)\n print('label = ' + label)\n if label not in df.columns:\n df[label] = [None]*len((list(df['timeMinutes'])))\n\n df['timeBegin'] = [None]*len((list(df['timeMinutes'])))\n df['timeEnd'] = [None]*len((list(df['timeMinutes'])))\n\n for timeMinute in timeMinutes:\n\n i = df[ df['timeMinutes']== timeMinute].index.values[0]\n\n timeDif = (float(df.loc[2,'timeMinutes']) - float(df.loc[1,'timeMinutes']))\n timeTolerance = timeDif/2\n iRange = int(range/60*1/(timeDif))\n # print('iRange = ' + str(iRange))\n\n if len(list(df['timeMinutes'])) - i <= iRange+2:\n continue\n\n timeMedian = df.loc[int(i+iRange/2), 'timeMinutes']\n timeBegin = df.loc[int(i), 'timeMinutes']\n timeEnd = df.loc[int(i+iRange), 'timeMinutes']\n\n # print('timeMedian = ' + str(timeMedian) + ' timeBegin = ' + str(timeBegin) + ' timeEnd = ' + str(timeEnd))\n # print('range = ' + str(range/60) + ' timeEnd-timeBegin = ' + str(timeEnd-timeBegin) + ' % = ' + str(range/60/(timeEnd-timeBegin)))\n\n df_truncate = df[df['timeMinutes'] >= timeMinute]\n df_truncate = df_truncate[df_truncate['timeMinutes'] <= timeMinute + range/60]\n # df_truncate = df[df['timeMinutes'] >= timeMinute & df_truncate['timeMinutes'] <= timeMinute + range/60]\n\n timeTruncate = list(df_truncate['timeMinutes'])\n df.loc[int(i+iRange/2), 'timeBegin'] = min(timeTruncate)\n df.loc[int(i+iRange/2), 'timeEnd'] = max(timeTruncate)\n\n measTruncate = list(df_truncate[colName])\n\n coef = np.polyfit(timeTruncate, measTruncate, 2)\n # coef = [float(x) for x in coef]\n\n x = sym.Symbol('x')\n\n f = coef[0]*x*x+coef[1]*x+coef[2]\n # print('f = ')\n # print(f)\n\n dff = sym.diff(f,x)\n # print('dff = ')\n # print(dff)\n\n solf = sym.solve(f)\n soldf = sym.solve(dff)\n soldf = soldf[0]\n\n\n label = str(colNameSplit[0] + '_' + 'inflectionDecision')\n df.loc[int(i+iRange/2), label] = 'No'\n\n label = str(colNameSplit[0] + '_' + 'inflectionLocation')\n df.loc[int(i+iRange/2), label] = timeMinute\n\n label = str(colNameSplit[0] + '_' + 'polyfitCoefficients')\n df.loc[int(i+iRange/2), label] = str(''.join([str(x) for x in coef]))\n\n label = str(colNameSplit[0] + '_' + 'polyfitEquation')\n df.loc[int(i+iRange/2), label] = str(f)\n\n label = str(colNameSplit[0] + '_' + 'polyfitSolution')\n df.loc[int(i+iRange/2), label] = str(''.join([str(x) for x in solf]))\n\n label = str(colNameSplit[0] + '_' + 'derivativeEquation')\n df.loc[int(i+iRange/2), label] = str(dff)\n\n label = str(colNameSplit[0] + '_' + 'derivativeSolution')\n df.loc[int(i+iRange/2), label] = str(soldf)\n\n if soldf < timeMedian + timeTolerance:\n\n if soldf > timeMedian - timeTolerance:\n\n print('inflection found at time = ' + str(soldf))\n label = str(colNameSplit[0] + '_' + 'inflectionDecision')\n df.loc[int(i+iRange/2), label] = 'Yes'\n\n path = build_path(path)\n file = os.path.join(path, sensor + \".csv\")\n df.to_csv(file)\n print('inflection list saved : ' + file)\n return(file)", "def txt_to_dataframe(folder,name_parcellation):\n column_weight = ['patients','degree', 'density', 'global_efficiency', 'transitivity', 'assortavity', 'clustering_coef',\n 'fiedler_value', 'small_worldness','Null']\n\n file_name=folder+name_parcellation+'.txt'\n data=pd.read_csv(file_name,header=None,delimiter=';')\n data.columns=column_weight\n data=data.drop(['Null'],axis=1)\n file_len=folder+name_parcellation+'_len.txt'\n 
data_len=only_connected_patients(file_len)\n data_len=data_len.values\n data['length']=data_len\n data=data[data['length']>-1.0]\n data=data.reset_index(drop=True)\n return data", "def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed'] #NOTE: I changed the column names because .query() would not work when referencing column names with spaces\n global DataDF #added this line to make the dataframe visible in the variable explorer\n global ReplacedValuesDF #added this line to make the dataframe visible in the variable explorer\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\",\"2. Gross Error\",\"3. Swapped\",\"4. Range Fail\"], columns=colNames[1:]) #added additional indexed rows to make adding the values later easier\n \n return( DataDF, ReplacedValuesDF )", "def read_traveltime(self):\r\n \r\n #### read Travel time from txt file\r\n \r\n \r\n #### Particle travel time branch 1\r\n excelfile_surface_branch1_high = r'excel\\flow_rate\\particle_surface_branch1_high.xlsx'\r\n inarray_surface_branch1_high = pd.read_excel(excelfile_surface_branch1_high).to_numpy() \r\n \r\n excelfile_surface_branch1_medium = r'excel\\flow_rate\\particle_surface_branch1_medium.xlsx'\r\n inarray_surface_branch1_medium = pd.read_excel(excelfile_surface_branch1_medium).to_numpy() \r\n \r\n excelfile_surface_branch1_low = r'excel\\flow_rate\\particle_surface_branch1_low.xlsx'\r\n inarray_surface_branch1_low = pd.read_excel(excelfile_surface_branch1_low).to_numpy()\r\n \r\n excelfile_bottom_branch1_high = r'excel\\flow_rate\\particle_bottom_branch1_high.xlsx'\r\n inarray_bottom_branch1_high = pd.read_excel(excelfile_bottom_branch1_high).to_numpy()\r\n \r\n excelfile_bottom_branch1_medium = r'excel\\flow_rate\\particle_bottom_branch1_medium.xlsx'\r\n inarray_bottom_branch1_medium = pd.read_excel(excelfile_bottom_branch1_medium).to_numpy()\r\n \r\n excelfile_bottom_branch1_low = r'excel\\flow_rate\\particle_bottom_branch1_low.xlsx'\r\n inarray_bottom_branch1_low = pd.read_excel(excelfile_bottom_branch1_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 1\r\n excelfile_tracer_branch1_high = r'excel\\flow_rate\\tracer_branch1_high.xlsx'\r\n inarray_tracer_branch1_high = pd.read_excel(excelfile_tracer_branch1_high).to_numpy()\r\n \r\n excelfile_tracer_branch1_medium = r'excel\\flow_rate\\tracer_branch1_medium.xlsx'\r\n inarray_tracer_branch1_medium = pd.read_excel(excelfile_tracer_branch1_medium).to_numpy()\r\n \r\n excelfile_tracer_branch1_low = r'excel\\flow_rate\\tracer_branch1_low.xlsx'\r\n inarray_tracer_branch1_low = pd.read_excel(excelfile_tracer_branch1_low).to_numpy()\r\n \r\n self.inarrays_branch1 = [inarray_surface_branch1_high, inarray_surface_branch1_medium, inarray_surface_branch1_low, \\\r\n inarray_bottom_branch1_high, inarray_bottom_branch1_medium, inarray_bottom_branch1_low, \\\r\n inarray_tracer_branch1_high, inarray_tracer_branch1_medium, inarray_tracer_branch1_low]\r\n \r\n \r\n #### Particle travel time branch 5\r\n excelfile_surface_branch5_high = r'excel\\flow_rate\\particle_surface_branch5_high.xlsx'\r\n inarray_surface_branch5_high = pd.read_excel(excelfile_surface_branch5_high).to_numpy()\r\n \r\n excelfile_surface_branch5_medium = 
r'excel\\flow_rate\\particle_surface_branch5_medium.xlsx'\r\n inarray_surface_branch5_medium = pd.read_excel(excelfile_surface_branch5_medium).to_numpy()\r\n \r\n excelfile_surface_branch5_low = r'excel\\flow_rate\\particle_surface_branch5_low.xlsx'\r\n inarray_surface_branch5_low = pd.read_excel(excelfile_surface_branch5_low).to_numpy()\r\n \r\n excelfile_bottom_branch5_high = r'excel\\flow_rate\\particle_bottom_branch5_high.xlsx'\r\n inarray_bottom_branch5_high = pd.read_excel(excelfile_bottom_branch5_high).to_numpy()\r\n \r\n excelfile_bottom_branch5_medium = r'excel\\flow_rate\\particle_bottom_branch5_medium.xlsx'\r\n inarray_bottom_branch5_medium = pd.read_excel(excelfile_bottom_branch5_medium).to_numpy()\r\n \r\n excelfile_bottom_branch5_low = r'excel\\flow_rate\\particle_bottom_branch5_low.xlsx'\r\n inarray_bottom_branch5_low = pd.read_excel(excelfile_bottom_branch5_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 5\r\n excelfile_tracer_branch5_high = r'excel\\flow_rate\\tracer_branch5_high.xlsx'\r\n inarray_tracer_branch5_high = pd.read_excel(excelfile_tracer_branch5_high).to_numpy()\r\n \r\n excelfile_tracer_branch5_medium = r'excel\\flow_rate\\tracer_branch5_medium.xlsx'\r\n inarray_tracer_branch5_medium = pd.read_excel(excelfile_tracer_branch5_medium).to_numpy()\r\n \r\n excelfile_tracer_branch5_low = r'excel\\flow_rate\\tracer_branch5_low.xlsx'\r\n inarray_tracer_branch5_low = pd.read_excel(excelfile_tracer_branch5_low).to_numpy()\r\n \r\n \r\n self.inarrays_branch5 = [inarray_surface_branch5_high, inarray_surface_branch5_medium, inarray_surface_branch5_low, \\\r\n inarray_bottom_branch5_high, inarray_bottom_branch5_medium, inarray_bottom_branch5_low, \\\r\n inarray_tracer_branch5_high, inarray_tracer_branch5_medium, inarray_tracer_branch5_low]", "def bufr_to_dataframe(file=''):\n \n if debug:\n print(\"Running bufr_to_dataframe for: \", file)\n \n check_read_file (file = file, read= False)\n f = open(file)\n #source_file = [l for l in file.split('/') if '.bfr' in l][0]\n read_data = []\n \n \"\"\" Name of the columns as they will appear in the pandas dataframe (not necessarily CDM compliant) \"\"\"\n #column_names = ['report_timestamp' , 'iday', 'station_id', 'latitude', 'longitude', 'pressure', 'value','varno@body']\n \n lat, lon, alt, blockNumber, stationNumber, statid = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n \n obs_id, report_id = -1, 0 # progressive observation id\n stations_id = [] \n \n while 1:\n #lista = [] # temporary list\n bufr = codes_bufr_new_from_file(f)\n \n if bufr is None:\n break\n \n codes_set(bufr, 'unpack', 1) # eCcodes must expand all the descriptors and unpack the data section\n \n date = '19'+codes_get_array(bufr, \"typicalDate\")[0][2:]\n timePeriod = codes_get_array(bufr, \"typicalTime\")[0] \n \n year, month, day = date[0:4], date[4:6] , date[6:8]\n hour, minutes = timePeriod[0:2] , timePeriod[2:4]\n \n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day )\n\n pressure = codes_get_array(bufr, \"pressure\") \n temperature = codes_get_array(bufr, \"airTemperature\") \n wind_direction = codes_get_array(bufr, \"windDirection\")\n wind_speed = codes_get_array(bufr, \"windSpeed\")\n \n try: # not all the bufr files have the dewpoint \n dew_point = codes_get_array(bufr, \"dewpointTemperature\")\n except:\n dew_point= np.empty((1, len(temperature)))\n dew_point[:] = np.nan\n \n num_lev = len(pressure) # number of distinct pressure levels \n \n try:\n geopotential = 
codes_get_array(bufr, \"nonCoordinateGeopotentialHeight\") \n except:\n geopotential = np.full( (1,len(temperature)) , np.nan )[0,:]\n \n if report_id == 0:\n ''' Check again but these values should remain the same for all cnt, so it makes no sense to read them every time '''\n lat = codes_get(bufr, \"latitude\")\n lon = codes_get(bufr, \"longitude\")\n alt = float(codes_get(bufr, \"heightOfStation\"))\n blockNumber = codes_get(bufr, \"blockNumber\")\n stationNumber = codes_get(bufr, \"stationNumber\")\n #statid = str(blockNumber*1000+stationNumber) # changed to int instead of str\n statid = blockNumber*1000+stationNumber\n if statid not in stations_id:\n stations_id.append(statid) \n \n codes_release(bufr)\n \n miss_value = -1.e100 \n \n for i in range(len(temperature)):\n obs_id = obs_id + 1 \n airT = temperature[i]\n winds = wind_speed[i]\n windd = wind_direction[i]\n press = pressure[i]\n gph = geopotential[i]\n dp = dew_point[i]\n if press == miss_value:\n press = np.nan \n if dp == miss_value:\n dp = np.nan\n if airT == miss_value : # replacing none values with numpy nans\n airT = np.nan \n if winds == miss_value:\n winds = np.nan\n if gph == miss_value:\n gph = np.nan \n if windd == 2147483647 or windd == -2147483647:\n windd = np.nan \n \n \n for value,var in zip( [gph, airT, winds, windd, dp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'dew_point'] ):\n obs_id = obs_id + 1 \n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n else:\n z_type = -2147483648 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n\n\n report_id += 1\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n return df, stations_id", "def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)", "def create_df(filename):\n data = pd.read_csv(filename)\n data = data.dropna(axis='index')\n data['inc_angle'] = np.radians(data['inc_angle'])\n data = data.astype('float64')\n data = data[data['inc_angle'] <= np.deg2rad(80)]\n return data", "def get_flow_and_frequency_data(self) -> pd.DataFrame:\n return pd.read_feather(self.figure_data_paths.flow_and_frequency_path)", "def read_insitu_gas(cls, full_file_path):\n\n with open(full_file_path, 'r') as f:\n hlines = f.readline().rstrip().split(': ')[1]\n\n df = pd.read_csv(full_file_path, skiprows=int(hlines), skipinitialspace=True,\n 
delimiter=' ', header=None, names=['site', 'year', 'month', cls._gas_name])\n\n # set datetime index in df (requires 'day' column)\n df['day'] = 1\n df.set_index(pd.to_datetime(df[['year', 'month', 'day']]), inplace=True)\n\n return df", "def _get_data(self, gas, loc, voltage, speed, trial):\n cols = []\n for g in gas:\n for l in loc:\n try:\n (sub, files) = self._get_sensor_col_files(g, l)\n except OSError as e:\n print('{}\\n Keeping calm and carrying on.'.format(e))\n continue\n for v in voltage:\n for s in speed:\n end = \"_board_setPoint_%s_fan_setPoint_%s_mfc_setPoint_%sppm_p%s\" % (\n self.SensorVoltages[v],\n self.FanSpeeds[s],\n self.GasNames[g],\n self.AltLocs[l])\n filtered = [f.split('/')[-1] for f in files if f.endswith(end)]\n if not filtered:\n if self._args['verbose']:\n print('No valid files found for \"%s\", skipping!' % sub)\n continue\n timeStamp = [filt.split('_', 1)[0] for filt in filtered]\n date = [time.strptime(ts, '%Y%m%d%H%M') for ts in timeStamp]\n date = [time.strftime('%Y-%m-%d %H:%M', d) for d in date]\n filtered = [os.path.join(sub, f) for f in filtered]\n for i, filt in enumerate(filtered):\n j = i + 1\n if j in trial:\n p = os.path.sep.join([self.dataloc_prefix,\n self.data_location,\n filt])\n\n cols.append(SensorColumn(data_location=p,\n gas=self.GasNames[g],\n loc=self.Locs[l],\n voltage=self.SensorVoltages[v],\n speed=self.AltFanSpeeds[s],\n trial=j,\n _args=self._args))\n\n if self._args['verbose']:\n print('\\nSelected %i single trial SensorColumns!' % len(cols))\n return cols", "def _borehole_structure_data(self):\n file_loc = self.data_path / \"03_GeologicalMapping\" / \"02_BoreholeIntersections\"\n columns = [\"depth\", \"azimuth\", \"dip\", \"aperture\", \"type\"]\n\n data = []\n for borehole in self.boreholes:\n path = file_loc / (borehole + \"_structures.txt\")\n frame = pd.read_csv(\n path, sep=None, names=columns, skiprows=2, engine=\"python\"\n )\n frame[\"borehole\"] = borehole\n data.append(frame)\n df = pd.concat(data, ignore_index=True)\n return df", "def path_to_df(path, orig) :\n with open(path, 'r') as fich :\n strinfo = fich.readline()\n [strn, strm] = strinfo.split(\",\")\n info = {'n':int(strn.split(\"=\")[1]), 'm':int(strm.split(\"=\")[1])}\n data = pd.read_csv(fich, sep=\",\")\n data['origin'] = orig\n return info, data", "def raw_individual(self) -> pd.DataFrame:\n\n # raw individual file name\n file_name = \"indivTripData_\" + str(self.iteration) + \".csv\"\n\n trips = pd.read_csv(\n os.path.join(self.scenario_path,\n \"output\",\n file_name),\n usecols=[\"trip_mode\", # trip mode\n \"micro_walkMode\", # micro-mobility choice on walk mode\n \"micro_trnAcc\", # micro-mobility choice on transit access mode\n \"micro_trnEgr\"]) # micro-mobility choice on transit egress mode\n\n trips = trips.rename({\"trip_mode\": \"tripMode\"}, axis=1)\n\n return trips", "def read_sad( self, relS = 0, verbose = 0 ):\n\n df = read_table( self.tfs, sep = r'\\s+', index_col = False )\n \n if relS: \n print(\" Add column 'rel_S' -- S position shifted with IP in the center.\")\n df[\"rel_S\"] = df.apply( lambda row: rel_s( df, row ), axis = 1 )\n \n return df", "def read_ipea(\n file_name:str,time_freq:str\n ) -> pd.DataFrame:\n df = pd.read_csv(f'../data/{time_freq}/{file_name}.csv',\n index_col='Date',parse_dates=True).fillna(0)\n df = df.pct_change().replace([np.inf, -np.inf, np.nan], 0)\n\n df = lagger(df,2,list(df))\n df.index = df.index - pd.Timedelta('1 days')\n df = df.resample('Q', convention='start').asfreq()\n\n return df", "def 
_get_datas(self):\n print(f'base name {self.base_name}')\n data_file_name = glob(osp.join(self.root_dir, MERGED_PATTERN))[0]\n data_df = pd.read_csv(data_file_name)\n\n ppg_d = data_df[['CurrentTimeMillis', 'ch1']].values\n acc_d = data_df[[\n 'EventTimestamp(ns)', 'accel_x', 'accel_y', 'accel_z'\n ]].values\n ppg_d = ppg_d[::2]\n acc_d = acc_d[::2]\n\n return acc_d, ppg_d", "def read_data_file(input_file):\n header_lines = 0\n last_pound_pos = -1\n with open(input_file, 'r') as data_file:\n while (data_file.read(1) == '#'):\n last_pound_pos = data_file.tell()\n header = data_file.readline()\n header_lines += 1\n\n #Read the next lines\n data_1 = data_file.readline().split()\n data_2 = data_file.readline().split()\n data_file.seek(last_pound_pos + 1) #Goes back to the last line of the header\n\n if header_lines == 0:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n\n else:\n # Single line file\n if len(data_2) == 0:\n data_file.readline()\n\n else:\n\n if len(data_1) != len(\n data_2): #If there is a line containing the number of particles,\n data_file.readline()\n data_file.readline()\n\n try:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n data.columns = header.split()\n except:\n raise Exception(\"The input file '%s' is corrupted, usually the problem is because \"\\\n \"there is an end of a line that has an additional space\" %input_file)\n\n return data", "def load_steels():\n path = os.path.join(DATA_DIR, \"yieldstrength-citrination-312.csv\")\n df = pd.read_csv(path, index_col=False)\n return df", "def _read_trajectory_files(self):\n dflist = []\n self.Ntimes = {}\n for downD in self.case.downstreamD:\n outputs = self.case.get_outputs(self.method,downD)\n print(outputs['trajectory_file'])\n df = pd.read_csv(outputs['trajectory_file'],\n header=None,\n usecols=[0,1,2])\n df.columns = ['t','y','z']\n df['x'] = downD * self.case.turbine.D\n df['z'] -= self.case.turbine.zhub\n df = df.set_index(['t','x'])[['y','z']]\n self.Ntimes[downD] = len(df.index.levels[0])\n dflist.append(df)\n self.df = pd.concat(dflist).sort_index()", "def get_clk_spr_df(self) -> pd.DataFrame:\n return pd.read_feather(self.figure_data_paths.clk_spr_path)", "def get_data(filename):\n df = gpd.read_file(filename, index=\"OBJECTID\")\n # Sort along the length column, which orders the points from top\n # to bottom\n fplencol = [x for x in df.columns if x.startswith(\"fpLen\")][0]\n df = df.sort_values(fplencol, ascending=True)\n df = run_checks(df)\n snapdf = gpd.read_file(\n filename.replace(\"smpldef3m\", \"snaps3m\"), index=\"OBJECTID\"\n )\n # Compute full rotation string\n # OK, be careful here. 
Presently, the 8 char field covers\n # 2010 thru 2017, so we rotate to cover the first and last years\n # 2007 2011[1]\n # 2008 2010[0]\n # 2009 2011[1]\n # 2018 2016[6]\n # 2019 2017[7]\n # 2020 2018[6]\n s = df[\"CropRotatn_CY_2017\"]\n df[\"landuse\"] = (\n s.str[1]\n + s.str[0]\n + s.str[1]\n + s\n + s.str[6]\n + s.str[7]\n + s.str[6]\n + s.str[7]\n )\n s = df[\"Management_CY_2017\"]\n df[\"management\"] = (\n s.str[1]\n + s.str[0]\n + s.str[1]\n + s\n + s.str[6]\n + s.str[7]\n + s.str[6]\n + s.str[7]\n )\n return df, snapdf", "def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):\n with open(self.signal_fname, 'r') as f_signal:\n signal_reader = csv.reader(f_signal, delimiter=\";\")\n for _ in range(start + 1):\n next(signal_reader)\n data_raw = [row[1:-1] for row in signal_reader]\n\n data_source = self.scale * np.asarray(data_raw, dtype=np.float64).T # rotate for mne\n if self.stim_channel is not None:\n data_source = np.append(data_source, [self.stim_channel[start:stop + 1]], axis=0)\n # not sure, copied from mne.io.utils._mult_cal_one\n if mult is not None:\n data[:] = np.dot(mult, data_source)\n else:\n if isinstance(idx, slice):\n data[:] = data_source[idx]\n else:\n np.take(data_source, idx, axis=0, out=data)\n if cals is not None:\n data *= cals", "def test_get_spiral_slice_by_file():\n import os.path\n filename = os.path.join(directory, \"tests\", \"test_data\", \"test_lbv.dat\")\n spiral_arm = survey.get_spiral_slice(filename = filename)\n\n track = np.array([np.arange(10)+1, np.arange(10)+1, np.arange(10)+1]).T\n spiral_arm2 = survey.get_spiral_slice(track = track)\n\n assert np.allclose(spiral_arm[\"INTEN\"], spiral_arm2[\"INTEN\"], equal_nan = True)", "def startmeup():\r\n global iteration\r\n\r\n #The readout function is called...\r\n thetime, T_BL100, MF_BL100, Rho_BL100, T_Cori, MF_Cori, Rho_Cori, DP = readout()\r\n #... 
and the results are added to the dataframe.\r\n df.loc[iteration]= [thetime, T_BL100, MF_BL100, Rho_BL100, T_Cori, MF_Cori, Rho_Cori, DP]\r\n iteration = iteration + 1 #Moving on to the next entry.\r", "def read_pendf_xs(file,start,finish):\n with open(file) as f:\n e = []\n cs = []\n\n break_outer = False\n\n for i,line in enumerate(f):\n # -------------------------------\n # Stop the loop once finish is reached\n # -------------------------------\n if i == finish:\n break\n if i >= start-1:\n \t# -------------------------------\n \t# Only include first 66 columns, split on space\n \t# and convert to an array of strings\n \t# -------------------------------\n word_len = 11\n word_start = 0\n for j in range(6):\n word = line[word_start:word_start+11]\n\n if( j%2 == 0 ):\n # -------------------------------\n # Grab the energies, convert to readable format\n # -------------------------------\n if( word == ' ' ):\n break_outer = True\n break # end of TAB1\n e.append(word.replace('-','e-').replace('+','e+'))\n else:\n # -------------------------------\n # Grab cross section, convert to readable format\n # -------------------------------\n if( word == ' ' ):\n break_outer = True\n break # end of TAB1\n cs.append(word.replace('-','e-').replace('+','e+'))\n word_start+=word_len\n\n if( break_outer ):\n break # end of TAB1\n \n # -------------------------------\n # Convert to floats\n # -------------------------------\n e = np.array(e).astype(float)\n cs = np.array(cs).astype(float)\n\n # -------------------------------\n # Stack them into a numpy array\n # -------------------------------\n pointwise_cs = np.array([e,cs])\n \n return pointwise_cs", "def coopsCurrentRequest(station_id, tides_dt_start, tides_dt_end):\n tides_data_options = \"time_zone=gmt&application=ports_screen&format=json\"\n tides_url = \"http://tidesandcurrents.noaa.gov/api/datagetter?\"\n begin_datetime = \"begin_date=\" + tides_dt_start\n end_datetime = \"&end_date=\" + tides_dt_end\n current_dp = \"&station=\" + station_id\n full_url = (tides_url + begin_datetime + end_datetime+current_dp +\n \"&application=web_services&product=currents&units=english&\" +\n tides_data_options)\n r = requests.get(full_url)\n try:\n r = r.json()\n except:\n return None\n if 'data' in r:\n r = r['data']\n data_dt = []\n data_spd = []\n data_dir = []\n for row in r:\n # Convert from knots to cm/s.\n data_spd.append(float(row['s']) * 51.4444444)\n data_dir.append(float(row['d']))\n date_time_val = datetime.strptime(row['t'], '%Y-%m-%d %H:%M')\n data_dt.append(date_time_val)\n\n data = dict()\n data['sea_water_speed (cm/s)'] = np.array(data_spd)\n data['direction_of_sea_water_velocity (degree)'] = np.array(data_dir)\n time = np.array(data_dt)\n columns = ['sea_water_speed (cm/s)',\n 'direction_of_sea_water_velocity (degree)']\n df = DataFrame(data=data, index=time, columns=columns)\n return df\n else:\n return None" ]
[ "0.5800998", "0.57862395", "0.56656605", "0.5580664", "0.5480087", "0.54421705", "0.54055905", "0.53888214", "0.5330437", "0.5316921", "0.5250387", "0.5235828", "0.5213562", "0.5127126", "0.5084637", "0.507864", "0.50635403", "0.50632465", "0.5061292", "0.5049956", "0.50403017", "0.5028723", "0.5017891", "0.49990064", "0.499657", "0.49861148", "0.49835467", "0.49731988", "0.49670166", "0.49526033" ]
0.76940286
0
This function reads the raw data file, gets the scanrate and stepsize, and then reads the lines according to cycle number. Once it reads the data for one cycle, it calls the read_cycle function to generate a dataframe. It does the same thing for all the cycles and finally returns a dictionary, the keys of which are the cycle numbers and the values are the corresponding dataframes.
def read_file(file): dict_of_df = {} h_val = 0 l_val = 0 n_cycle = 0 #a = [] with open(file, 'rt') as f_val: print(file + ' Opened') for line in f_val: record = 0 if not (h_val and l_val): if line.startswith('SCANRATE'): scan_rate = float(line.split()[2]) h_val = 1 if line.startswith('STEPSIZE'): step_size = float(line.split()[2]) l_val = 1 if line.startswith('CURVE'): n_cycle += 1 if n_cycle > 1: number = n_cycle - 1 data = read_cycle(a_val) key_name = 'cycle_' + str(number) #key_name = number dict_of_df[key_name] = copy.deepcopy(data) a_val = [] if n_cycle: a_val.append(line) return dict_of_df, number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_sep_cycles(file_name, database_name, datatype):\n #df_single = pd.read_excel(file_name,1)\n (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col, char_cap_col, charge_or_discharge) = col_variables(datatype)\n\n while '/' in file_name:\n file_name = file_name.split('/', maxsplit = 1)[1]\n name = file_name.split('.')[0] + 'Raw'\n \n #name = file_name.split('.')[0] + 'Raw'\n df_single = dbfs.get_file_from_database(name, database_name)\n gb = df_single.groupby(by=[cycle_ind_col])\n cycle_dict = dict(iter(gb))\n battname = file_name.split('.')[0]\n for i in range(1, len(cycle_dict)+1):\n cycle_dict[i]['Battery_Label'] = battname\n for i in range(1, len(cycle_dict)+1):\n dbfs.update_database_newtable(cycle_dict[i], battname+'-'+'Cycle'+ str(i), database_name)\n print('All data separated into cycles and saved in database.')\n return cycle_dict", "def read_cycle(data):\n\n current = []\n potential = []\n for i in data[3:]:\n current.append(float(i.split(\"\\t\")[4]))\n potential.append(float(i.split(\"\\t\")[3]))\n zipped_list = list(zip(potential, current))\n dataframe = pd.DataFrame(zipped_list, columns=['Potential', 'Current'])\n return dataframe", "def parse_file(filepath):\n\n #number_pattern = '(\\d+(?:\\.\\d+)?)'\n #number_pattern = '(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?'\n #line_pattern = '^\\s*%s\\.*hr.*min.*$' % ('\\s+'.join([number_pattern for x in range(5)]))\n\n line_pattern = r'^\\s*(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+.*hr.*min.*$'\n\n data = [] # create an empty list to collect the data\n # open the file and read through it line by line\n with open(filepath, 'r') as file_object:\n line = file_object.readline()\n while line:\n #print(\"line: \", line)\n match = re.match(line_pattern, line)\n if match:\n #print(\"match line: \", line)\n #print(match.groups())\n row = {\n 'l_rate': match.group(1),\n 'iter': match.group(2),\n 'epoch': match.group(3),\n 'num': match.group(4),\n 'valid_loss': match.group(5),\n 'valid_acc': match.group(6),\n 'train_loss': match.group(7),\n 'train_acc': match.group(8),\n 'batch_loss': match.group(9),\n 'batch_acc': match.group(10)\n }\n #print(row)\n #return match.groups()\n\n # append the dictionary to the data list\n data.append(row)\n\n line = file_object.readline()\n\n # create a pandas DataFrame from the list of dicts\n print(\"data: \", data)\n df = pd.DataFrame(data)\n print(df.ndim)\n print(df.shape)\n print(df.dtypes)\n print(\"data frame: \", df)\n # set the School, Grade, and Student number as the index\n #df.set_index(['epoch', 'valid_loss', 'valid_acc', 'train_loss', 'train_acc'], inplace=True)\n #df.set_index(['epoch'], inplace=True)\n # consolidate df to remove nans\n #df = df.groupby(level=data.index.epoch).first()\n # upgrade Score from float to integer\n df = df.apply(pd.to_numeric, errors='ignore')\n return df", "def read_tfrecord(file_path, cycle_length=5, num_parallel_calls=10):\n\n files = tf.data.Dataset.list_files(file_path)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset, cycle_length=cycle_length))\n dataset = dataset.map(_parse_bytes_sample, num_parallel_calls)\n return dataset", "def parse_file(self):\n file_time = ''\n num_dir = 0\n num_freq = 0\n freq_w_band = 0.0\n freq_0 = 0.0\n start_dir = 0.0\n\n dspec_matrix = []\n\n # 
Extract the file time from the file name\n input_file_name = self._stream_handle.name\n\n match = FILE_NAME_MATCHER.match(input_file_name)\n\n if match:\n file_time = match.group(1)\n else:\n error_message = 'Unable to extract file time from DSpec input file name: %s '\\\n % input_file_name\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the first line in the file\n line = self._stream_handle.readline()\n\n # loop over all lines in the data file\n while line:\n\n if EMPTY_LINE_MATCHER.match(line):\n # ignore blank lines, do nothing\n pass\n\n elif HEADER_MATCHER.match(line):\n\n # we need header records to extract useful information\n for matcher in HEADER_MATCHER_LIST:\n header_match = matcher.match(line)\n\n if header_match is not None:\n\n # Look for specific header lines and extract header fields\n if matcher is DIR_FREQ_MATCHER:\n num_dir = int(header_match.group(1))\n num_freq = int(header_match.group(2))\n\n elif matcher is FREQ_BAND_MATCHER:\n freq_w_band = header_match.group(1)\n freq_0 = header_match.group(2)\n\n elif matcher is START_DIR_MATCHER:\n start_dir = header_match.group(1)\n\n else:\n #ignore\n pass\n\n elif DSPEC_DATA_MATCHER.match(line):\n\n # Extract a row of the Directional Surface Spectrum matrix\n sensor_match = DSPEC_DATA_MATCHER.match(line)\n data = sensor_match.group(1)\n values = [int(x) for x in data.split()]\n\n num_values = len(values)\n\n # If the number of values in a line of data doesn't match num_dir,\n # Drop the record, throw a recoverable exception and continue parsing\n if num_values != num_dir:\n error_message = 'Unexpected Number of directions in line: expected %s, got %s'\\\n % (num_dir, num_values)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n else:\n # Add the row to the dspec matrix\n dspec_matrix.append(values)\n\n else:\n # Generate a warning for unknown data\n error_message = 'Unexpected data found in line %s' % line\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the next line in the file\n line = self._stream_handle.readline()\n\n # Check to see if the specified number of frequencies were retrieved from the data\n dspec_matrix_length = len(dspec_matrix)\n if dspec_matrix_length != num_freq:\n error_message = 'Unexpected Number of frequencies in DSpec Matrix: expected %s, got %s'\\\n % (num_freq, dspec_matrix_length)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # Construct the parsed data list to hand over to the Data Particle class for particle creation\n parsed_data = [\n file_time, # ('file_time', 0, str),\n num_dir, # ('num_dir', 1, int),\n num_freq, # ('num_freq', 2, int),\n freq_w_band, # ('freq_w_band', 3, float),\n freq_0, # ('freq_0', 4, float),\n start_dir, # ('start_dir', 5, float),\n dspec_matrix # ('directional_surface_spectrum', 6, list)]\n ]\n\n # Extract a particle and append it to the record buffer\n particle = self._extract_sample(AdcptMDspecInstrumentDataParticle, None, parsed_data)\n self._record_buffer.append(particle)", "def dataset_read(self):\n # while self.running:\n # grab current data_list and own it locally per cycle\n # to avoid mid-parse changes\n self.local_data_list = self.data_list\n\n # set a random duration for reading from random line\n # before choosing another from current set\n dataset_read_dur = (random.randrange(3000, 13000) / 1000) * self.glob_speed\n\n # prepare start line to 
read\n starting_line = self.line_to_read()\n\n # sorts out durations\n if self.debug_choose:\n print('B1 dataset line read duration = ', dataset_read_dur)\n end_time = self.end_time_calc(dataset_read_dur)\n\n # determine if read is to be looped or sequential\n looped = self.is_loop()\n\n while time.time() < end_time:\n # calc baudrate and cycle clock for speed of line read\n baudrate = self.baudrate()\n\n # if looped\n if looped > 0:\n loop_end = time.time() + looped\n\n # reset the start read point\n line_to_read = starting_line\n\n # for each loop\n while time.time() < loop_end:\n active_line = self.local_data_list[line_to_read]\n self.parse_active_line(active_line)\n line_to_read += 1\n if self.debug_read:\n print(f'******** line to read LOOPING {line_to_read}')\n # print(f'config data = {config.x_ds}, {config.y_ds}, {config.z_ds}')\n\n # pause for 10th of baudrate, while parse_active_line slides\n time.sleep(baudrate/10)\n else:\n # if no loop\n active_line = self.local_data_list[starting_line]\n self.parse_active_line(active_line)\n starting_line += 1\n if self.debug_read:\n print(f'******** line to read NO LOOP {starting_line}')\n # print(f'config data = {config.x_ds}, {config.y_ds}, {config.z_ds}')\n\n # pause for 10th of baudrate, while parse_active_line slides\n time.sleep(baudrate/10)", "def read(file):\n\n blocks = ['bus', 'load', 'fshunt', 'gen', 'branch', 'transf', 'area',\n 'twotermdc', 'vscdc', 'impedcorr', 'mtdc', 'msline', 'zone',\n 'interarea', 'owner', 'facts', 'swshunt', 'gne', 'Q']\n nol = [1, 1, 1, 1, 1, 4, 1,\n 0, 0, 0, 0, 0, 1,\n 0, 1, 0, 0, 0, 0]\n rawd = re.compile('rawd\\d\\d')\n\n retval = True\n version = 0\n b = 0 # current block index\n raw = {}\n for item in blocks:\n raw[item] = []\n\n data = []\n mdata = [] # multi-line data\n mline = 0 # line counter for multi-line models\n\n # parse file into raw with to_number conversions\n fid = open(file, 'r')\n for num, line in enumerate(fid.readlines()):\n line = line.strip()\n if num == 0: # get basemva and frequency\n data = line.split('/')[0]\n data = data.split(',')\n\n mva = float(data[1])\n freq = float(data[5])\n version = int(data[2])\n\n if not version:\n version = int(rawd.search(line).group(0).strip('rawd'))\n if version < 32 or version > 33:\n logging.warning('RAW file version is not 32 or 33. 
Error may occur.')\n continue\n elif num == 1: # store the case info line\n logging.info(line)\n continue\n elif num == 2:\n continue\n elif num >= 3:\n if line[0:2] == '0 ' or line[0:3] == ' 0 ': # end of block\n b += 1\n continue\n elif line[0] is 'Q': # end of file\n break\n data = line.split(',')\n\n data = [to_number(item) for item in data]\n mdata.append(data)\n mline += 1\n if mline == nol[b]:\n if nol[b] == 1:\n mdata = mdata[0]\n raw[blocks[b]].append(mdata)\n mdata = []\n mline = 0\n fid.close()\n\n # add device elements params and add to PSAT formatted dictionary\n\n for data in raw['bus']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10\n ID, NAME, BasekV, Type, Area Zone Owner Va, Vm, latitude longitude\n \"\"\"\n idx = data[0]\n ty = data[3]\n angle = data[8]\n try:\n lat = data[9]\n except:\n # logging.warning('<No Coordinates in .raw file>')\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5]]\n else:\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n 'latitude': data[9],\n 'longitude': data[10]\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5], data[9], data[10]]\n Settings.Bus.append(psatlist)\n Settings.BusNames.append(data[1])\n # Add BusSTORE Dictionary For Later Reference\n Settings.BusStore[idx] = param\n\n xcoord = [34.560040, 34.938385, 34.360040, 40.5152473, 40.3142473, 36.527401, 36.857401, 36.687401, 36.856401,\n 40.487041, 36.903901, 36.702901, 35.832561, 33.386047, 33.185047, 37.105571, 37.104154, 33.706718,\n 37.103549, 36.703539, 37.103559, 36.703549, 36.033561, 35.631561, 36.032561, 35.732561, 36.525401,\n 36.857401, 49.869314, 50.969314, 51.979314, 52.481674, 54.973192, 56.276212, 41.734596, 34.551015,\n 34.652015, 34.537507, 34.587507, 34.157904, 33.714453, 33.762453, 39.548160, 39.496160, 34.313143,\n 34.545782, 34.380686, 34.111686, 34.137762, 34.118650, 34.158650, 33.918650, 33.718650, 34.018650,\n 34.018650, 34.018650, 34.018650, 34.018650, 34.312456, 34.315456, 34.243600, 34.566258, 34.565258,\n 46.064672, 46.565672, 45.514571, 45.606833, 45.806833, 44.890000, 45.596416, 45.295416, 45.891161,\n 47.954899, 46.511440, 45.913936, 45.713936, 46.669335, 47.954899, 47.624154, 43.784730, 44.482350,\n 42.006860, 42.934919, 42.731919, 43.013135, 44.068350, 43.558350, 42.438350, 42.938350, 44.068350,\n 43.558350, 43.048350, 42.638350, 44.068350, 43.558350, 43.048350, 42.638350, 43.620189, 39.120428,\n 40.398031, 35.216200, 35.215200, 36.202099, 39.777745, 39.539598, 37.052929, 35.403217, 35.352217,\n 36.807243, 39.567450, 40.807689, 40.806689, 41.008689, 39.555494, 37.954721, 38.406721, 38.906721,\n 38.656721]\n ycoord = [-109.277313, -110.303798, -109.777313, -107.546455, -107.546455, -108.325669, -108.654569, -108.486669,\n -108.325669, -107.185575, -111.390408, -111.390408, -111.448566, -112.860397, -112.659397, -108.243555,\n -108.441191, -112.322033, -111.590816, -111.190816, -111.190816, -111.590806, -111.648566, -111.248566,\n -111.249566, -111.647566, -108.655669, -108.323669, -122.150895, -122.150895, -122.150895, -121.61684,\n -121.924221, -122.21370, -108.790427, -117.568105, -117.538105, -118.607375, -118.658375, -118.280282,\n -118.146319, -118.096319, -112.52797, -112.72797, -118.690631, -118.389938, 
-118.478496, -118.478496,\n -118.299917, -118.095428, -118.095428, -118.095428, -118.095428, -118.195428, -118.395428, -117.995428,\n -117.795428, -117.995428, -118.481217, -118.891217, -118.391667, -117.166428, -117.368428, -106.60906,\n -106.80906, -122.681289, -121.114785, -122.113785, -123.29000, -121.312202, -121.114202, -106.612578,\n -118.997945, -112.88531, -120.692286, -120.693974, -119.571501, -120.997945, -122.219492, -118.77463,\n -121.019484, -121.316546, -114.419206, -114.419206, -120.956476, -120.79484, -120.93484, -121.216546,\n -121.156546, -121.215484, -121.135484, -121.255484, -121.175484, -121.013484, -120.733484, -121.053484,\n -120.973484, -118.865882, -122.073631, -122.263453, -120.847567, -120.900567, -120.129849, -122.142965,\n -122.262993, -121.021929, -119.450452, -119.450452, -121.779037, -122.276225, -122.135718, -121.935718,\n -121.935718, -121.24000, -121.18379, -121.10879, -121.27379, -121.23979]\n\n #for idx, line in enumerate(Settings.Bus):\n # line.extend([xcoord[idx], ycoord[idx]])\n\n maxV = 1.1\n minV = 0.9\n maxQ = 1\n minQ = 0\n convimp = 0\n status = 1\n loss = 1\n\n for data in raw['load']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11\n Bus, Id, Status, Area, Zone, PL(MW), QL (MW), IP, IQ, YP, YQ, OWNER\n \"\"\"\n\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n voltage = Settings.BusStore[busidx]['voltage']\n param = {'bus': busidx,\n 'Vn': vn,\n 'Sn': mva,\n 'p': (data[5] + data[7] * voltage + data[9] * voltage ** 2) / mva,\n 'q': (data[6] + data[8] * voltage - data[10] * voltage ** 2) / mva,\n 'owner': data[11],\n 'type': Settings.BusStore[busidx]['type'],\n 'voltage': voltage\n }\n\n psatlist = [busidx, mva, vn, param['p'], param['q'], maxV, minV, convimp, status]\n Settings.PQ.append(psatlist)\n \"\"\"CONFIRM THAT OTHER BUSES HAVE 0 P and 0 Q which are not added\"\"\"\n\n for data in raw['fshunt']:\n \"\"\"\n 0, 1, 2, 3, 4\n Bus, name, Status, g (MW), b (Mvar)\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n param = {'bus': busidx,\n 'Vn': vn,\n 'status': data[2],\n 'Sn': mva,\n 'g': data[3] / mva,\n 'b': data[4] / mva,\n }\n\n psatlist = [busidx, mva, vn, freq, param['g'], param['b'], param['status']]\n Settings.Shunt.append(psatlist)\n\n gen_idx = 0\n type = 6\n\n for data in raw['gen']:\n \"\"\"\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 12, 13, 14, 15, 16,17,18,19\n I,ID,PG,QG,QT,QB,VS,IREG,MBASE,ZR,ZX,RT,XT,GTAP,STAT,RMPCT,PT,PB,O1,F1\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n gen_mva = data[8]\n gen_idx += 1\n status = data[14]\n leak = 0\n param = {'Sn': gen_mva,\n 'Vn': vn,\n 'u': status,\n 'idx': gen_idx,\n 'bus': busidx,\n 'pg': status * data[2] / mva,\n 'qg': status * data[3] / mva,\n 'qmax': data[4] / mva,\n 'qmin': data[5] / mva,\n 'v0': data[6],\n 'ra': data[9], # ra armature resistance\n 'xs': data[10], # xs synchronous reactance\n 'pmax': data[16] / mva,\n 'pmin': data[17] / mva,\n }\n\n if Settings.BusStore[busidx]['type'] == 3: #Check Bus Type for Slack\n refangle = 0\n refBus = 1\n PGuess = 1\n swlist = [busidx, gen_mva, vn, param['v0'], refangle, param['qmax'], param['qmin'],\n maxV, minV, PGuess, loss, refBus, status]\n SW = swlist\n Settings.SW.append(swlist)\n Settings.SWStore[busidx] = param\n Settings.SynStore[busidx] = param\n continue\n\n if busidx not in Settings.BusStore.keys():\n \"\"\" Need data from .dyr file. 
Create initial list, then append data from .dyr\"\"\"\n else:\n # psatlist = [busidx, gen_mva, vn, freq, type, leak, param['ra'],param['xs']]\n # Syn.append(psatlist)\n Settings.SynStore[busidx] = param\n pvlist = [busidx, gen_mva, vn, param['pg'], Settings.BusStore[busidx]['voltage'],\n param['qmax'], param['qmin'], maxV, minV, loss, status]\n Settings.PV.append(pvlist)\n\n\n for data in raw['branch']:\n \"\"\"\n I,J,ID,R,X,B,RATEA,RATEB,RATEC,GI,BI,GJ,BJ,ST,LEN,O1,F1,...,O4,F4\n \"\"\"\n param = {'bus1': data[0],\n 'bus2': data[1],\n 'id' : data[2],\n 'r': data[3],\n 'x': data[4],\n 'b': data[5],\n 'rate_a': data[6],\n 'rate_b': data[7],\n 'rate_c': data[8],\n 'Vn': Settings.BusStore[data[0]]['Vn'],\n 'Vn2': Settings.BusStore[data[1]]['Vn'],\n 'length': data[14],\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n 'status': data[13]\n }\n\n psatlist = [param['bus1'], param['bus2'], param['rate_c'], param['Vn'], freq, EMPTY,\n param['length'], param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['status']]\n Settings.Lineij.append([data[0], data[1], data[2]])\n Settings.Lineji.append([data[1], data[0], data[2]])\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.branches += 1\n Settings.linecount += 1\n Settings.LineBusMatij[param['bus2']].append(Settings.branches)\n Settings.LineBusMatji[param['bus1']].append(Settings.branches)\n\n for data in raw['transf']:\n \"\"\"\n I,J,K,CKT,CW,CZ,CM,MAG1,MAG2,NMETR,'NAME',STAT,O1,F1,...,O4,F4\n R1-2,X1-2,SBASE1-2\n WINDV1,NOMV1,ANG1,RATA1,RATB1,RATC1,COD1,CONT1,RMA1,RMI1,VMA1,VMI1,NTP1,TAB1,CR1,CX1\n WINDV2,NOMV2\n \"\"\"\n if len(data[1]) < 5:\n ty = 2\n else:\n ty = 3\n if ty == 3:\n continue\n # raise NotImplementedError('Three-winding transformer not implemented')\n\n tap = data[2][0]\n phi = data[2][2]\n\n if tap == 1 and phi == 0:\n trasf = False\n else:\n trasf = True\n param = {'trasf': trasf,\n 'bus1': data[0][0],\n 'bus2': data[0][1],\n 'u': data[0][11],\n 'b': data[0][8],\n 'r': data[1][0],\n 'x': data[1][1],\n 'tap': tap,\n 'phi': phi,\n 'rate_a': data[2][3],\n 'Vn': Settings.BusStore[busidx]['Vn'],\n 'Vn2': Settings.BusStore[busidx]['Vn'],\n # 'length': data[?][?], FIND CORRECT INDEX\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n }\n psatlist = [param['bus1'], param['bus2'], param['rate_a'], param['Vn'], freq, EMPTY,\n EMPTY, param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['u']]\n\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.linecount += 1\n Settings.transformers += 1\n # ADD Line Data(All Branch Types) to Sys Param Dict after .dyr Transformer Data Added\n # Re-Order Line Data for correct sequence\n for key in Settings.LineOrd:\n for item in Settings.LineOrd[key]:\n Settings.Line.append(item)\n\n for data in raw['area']:\n Settings.Areas.append(data[4])\n\n for data in raw['zone']:\n Settings.Regions.append(data[1])\n\n return retval", "def parse_data(filepath):\n settings = dict()\n intensity = list()\n # Boolean flags to check when to start/stop\n # reading parameters\n read_params = False\n read_int = False\n read_zeeman = False\n finished = False\n fieldoff_intensities = list()\n fieldon_intensities = list()\n with open(filepath) as read_file:\n for line in read_file:\n if \"*****\" in line:\n read_int = False\n if finished is True:\n break\n if \"Scan\" in line:\n if \"[Field ON]\" in line:\n read_zeeman = True\n scan_details = line.split()\n settings[\"ID\"] = int(scan_details[1])\n # 
settings[\"Date\"] = str(scan_details[4])\n read_params = True\n read_int = False\n continue\n if read_int is True:\n if read_zeeman is False:\n fieldoff_intensities += [float(value) for value in line.split()]\n else:\n fieldon_intensities += [float(value) for value in line.split()]\n finished = True\n if read_params is True and len(line.split()) > 1:\n # Read in the frequency step, frequency, and other info\n # needed to reconstruct the frequency data\n scan_params = line.split()\n shift = 1\n settings[\"Frequency\"] = float(scan_params[0])\n settings[\"Frequency step\"] = float(scan_params[1])\n if len(scan_params) == 4:\n settings[\"Multiplier\"] = 1.\n shift = 0\n # If the multiplier data is there, we don't shift the read\n # index over by one\n else:\n settings[\"Multiplier\"] = float(scan_params[2])\n settings[\"Center\"] = float(scan_params[2 + shift])\n settings[\"Points\"] = int(scan_params[3 + shift])\n read_params = False\n # Start reading intensities immediately afterwards\n read_int = True\n continue\n fieldoff_intensities = np.array(fieldoff_intensities)\n fieldon_intensities = np.array(fieldon_intensities)\n\n # Generate the frequency grid\n settings[\"Frequency step\"] = settings[\"Frequency step\"] * settings[\"Multiplier\"]\n # This calculates the length of either side\n side_length = settings[\"Frequency step\"] * (settings[\"Points\"] // 2)\n start_freq = settings[\"Frequency\"] - side_length\n end_freq = settings[\"Frequency\"] + side_length\n frequency = np.linspace(start_freq, end_freq, settings[\"Points\"])\n\n return frequency, fieldoff_intensities, fieldon_intensities, settings", "def read_spectral_k(filename=\"tc_dos_l.dat\"):\n # column headers for the data \n #tcdosl_labels = [\n # \"wavelength\",\n # \"k_xx_raw\",\"k_xx_smooth\",\n # \"k_yy_raw\",\"k_yy_smooth\",\n # \"k_zz_raw\",\"k_zz_smooth\"]\n\n tcdosl_labels = [\n \"wavelength\",\n \"k_xx_raw\",\"k_yy_raw\",\"k_zz_raw\",\n \"k_xx_smooth\",\"k_yy_smooth\",\"k_zz_smooth\"]\n\n def subselect_table_block(i_start,lines):\n i = i_start + 1\n\n table = []\n while(lines[i].strip() != \"\"):\n args = lines[i].split()\n args = [arg.strip() for arg in args]\n args = [float(arg) for arg in args]\n table.append(args)\n i += 1 \n return np.array(table)\n\n line = None # initialize\n with open(filename,'r') as f:\n lines = f.readlines()\n lines = [s.strip() for s in lines]\n\n temperatures = []\n tcdosl_dict = OrderedDict()\n\n for il,line in enumerate(lines):\n if line.startswith('# Temp:'):\n args = line.split(':')\n T = int(float(args[1].strip()))\n temperatures.append(T)\n tcdosl_dict[T] = subselect_table_block(il,lines)\n\n tcdosl_df_dict = OrderedDict()\n for temp in temperatures:\n tcdosl_df_dict[temp] = pd.DataFrame(\n copy.deepcopy(tcdosl_dict[temp]),\n columns=list(tcdosl_labels))\n\n return {k:v.copy() for k,v in tcdosl_df_dict.items()}", "def make_stride_dict(filename):\r\n MAX_ACC=getMAXASA()\r\n stride = {}\r\n handle = open(filename, \"r\")\r\n #print \"herxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\r\n try:\r\n #kk=0\r\n for l in handle.readlines():\r\n #kk=kk+1\r\n #print kk\r\n sl = l.split()\r\n if sl[0] != \"ASG\": #if not detailed secondary structure record\r\n continue\r\n #REM |---Residue---| |--Structure--| |-Phi-| |-Psi-| |-Area-| ~~~~\r\n #ASG ALA A 1 1 C Coil 360.00 -35.26 120.7 ~~~~\r\n #0 1 2 3 4 5 6 7 8 9 10 \r\n # In cases where stride cannot recognize the residue type, it puts a '-' there\r\n # However, Bio.PDB uses ' ' so convert between the two \r\n if sl[2]=='-':\r\n sl[2]=' '\r\n \r\n 
resid=(sl[2],sl[3])\r\n aa=sl[1]\r\n ss=sl[5].upper() #There was b and B both from Bridge\r\n phi=float(sl[7])\r\n psi=float(sl[8])\r\n asa=float(sl[9])\r\n try:\r\n rasa=asa/MAX_ACC[aa]\r\n if rasa > 1.0: # we do get values greater than 1\r\n rasa=1.0\r\n except KeyError:\r\n rasa=np.nan\r\n stride[resid]=(aa,ss,phi,psi,asa,rasa)\r\n #construct a key,value pair\r\n #pdb.set_trace() \r\n finally:\r\n handle.close()\r\n return stride\r\n #return dssp, keys\r", "def bufr_to_dataframe(file=''):\n \n if debug:\n print(\"Running bufr_to_dataframe for: \", file)\n \n check_read_file (file = file, read= False)\n f = open(file)\n #source_file = [l for l in file.split('/') if '.bfr' in l][0]\n read_data = []\n \n \"\"\" Name of the columns as they will appear in the pandas dataframe (not necessarily CDM compliant) \"\"\"\n #column_names = ['report_timestamp' , 'iday', 'station_id', 'latitude', 'longitude', 'pressure', 'value','varno@body']\n \n lat, lon, alt, blockNumber, stationNumber, statid = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n \n obs_id, report_id = -1, 0 # progressive observation id\n stations_id = [] \n \n while 1:\n #lista = [] # temporary list\n bufr = codes_bufr_new_from_file(f)\n \n if bufr is None:\n break\n \n codes_set(bufr, 'unpack', 1) # eCcodes must expand all the descriptors and unpack the data section\n \n date = '19'+codes_get_array(bufr, \"typicalDate\")[0][2:]\n timePeriod = codes_get_array(bufr, \"typicalTime\")[0] \n \n year, month, day = date[0:4], date[4:6] , date[6:8]\n hour, minutes = timePeriod[0:2] , timePeriod[2:4]\n \n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day )\n\n pressure = codes_get_array(bufr, \"pressure\") \n temperature = codes_get_array(bufr, \"airTemperature\") \n wind_direction = codes_get_array(bufr, \"windDirection\")\n wind_speed = codes_get_array(bufr, \"windSpeed\")\n \n try: # not all the bufr files have the dewpoint \n dew_point = codes_get_array(bufr, \"dewpointTemperature\")\n except:\n dew_point= np.empty((1, len(temperature)))\n dew_point[:] = np.nan\n \n num_lev = len(pressure) # number of distinct pressure levels \n \n try:\n geopotential = codes_get_array(bufr, \"nonCoordinateGeopotentialHeight\") \n except:\n geopotential = np.full( (1,len(temperature)) , np.nan )[0,:]\n \n if report_id == 0:\n ''' Check again but these values should remain the same for all cnt, so it makes no sense to read them every time '''\n lat = codes_get(bufr, \"latitude\")\n lon = codes_get(bufr, \"longitude\")\n alt = float(codes_get(bufr, \"heightOfStation\"))\n blockNumber = codes_get(bufr, \"blockNumber\")\n stationNumber = codes_get(bufr, \"stationNumber\")\n #statid = str(blockNumber*1000+stationNumber) # changed to int instead of str\n statid = blockNumber*1000+stationNumber\n if statid not in stations_id:\n stations_id.append(statid) \n \n codes_release(bufr)\n \n miss_value = -1.e100 \n \n for i in range(len(temperature)):\n obs_id = obs_id + 1 \n airT = temperature[i]\n winds = wind_speed[i]\n windd = wind_direction[i]\n press = pressure[i]\n gph = geopotential[i]\n dp = dew_point[i]\n if press == miss_value:\n press = np.nan \n if dp == miss_value:\n dp = np.nan\n if airT == miss_value : # replacing none values with numpy nans\n airT = np.nan \n if winds == miss_value:\n winds = np.nan\n if gph == miss_value:\n gph = np.nan \n if windd == 2147483647 or windd == -2147483647:\n windd = np.nan \n \n \n for value,var in zip( [gph, airT, winds, windd, dp], ['gph', 'temperature', 
'wind_speed', 'wind_direction', 'dew_point'] ):\n obs_id = obs_id + 1 \n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n else:\n z_type = -2147483648 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n\n\n report_id += 1\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n return df, stations_id", "def readProcessedFCD():\n procFcdDict = {}\n pqDateDict = {} # each date is a period / quota tupel assigned\n simDate = '2007-07-18 '\n day = 0\n # create keys for the procFcdDict\n for p in period:\n for q in quota:\n day += 86400\n date, time = calcTime.getDateFromDepart(day).split(\" \")\n pqDateDict.setdefault(date, (p, q))\n procFcdDict.setdefault((p, q), {})\n # print date,p,q\n\n inputFile = open(path.FQprocessedFCD, 'r')\n for line in inputFile:\n timestamp, edge, speed, cover, id = line.split('\\t')\n date, time = calcTime.getNiceTimeLabel(timestamp).split(\" \")\n # add values to actual Dict\n timestep = calcTime.getTimeInSecs(simDate + time)\n procFcdDict[pqDateDict[date]].setdefault(\n timestep, []).append((id, edge, float(speed) / 3.6))\n inputFile.close()\n\n return procFcdDict", "def read_2hps2_acc(filename, multi_header=True):\n\n num_headers = 27\n header_row = 16\n units_row = 17\n timestamp_row = 20\n\n with open(filename, \"r\") as f:\n accreader = csv.reader(f, delimiter=\" \")\n\n # Skip file info headers\n for i in range(num_headers):\n if i == header_row - 1:\n channels = next(accreader)\n elif i == units_row - 1:\n units = next(accreader)\n elif i == timestamp_row - 1:\n ts_start = next(accreader)\n else:\n next(accreader)\n\n # Read body - drop blanks\n data = [[x for x in line if x != \"\"] for line in accreader]\n\n # Convert column names list so that split by \",\" not \" \", drop \"Time\" item and trim\n channels = \" \".join(channels).split(\",\")[1:]\n channels = [c.strip() for c in channels]\n\n # Read the start timestamp marker and get start datetime\n ts_start = [int(i) for i in ts_start[5:]]\n dt_start = datetime(\n ts_start[5], # year\n ts_start[4], # month\n ts_start[3], # day\n ts_start[2], # hour\n ts_start[1], # minute\n ts_start[0], # second\n )\n\n # Create dataframe and timestamps using start timestamp marker and time steps column\n df = pd.DataFrame(data, dtype=\"float\")\n ts = df.iloc[:, 0].values\n timestamps = [dt_start + timedelta(seconds=t) for 
t in ts]\n\n # For raw data module\n if multi_header is True:\n # Create multi-index header of channel names and units and time steps index\n units = \" \".join(units).split(\",\")[1:]\n units = [i.strip().split(\"(\")[1][:-1] for i in units]\n header = list(zip(channels, units))\n header.insert(0, (\"Timestamp\", \"\"))\n header = pd.MultiIndex.from_tuples(header, names=[\"channels\", \"units\"])\n df = df.set_index(df.columns[0])\n df.index.name = \"Time (s)\"\n df.insert(loc=0, column=\"Timestamp\", value=timestamps)\n # For screening module\n else:\n # Create single row header of only channel names (i.e. strip out the units)\n # Replace time steps column with timestamps and use range index\n header = [\"Timestamp\"] + channels\n df.iloc[:, 0] = timestamps\n\n # Set desired header (single or multi-index)\n df.columns = header\n\n return df", "def read_simulation_files(self):\n\n # Check if simulation files exist in current directory, if not kill process\n if not os.path.isfile('{}.xyz'.format(self.prefix)):\n print('Cannot find simulation file \"{}.xyz\"'.format(self.prefix))\n sys.exit()\n if not os.path.isfile('{}_vis2d.dat'.format(self.prefix)):\n print('Cannot find simulation file \"{}_vis2d.dat\"'.format(self.prefix))\n sys.exit()\n if not os.path.isfile('{}_dia.dat'.format(self.prefix)):\n print('Cannot find simulation file \"{}_dia.dat\"'.format(self.prefix))\n sys.exit()\n\n # Read coordinate file\n print('Reading simulation xyz file')\n with open('{}.xyz'.format(self.prefix),'r') as f:\n self.n = int(f.readline().split()[0])\n self.crds = np.zeros((self.n,2))\n f.seek(0,0)\n for i in range(self.frame):\n for j in range(2+self.n):\n f.readline()\n f.readline()\n f.readline()\n for j in range(self.n):\n self.crds[j,:] = np.array([float(c) for c in f.readline().split()[1:3]])\n\n # Read rings file\n print('Reading simulation ring file')\n with open('{}_vis2d.dat'.format(self.prefix),'r') as f:\n self.rings = []\n if self.vis_vortype != 0:\n while True:\n frame = int(f.readline().split()[0])\n vor_type = int(f.readline().split()[0])\n self.param = float(f.readline().split()[0])\n self.m = int(f.readline().split()[0])\n if frame==self.frame and vor_type==self.vis_vortype:\n for i in range(self.m):\n ring = np.array([float(c) for c in f.readline().split()])\n self.rings.append(ring.reshape(ring.shape[0]//2,2))\n break\n else:\n for i in range(self.m):\n f.readline()\n\n # Read diameter file\n print('Reading simulation radii and weights file')\n data = np.genfromtxt('{}_dia.dat'.format(self.prefix)).astype(float)\n self.radii = data[:self.n]/2\n self.weights = data[self.n:]", "def read_traveltime(self):\r\n \r\n #### read Travel time from txt file\r\n \r\n \r\n #### Particle travel time branch 1\r\n excelfile_surface_branch1_high = r'excel\\flow_rate\\particle_surface_branch1_high.xlsx'\r\n inarray_surface_branch1_high = pd.read_excel(excelfile_surface_branch1_high).to_numpy() \r\n \r\n excelfile_surface_branch1_medium = r'excel\\flow_rate\\particle_surface_branch1_medium.xlsx'\r\n inarray_surface_branch1_medium = pd.read_excel(excelfile_surface_branch1_medium).to_numpy() \r\n \r\n excelfile_surface_branch1_low = r'excel\\flow_rate\\particle_surface_branch1_low.xlsx'\r\n inarray_surface_branch1_low = pd.read_excel(excelfile_surface_branch1_low).to_numpy()\r\n \r\n excelfile_bottom_branch1_high = r'excel\\flow_rate\\particle_bottom_branch1_high.xlsx'\r\n inarray_bottom_branch1_high = pd.read_excel(excelfile_bottom_branch1_high).to_numpy()\r\n \r\n excelfile_bottom_branch1_medium = 
r'excel\\flow_rate\\particle_bottom_branch1_medium.xlsx'\r\n inarray_bottom_branch1_medium = pd.read_excel(excelfile_bottom_branch1_medium).to_numpy()\r\n \r\n excelfile_bottom_branch1_low = r'excel\\flow_rate\\particle_bottom_branch1_low.xlsx'\r\n inarray_bottom_branch1_low = pd.read_excel(excelfile_bottom_branch1_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 1\r\n excelfile_tracer_branch1_high = r'excel\\flow_rate\\tracer_branch1_high.xlsx'\r\n inarray_tracer_branch1_high = pd.read_excel(excelfile_tracer_branch1_high).to_numpy()\r\n \r\n excelfile_tracer_branch1_medium = r'excel\\flow_rate\\tracer_branch1_medium.xlsx'\r\n inarray_tracer_branch1_medium = pd.read_excel(excelfile_tracer_branch1_medium).to_numpy()\r\n \r\n excelfile_tracer_branch1_low = r'excel\\flow_rate\\tracer_branch1_low.xlsx'\r\n inarray_tracer_branch1_low = pd.read_excel(excelfile_tracer_branch1_low).to_numpy()\r\n \r\n self.inarrays_branch1 = [inarray_surface_branch1_high, inarray_surface_branch1_medium, inarray_surface_branch1_low, \\\r\n inarray_bottom_branch1_high, inarray_bottom_branch1_medium, inarray_bottom_branch1_low, \\\r\n inarray_tracer_branch1_high, inarray_tracer_branch1_medium, inarray_tracer_branch1_low]\r\n \r\n \r\n #### Particle travel time branch 5\r\n excelfile_surface_branch5_high = r'excel\\flow_rate\\particle_surface_branch5_high.xlsx'\r\n inarray_surface_branch5_high = pd.read_excel(excelfile_surface_branch5_high).to_numpy()\r\n \r\n excelfile_surface_branch5_medium = r'excel\\flow_rate\\particle_surface_branch5_medium.xlsx'\r\n inarray_surface_branch5_medium = pd.read_excel(excelfile_surface_branch5_medium).to_numpy()\r\n \r\n excelfile_surface_branch5_low = r'excel\\flow_rate\\particle_surface_branch5_low.xlsx'\r\n inarray_surface_branch5_low = pd.read_excel(excelfile_surface_branch5_low).to_numpy()\r\n \r\n excelfile_bottom_branch5_high = r'excel\\flow_rate\\particle_bottom_branch5_high.xlsx'\r\n inarray_bottom_branch5_high = pd.read_excel(excelfile_bottom_branch5_high).to_numpy()\r\n \r\n excelfile_bottom_branch5_medium = r'excel\\flow_rate\\particle_bottom_branch5_medium.xlsx'\r\n inarray_bottom_branch5_medium = pd.read_excel(excelfile_bottom_branch5_medium).to_numpy()\r\n \r\n excelfile_bottom_branch5_low = r'excel\\flow_rate\\particle_bottom_branch5_low.xlsx'\r\n inarray_bottom_branch5_low = pd.read_excel(excelfile_bottom_branch5_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 5\r\n excelfile_tracer_branch5_high = r'excel\\flow_rate\\tracer_branch5_high.xlsx'\r\n inarray_tracer_branch5_high = pd.read_excel(excelfile_tracer_branch5_high).to_numpy()\r\n \r\n excelfile_tracer_branch5_medium = r'excel\\flow_rate\\tracer_branch5_medium.xlsx'\r\n inarray_tracer_branch5_medium = pd.read_excel(excelfile_tracer_branch5_medium).to_numpy()\r\n \r\n excelfile_tracer_branch5_low = r'excel\\flow_rate\\tracer_branch5_low.xlsx'\r\n inarray_tracer_branch5_low = pd.read_excel(excelfile_tracer_branch5_low).to_numpy()\r\n \r\n \r\n self.inarrays_branch5 = [inarray_surface_branch5_high, inarray_surface_branch5_medium, inarray_surface_branch5_low, \\\r\n inarray_bottom_branch5_high, inarray_bottom_branch5_medium, inarray_bottom_branch5_low, \\\r\n inarray_tracer_branch5_high, inarray_tracer_branch5_medium, inarray_tracer_branch5_low]", "def _read(self, openf=None, stepfilter=None):\n itemstack = []\n current = None\n result = {}\n xkeys = None\n timeskip = False\n laststep = False\n\n if openf is None:\n f = open(self.filepath)\n else:\n f = openf\n\n line = 'start'\n while line != 
'':\n lastpos = f.tell()\n line = f.readline()\n if line == '':\n continue\n \n if itemstack is not None and len(itemstack) > 0: \n cast = itemstack.pop()\n raw = line.split()\n values = [t(r) for t, r in zip(cast, raw)]\n if len(values) == 1:\n values = values[0]\n\n if current == \"time\":\n if stepfilter is not None and values not in stepfilter:\n timeskip = True\n elif (self.index is not None and values != self.index):\n if values > self.index:\n if openf is None:\n return {}\n else:\n timeskip = True\n laststep = True\n else:\n timeskip = True\n elif self.index is None:\n self.index = values\n else:\n timeskip = False\n \n if len(itemstack) == 0 and current not in result:\n result[current] = values\n else:\n if current not in result:\n result[current] = []\n result[current].append(values)\n continue\n elif itemstack is None and current == \"atoms\":\n if \"ITEM\" in line:\n current = None\n if openf is not None:\n f.seek(lastpos)\n break\n else:\n #E.g. line: 1 4 -65.9625 1.54915 1.46824 5 30.976 \n vals = line.split()\n sid, atype = tuple(map(int, vals[0:2]))\n result[\"type\"].append(atype)\n result[\"id\"].append(sid)\n x, y, z = tuple(map(float, vals[2:5]))\n result[\"xyz\"].append((x, y, z))\n if len(vals) > 5 and xkeys is not None:\n for ikey, v in enumerate(vals[5:]):\n result[xkeys[ikey]].append(eval(v))\n continue # pragma: no cover\n \n if \"ITEM: TIMESTEP\" in line:\n if laststep:\n f.seek(lastpos)\n break\n itemstack.append((int,))\n current = \"time\"\n timeskip = False\n elif not timeskip:\n if \"ITEM: NUMBER OF ATOMS\" in line:\n itemstack.append((int,))\n current = \"natoms\"\n elif \"ITEM: BOX BOUNDS\" in line:\n period = line.strip().split(\"BOX BOUNDS\")\n if len(period) == 2 and period[1] != '':\n result[\"periodic\"] = period[1].strip().split()\n else:\n result[\"periodic\"] = (\"ss\", \"ss\" ,\"ss\")\n \n\t\t # Changes by JPRIEDS to accommodate triclinic boxes\n\t\t # Written 170719\n\t\t if len(result[\"periodic\"]) == 6:\n\t\t\titemstack.extend([(float, float, float)]*3)\n\t\t\tcurrent = \"box\"\n\t\t\tresult[\"periodic\"] = result[\"periodic\"][3:]\n\t\t elif len(result[\"periodic\"]) == 3:\n\t\t\titemstack.extend([(float, float)]*3)\n\t\t\tcurrent = \"box\"\n\t\t else:\n emsg = \"Could not classify periodic bounds: {}\"\n raise ValueError(emsg.format(result[\"periodic\"]))\n elif \"ITEM: ATOMS\" in line:\n itemstack = None\n current = \"atoms\"\n result[\"type\"] = []\n result[\"id\"] = []\n result[\"xyz\"] = []\n \n #The first two headings in the line have \"ITEM: ATOMS\", the\n #rest are usuall id, type, x, y, z, rest...\n headings = line.split()\n extras = len(headings) > 7\n if extras:\n xkeys = []\n xheadings = headings[7:]\n for xhead in xheadings:\n key = \"atom:{}\".format(xhead)\n result[key] = []\n xkeys.append(key)\n \n if openf is None:\n #Close the file since we opened it.\n f.close()\n \n return result", "def read(lookup_cnfg, lookup_qn, diagram, T, directory, verbose=0):\n\n data = []\n comb = True if diagram == 'C4+D' else False\n\n for cnfg in lookup_cnfg:\n # filename and path\n filename = directory + '/' + diagram + '_cnfg%i' % cnfg + '.h5'\n try:\n fh = h5py.File(filename, \"r\")\n except IOError:\n print 'file %s not found' % filename\n raise\n\n # to achieve hirarchical indexing for quantum numbers build DataFrame for\n # each loop seperately\n # TODO: is it necessary to build that completely or can that be \n # constructed by successively storing each operator with pd.HDFStore()?\n data_qn = DataFrame()\n# print DataFrame(lookup_p)\n# 
print DataFrame(lookup_g)\n\n for op in lookup_qn.index:\n p = lookup_qn.ix[op, ['p_{so}', 'p_{si}']]\n g = lookup_qn.ix[op, ['\\gamma_{so}', '\\gamma_{si}']]\n groupname = set_groupname(diagram, p, g)\n\n # read data from file as numpy array and interpret as complex\n # numbers for easier treatment\n try:\n tmp = np.asarray(fh[groupname]).view(complex)\n except KeyError:\n print(\"could not read %s for config %d\" % (groupname, cnfg))\n continue\n\n # in case diagram is C4+D perform last mutliplication of factorizing\n # traces\n # the file contains 4 numbers per time slice: ReRe, ReIm, ImRe, and ImIm,\n # here combined 2 complex number\n if comb:\n # reshaping so we can extract the data easier\n tmp = tmp.reshape((-1,2))\n # extracting right combination, assuming ImIm contains only noise\n dtmp = 1.j * (tmp[:,1].real + tmp[:,0].imag) + tmp[:,0].real\n tmp = dtmp.copy()\n\n # save data into data frame\n data_qn[op] = pd.DataFrame(tmp, columns=['re/im'])\n data.append(data_qn)\n data = pd.concat(data, keys=lookup_cnfg, axis=0, names=['cnfg', 'T'])\n\n if verbose:\n print '\\tfinished reading'\n\n return data.sort_index(level=[0,1])", "def get_clean_cycles(cycle_dict, file_name, database_name, datatype, thresh1, thresh2):\n #name = file_name.split('.')[0]\n (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col, char_cap_col, charge_or_discharge) = col_variables(datatype)\n while '/' in file_name:\n file_name = file_name.split('/', maxsplit = 1)[1]\n name = file_name.split('.')[0]\n\n clean_cycle_dict = {} \n #ex_data = 'y'\n #ex_data = input('Are there any voltages that should not be included? (y/n): ')\n #if ex_data == 'y': \n #thresh1 = '4.17'\n #thresh2 = '4.25'\n #thresh1 = input('Please enter the start voltage of the range to exclude from the data: ')\n #thresh2 = input('Please enter the end voltage of the range to exclude from the data: ')\n #print('Datapoints within 0.03V of that voltage will be deleted')\n for i in range(1, len(cycle_dict)+1):\n charge, discharge = clean_calc_sep_smooth(cycle_dict[i], 9, 3, thresh1, thresh2, datatype)\n clean_data = charge.append(discharge, ignore_index=True)\n clean_data = clean_data.sort_values([data_point_col], ascending = True)\n clean_data = clean_data.reset_index(drop=True)\n cyclename = name + '-CleanCycle' + str(i)\n \t#print(cyclename)\n clean_cycle_dict.update({cyclename : clean_data})\n dbfs.update_database_newtable(clean_data, cyclename, database_name)\n \t#run the peak finding peak fitting part here \n # for key in clean_cycle_dict:\n # \tprint(key)\n print('All cycles cleaned and saved in database')\n return clean_cycle_dict", "def _data_reader(file):\n # Create a dictionary so that filename matches a site name.\n site_dict = {'D05536000': 'NB Niles', 'D05536101': 'NS Channel-Wilmette',\n 'D05536105': 'NB Albany', 'D05536118': 'NB Grand Avenue',\n 'D05536121': 'CH River-Lock', 'D05536123': 'CH River-Columbus',\n 'D05536137': 'CSSC-Western Avenue', 'D05536140': 'CSSC-Stickney',\n 'D05536275': 'Thorn Creek', 'D05536290': 'Little Calument',\n 'D05536340': 'Midlothian Creek', 'D05536343': 'Natalie Creek',\n 'D05536357': 'Grand Calumet', 'D05536500': 'Tinley Creek',\n 'D05536700': 'Calumet-Sag Channel', 'D05536890': 'CSSC-Lemont',\n 'D05536995': 'CSSC-Romeoville'}\n df_raw = pd.read_csv(file)\n df_raw['dateTime'] = pd.to_datetime(df_raw['dateTime'])\n # Creating a dataframe with the data we only need.\n df = df_raw[['dateTime', 'X_00065_00000']]\n df = df.set_index(df_raw['dateTime'])\n\n # Retrieve site information to be used in 
saved excel filenames.\n site_code = file[-9:]\n site_name = [v for v in site_dict.items() if site_code in v][0]\n site = site_code + '_' + site_name[1].replace(' ', '-')\n\n # Convert index into a datetime index for easier indexing.\n df.index = pd.to_datetime(df.index)\n return df_raw, df, site, site_code", "def parse(self):\r\n # open the file\r\n # try to find the first line with Time =\r\n # send the file to a recursive residualParser\r\n try:\r\n with open(self.filepath, 'rb') as f:\r\n for line in f:\r\n if line.startswith('Time ='):\r\n self.timestep = self.__getTime(line)\r\n self.__residuals[self.timestep] = {}\r\n self.__parseResiduals(f)\r\n except Exception as e:\r\n raise 'Failed to parse {}:\\n\\t{}'.format(self.filepath, e)", "def read_stream(params):\n # Ignore file integrity issues; thus far the only station affected is DR11,\n # with no seeming impact on the seismic trace itself. Consider treating as\n # an error in future implementation.\n warnings.simplefilter(\"error\", category=InternalMSEEDWarning)\n\n start_search = params.start_processing.floor('D')\n stop_search = params.stop_processing.floor('D')\n dts = pd.date_range(start_search, stop_search)\n count = 0\n for i, dt in enumerate(dts):\n if params.name_format == 1:\n fname = f\"{params.network}.{params.station}.{params.channel}.{dt.year}.{dt.dayofyear:03d}.mseed\"\n if params.station == \"*\":\n # filespec = f\"{params.network}/**/{params.network}.{params.station}.{params.channel}.{dt.year}.{dt.dayofyear:03d}.mseed\"\n filespec = os.path.join(params.network, \"**\", fname)\n else:\n # filespec = f\"{params.network}/{params.station}/{params.network}.{params.station}.{params.channel}.{dt.year}.{dt.dayofyear:03d}.mseed\"\n filespec = os.path.join(params.network, params.station, fname)\n elif params.name_format == 2:\n filespec = f\"{params.network}.{params.station}..{params.channel}__{dt.year}{dt.month:02d}{dt.day:02d}T*\"\n\n try:\n if count == 0:\n # st = read(f\"{params.sourcepath}/MSEED/{filespec}\")\n st = read(os.path.join(params.sourcepath, \"MSEED\", filespec))\n # st = read(f\"{params.sourcepath}/{filespec}\")\n else:\n # st += read(f\"{params.sourcepath}/MSEED/{filespec}\")\n st += read(os.path.join(params.sourcepath, \"MSEED\", filespec))\n # st += read(f\"{params.sourcepath}/{filespec}\")\n count += 1\n except:\n pass\n\n if count > 0:\n st.merge(fill_value=\"interpolate\", interpolation_samples=1)\n st.trim(\n starttime=UTCDateTime(params.start_processing),\n endtime=UTCDateTime(params.stop_processing)\n )\n return st\n else:\n return -1", "def parse_data(infile):\n blocks = re.compile(' '.join(['=' * 9] * 8))\n dashes = re.compile('^-{79}$')\n title = re.compile('^Timings for (.*)$')\n row = re.compile(' '.join(['(.{9})'] * 7) + ' (.{8,9})')\n\n lines = infile.readlines()\n\n data = co.OrderedDict()\n index = 0\n\n while index < len(lines):\n line = lines[index]\n\n if blocks.match(line):\n try:\n name = title.match(lines[index + 1]).group(1)\n except Exception:\n index += 1\n continue\n\n data[name] = {}\n\n assert dashes.match(lines[index + 2])\n\n cols = parse_row(row, lines[index + 3])\n\n assert blocks.match(lines[index + 4])\n\n get_row = parse_row(row, lines[index + 5])\n assert get_row[0] == 'get'\n\n set_row = parse_row(row, lines[index + 6])\n assert set_row[0] == 'set'\n\n delete_row = parse_row(row, lines[index + 7])\n assert delete_row[0] == 'delete'\n\n assert blocks.match(lines[index + 9])\n\n data[name]['get'] = dict(zip(cols, get_row))\n data[name]['set'] = dict(zip(cols, set_row))\n 
data[name]['delete'] = dict(zip(cols, delete_row))\n\n index += 10\n else:\n index += 1\n\n return data", "def adjust_sff_cycles(sff_data, num_cycles):\r\n # TODO: Move to PyCogent\r\n num_flows = num_cycles * 4\r\n header, reads = sff_data\r\n\r\n h = header.copy()\r\n h['number_of_flows_per_read'] = num_flows\r\n h['header_length'] = num_flows + 40\r\n h['index_offset'] = 0\r\n h['index_length'] = 0\r\n h['flow_chars'] = 'TACG' * num_cycles\r\n\r\n read_clip_keys = [\r\n 'clip_qual_left', 'clip_qual_right', 'clip_adapter_left',\r\n 'clip_adapter_right',\r\n ]\r\n\r\n def adjust_read(read):\r\n r = read.copy()\r\n r['flowgram_values'] = read['flowgram_values'][:num_flows]\r\n enumerated_flow_indices = list(enumerate(\r\n _cumulative_sum(read['flow_index_per_base'])))\r\n\r\n # Brain teaser: find the largest base index having a flow\r\n # index less than num_flows\r\n num_bases = 6789\r\n for base_idx, flow_idx in reversed(enumerated_flow_indices):\r\n num_bases = base_idx + 1\r\n if flow_idx <= num_flows:\r\n break\r\n\r\n r['number_of_bases'] = num_bases\r\n r['flow_index_per_base'] = read['flow_index_per_base'][:num_bases]\r\n r['Bases'] = read['Bases'][:num_bases]\r\n r['quality_scores'] = read['quality_scores'][:num_bases]\r\n\r\n for key in read_clip_keys:\r\n if r[key] > num_bases:\r\n r[key] = num_bases\r\n\r\n return r\r\n\r\n return (h, imap(adjust_read, reads))", "def read_dict(path):\n\n # Open the dataset\n miriad_data = aipy.miriad.UV(path)\n\n # Construct the set of frequency channels (in GHz)\n nfreq = miriad_data['nchan']\n delta_freq = miriad_data['sdf'] # GHz\n sfreq = miriad_data['sfreq'] # GHz\n freq = np.arange(nfreq) * delta_freq + sfreq\n\n # TODO: should generalise this to select other polarisation types\n miriad_data.select('polarization', -8, -5, include=True)\n miriad_data.select('polarization', -7, -5, include=True)\n miriad_data.select('polarization', -6, -5, include=True)\n miriad_data.select('polarization', -5, -5, include=True)\n\n miriad_data.rewind()\n\n data, mask, times, lengths, uvw, ant, pol = [], [], [], [], [], [], []\n\n # Iterate over all entries in MIRIAD dataset and pull out their useful\n # quantities\n for pream, data_row, mask_row in miriad_data.all(raw=True):\n\n # Ensure that data arrays are of the correct type\n data_row = data_row.astype(np.complex64)\n mask_row = mask_row.astype(np.bool)\n\n # Unpack co-ordinates\n uvw_row, t, ant_row = pream\n pp = aipy.miriad.pol2str[miriad_data['pol']]\n\n # Append this rows data to the global set\n lengths.append(len(data))\n times.append(t)\n ant.append(ant_row)\n uvw.append(uvw_row)\n data.append(data_row)\n mask.append(mask_row)\n pol.append(pp)\n\n data_dict = {\n 'data': np.array(data),\n 'mask': np.array(mask),\n 'time': np.array(times),\n 'length': np.array(lengths),\n 'uvw': np.array(uvw),\n 'ant': np.array(ant),\n 'pol': np.array(pol),\n 'freq': freq\n }\n\n return data_dict", "def read_data(self):\n self.days = [0, 2, 3, 5, 6, 8, 9, 11, 13, 14]\n path = '../data/'\n data = []\n for day in self.days:\n filename = path + 'spectrum_day{}.txt'.format(day)\n data.append(read_file(filename))\n return data", "def _read_trajectory_files(self):\n dflist = []\n self.Ntimes = {}\n for downD in self.case.downstreamD:\n outputs = self.case.get_outputs(self.method,downD)\n print(outputs['trajectory_file'])\n df = pd.read_csv(outputs['trajectory_file'],\n header=None,\n usecols=[0,1,2])\n df.columns = ['t','y','z']\n df['x'] = downD * self.case.turbine.D\n df['z'] -= self.case.turbine.zhub\n df = 
df.set_index(['t','x'])[['y','z']]\n self.Ntimes[downD] = len(df.index.levels[0])\n dflist.append(df)\n self.df = pd.concat(dflist).sort_index()", "def parse(self):\n\t\tfirst = None\n\t\tf = open(self.input_file)\n\t\tfor line in f.readlines():\n\t\t\tif line.startswith(\"#\"):\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tflow,t,sequence,size = line.split()\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t# append data to a list of tuples\n\t\t\tflow = int(flow)\n\t\t\tt = float(t)\n\t\t\tsequence = int(sequence)\n\t\t\tif size == \"x\":\n\t\t\t\tcontinue\n\t\t\tsize = int(size)\n\t\t\tif not size == 0:\n\t\t\t\tif flow == 1:\n\t\t\t\t\tself.data1.append((t,sequence,size))\n\t\t\t\telif flow == 2:\n\t\t\t\t\tself.data2.append((t,sequence,size))\n\t\t\t\telif flow == 3:\n\t\t\t\t\tself.data3.append((t, sequence, size))\n\t\t\t\telif flow == 4:\n\t\t\t\t\tself.data4.append((t, sequence, size))\n\t\t\t\telif flow == 5:\n\t\t\t\t\tself.data5.append((t, sequence, size))\n\t\t\t\telse:\n\t\t\t\t\tprint \"Erroneous data: \",flow, t, sequence, size\n\t\t\t# Keep track of the minimum and maximum time seen\n\t\t\tif not self.min_time or t < self.min_time:\n\t\t\t\tself.min_time = t\n\t\t\tif not self.max_time or t > self.max_time:\n\t\t\t\tself.max_time = t\n\n\t\t\t# print len(self.data1),len(self.data2),len(self.data3),len(self.data4),len(self.data5)", "def read_raw_timings(fin):\n\n REGEX_CFG = re.compile(r\".*__CFG([0-9]+)$\")\n\n def inner():\n raw = list(parse_raw_timing(fin))\n for slice_name, site_name, bel_name, speed_model, properties in raw:\n\n # Get timings from properties\n timings = [(k, properties[k]) for k in [\n \"DELAY\",\n \"FAST_MAX\",\n \"FAST_MIN\",\n \"SLOW_MAX\",\n \"SLOW_MIN\",\n ]]\n\n speed_model_orig = speed_model\n\n # There are \"bel\" and \"net\" delays\n if speed_model.startswith(\"bel_\"):\n is_net = False\n speed_model = speed_model[4:]\n\n elif speed_model.startswith(\"net_\"):\n is_net = True\n speed_model = speed_model[4:]\n\n else:\n continue\n\n # Get configuration. 
Look for \"__CFG<n>\"\n # FIXME: How to correlate that with a configuration name ?\n match = REGEX_CFG.match(speed_model)\n if match is not None:\n cfg = match.group(1)\n else:\n cfg = None\n\n # Parse the rest of the model name\n fields = speed_model.split(\"__\")\n src_pin, dst_pin = fields[2:4]\n\n # Cell type\n if is_net:\n cell_type = \"NET\"\n\n else:\n cell_type = bel_name\n if cfg is not None:\n cell_type += \"_CFG{}\".format(cfg)\n\n # Cell instance\n if is_net:\n instance = site_name\n else:\n instance = \"{}/{}\".format(site_name, bel_name)\n\n # Yield stuff\n key = (site_name, cell_type, instance, speed_model)\n yield (*key, \"cell_type\"), cell_type.upper()\n yield (*key, \"instance\"), instance.upper()\n yield (*key, \"input\"), src_pin.upper()\n yield (*key, \"output\"), dst_pin.upper()\n yield (*key, \"model\"), speed_model\n yield (*key, \"is_net\"), is_net\n\n for t, v in timings:\n yield (*key, t), v\n\n return merged_dict(inner())", "def read(self):\n\t\twith open(self.__path(), 'r') as f:\n\t\t\tf.readline()\n\t\t\tself.price = dict()\n\t\t\tfor line in f:\n\t\t\t\td = workingday.strptime(line[:10], '%Y-%m-%d')\n\t\t\t\tc = line[13:-1].split()\n\t\t\t\trow = []\n\t\t\t\tfor i in xrange(5):\n\t\t\t\t\trow.append(float(c[i]))\n\t\t\t\trow.append(int(c[5]))\n\t\t\t\tself.price[d] = row\n\n\t\tself.dividend = dict()\n\t\tself.split = dict()\n\t\ttry:\n\t\t\twith open(self.__path_dividends(), 'r') as f:\n\t\t\t\tf.readline()\n\t\t\t\tfor line in f:\n\t\t\t\t\td = workingday.strptime(line[:10], '%Y-%m-%d')\n\t\t\t\t\tc = line[10:-1].split()\n\t\t\t\t\tif c[0] == 'Dividend':\n\t\t\t\t\t\tself.dividend[d] = float(c[1])\n\t\t\t\t\telif c[0] == 'Split':\n\t\t\t\t\t\tself.split[d] = tuple(map(int, c[1].split(':')))\n\t\texcept:\n\t\t\tpass", "def parse_file(file_name, barcode_map=barcode_map):\n\n with open(file_name) as file_handle:\n results = defaultdict(Counter)\n try:\n while True:\n name = file_handle.next()\n seq = file_handle.next()\n plus = file_handle.next()\n qual = file_handle.next()\n handle_seq(seq, barcode_map, results)\n except StopIteration:\n pass\n return pd.DataFrame(results).T.fillna(0)" ]
[ "0.65068936", "0.60459214", "0.5898123", "0.5829248", "0.5802381", "0.5798535", "0.5600928", "0.55067945", "0.55019885", "0.5468192", "0.5442655", "0.5402407", "0.5380463", "0.535162", "0.53467834", "0.5337483", "0.5331417", "0.53102356", "0.53098416", "0.5296011", "0.52932394", "0.52892464", "0.5279072", "0.5278659", "0.52359676", "0.52346414", "0.52236646", "0.52015615", "0.5198728", "0.5193682" ]
0.7742191
0
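A minimal standalone sketch of the pandas pattern visible in the document tail of the record above (per-file frames indexed on ('t', 'x'), column selection, then concatenation and sorting); the CSV-reading step and the file/column names here are illustrative assumptions, not taken from the dataset:

    import pandas as pd

    def load_timeseries(paths):
        # Build one MultiIndexed frame per file, track the distinct-time count, then concatenate.
        ntimes = {}
        dflist = []
        for path in paths:
            df = pd.read_csv(path)                     # assumed input format: columns t, x, y, z
            df = df.set_index(['t', 'x'])[['y', 'z']]  # same indexing/selection as the record's code
            ntimes[path] = len(df.index.levels[0])     # number of unique times in this file
            dflist.append(df)
        return pd.concat(dflist).sort_index(), ntimes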
Basic plotting of the cycle data: plot current against potential for each cycle and save the figure.
def plot_fig(dict_cycle, number):\n    for i in range(number):\n        print(i+1)\n        data = data_frame(dict_cycle, i+1)\n        plt.plot(data.Potential, data.Current, label="Cycle{}".format(i+1))\n        print(data.head())\n    plt.xlabel('Voltage')\n    plt.ylabel('Current')\n    plt.legend()\n    plt.savefig('cycle.png')\n    print('executed')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot(self):\n\t\tself.plotOfLoopVoltage()", "def plot_data(self):", "def plot(self):\n pass", "def plot_graph(self) -> None:", "def multi_cycle_plot(df, cycles, colormap='viridis'):\n import matplotlib.cm as cm\n from matplotlib.colors import Normalize\n\n fig, ax = plt.subplots()\n cm = plt.get_cmap(colormap)\n norm = Normalize(vmin=int(np.ceil(min(cycles)/2)), vmax=int(np.ceil(max(cycles)/2)))\n sm = plt.cm.ScalarMappable(cmap=cm, norm=norm)\n\n for cycle in cycles:\n mask = df['half cycle'] == cycle\n ax.plot(df['Capacity'][mask], df['Voltage'][mask], color=cm(norm(np.ceil(cycle/2))))\n\n cbar = fig.colorbar(sm)\n cbar.set_label('Cycle', rotation=270, labelpad=10)\n ax.set_ylabel('Voltage / V')\n ax.set_xlabel('Capacity / mAh')\n return fig, ax", "def __plot(data, days: int = None):\n if days is not None:\n points = days * 144\n else:\n points = len(data)\n\n temp = data[-points:, 1]\n\n plt.plot(range(points), temp)\n plt.grid()\n plt.show()", "def multi_dqdv_plot(df, cycles, colormap='viridis', \n capacity_label='Capacity', \n voltage_label='Voltage',\n polynomial_spline=3, s_spline=1e-5,\n polyorder_1 = 5, window_size_1=101,\n polyorder_2 = 5, window_size_2=1001,\n final_smooth=True):\n import matplotlib.cm as cm\n from matplotlib.colors import Normalize\n\n fig, ax = plt.subplots()\n cm = plt.get_cmap(colormap)\n norm = Normalize(vmin=int(np.ceil(min(cycles)/2)), vmax=int(np.ceil(max(cycles)/2)))\n sm = plt.cm.ScalarMappable(cmap=cm, norm=norm)\n\n for cycle in cycles:\n df_cycle = df[df['half cycle'] == cycle]\n voltage, dqdv, cap = dqdv_single_cycle(df_cycle[capacity_label], \n df_cycle[voltage_label], \n window_size_1=window_size_1,\n polyorder_1=polyorder_1,\n s_spline=s_spline,\n window_size_2=window_size_2,\n polyorder_2=polyorder_2,\n final_smooth=final_smooth)\n \n ax.plot(voltage, dqdv, color=cm(norm(np.ceil(cycle/2))))\n\n cbar = fig.colorbar(sm)\n cbar.set_label('Cycle', rotation=270, labelpad=10)\n ax.set_xlabel('Voltage / V')\n ax.set_ylabel('dQ/dV / $mAhV^{-1}$')\n return fig, ax", "def plot():\n pass", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n 
plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def cplot(self, figure, i, n):\n xx, yy = np.meshgrid(range(self.L), range(self.L))\n ax = figure.add_subplot(2,2,n)\n plt.setp(ax.get_yticklabels(), visible=False)\n plt.setp(ax.get_xticklabels(), visible=False) \n plt.pcolormesh(xx, yy, self.config, cmap=plt.cm.RdBu);\n plt.title('Time=%d'%i, fontsize=20)\n plt.xlabel('X', fontsize=12)\n plt.ylabel('Y',fontsize=12) \n plt.axis('tight') \n self.ax = ax", "def plot_example_psds(example,rate):\r\n plt.figure()\r\n \r\n ##YOUR CODE HERE \r\n \r\n return", "def plot(self, *args, **kwargs):\n pass", "def plot(self):\n\t\tself.plotOfSpect()", "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def plot_control_loops(data):\n plot_attitude_rate_loops(data)\n plot_attitude_loops(data)\n plot_velocity_loops(data)\n plot_position_loops(data)", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plot(self): \n\t\txandy = sep_xy(self.start, 
self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def simple_plot(self):\n for i in np.arange(len(self.e2)):\n self.ax.plot(self.e2[i], 'o', label=self.labels[i])", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def liveplot(x, y, xlim, ylim, title):\n plt.plot(x,y,'b.')\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.xlabel('North-South Axis')\n plt.ylabel('East-West Axis')\n plt.title(title)\n plt.show()", "def peek(self, **kwargs):\n\n plt.figure()\n axes = plt.gca()\n data_lab=self.meta['OBS-FREQ'][0:2] + ' ' + self.meta['OBS-FREQ'][2:5]\n axes.plot(self.data.index,self.data,label=data_lab)\n axes.set_yscale(\"log\")\n axes.set_ylim(1e-4,1)\n axes.set_title('Nobeyama Radioheliograph')\n axes.set_xlabel('Start time: ' + self.data.index[0].strftime(TIME_FORMAT))\n axes.set_ylabel('Correlation')\n axes.legend()\n plt.show()", "def plot(self):\n t = np.linspace(0, self.days, self.days + 1)\n fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(nrows=5, sharex='all')\n ax1.plot(t, self.S, label=\"Susceptible\", color='r')\n ax1.set_ylabel(\"Number of Susceptible People\")\n ax1.set_title(\"Strong Infecitous Model SEIRV Simulation\")\n ax3.plot(t, self.I, label=\"Active Cases\", color='b')\n ax3.set_ylabel(\"Active Cases\")\n ax2.plot(t, self.E, label=\"Exposed\", color='c')\n ax2.set_ylabel(\"# of Exposed\")\n ax4.plot(t, self.R, label=\"Recovered\", color='m')\n ax5.set_xlabel(\"Days\")\n ax4.set_ylabel('Number of Recovered')\n ax5.plot(t, self.V, label=\"Vaccinated\")\n ax5.set_ylabel(\"# Vaccinated\")\n ax1.legend()\n ax2.legend()\n ax3.legend()\n ax4.legend()\n plt.show()\n return fig", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = 
mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = 
ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def visualize_signal(self):\n plt.figure()\n plt.title('Accelerometer Signal')\n plt.plot(range(len(self.data)), self.data[1])", "def charge_discharge_plot(df, full_cycle, colormap=None):\n fig, ax = plt.subplots()\n\n try:\n iter(full_cycle)\n\n except TypeError:\n cycles = [full_cycle*2 -1, full_cycle*2]\n for cycle in cycles:\n mask = df['half cycle'] == cycle\n # Making sure cycle exists within the data\n if sum(mask) > 0:\n ax.plot(df['Capacity'][mask], df['Voltage'][mask])\n\n ax.set_xlabel('Capacity / mAh')\n ax.set_ylabel('Voltage / V')\n return fig, ax\n \n if not colormap:\n if len(full_cycle) < 11:\n colormap = 'tab10'\n elif len(full_cycle) < 21:\n colormap = 'tab20'\n else:\n raise ValueError(\"Too many cycles for default colormaps. Use multi_cycle_plot instead\")\n \n cm = plt.get_cmap(colormap)\n for count, full_cycle_number in enumerate(full_cycle):\n cycles = [full_cycle_number*2 -1, full_cycle_number*2]\n for cycle in cycles:\n mask = df['half cycle'] == cycle\n # Making sure cycle exists within the data\n if sum(mask) > 0:\n ax.plot(df['Capacity'][mask], df['Voltage'][mask], color=cm(count))\n\n from matplotlib.lines import Line2D\n custom_lines = [Line2D([0], [0], color=cm(count), lw=2) for count, i in enumerate(full_cycle)]\n \n ax.legend(custom_lines, [f'Cycle {i}' for i in full_cycle])\n ax.set_xlabel('Capacity / mAh')\n ax.set_ylabel('Voltage / V')\n return fig, ax" ]
[ "0.72083366", "0.6998115", "0.6924712", "0.6818845", "0.67426956", "0.66528636", "0.6642367", "0.66289514", "0.65917873", "0.6526204", "0.6482531", "0.64119107", "0.6405369", "0.6392448", "0.638955", "0.6359681", "0.63554764", "0.63265246", "0.62756735", "0.62715703", "0.6266615", "0.6263428", "0.6259879", "0.6258654", "0.62548983", "0.6248245", "0.62367064", "0.6234459", "0.6221964", "0.6206872" ]
0.7004428
1
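A self-contained sketch of the cycle-plotting approach shown in the document field above; it assumes the cycles are passed in directly as a dict of DataFrames with Potential and Current columns, standing in for the external data_frame helper the original relies on:

    import pandas as pd
    import matplotlib.pyplot as plt

    def plot_cycles(cycles, outfile='cycle.png'):
        # cycles: dict mapping cycle number -> DataFrame with Potential and Current columns
        for n, data in sorted(cycles.items()):
            plt.plot(data.Potential, data.Current, label="Cycle{}".format(n))
        plt.xlabel('Voltage')
        plt.ylabel('Current')
        plt.legend()
        plt.savefig(outfile)

    # illustrative usage with made-up numbers
    demo = {1: pd.DataFrame({'Potential': [0.0, 0.5, 1.0], 'Current': [0.1, 0.3, 0.2]})}
    plot_cycles(demo)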
Ask for the file path to the latest banking data and load the CSV file.
def load_bank_data():\n    print("\n" * 8)\n    print(" * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *", "\n")\n    csvpath = questionary.text("Enter a file path to a rate-sheet (.csv):").ask()\n    print("\n", "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *", "\n")\n    csvpath = Path(csvpath)\n    if not csvpath.exists():\n        sys.exit(f"Oops! Can't find this path: {csvpath}")\n    return load_csv(csvpath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(self, csv_file):\n pass", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def loadCSV(input_file):", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def load_bhav_data_csv(self, filepath: str) -> None:\n # create redis pipeline\n pipeline = self.redis.pipeline(transaction=True)\n pipeline.flushdb()\n total_items_counter = 0\n with open(filepath, \"r\") as csv_file:\n csv_dict_reader = csv.DictReader(csv_file)\n for row in csv_dict_reader:\n prefixed_sc_name = f'BHAV:{row[\"SC_NAME\"].strip()}'\n data = {\n \"SC_CODE\": row[\"SC_CODE\"],\n \"OPEN\": row[\"OPEN\"],\n \"CLOSE\": row[\"CLOSE\"],\n \"HIGH\": row[\"HIGH\"],\n \"LOW\": row[\"LOW\"],\n }\n\n pipeline.hset(\n name=self.sc_name_hash_stored,\n key=prefixed_sc_name,\n value=json.dumps(data),\n )\n pipeline.zadd(\n self.sc_name_sorted_set,\n {prefixed_sc_name: 0},\n )\n total_items_counter += 1\n pipeline.set(self.last_updated, str(timezone.now().date()))\n pipeline.set(self.total_items_count, total_items_counter)\n\n # Execute\n pipeline.execute()\n return", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def read_csv_file(self):\n pass", "def test_load_csv_file():\n data = loader.load_csv_file(\"buildup/reference/comsol_solution/lofi/voltage.csv.bz2\")\n\n assert data.any()", "def load_info(self, file_path):\r\n info = pd.read_csv(file_path, header=0, index_col=0)\r\n self.set_info(info)\r\n logger.info(f'{self} load info')", "def load_csv(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(\n self,\n \"Select one or more files\",\n \"\",\n \"csv files (*.csv);;All Files (*)\",\n options=options,\n )\n self.show()\n\n if files:\n self.files_now = files\n else:\n self.files_now = None\n\n if self.files_now:\n self.lineEdit_file_name.setText(self.files_now[0])\n self.update_gui_from_csv()", "def loan_data():\n return pd.read_csv(data_path / \"credit_data.csv\")", "def read_csv():", "def get_local_dataset(\n self, \n file_name: str\n ):\n pd.read_csv(file_name)\n #save", "def import_csv(self):\r\n path = tk.filedialog.askopenfile(initialdir=\"/\", title=\"Select File\",\r\n filetypes=((\"Comma-separated values (.csv)\", \"*.csv\"), (\"Text Document (.txt)\", \"*.txt\"),\r\n (\"All Files\", \"*.*\")))\r\n\r\n items = []\r\n if path is not None:\r\n for ticker in path:\r\n items.append(ticker)\r\n else:\r\n return\r\n\r\n tickers = items[0].split(',')\r\n for ticker in tickers:\r\n self.root.main.get_quote(ticker)", "def process_file_import(self):\r\n directory_csv = [file for file in os.listdir() if file.endswith(\".csv\")]\r\n 
self.print_options(directory_csv,2)\r\n\r\n \"\"\"\r\n Asks for user input. Then imports csv file based on user's input.\r\n \"\"\"\r\n n = (input(\"Which csv would you like to import? Please input the corresponding integer:\"))\r\n\r\n try:\r\n n = int(n)\r\n except:\r\n pass\r\n\r\n if isinstance(n, int) is True and n <= len(directory_csv):\r\n self.population.import_csv(directory_csv[int(n)-1])\r\n print(self.population)\r\n self.file_import()\r\n elif n == 'q':\r\n quit()\r\n elif n == 'b':\r\n self.menu_page()\r\n else:\r\n raise InputError(\"\\nPlease input a valid digit, 'q' or 'b'\")", "def __loaddata(filename, datatype='flightcsv', minprob=0.001, maxprob=0.20):\n if datatype is 'flightcsv':\n return extract_flight_csv(filename, minprob=minprob, maxprob=maxprob)\n else:\n raise Exception('unknown datatype %s' % datatype)", "def upload_csv_data(self, upload_file):\n db = DataBase(self.DATABASE_DATA)\n db.insert_data_from_file(\n 'triagedata.historicdata',\n ('clinic_id', 'severity', 'date_received', 'date_seen'),\n upload_file,\n ','\n )", "def import_from_csv(self) -> None:\n logging.info('import_from_csv')\n if self.target_table and str(self.target_table).lower() in [\"issue\", \"version\"]:\n if self.file_path and exists(self.file_path):\n # Read CSV file\n csv_data = pd.read_csv(self.file_path).to_dict('records')\n\n # Import Version\n if str(self.target_table).capitalize() == \"Version\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Version).delete()\n click.echo('Overwrite Version table')\n\n for version in csv_data:\n if all(item in list(version.keys()) for item in ['tag', 'start_date', 'end_date']):\n newVersion=Version(\n project_id=version['project_id'],\n name=version[\"name\"], \n tag=version[\"tag\"], \n start_date=datetime.strptime(version[\"start_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n end_date=datetime.strptime(version[\"end_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n )\n \n try:\n self.session.add(newVersion)\n compute_version_metrics(self.session, self.configuration.current_branch, newVersion.project_id)\n click.echo('Importing ' + str(len(csv_data)) + ' version(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields')\n\n # Import Issue\n if str(self.target_table).capitalize() == \"Issue\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Issue).delete()\n click.echo('Overwrite Issue table')\n\n for issue in csv_data:\n if all(item in list(issue.keys()) for item in ['number', 'created_at', 'updated_at']):\n newIssue=Issue(\n project_id=issue['project_id'],\n number=issue[\"number\"],\n title=issue[\"title\"],\n created_at=datetime.strptime(issue[\"created_at\"], '%Y-%m-%d %H:%M:%S.%f'),\n updated_at=datetime.strptime(issue[\"updated_at\"], '%Y-%m-%d %H:%M:%S.%f'))\n\n try:\n self.session.add(newIssue)\n click.echo('Importing ' + str(len(csv_data)) + ' issue(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields') \n\n self.session.commit()\n else:\n logging.error('File not found')\n sys.exit('File not found')\n else:\n logging.error('Target table not found')\n sys.exit('Target table not found')", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def read_csv(file_name, company=None):\n try:\n stock_data 
= pd.read_csv(file_name + '.csv', parse_dates=['date']).dropna()\n stock_data = stock_data.sort_values(by=['date'])\n except ValueError:\n stock_data = pd.read_csv(file_name + '.csv').dropna()\n\n if company is not None:\n stock_data = stock_data.loc[stock_data['company'] == company]\n\n return stock_data", "def onLoadCSVList(self, evt):\n dlg = wx.FileDialog(self.view, \"Choose a file:\", wildcard = \"*.txt; *.csv\" ,\n style=wx.FD_DEFAULT_STYLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n print \"You chose %s\" % dlg.GetPath()\n self.config.CSVFilePath = dlg.GetPath()", "def _open_csv_file(self):\n for s in self.symbol_list:\n self.symbol_data[s] = pd.read_csv(\n os.path.join(self.csv_dir, '%s.csv' % s),\n header=0, parse_dates=True,\n\n )\n self.symbol_data[s] = self.symbol_data[s][self.symbol_data[s]['Time'] >= self.start_time]\n self.symbol_data[s] = self.symbol_data[s][self.symbol_data[s]['Time'] <= self.end_time]\n for s in self.symbol_list:\n self.symbol_data[s] = self.symbol_data[s].iterrows()", "def csvfileUsage(self):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.DictReader(file_obj, delimiter=',') # CSV DictReader object\n \"\"\" reader.fieldnames returns header , slicing intial 'Month' and\n 'Year' header from list\n \"\"\"\n for com_names in reader.fieldnames[2:]:\n self.company_data[com_names] = {}\n # iterating each row\n for row in reader:\n month, year = self.parse_my(row) # parsing the year and month from row\n # pop the `Month` and `Year` Key to minimize iteration below\n row.pop('Month'), row.pop('Year')\n \"\"\" saving and updating the data at same point of time\n each iteration time, checking the max value and updating \n `Month` `Year` and `Value`\n \"\"\"\n self.prepare_company_data(month, year, row, self.company_data)\n file_obj.close() # close file\n return self.company_data", "def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()", "def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file", "def CSV_Load_File( self, infilename ):\n print( 'Loading \"{}\"'.format(infilename) )\n IN = open( infilename, 'r' )\n standname = None\n laststand = None\n for L in IN:\n if( L[0:9] == 'Site/Plot' ): continue\n col = L.split( ',' )\n standname = col[0]\n year = int(col[1])\n #if( re.search( '-', standname ) != None ):\n # loc = re.search( '-', standname )\n # year = int(standname[loc.start()+1:])\n # standname = standname[0:loc.start()]\n #print standname, year\n if( (standname != None ) & (standname != laststand) ): self.Data.Stand[standname] = StandData( standname )\n (treeno, species, dbh, ht, live, status, cclass, tpa) = \\\n (int(col[2]), col[3], float(col[4]), float(col[5]), col[6], col[7], int(float(col[8])), float(col[9]))\n if( OPT['d'] ):\n if( dbh > 10.0 ): dbh *= 1.25\n if( dbh > 15.0 ): dbh *= 1.50\n for t in range( 1, int( math.ceil( tpa ))+1, 1 ):\n ntree = len( self.Data.Stand[standname].Tree ) + 1\n self.Data.Stand[standname].Tree[ntree] = TreeData( species, TreeNumber=treeno )\n self.Data.Stand[standname].Tree[ntree].Year[year] = MeasurementData( dbh, ht, '', 1, live, status, cclass )\n laststand = standname\n IN.close()", 
"def import_data():\n import pandas as pd\n \n df = pd.read_csv('Company_Bankruptcy_Prediction.csv')\n return df", "def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')", "def loadData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n while self.tabWidget.count() != 0:\n self.closeTab()\n self.createTab(pandaData)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")" ]
[ "0.6564447", "0.65159214", "0.6428639", "0.6289074", "0.61714965", "0.6103537", "0.60740215", "0.6071866", "0.60457027", "0.60127985", "0.5915446", "0.59071153", "0.5904035", "0.5896971", "0.5839247", "0.5829717", "0.5804731", "0.5802005", "0.5783648", "0.5783445", "0.5778229", "0.577286", "0.5768219", "0.5764142", "0.5736227", "0.56978804", "0.56888235", "0.56840456", "0.56784153", "0.5671818" ]
0.7770881
0
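A minimal sketch of the prompt-and-load pattern from the record above, with pandas.read_csv assumed as a stand-in for the record's load_csv helper (questionary and pathlib are used as in the original):

    import sys
    from pathlib import Path

    import pandas as pd
    import questionary

    def load_rate_sheet():
        # Ask for a CSV path, validate that it exists, then load it into a DataFrame.
        csvpath = Path(questionary.text("Enter a file path to a rate-sheet (.csv):").ask())
        if not csvpath.exists():
            sys.exit(f"Oops! Can't find this path: {csvpath}")
        return pd.read_csv(csvpath)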
Prompt dialog to get the applicant's financial information.
def get_applicant_info():\n    credit_score = questionary.text("Enter a credit score between 300 and 850: ").ask()\n    credit_score = number_checker(credit_score)\n    if credit_score == False or credit_score < 300 or credit_score > 850:\n        print("\u001b[31m", "\n")\n        print("Credit score must be a number between 300 and 850.", "\n")\n        print("Exiting app...", "\u001b[0m", "\n")\n        exit()\n    debt = questionary.text("What's your current monthly debt? ").ask()\n    debt = number_checker(debt)\n    if debt == False or debt < 0:\n        print("\u001b[31m", "\n")\n        print("Monthly debt must be greater than or equal to 0 to use this app.", "\n")\n        print("Exiting system...", "\u001b[0m", "\n")\n        exit()\n    income = questionary.text("What's your total monthly income?").ask()\n    income = number_checker(income)\n    if income == False or income < 0:\n        print("\u001b[31m", "\n")\n        print("Your Monthly INCOME must be greater than 0 to use this app.", "\n")\n        print("Exiting system...", "\u001b[0m", "\n")\n        exit()\n    loan_amount = questionary.text("What's your desired loan amount?").ask()\n    loan_amount = number_checker(loan_amount)\n    if loan_amount == False or loan_amount < 0:\n        print("\u001b[31m", "\n")\n        print("Loan amount must be greater than 0.", "\n")\n        print("Exiting system...", "\u001b[0m", "\n")\n        exit()\n    home_value = questionary.text("What's your home value?").ask()\n    home_value = number_checker(home_value)\n    if home_value == False or home_value < 0:\n        print("\u001b[31m", "\n")\n        print("Your home value must be greater than or equal to 0.", "\n")\n        print("Exiting system...", "\u001b[0m", "\n")\n        exit()\n    return credit_score, debt, income, loan_amount, home_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prompt_user_account_to_deposit():\n print('What account do you want to deposit to?:')\n return input()", "def prompt_user_account_to_withdrawl():\n print('What account do you want to withdrawl from?:')\n return input()", "def prompt_user_money_to_deposit():\n print('What amount of money do you want to deposit?:')\n return input()", "def prompt_user_money_to_withdrawl():\n print('What amount of money do you want to withdrawl?:')\n return input()", "def prompt_user_account_to_get_interest():\n print('What account do you want 0.5% automatic interest?:')\n return input()", "def Withdrawal(self):\n self.amount = (int)(raw_input (\" Enter your withdrawal amount \"))\n return self.amount", "def _prompt_user(self):\n print '\\nPlease enter the ff. Just leave blank to accept default.\\n'\n self._handle_param(param='start',\n msg='Starting date (Ex. Feb 1, 2016): ')\n self._handle_param(param='end',\n msg='Ending date (Ex. Aug 1, 2016): ')", "def ask_dialog(self, title=\"\", vars=[], help=\"\"):\n\t\tpass", "def prompt():\n\n\t# Inform the user on what price data has been taken\n\tprint(\"\\nCurrent available historical data for calibration: \")\n\tdata_files = listdir('call_data/')\n\tfor i in range(len(data_files)):\n\t\tprint(data_files[i])\n\t# Ask the user if they would like to sample more points\n\tdone = False\n\twhile done != True:\n\t\tinp = input(\"\\nWould you like to sample more historical price data? (y/n) \")\n\t\ttry:\n\t\t\tif inp.lower() == 'y':\n\t\t\t\tsample_calls.random_calls()\n\t\t\tif inp.lower() == 'n':\n\t\t\t\tprint(\"Continuing to analysis.\\n\")\n\t\t\t\tdone = True\n\t\t\telse:\n\t\t\t\tprint(\"Invalid input.\")\n\t\texcept ValueError:\n\t\t\tprint(\"\\nUnable to interpret input. Please try again.\")", "def passPrompt(title, prompt):\n answer = tkSimpleDialog.askstring(title, prompt, show=\"*\")\n print answer", "def req_qry_trading_account(self):\n pass", "def donation_prompt(name):\n input_msg = \"Please enter the donation amount for {}: \"\n donation_amount = input(input_msg.format(name))\n while True:\n try:\n return float(donation_amount)\n except ValueError:\n donation_amount=input(\"INVALID Amount: \"+input_msg.format(name))", "def prompt_loan_amount():\n loan_amount = normalize_loan_amount(input('\\nEnter loan amount: '))\n while loan_amount is None:\n loan_amount = normalize_loan_amount(input('Invalid input! 
Enter loan amount:'))\n return loan_amount", "def prompt_user_what_to_do_next():\n print('What do you, user, want to do next for banking?')\n print('Enter 1 to open account, 2 to check your balance, 3 to deposit to your account, 4 to withdrawl from your account,')\n print('or 5 to automatically earn 0.5% interest on your account, or q to quit:')\n return input()", "def get_user_input():\n st.sidebar.header('Parámetros de entrada') \n acti2 = st.sidebar.selectbox('Código de Actividad Económica', ['ACABADO DE PRODUCTOS TEXTILES',\n 'ACTIVIDADES COMBINADAS DE SERVICIOS ADMINISTRATIVOS DE OFICINA', \n 'ACTIVIDADES CREATIVAS, ARTÍSTICAS Y DE ENTRETENIMIENTO', \n 'ACTIVIDADES DE AGENCIAS DE COBRO Y AGENCIAS DE CALIFICACIÓN CREDITICIA', \n 'ACTIVIDADES DE AGENCIAS DE EMPLEO', \n 'ACTIVIDADES DE AGENCIAS DE VIAJES', \n 'ACTIVIDADES DE AGENTES Y CORREDORES DE SEGUROS', \n 'ACTIVIDADES DE ALOJAMIENTO PARA ESTANCIAS CORTAS', \n 'ACTIVIDADES DE APOYO A LA ENSEÑANZA', \n 'ACTIVIDADES DE APOYO PARA LA EXTRACCIÓN DE PETRÓLEO Y GAS NATURAL', \n 'ACTIVIDADES DE APOYO PARA OTRAS ACTIVIDADES DE EXPLOTACIÓN DE MINAS Y CANTERAS', \n 'ACTIVIDADES DE ARQUITECTURA E INGENIERÍA Y ACTIVIDADES CONEXAS DE CONSULTORÍA TÉCNICA', \n 'ACTIVIDADES DE ASOCIACIONES EMPRESARIALES Y DE EMPLEADORES', \n 'ACTIVIDADES DE ASOCIACIONES PROFESIONALES', \n 'ACTIVIDADES DE ATENCIÓN DE ENFERMERÍA EN INSTITUCIONES', \n 'ACTIVIDADES DE BIBLIOTECAS Y ARCHIVOS', \n 'ACTIVIDADES DE CENTROS DE LLAMADAS', \n 'ACTIVIDADES DE CLUBES DEPORTIVOS', \n 'ACTIVIDADES DE CONSULTORÍA DE GESTIÓN', \n 'ACTIVIDADES DE CONTABILIDAD, TENEDURÍA DE LIBROS Y AUDITORÍA; CONSULTORÍA FISCAL', \n 'ACTIVIDADES DE DESCONTAMINACIÓN Y OTROS SERVICIOS DE GESTIÓN DE DESECHOS', \n 'ACTIVIDADES DE DISTRIBUCIÓN DE PELÍCULAS CINEMATOGRÁFICAS, VÍDEOS Y PROGRAMAS DE TELEVISIÓN', \n 'ACTIVIDADES DE ENVASADO Y EMPAQUETADO', \n 'ACTIVIDADES DE EXHIBICIÓN DE PELÍCULAS CINEMATOGRÁFICAS Y CINTAS DE VÍDEO', \n 'ACTIVIDADES DE FOTOGRAFÍA', \n 'ACTIVIDADES DE GESTIÓN DE FONDOS', \n 'ACTIVIDADES DE HOSPITALES', \n 'ACTIVIDADES DE INVESTIGACIÓN', \n 'ACTIVIDADES DE JARDINES BOTÁNICOS Y ZOOLÓGICOS Y RESERVAS NATURALES', \n 'ACTIVIDADES DE JUEGOS DE AZAR Y APUESTAS', \n 'ACTIVIDADES DE MENSAJERÍA', \n 'ACTIVIDADES DE MUSEOS Y GESTIÓN DE LUGARES Y EDIFICIOS HISTÓRICOS', \n 'ACTIVIDADES DE MÉDICOS Y ODONTÓLOGOS', \n 'ACTIVIDADES DE OFICINAS CENTRALES', \n 'ACTIVIDADES DE OPERADORES TURÍSTICOS', \n 'ACTIVIDADES DE ORGANIZACIONES RELIGIOSAS', \n 'ACTIVIDADES DE OTRAS ASOCIACIONES N.C.P.', \n 'ACTIVIDADES DE PARQUES DE ATRACCIONES Y PARQUES TEMÁTICOS', \n 'ACTIVIDADES DE PRODUCCIÓN DE PELÍCULAS CINEMATOGRÁFICAS, VÍDEOS Y PROGRAMAS DE TELEVISIÓN', \n 'ACTIVIDADES DE RESTAURANTES Y DE SERVICIO MÓVIL DE COMIDAS', \n 'ACTIVIDADES DE SEGURIDAD PRIVADA', \n 'ACTIVIDADES DE SERVICIO DE BEBIDAS', \n 'ACTIVIDADES DE SERVICIO DE SISTEMAS DE SEGURIDAD', \n 'ACTIVIDADES DE SERVICIOS RELACIONADAS CON LA IMPRESIÓN', \n 'ACTIVIDADES DE SERVICIOS VINCULADAS AL TRANSPORTE ACUÁTICO', \n 'ACTIVIDADES DE SERVICIOS VINCULADAS AL TRANSPORTE AÉREO', \n 'ACTIVIDADES DE SERVICIOS VINCULADAS AL TRANSPORTE TERRESTRE', \n 'ACTIVIDADES DE TELECOMUNICACIONES ALÁMBRICAS', \n 'ACTIVIDADES DE TELECOMUNICACIONES INALÁMBRICAS', \n 'ACTIVIDADES DE TELECOMUNICACIONES POR SATÉLITE.', \n 'ACTIVIDADES ESPECIALIZADAS DE DISEÑO', \n 'ACTIVIDADES INMOBILIARIAS REALIZADAS A CAMBIO DE UNA RETRIBUCIÓN O POR CONTRATA', \n 'ACTIVIDADES INMOBILIARIAS REALIZADAS CON BIENES PROPIOS O ARRENDADOS', \n 'ACTIVIDADES JURÍDICAS', \n 'ACTIVIDADES 
POSTALES', \n 'ACTIVIDADES VETERINARIAS', \n 'ACUICULTURA DE AGUA DULCE', \n 'ACUICULTURA MARÍTIMA', \n 'ADMINISTRACIÓN DE MERCADOS FINANCIEROS', \n 'ALMACENAMIENTO Y DEPÓSITO', \n 'ALQUILER Y ARRENDAMIENTO DE OTROS EFECTOS PERSONALES Y ENSERES DOMÉSTICOS', \n 'ALQUILER Y ARRENDAMIENTO DE OTROS TIPOS DE MAQUINARIA, EQUIPO Y BIENES TANGIBLES', \n 'ALQUILER Y ARRENDAMIENTO DE VEHÍCULOS AUTOMOTORES', \n 'ARRENDAMIENTO DE PROPIEDAD INTELECTUAL Y PRODUCTOS SIMILARES, EXCEPTO OBRAS PROTEGIDAS POR DERECHOS DE AUTOR', \n 'ARRENDAMIENTO FINANCIERO', \n 'ASERRADOS Y ACEPILLADURA DE MADERA', \n 'CAPTACIÓN, TRATAMIENTO Y DISTRIBUCIÓN DE AGUA', \n 'CONSTRUCCIÓN DE BUQUES Y ESTRUCTURAS FLOTANTES', \n 'CONSTRUCCIÓN DE CARRETERAS Y LÍNEAS DE FERROCARRIL', \n 'CONSTRUCCIÓN DE EDIFICIOS', \n 'CONSTRUCCIÓN DE OTRAS OBRAS DE INGENIERÍA CIVIL', \n 'CONSTRUCCIÓN DE PROYECTOS DE SERVICIO PÚBLICO', \n 'CONSULTORÍA DE INFORMÁTICA Y DE GESTIÓN DE INSTALACIONES INFORMÁTICAS', \n 'CORRETAJE DE VALORES Y DE CONTRATOS DE PRODUCTOS BÁSICOS', \n 'CORTE, TALLA Y ACABADO DE LA PIEDRA', \n 'CURTIDO Y ADOBO DE CUEROS', \n 'DESTILACIÓN, RECTIFICACIÓN Y MEZCLA DE BEBIDAS ALCOHÓLICAS', \n 'EDICIÓN DE LIBROS', \n 'EDICIÓN DE PERIÓDICOS, REVISTAS Y OTRAS PUBLICACIONES PERIÓDICAS', \n 'EDUCACIÓN DEPORTIVA Y RECREATIVA', \n 'ELABORACIÒN Y CONSERVACIÓN DE CARNE', \n 'ELABORACIÒN Y CONSERVACIÓN DE FRUTAS,LEGUMBRES Y HORTALIZAS', \n 'ELABORACIÒN Y CONSERVACIÓN DE PESCADOS, CRUSTÁCEOS Y MOLUSCOS', \n 'ELABORACIÓN DE ACEITES Y GRASAS DE ORIGEN VEGETAL Y ANIMAL', \n 'ELABORACIÓN DE AZÚCAR', \n 'ELABORACIÓN DE BEBIDAS MALTEADAS Y DE MALTA', \n 'ELABORACIÓN DE BEBIDAS NO ALCOHÓLICAS', \n 'ELABORACIÓN DE CACAO Y CHOCOLATE Y DE PRODUCTOS DE CONFITERÍA', \n 'ELABORACIÓN DE COMIDAS Y PLATOS PREPARADOS', \n 'ELABORACIÓN DE MACARRONES, FIDEOS, ALCUZCUS Y PRODUCTOS FARINÁCEOS SIMILARES', \n 'ELABORACIÓN DE OTROS PRODUCTOS ALIMENTICIOS N.C.P.', \n 'ELABORACIÓN DE PIENSOS PREPARADOS PARA ANIMALES', \n 'ELABORACIÓN DE PRODUCTOS DE MOLINERÍA.', \n 'ELABORACIÓN DE PRODUCTOS DE PANADERÍA', \n 'ELABORACIÓN DE PRODUCTOS LÁCTEOS', \n 'ELABORACIÓN DE VINOS', \n 'ENSAYOS Y ANÁLISIS TÉCNICOS', \n 'ENSEÑANZA CULTURAL', \n 'ENSEÑANZA PREESCOLAR Y PRIMARIA', \n 'ENSEÑANZA SECUNDARIA DE FORMACIÓN GENERAL', \n 'ENSEÑANZA SECUNDARIA DE FORMACIÓN TÉCNICA Y PROFESIONAL', \n 'ENSEÑANZA SUPERIOR', \n 'ESTUDIOS DE MERCADO Y ENCUESTAS DE OPINIÓN PÚBLICA', \n 'EVACUACIÓN DE AGUAS RESIDUALES', \n 'EXPLOTACIÓN DE OTRAS MINAS Y CANTERAS N.C.P.', \n 'EXTRACCIÓN DE CARBÓN DE PIEDRA', \n 'EXTRACCIÓN DE GAS NATURAL', \n 'EXTRACCIÓN DE MINERALES DE HIERRO', \n 'EXTRACCIÓN DE MINERALES PARA LA FABRICACIÓN DE ABONOS Y PRODUCTOS QUÍMICOS', \n 'EXTRACCIÓN DE OTROS MINERALES METALÍFEROS NO FERROSOS', \n 'EXTRACCIÓN DE PETRÓLEO CRUDO', \n 'EXTRACCIÓN DE PIEDRA, ARENA Y ARCILLA', \n 'EXTRACCIÓN DE SAL', \n 'FABRICACIÓN ABONOS Y COMPUESTOS DE NITRÓGENO', \n 'FABRICACIÓN DE APARATOS DE USO DOMÉSTICO', \n 'FABRICACIÓN DE ARTICULOS DE PUNTO Y GANCHILLO', \n 'FABRICACIÓN DE ARTÍCULOS CONFECCIONADOS DE MATERIALES TEXTILES, EXCEPTO PRENDAS DE VESTIR', \n 'FABRICACIÓN DE ARTÍCULOS DE CUCHILLERÍA, HERRAMIENTAS DE MANO Y ARTÍCULOS DE FERRETERÍA', \n 'FABRICACIÓN DE ARTÍCULOS DE DEPORTE', \n 'FABRICACIÓN DE ARTÍCULOS DE HORMIGÓN, DE CEMENTO Y DE YESO', \n 'FABRICACIÓN DE ARTÍCULOS DE PIEL', \n 'FABRICACIÓN DE BICICLETAS Y DE SILLONES DE RUEDAS PARA INVÁLIDOS', \n 'FABRICACIÓN DE BISUTERÍA Y ARTÍCULOS CONEXOS', \n 'FABRICACIÓN DE BOMBAS, COMPRESORES, GRIFOS Y VÁLVULAS', \n 'FABRICACIÓN DE 
CALZADO', \n 'FABRICACIÓN DE CARROCERÍAS PARA VEHÍCULOS AUTOMOTORES', \n 'FABRICACIÓN DE CEMENTO, CAL Y YESO', \n 'FABRICACIÓN DE COMPONENTES Y TABLEROS ELECTRÓNICOS', \n 'FABRICACIÓN DE CUBIERTAS Y CÁMARAS DE CAUCHO', \n 'FABRICACIÓN DE CUERDAS, CORDELES, BRAMANTES Y REDES', \n 'FABRICACIÓN DE EQUIPO DE ELEVACIÓN Y MANIPULACIÓN', \n 'FABRICACIÓN DE EQUIPO DE IRRADIACIÓN Y EQUIPO ELECTRÓNICO DE USO MÉDICO Y TERAPÉUTICO', \n 'FABRICACIÓN DE EQUIPO ELÉCTRICO DE ILUMINACIÓN', \n 'FABRICACIÓN DE FIBRAS ARTIFICIALES', \n 'FABRICACIÓN DE HERRAMIENTAS DE MANO MOTORIZADAS', \n 'FABRICACIÓN DE HOJAS DE MADERA PARA ENCHAPADO Y TABLEROS A BASE DE MADERA', \n 'FABRICACIÓN DE INSTRUMENTOS Y MATERIALES MÉDICOS Y ODONTOLÓGICOS', \n 'FABRICACIÓN DE INSTRUMENTOS ÓPTICOS Y EQUIPO FOTOGRÁFICO', \n 'FABRICACIÓN DE JABONES Y DETERGENTES, PREPARADOS PARA LIMPIAR Y PULIR, PERFUMES Y PREPARADOS DE TOCADOR.', \n 'FABRICACIÓN DE JOYAS Y ARTÍCULOS CONEXOS', \n 'FABRICACIÓN DE JUEGOS Y JUGUETES', \n 'FABRICACIÓN DE MALETAS, BOLSOS DE MANO, Y ARTÍCULOS SIMILARES,Y DE ARTICULOS DE TALABARTERÍA Y GUARNICIONERÍA', \n 'FABRICACIÓN DE MAQUINARIA AGROPECUARIA Y FORESTAL', \n 'FABRICACIÓN DE MAQUINARIA METALÚRGICA', \n 'FABRICACIÓN DE MAQUINARIA PARA EXPLOTACIÓN DE MINAS Y CANTERAS Y PARA OBRAS DE CONSTRUCCIÓN', \n 'FABRICACIÓN DE MAQUINARIA PARA LA ELABORACIÓN DE ALIMENTOS, BEBIDAS Y TABACO', \n 'FABRICACIÓN DE MATERIALES DE CONSTRUCCIÓN DE ARCILLA', \n 'FABRICACIÓN DE MOTOCICLETAS', \n 'FABRICACIÓN DE MOTORES Y TURBINAS, EXCEPTO MOTORES PARA AERONAVES, VEHÍCULOS AUTOMOTORES Y MOTOCICLETAS', \n 'FABRICACIÓN DE MOTORES, GENERADORES Y TRANSFORMADORES ELÉCTRICOS Y APARATOS DE DISTRIBUCIÓN Y CONTROL DE LA ENERGÍA ELÉCTRICA', \n 'FABRICACIÓN DE MUEBLES', \n 'FABRICACIÓN DE OTROS ARTÍCULOS DEL PAPEL Y CARTÓN', \n 'FABRICACIÓN DE OTROS HILOS Y CABLES ELÉCTRICOS', \n 'FABRICACIÓN DE OTROS PRODUCTOS DE CAUCHO', \n 'FABRICACIÓN DE OTROS PRODUCTOS DE MADERA; FABRICACIÓN DE ARTÍCULOS DE CORCHO, PAJA Y MATERIALES TRENZABLES.', \n 'FABRICACIÓN DE OTROS PRODUCTOS DE PORCELANA Y DE CERÁMICA', \n 'FABRICACIÓN DE OTROS PRODUCTOS ELABORADOS DE METAL N.C.P.', \n 'FABRICACIÓN DE OTROS PRODUCTOS MINERALES NO METÁLICOS N.C.P.', \n 'FABRICACIÓN DE OTROS PRODUCTOS QUÍMICOS N.C.P.', \n 'FABRICACIÓN DE OTROS PRODUCTOS TEXTILES N.C.P.', \n 'FABRICACIÓN DE OTROS TIPOS DE EQUIPO DE TRANSPORTE N.C.P.', \n 'FABRICACIÓN DE OTROS TIPOS DE EQUIPO ELÉCTRICO', \n 'FABRICACIÓN DE OTROS TIPOS DE MAQUINARIA DE USO ESPECIAL', \n 'FABRICACIÓN DE OTROS TIPOS DE MAQUINARIA DE USO GENERAL', \n 'FABRICACIÓN DE PARTES Y PIEZAS DE CARPINTERÍA PARA EDIFICIOS Y CONSTRUCCIONES', \n 'FABRICACIÓN DE PARTES, PIEZAS Y ACCESORIOS PARA VEHÍCULOS DE AUTOMOTORES', \n 'FABRICACIÓN DE PASTA DE MADERA, PAPEL Y CARTÓN', \n 'FABRICACIÓN DE PILAS, BATERÍAS Y ACUMULADORES', \n 'FABRICACIÓN DE PINTURAS, BARNICES Y PRODUCTOS DE REVESTIMIENTO SIMILARES, TINTAS DE IMPRENTA Y MASILLAS', \n 'FABRICACIÓN DE PLAGUICIDAS Y OTROS PRODUCTOS QUÍMICOS DE USO AGROPECUARIO', \n 'FABRICACIÓN DE PLÁSTICOS Y DE CAUCHO SINTÉTICO EN FORMAS PRIMARIAS', \n 'FABRICACIÓN DE PRENDAS DE VESTIR, EXCEPTO PRENDAS DE PIEL', \n 'FABRICACIÓN DE PRODUCTOS DE LA REFINACIÓN DEL PETRÓLEO', \n 'FABRICACIÓN DE PRODUCTOS DE PLÁSTICO', \n 'FABRICACIÓN DE PRODUCTOS FARMACÉUTICOS, SUSTANCIAS QUÍMICAS MEDICINALES Y PRODUCTOS BOTÁNICOS DE USO FARMACÉUTICO', \n 'FABRICACIÓN DE PRODUCTOS METÁLICOS PARA USO ESTRUCTURAL', \n 'FABRICACIÓN DE PRODUCTOS PRIMARIOS DE METALES PRECIOSOS Y OTROS METALES NO FERROSOS', \n 'FABRICACIÓN DE 
PRODUCTOS REFRACTARIOS', \n 'FABRICACIÓN DE RECIPIENTES DE MADERA', \n 'FABRICACIÓN DE SUSTANCIAS QUÍMICAS BÁSICAS', \n 'FABRICACIÓN DE TANQUES, DEPÓSITOS Y RECIPIENTES DE METAL', \n 'FABRICACIÓN DE TAPICES Y ALFOMBRAS', \n 'FABRICACIÓN DE TEJIDOS DE PUNTO Y GANCHILLO', \n 'FABRICACIÓN DE VEHÍCULOS AUTOMOTORES', \n 'FABRICACIÓN DE VIDRIO Y DE PRODUCTOS DE VIDRIO', \n 'FABRICACIÓN DEL GAS', \n 'FABRICACIÓN DEL PAPEL Y CARTÓN ONDULADO Y DE ENVASES DE PAPEL Y CARTÓN', \n 'FONDOS DE PENSIONES', \n 'FONDOS Y SOCIEDADES DE INVERSIÓN Y ENTIDADES FINANCIERAS SIMILARES', \n 'FORJA, PRENSADO, ESTAMPADO Y LAMINADO DE METALES; PULVIMETALURGIA', \n 'FOTOCOPIADO, PREPARACIÓN DE DOCUMENTOS Y OTRAS ACTIVIDADES ESPECIALIZADAS DE APOYO DE OFICINA', \n 'FUNDICIÓN DE HIERRO Y ACERO', \n 'FUNDICIÓN DE METALES NO FERROSOS', \n 'GENERACIÓN, TRANSMISIÓN Y DISTRIBUCIÓN DE ENERGÍA ELÉCTRICA', \n 'GESTIÓN DE INSTALACIONES DEPORTIVAS', \n 'IMPRESIÓN', \n 'INDUSTRIAS BÁSICAS DE HIERRO Y ACERO', \n 'INSTALACIONES DE FONTANERÍA, CALEFACCIÓN Y AIRE ACONDICIONADO', \n 'INSTALACIONES ELÉCTRICAS', \n 'INSTALACIÓN DE MAQUINARIA Y EQUIPO INDUSTRIALES', \n 'INVESTIGACIÓN Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LAS CIENCIAS NATURALES Y LA INGENIERÍA', \n 'INVESTIGACIÓN Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LAS CIENCIAS SOCIALES Y LAS HUMANIDADES', \n 'LAVADO Y LIMPIEZA, INCLUIDA LA LIMPIEZA EN SECO, DE PRODUCTOS TEXTILES Y DE PIEL', \n 'LIMPIEZA GENERAL DE EDIFICIOS', \n 'MANIPULACIÓN DE CARGA', \n 'MANTENIMIENTO Y REPARACIÓN DE VEHÍCULOS AUTOMOTORES', \n 'ORGANIZACIÓN DE CONVENCIONES Y EXPOSICIONES COMERCIALES', \n 'OTRAS ACTIVIDADES AUXILIARES DE LAS ACTIVIDADES DE SEGUROS Y FONDOS DE PENSIONES', \n 'OTRAS ACTIVIDADES AUXILIARES DE LAS ACTIVIDADES DE SERVICIOS FINANCIEROS', \n 'OTRAS ACTIVIDADES DE ALOJAMIENTO', \n 'OTRAS ACTIVIDADES DE APOYO AL TRANSPORTE', \n 'OTRAS ACTIVIDADES DE ASISTENCIA SOCIAL SIN ALOJAMIENTO', \n 'OTRAS ACTIVIDADES DE ATENCIÓN DE LA SALUD HUMANA', \n 'OTRAS ACTIVIDADES DE ATENCIÓN EN INSTITUCIONES', \n 'OTRAS ACTIVIDADES DE CONCESIÓN DE CRÉDITO', \n 'OTRAS ACTIVIDADES DE DOTACIÓN DE RECURSOS HUMANOS', \n 'OTRAS ACTIVIDADES DE EDICIÓN', \n 'OTRAS ACTIVIDADES DE ESPARCIMIENTO Y RECREATIVAS N.C.P.', \n 'OTRAS ACTIVIDADES DE LIMPIEZA DE EDIFICIOS E INSTALACIONES INDUSTRIALES', \n 'OTRAS ACTIVIDADES DE SERVICIO DE COMIDAS', \n 'OTRAS ACTIVIDADES DE SERVICIOS DE APOYO A LAS EMPRESAS N.C.P', \n 'OTRAS ACTIVIDADES DE SERVICIOS DE INFORMACIÓN N.C.P.', \n 'OTRAS ACTIVIDADES DE SERVICIOS FINANCIEROS, EXCEPTO LAS DE SEGUROS Y FONDOS DE PENSIONES, N.C.P.', \n 'OTRAS ACTIVIDADES DE SERVICIOS PERSONALES N.C.P.', \n 'OTRAS ACTIVIDADES DE TECNOLOGÍA DE LA INFORMACIÓN Y DE SERVICIOS INFORMÁTICOS', \n 'OTRAS ACTIVIDADES DE TELECOMUNICACIÓN.', \n 'OTRAS ACTIVIDADES DE TRANSPORTE POR VÍA TERRESTRE', \n 'OTRAS ACTIVIDADES DE VENTA AL POR MENOR EN COMERCIOS NO ESPECIALIZADOS', \n 'OTRAS ACTIVIDADES DE VENTA AL POR MENOR NO REALIZADAS EN COMERCIOS, PUESTOS DE VENTA O MERCADOS', \n 'OTRAS ACTIVIDADES DEPORTIVAS', \n 'OTRAS ACTIVIDADES ESPECIALIZADAS DE LA CONSTRUCCIÓN', \n 'OTRAS ACTIVIDADES PROFESIONALES, CIENTÍFICAS Y TÉCNICAS N.C.P.', \n 'OTRAS INDUSTRIAS MANUFACTURERAS N.C.P.', \n 'OTRAS INSTALACIONES PARA OBRAS DE CONSTRUCCIÓN', \n 'OTROS SERVICIOS DE RESERVAS Y ACTIVIDADES CONEXAS', \n 'OTROS TIPOS DE ENSEÑANZA N.C.P.', \n 'OTROS TIPOS DE INTERMEDIACIÓN MONETARIA.', \n 'PELUQUERÍA Y OTROS TRATAMIENTOS DE BELLEZA', \n 'PESCA DE AGUA DULCE', \n 'PESCA MARÍTIMA', \n 'POMPAS FÚNEBRES Y ACTIVIDADES CONEXAS', \n 'PORTALES 
WEB', \n 'PREPARACIÓN DEL TERRENO', \n 'PREPARACIÓN E HILATURA DE FIBRAS TEXTILES', \n 'PROCESAMIENTO DE DATOS, HOSPEDAJE Y ACTIVIDADES CONEXAS', \n 'PROGRAMACIÓN INFORMÁTICA', \n 'PROGRAMACIÓN Y TRANSMISIONES DE TELEVISIÓN', \n 'PUBLICIDAD', \n 'RECOGIDA DE DESECHOS NO PELIGROSOS', \n 'RECOGIDA DE DESECHOS PELIGROSOS', \n 'RECUPERACIÓN DE MATERIALES', \n 'REPARACIÓN DE APARATOS DE USO DOMÉSTICO Y EQUIPO DOMÉSTICO Y DE JARDINERÍA', \n 'REPARACIÓN DE APARATOS ELECTRÓNICOS DE CONSUMO', \n 'REPARACIÓN DE EQUIPO DE TRANSPORTE, EXCEPTO VEHÍCULOS AUTOMOTORES', \n 'REPARACIÓN DE EQUIPO ELÉCTRICO', \n 'REPARACIÓN DE EQUIPOS COMUNICACIONALES', \n 'REPARACIÓN DE MAQUINARIA', \n 'REPARACIÓN DE ORDENADORES Y EQUIPO PERIFÉRICO', \n 'REPARACIÓN DE OTROS TIPOS DE EQUIPO', \n 'REPARACIÓN DE PRODUCTOS ELABORADOS DE METAL', \n 'SEGUROS DE VIDA', \n 'SEGUROS GENERALES', \n 'SUMINISTRO DE COMIDAS POR ENCARGO', \n 'SUMINISTRO DE VAPOR Y AIRE ACONDICIONADO', \n 'TEJEDURA DE PRODUCTOS TEXTILES', \n 'TERMINACIÓN Y ACABADO DE EDIFICIOS', \n 'TRANSMISIONES DE RADIO', \n 'TRANSPORTE DE CARGA MARÍTIMO Y DE CABOTAJE', \n 'TRANSPORTE DE CARGA POR CARRETERA', \n 'TRANSPORTE DE CARGA POR FERROCARRIL', \n 'TRANSPORTE DE CARGA POR VÍA AÉREA', \n 'TRANSPORTE DE CARGA, POR VÍAS DE NAVEGACIÓN INTERIORES', \n 'TRANSPORTE DE PASAJEROS MARÍTIMO Y DE CABOTAJE', \n 'TRANSPORTE DE PASAJEROS POR VÍA AÉREA', \n 'TRANSPORTE DE PASAJEROS POR VÍAS DE NAVEGACIÓN INTERIORES', \n 'TRANSPORTE INTERURBANO DE PASAJEROS POR FERROCARRIL', \n 'TRANSPORTE URBANO Y SUBURBANO DE PASAJEROS POR VÍA TERRESTRE', \n 'TRATAMIENTO Y ELIMINACIÓN DE DESECHOS NO PELIGROSOS', \n 'TRATAMIENTO Y ELIMINACIÓN DE DESECHOS PELIGROSOS', \n 'TRATAMIENTO Y REVESTIMIENTO DE METALES', \n 'VENTA AL POR MAYOR A CAMBIO DE UNA RETRIBUCIÓN O POR CONTRATA', \n 'VENTA AL POR MAYOR DE ALIMENTOS, BEBIDAS Y TABACO.', \n 'VENTA AL POR MAYOR DE COMBUSTIBLES SÓLIDOS, LÍQUIDOS Y GASEOSOS Y PRODUCTOS CONEXOS', \n 'VENTA AL POR MAYOR DE DESPERDICIOS, DESECHOS, CHATARRA Y OTROS PRODUCTOS N.C.P', \n 'VENTA AL POR MAYOR DE EQUIPO, PARTES Y PIEZAS ELECTRÓNICOS Y DE TELECOMUNICACIONES', \n 'VENTA AL POR MAYOR DE MAQUINARIA, EQUIPO Y MATERIALES AGROPECUARIOS', \n 'VENTA AL POR MAYOR DE MATERIALES DE CONSTRUCCIÓN, ARTÍCULOS DE FERRETERÍA Y EQUIPO Y MATERIALES DE FONTANERÍA Y CALEFACCIÓN.', \n 'VENTA AL POR MAYOR DE MATERIAS PRIMAS AGROPECUARIAS Y ANIMALES VIVOS.', \n 'VENTA AL POR MAYOR DE METALES Y MINERALES METALÍFEROS', \n 'VENTA AL POR MAYOR DE ORDENADORES, EQUIPO PERIFÉRICO Y PROGRAMAS DE INFORMÁTICA', \n 'VENTA AL POR MAYOR DE OTROS ENSERES DOMÉSTICOS', \n 'VENTA AL POR MAYOR DE OTROS TIPOS DE MAQUINARIA Y EQUIPO', \n 'VENTA AL POR MAYOR DE PRODUCTOS TEXTILES, PRENDAS DE VESTIR Y CALZADO', \n 'VENTA AL POR MAYOR NO ESPECIALIZADA', \n 'VENTA AL POR MENOR DE ALIMENTOS EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE ALIMENTOS, BEBIDAS Y TABACO EN PUESTOS DE VENTA Y MERCADOS', \n 'VENTA AL POR MENOR DE APARATOS ELÉCTRICOS DE USO DOMÉSTICO, MUEBLES, EQUIPO DE ILUMINACIÓN Y OTROS ENSERES DOMÉSTICOS EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE ARTÍCULOS DE FERRETERÍA, PINTURAS Y PRODUCTOS DE VIDRIO EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE BEBIDAS EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE COMBUSTIBLES PARA VEHÍCULOS AUTOMOTORES EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE EQUIPO DE DEPORTE EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE EQUIPO DE SONIDO Y DE VÍDEO EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE LIBROS, 
PERIÓDICOS Y ARTÍCULOS DE PAPELERÍA EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE ORDENADORES, EQUIPO PERIFÉRICO, PROGRAMAS INFORMÁTICOS Y EQUIPO DE TELECOMUNICACIONES EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE OTROS PRODUCTOS EN PUESTOS DE VENTA Y MERCADOS', \n 'VENTA AL POR MENOR DE OTROS PRODUCTOS NUEVOS EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE PRENDAS DE VESTIR, CALZADO Y ARTÍCULOS DE CUERO EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE PRODUCTOS FARMACÉUTICOS Y MEDICINALES, COSMÉTICOS Y ARTÍCULOS DE TOCADOR EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE PRODUCTOS TEXTILES EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE PRODUCTOS TEXTILES, PRENDAS DE VESTIR Y CALZADO EN PUESTOS DE VENTA Y MERCADOS', \n 'VENTA AL POR MENOR EN COMERCIOS NO ESPECIALIZADOS CON PREDOMINIO DE LA VENTA DE ALIMENTOS, BEBIDAS O TABACO', \n 'VENTA AL POR MENOR POR CORREO Y POR INTERNET', \n #'VENTA DE VEHÍCULOS AUTOMOTORES' 128\n #'VENTA, MANTENIMIENTO Y REPARACIÓN DE MOTOCICLETAS Y DE SUS PARTES, PIEZAS Y ACCESORIOS.', \n #'VENTAS DE PARTES, PIEZAS Y ACCESORIOS PARA VEHÍCULOS AUTOMOTORES'\n ])\n Departament = st.sidebar.selectbox('Nombre del Departamento', ['AMAZONAS','AREQUIPA','ÁNCASH','APURÍMAC','AYACUCHO','HUANCAVELICA','HUÁNUCO','JUNÍN','MADRE DE DIOS','MOQUEGUA','PASCO','SAN MARTÍN','TACNA','TUMBES','UCAYALI','PUNO','LIMA','CALLAO','CUSCO','LA LIBERTAD','JUNÍN','CAJAMARCA','LAMBAYEQUE','LORETO'])\n Tama = st.sidebar.selectbox('Tamaño de Empresa', ['MICRO', 'PEQUEÑA','MEDIANA','GRANDE'])\n st.sidebar.header('Seguridad: No(0), Si(1))') \n F1 = st.sidebar.slider('Infraestructura física (alambrado, muros, etc.?', 0,1)\n F2 = st.sidebar.slider('Sistema de video y captura de imágenes?', 0,1)\n F3 = st.sidebar.slider('Sistema de control de acceso de personal?', 0,1)\n F4 = st.sidebar.slider('Sistema de alarma de seguridad electrónica?', 0,1)\n F5 = st.sidebar.slider('Seguridad para el traslado de valores?', 0,1)\n F6 = st.sidebar.slider('Seguridad para el traslado de bienes?', 0,1)\n F7 = st.sidebar.slider('Personal para resguardo (guardaespaldas)?',0,1)\n F8 = st.sidebar.slider('Personal de seguridad de bienes e inmuebles?', 0,1)\n \n features = {'acti2': acti2\t,\n 'Departament': Departament,\n 'Tama': Tama,\n 'F1': F1,\n 'F2': F2,\n 'F3': F3,\n 'F4': F4,\n 'F5': F5,\n 'F6': F6,\n 'F7': F7,\n 'F8': F8}\n data = pd.DataFrame(features,index=[0])\n\n return data", "def _request_donation():\n\n amt = _get_input(\"Enter a donation amount:\\n> \", _quit_responses, allow_new=True)\n if amt in _quit_responses:\n return\n else:\n return float(amt)", "def do_prompt(self):\n # we need _something_ in the dictionary even if the user decides to use all defaults\n # otherwise for some unknown reason it won't work\n user_in = {'__meta__': '__user_input__'}\n\n print('Please enter the information asked for in the following prompts in order to configure your deployment')\n # get the config information from the user\n for p in self.prompts:\n answer = input(p['prompt'])\n if len(answer.strip()) > 0 and 'variable' in p.keys():\n user_in[p['variable']] = answer\n\n # return the data\n return user_in", "def prompt_user_for_starting_balance():\n print('What starting account balance do you want to have for your new account?')\n return input()", "def _prompt_main_menu(self, update, context, message='Please choose an option:'):\n id = context.user_data['id']\n email = context.user_data['email']\n email = 'Not supplied' if email == '' else email\n 
self._reply_message(update,\n f'ID: {id}\\n'\n f'Email: {email}\\n'\n f'{message}',\n keyboard=self.MAIN_STATE_OPTIONS,\n inline_keyboard=True)", "def get_user_risk_tolerance_port():\n risk_tolerance = questionary.select(\"What's your risk tolerance\", \n choices=[\"Low\", \"Medium\", \"High\"]).ask()\n\n indexes = questionary.select(\"Of the following indexes, select the index that best reflects your current portfolio.\", \n choices=[\"NASDAQ\", \"Russel\", \"S&P 500\", \"EAFE\"]).ask()\n\n # crypto_benchmark = questionary.select(\"Would you like to benchmark your crypto against an\" + \n # \" index, a specific crypto, or a composite of cryptos?\", choices=[\"Index\", \"Crypto\", \"Crypto Composite\"]).ask()\n\n goals = questionary.text(\"Would you like to acomplish a dollar goal or a percentage return goal?\" +\n \" (Start with a '$' if dollar goal and with '%' if percent goal\").ask()\n\n invest_amount = int(questionary.text(\"How much would you like to invest?\").ask())\n\n stock_portfolio = {}\n adding = True\n while adding:\n sp = questionary.text(\"Enter stock from stock portfolio: \").ask()\n sw = int(questionary.text(\"Enter Share Amount: \").ask())\n cont = questionary.confirm(\"Continue?\").ask()\n stock_portfolio[sp] = sw\n if cont == False:\n adding = False\n\n user_dictionary = {\"Risk Tolerance\": risk_tolerance,\n \"Index\": indexes,\n \"Investment Goals\": goals,\n \"Investment Amount\": invest_amount,\n \"Stock Portfolio\": stock_portfolio}\n\n return user_dictionary", "def askText(parent,message,title='',default=''):\r\n dialog = wx.TextEntryDialog(parent,message,title,default)\r\n if dialog.ShowModal() != wx.ID_OK:\r\n dialog.Destroy()\r\n return None\r\n else:\r\n value = dialog.GetValue()\r\n dialog.Destroy()\r\n return value", "def eula_prompt():\n current_file = inspect.getfile(inspect.currentframe())\n current_dir = os.path.dirname(os.path.abspath(current_file))\n eula = os.path.join(current_dir, \"EULA.html\")\n form = cmds.setParent(q=True)\n cmds.formLayout(form, e=True, width=500)\n heading = cmds.text(\n l='Maya Cloud Rendering License Agreement', font=\"boldLabelFont\")\n text = cmds.text(l=\"By loading this plug-in you are agreeing to \"\n \"the following terms and conditions.\")\n if not os.path.exists(eula):\n raise RuntimeError(\"EULA notice not found at {0}\".format(eula))\n\n with open(eula, \"rb\") as eula_text:\n html = eula_text.read()\n unicode = html.decode(\"windows-1252\")\n encoded_str = unicode.encode(\"ascii\", \"xmlcharrefreplace\")\n read = cmds.scrollField(editable=False, wordWrap=True, height=300,\n text=unicode, backgroundColor=(1.0,1.0,1.0))\n agree = cmds.button(l='Agree', c='maya.cmds.layoutDialog( dismiss=\"Agree\" )' )\n disagree = cmds.button(l='Disagree', c='maya.cmds.layoutDialog( dismiss=\"Disagree\" )' )\n cmds.formLayout(form, edit=True,\n attachForm=[(heading, 'top', 10), (heading, 'left', 10),\n (heading, 'right', 10), (read, 'left', 10),\n (read, 'right', 10), (text, 'left', 10),\n (text, 'right', 10), (agree, 'left', 10),\n (agree, 'bottom', 10), (disagree, 'right', 10),\n (disagree, 'bottom', 10)],\n attachNone=[(text, 'bottom'), (read, 'bottom')],\n attachControl=[(text, 'top', 10, heading),\n (read, 'top', 10, text),\n (agree, 'top', 50, read),\n (disagree, 'top', 50, read)],\n attachPosition=[(agree, 'right', 5, 50),\n (disagree, 'left', 5, 50)])", "def input_prompt(self):\n return 'Stock code:'", "def Prompt(self,message):\n\t\tself.acad.ActiveDocument.Utility.Prompt(message)", "def ask_dialog(self, title=\"\", 
vars=[], help=\"\"):\n\t\t\n\t\t# save current values\n\t\tavars = []\n\t\tfor v in vars:\n\t\t\tavars.append(v.copy())\n\t\t\n\t\t# manage the dialog\n\t\tform = build_form(avars, self)\n\t\tdialog = Gtk.Dialog(\n\t\t\ttitle,\n\t\t\tself.win,\n\t\t\tGtk.DialogFlags.MODAL,\n\t\t\tbuttons = [Gtk.STOCK_OK, Gtk.ResponseType.OK, Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL])\n\t\tdialog.get_content_area().pack_start(form, True, True, 0)\n\t\tres = dialog.run()\n\t\tdialog.hide()\n\t\t\n\t\t# if cancelled, reset the variables\n\t\tif res != Gtk.ResponseType.OK:\n\t\t\treturn False\n\t\telse:\n\t\t\tfor i in range(0, len(vars)):\n\t\t\t\tvars[i].set(avars[i].get())\n\t\t\treturn True", "def UserMenu(self):\n prompt = \"\"\"\n (CD) Certificate of Deposit\n (MM) Money Market\n (MS) Money Savings\n (C) Checking\n Enter Account Type: \"\"\"\n done = 0\n while not done:\n choice = 0\n while not choice:\n try:\n option = raw_input(prompt).strip().upper()\n m = re.search(r'CD|MM|MS|C',option)\n if m:\n print \" Your preferred account type is \",option\n prompt2 = \"\"\"\n (=>) WithDrawal\n (<=) Deposit\n (-) Debit\n (+) Credit\n Enter Choice :\"\"\"\n else:\n print \"Invalid Transaction\"\n except(EOFError, KeyboardInterrupt):\n option = 'C'\n if option == 'E':\n choice = 1\n try:\n option1 = raw_input(prompt2).strip().upper()\n except(KeyboardInterrupt, EOFError):\n option1 = 'E'\n m1 = re.search(r'=>|<=|-|+',option1)\n if not m1:\n print \"Invalid option.. Try again\"\n else:\n choice = 1\n if option1 == '=>': self.Deposit()\n if option1 == '<=': self.Withdrawal()\n if option1 == '-': self.Debit()\n if option1 == '+': self.Credit()\n if option1 == 'E': done = 1", "def retrieve_deposit_agreement(self, dn=None, ResponseId=None, out_path='',\n browser=True):\n\n self.log.info(\"\")\n self.log.info(\"** RETRIEVING DEPOSIT AGREEMENT **\")\n\n if isinstance(ResponseId, type(None)):\n try:\n ResponseId, SurveyId = self.find_deposit_agreement(dn)\n self.log.info(f\"Qualtrics ResponseID : {ResponseId}\")\n self.log.info(f\"Qualtrics SurveyID : {SurveyId}\")\n except ValueError:\n self.log.warn(\"Error with retrieving ResponseId and SurveyId\")\n self.log.info(\"PROMPT: If you wish, you can manually enter ResponseId to retrieve.\")\n if self.interactive:\n ResponseId = input(\"PROMPT: An EMPTY RETURN will generate a custom Qualtrics link to provide ... \")\n self.log.info(f\"RESPONSE: {ResponseId}\")\n self.log.info(\"PROMPT: If you wish, you can manually enter SurveyId to retrieve.\")\n SurveyId = input(\"PROMPT: An EMPTY RETURN will generate a custom Qualtrics link to provide ... \")\n self.log.info(f\"RESPONSE: {SurveyId}\")\n else:\n self.log.info(\"Interactive mode disabled. Skipping manual input\")\n ResponseId = ''\n SurveyId = ''\n\n if ResponseId == '' or SurveyId == '':\n custom_url = self.generate_url(dn.name_dict)\n self.log.info(\"CUSTOM URL BELOW : \")\n self.log.info(custom_url)\n ResponseId = None\n\n if ResponseId != '':\n self.da_response_id = ResponseId\n\n if SurveyId != '':\n self.da_survey_id = SurveyId\n\n if not isinstance(ResponseId, type(None)):\n self.da_response_id = ResponseId\n\n if browser:\n self.log.info(\"Bringing up a window to login to Qualtrics with SSO ....\")\n webbrowser.open('https://qualtrics.arizona.edu', new=2)\n input(\"Press the RETURN/ENTER key when you're signed on via SSO ... 
\")\n else:\n self.log.info(\"CLI: Not opening a browser!\")\n\n full_url = f\"{self.dict['download_url']}?RID={ResponseId}&SID={SurveyId}\"\n\n # Retrieve PDF via direct URL link\n if out_path:\n if self.interactive:\n pdf_url = 'retrieve'\n else:\n pdf_url = ''\n while pdf_url == 'retrieve':\n pdf_url = input(\"To retrieve PDF via API, provide PDF URL here. Hit enter to skip : \")\n\n if not pdf_url: # Skip PDF retrieval\n break\n\n if 'qualtrics.com' in pdf_url and pdf_url.endswith(\"format=pdf\"):\n self.log.info(f\"RESPONSE: {pdf_url}\")\n try:\n out_pdf = join(out_path, 'Deposit_Agreement.pdf')\n urlretrieve(pdf_url, out_pdf)\n break\n except HTTPError:\n self.log.warning(\"Unable to retrieve PDF\")\n pdf_url = 'retrieve'\n else:\n pdf_url = 'retrieve'\n else:\n self.log.warn(\"No out_path specified. Skipping PDF retrieval\")\n\n if browser:\n webbrowser.open(full_url, new=2)\n else:\n self.log.info(\"Here's the URL : \")\n self.log.info(full_url)", "def button_fac_ent(self):\n invoice = self._fac_ent()\n\n # imprime factura\n datas = {\n 'ids': invoice.ids,\n 'model': 'account.report_invoice',\n 'form': invoice.read()\n }\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'aeroo_report_ar_einvoice',\n 'datas': datas,\n }", "def main():\n print(\"This system will complete a sales, item and advisor review.\")\n print(\"\")\n password_request() # must put in MAGIC to proceed\n data = get_sales_data()\n\n menu(data)", "def get_amount():\n while True:\n try:\n amount = input(\"How much did they donate: \")\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print(\"you have made an invalid choice, try again.\")" ]
[ "0.61911684", "0.6020045", "0.5912385", "0.58639073", "0.5774324", "0.5674912", "0.5621161", "0.5588697", "0.5453837", "0.54020584", "0.526715", "0.52277637", "0.5223498", "0.52142847", "0.52134585", "0.5163553", "0.51488286", "0.51200247", "0.5119526", "0.51182854", "0.5117991", "0.510629", "0.5095643", "0.5079274", "0.5076782", "0.50525045", "0.5035762", "0.5019389", "0.5005791", "0.4981426" ]
0.60426086
1
Saves the qualifying loans to a CSV file.
def save_qualifying_loans(qualifying_loans): # Usability dialog for savings the CSV Files. save_file = questionary.confirm("Would you like to save these loans to a file? y or n: ").ask() print("\n") if save_file: output = questionary.text("Please provide a name for your file. Ex: my_loans.csv: ").ask() csvpath = Path(output) save_csv(csvpath, qualifying_loans, header=None) print("\n","\u001b[32m",(csvpath),"\u001b[0m", "has been saved to:","\u001b[32m", csvpath.absolute(),"\u001b[0m","\n") else: print("Your file will NOT be saved. Goodbye", "\n") print("* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *") print("\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_to_csv(self):\r\n # Save the read values to a csv file\r\n with open(self.fname, \"a\") as f:\r\n wr = csv.writer(f, dialect='excel')\r\n wr.writerow([self.set_time, self.read_time_P_ac, self.read_time_P_bat,\r\n self.soc0, self.set_val, self.P_ac, self.P_bat])", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def write(self, args):\n\t\tnewcsvfile = self.filename[:len(self.filename)-4] + \"NEW.csv\" #clever naming MIGHT NEED TO CHANGE THIS LATER/OVERWRITE OLD FILE?\n\t\twith open(newcsvfile, 'wb') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(self.all_likes)", "def save_csv(data): \n bank_data = data\n\n #Creating headers for the csv file\n header = [\"Lender\", \"Max Loan Amount\", \"Max LTV\", \"Max DTI\", \"Max Credit Score\", \"Interest Rate\"]\n\n #Creating output path of the CSV file\n csvpath = Path(\"save_file.csv\")\n\n #Opening the csv file in csvpath by using the open() method\n with open(csvpath, \"w\", newline='') as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter = \",\")\n csvwriter.writerow(header)\n for row in bank_data:\n csvwriter.writerow(row)\n\n return data", "def save_indicator(table, target_path, var_name, geo):\n table.to_csv(f\"{target_path}/{var_name}.{geo}.csv\", index=False)", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)", "def save(self,name):\n with open(\"configurations/\" + name + \".csv\", \"w\", newline=\"\") as file:\n writer = csv.writer(file, dialect = \"excel\")\n writer.writerow([self.costs])\n for battery in self.batteries:\n writer.writerow([battery.id,battery.capacity,battery.location[0],battery.location[1]])\n for house in self.houses:\n if house.connection != \"NOT CONNECTED!\":\n if not house.connection == set():\n writer.writerow([house.id,house.connection.id,house.output, house.location[0],house.location[1]])", "def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))", "def save_csv(self):\n if not self.__is_csv():\n # creates the csv file if it did not exist.\n self.__create_csv()\n try:\n with open(self.__csv_file_name, 'a', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writerow(self.__values)\n except IOError: # this exception avoid a product does not have saved in csv file\n time.sleep(0.5)\n self.save_csv()\n # display on the screen what is being record on csv\n for key, value in self.__values.items():\n print('{}: {}'.format(key, value), end='; ' if key != 'url' else '\\n')", "def save(self, data, outpath):\n data.to_csv(outpath)", "def save_drill_to_goalie_profile(self):\n\n goalie_path = 
str(Path.home())+\"/Documents/ball_e_profiles/goalie_profiles/{goalie_name}/{goalie_name}.csv\".format(\n goalie_name=self.goalie_name)\n with open(goalie_path, 'a+', newline='') as file:\n csv_writer = csv.writer(file, delimiter=\",\")\n # Row written as \"Drill Name, MM/DD/YYYY\"\n drill_info = [\"{}\".format(self.drill_name.replace(\"_\", \" \").title()), \"{}\".format(\n datetime.datetime.today().strftime(\"%m/%d/%Y\"))]\n csv_writer.writerow(drill_info)", "def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))", "def save(self):\n\t\t# save self.dfAnalysis\n\t\tcsvPath = self._getSavePath()\n\t\tprint('saving:', csvPath)\n\t\tself.dfAnalysis.to_csv(csvPath)", "def save_csv(outfile, movies):\n writer = csv.writer(outfile)\n writer.writerow(['Title', 'Rating', 'Year', 'Actors', 'Runtime'])\n for movie in movies:\n writer.writerow(movie)\n\n # ADD SOME CODE OF YOURSELF HERE TO WRITE THE MOVIES TO DISK", "def write_into_csv(self, loc_details=[], itype='atm', mode='w'): \n \n if itype==\"brc\":\n csvfile_name = self.branch_file\n headers = self.branch_headers\n else:\n csvfile_name = self.atm_file\n headers = self.atm_headers\n\n with open(csvfile_name, mode, newline='') as csvfile:\n locwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_ALL)\n if mode=='w':\n locwriter.writerow(headers) \n\n for loc in loc_details:\n locwriter.writerow(loc)", "def save_csv_file(votes: dict) -> None:\r\n with open(\"votingList.csv\", \"w\", newline=\"\") as csvfile:\r\n writer = csv.writer(csvfile)\r\n writer.writerow([\"First Name\", \"Last Name\", \"Vote\"])\r\n for vote in votes.keys():\r\n entry = votes[vote]\r\n fst, snd = vote.split()\r\n writer.writerow([fst, snd, entry])", "def write(self):\n \n self.df.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/mls.csv')", "def save_to_csv(self, state):\n\n if state.rolls == [] or state.rolls is None:\n # this should never happen but cancel save process just in case\n return state\n\n if state.selection.category == \"misc\":\n misc = str(state.selection.dice_count) + \"D\" + str(\n state.selection.dice_eyes)\n else:\n misc = ''\n\n if state.selection.category in (\"skill\", \"spell\"):\n if state.mod + state.selection.value < 0:\n # attrs_string example: \"KL(14->12), IN(13->11), FF(12->10)\"\n attrs_list = [i.abbr + '(' + str(i.value) + \"->\" +\n str(i.modified) + ')' for _, i in\n enumerate(state.attrs)]\n else:\n attrs_list = [i.abbr + '(' + str(i.value) + ')' for _, i in\n enumerate(state.attrs)]\n attrs_string = \"; \".join(map(str, attrs_list))\n else:\n attrs_string = ''\n\n # join list of rolls to string\n rolls = \"; \".join(map(str, state.rolls))\n\n desc = f\"Roll#{state.counter}: {state.desc}\"\n # comma is used as delimiter in csv\n desc = desc.replace(\",\", \";\")\n\n timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n\n save_values = [state.current_hero + \".xml\",\n state.selection.category,\n state.selection.name,\n misc,\n state.selection.value,\n state.mod,\n attrs_string,\n rolls,\n state.result,\n desc,\n timestamp,\n state.dice]\n\n with open(self._result_csv, \"a\", encoding=\"utf-8\") as csv_file:\n file_writer = csv.writer(csv_file,\n delimiter=',',\n quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n file_writer.writerow(save_values)\n\n # only saved rolls increase the roll count\n state.counter += 1\n\n return state", "def safeIndicators(stock, safe_path, stock_name):\r\n stock.to_csv('{}{}_indicators.csv'.format(safe_path, stock_name), index = False)\r\n print('safed data 
to {}{}_indicators.csv'.format(safe_path, stock_name))", "def save_file(self):\n # paginate over deputies and senators getting their fields\n fieldnames = set([])\n congressmen = self.deputies + self.senators\n for data in congressmen:\n fieldnames = fieldnames.union(data.dump().keys())\n\n\n with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=list(fieldnames), delimiter=';')\n writer.writeheader()\n\n for data in congressmen:\n writer.writerow(data.dump())", "def save_to_csv(data):\n print(\"Saving file...\")\n\n data = [\"year,rank,company,revenue ($ millions),profit ($ millions)\"] + data\n data = [row.replace(\", \", \"; \").replace(\"\\\"\", \"\") for row in data] # list comprehension\n\n with open(CSV_PATH, \"w\", newline=\"\", encoding=\"utf-8\") as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=\",\")\n for row in data:\n spamwriter.writerow(row.split(\",\"))", "def saveCSV(self, filename='cbpGetHistoricRates.csv'):\n\n p = re.compile(r\"^[\\w\\-. ]+$\")\n if not p.match(filename):\n raise TypeError('Filename required.')\n\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n try:\n self.df.to_csv(filename)\n except OSError:\n print('Unable to save: ', filename)", "def save_csv(outfile, cities):\n writer = csv.writer(outfile)\n writer.writerow(['Name'])\n for row in cities:\n writer.writerow([row])", "def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])", "def _save_log(self, save_dir, data):\n date = datetime.datetime.today().strftime('%Y-%m-%d')\n file_dir = os.path.join(save_dir, date + \".csv\")\n with open(file_dir, 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(data)", "def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])", "def SaveToCSV(self):\n import csv \n csvfile = open(f\"Cache/{self.symbol}.csv\", \"w\", newline='')\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow([self.symbol, self.name, self.market])\n writer.writerow(['Latest P/E Ratio:', self.pe_ratio])\n writer.writerow(['Short Percent of Float:', self.short_percent_of_float])\n writer.writerow(['Date', 'Price', 'Dividend', 'Annualized Dividend'])\n for snapshot in self._history:\n writer.writerow([snapshot.date.strftime(\"%m/%d/%Y\"), snapshot.price, snapshot.dividend, snapshot.annualDividend])\n csvfile.close()\n print(f\"{self.name} saved to /Cache/{self.symbol}.csv\")", "def saveCSV(name, ra, dec, ang):\n r = res(ra,dec,ang)\n return r.write('{}.csv'.format(name), overwrite = True)", "def save_csv(filename, save_list):\n with open(filename, mode='w') as csv:\n csv.writelines([','.join(item) + '\\n' for item in save_list])", "def guardar_CSV(self):\n participantes = self.__disparos.copy()\n archivo = input(\"Ingrese nombre del archivo: \")\n with open(f\"{archivo}.txt\", 'a') as csv_file:\n campos = ['idDisparo', 'nroParticipante', 'nombre', 'apellido', 'edad', 'sexo', 'disparos', 'mejor_disparo', 'promedio', 'puntaje_total']\n csv_writer = csv.DictWriter(csv_file, fieldnames=campos)\n csv_writer.writeheader()\n for linea in participantes:\n csv_writer.writerow(linea)\n print(\n f\"\"\"\n 
==========================================\n == SE HAN GUARDADO LOS DATOS ==\n ==========================================\n \"\"\"\n )" ]
[ "0.6966328", "0.6870253", "0.68277895", "0.67218447", "0.6585621", "0.65582293", "0.6475791", "0.64576596", "0.64095336", "0.6373284", "0.6370008", "0.6350625", "0.63475597", "0.6340548", "0.6335227", "0.6317062", "0.62919617", "0.62722164", "0.6265656", "0.62428063", "0.6209885", "0.61815304", "0.6172472", "0.61518335", "0.6144356", "0.61413735", "0.61351895", "0.61131185", "0.61117464", "0.6102278" ]
0.7288751
0
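The positive document in the entry above leans on the third-party questionary prompts and a save_csv helper defined elsewhere in its codebase, so it is not runnable on its own. Below is a minimal standard-library sketch of the actual write step; the function name write_loans_csv and the two sample loan rows are invented for illustration, and the header row is borrowed from the save_csv negative in the same entry.

    import csv
    from pathlib import Path

    def write_loans_csv(csvpath, rows, header=None):
        # Plain-stdlib equivalent of the save step: optional header, one loan per row.
        with open(csvpath, "w", newline="") as csvfile:
            writer = csv.writer(csvfile)
            if header:
                writer.writerow(header)
            writer.writerows(rows)

    # Illustrative data only; real rows would come from the loan-qualifier step.
    qualifying_loans = [
        ["Lender A", 300000, 0.85, 0.47, 740, 4.35],
        ["Lender B", 250000, 0.80, 0.44, 720, 4.20],
    ]
    header = ["Lender", "Max Loan Amount", "Max LTV", "Max DTI", "Max Credit Score", "Interest Rate"]
    write_loans_csv(Path("qualifying_loans.csv"), qualifying_loans, header)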
Remove a preference from the config file (deprecation).
def remove(key: str): global PREFERENCES if PREFERENCES.get(key): del PREFERENCES[key] write_config(PREFERENCES)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_config(name):\n db = dbm.open(config_file, 'c')\n del db[name]\n db.close()", "def delsetting(name):\r\n if '__delattr__' in settings.__class__.__dict__:\r\n delattr(settings, name)\r\n else:\r\n delattr(settings._wrapped, name)", "def del_conf(self, path):\n\t\tself.monitor.removePath(path)\n\t\tself.cache.pop(path, None)", "def remove_option(self, option):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n RawConfigParser.remove_option(self, section, key)\n self._dirty = True", "def remove_local_config(self):\n with ignored(OSError):\n os.remove(os.path.join(self.rundir, const.LOCAL_CONFIG_FILE))", "def discard_config(self):\n raise NotImplementedError", "def handle_remove_setting(event):\n forex_type, currency_type, price_type = None, None, None\n tokens = event.message.text.split(\" \")\n if len(tokens) >= 4:\n forex_type = ForexType.get_type(tokens[1])\n currency_type = CurrencyType.get_type(tokens[2])\n price_type = PriceType.get_type(tokens[3])\n\n if forex_type is None or currency_type is None or price_type is None:\n line_bot.replyMessage(event.reply_token, \"設定格式錯誤\\n範例: '取消 買入 美元 低於'\")\n elif forex_notifier.removeNotify(event.source.user_id, currency_type, forex_type, price_type):\n line_bot.replyMessage(event.reply_token, \"成功設定-不通知\")\n else:\n line_bot.replyMessage(event.reply_token, \"設定失敗\")", "def remove_console_setting(db, linenum):\n config_db = db.cfgdb\n\n table = \"CONSOLE_PORT\"\n\n data = config_db.get_entry(table, linenum)\n if data:\n config_db.mod_entry(table, linenum, None)\n else:\n ctx = click.get_current_context()\n ctx.fail(\"Trying to delete console port setting, which is not present.\")", "def delKey(self, key ):\n if key in self.conf:\n del self.conf[key]", "def stop(self):\n self._unbind_observers()\n self._pref_decls.clear()\n pref_path = os.path.join(self.default_folder, self.default_file)\n try:\n prefs = ConfigObj()\n prefs.update(self._prefs)\n prefs.filename = pref_path\n prefs.write()\n except Exception:\n print 'Invalid pref path'\n\n def_path = os.path.join(MODULE_PATH, 'default.ini')\n try:\n defaults = ConfigObj(def_path)\n defaults['folder'] = self.default_folder\n defaults['file'] = self.default_file\n defaults.write()\n except Exception:\n print 'Invalid default pref path'", "def remove_section(self,name):\n if self.__config.has_section(name):\n if name in self.__optionstruct:\n raise Exception(_('EVOGTK: Can\\'t remove section \"%s\" because it\\'s part preferences instance basic struct') % name)\n self.__config.remove_section(name)\n else:\n raise Exception(_('EVOGTK: Section \"%s\" does not exist in this preferences instance') % name)", "def remove_config_object() -> None:\n if G_CONFIG_OBJECT:\n G_CONFIG_OBJECT.clear()", "def remove_option(self, option):\n self.__options.pop(option)", "def switch_off_key(self, key):\n if key not in self.switched_off_keys:\n self._switched_off_keys.append(key)\n self._config[\"# \"+key] = self._config.pop(key)", "def deconfigure(self):\n\n pass", "def _delete_option(key: str) -> None:\n try:\n del _config_options_template[key]\n del cast(Dict[str, ConfigOption], _config_options)[key]\n except Exception:\n # We don't care if the option already doesn't exist.\n pass", "def DeleteSetting(appname, section, key):\n settings = _OptionsDB(appname)\n settings.delete(section, key)", "def delete(self, section, name):\n section = self._getSettingName(section)\n self._config.remove_option(section, name)\n self.save()", "def remove_prompt(name, 
delete_config):\n\n with open(DATABASE_FILE_PATH) as f:\n config = json.load(f)\n path = config[name]\n del config[name]\n\n with open(DATABASE_FILE_PATH, 'w') as f:\n json.dump(config, f)\n\n if delete_config:\n os.remove(path)", "def _remove_settings_file(self, server_id):\n\t\tsilent_remove(self.SettingsFolder + '{}.yml'.format(server_id))", "def prune_outdated_auth(config_file=None):\n if config_file is None:\n config_file = configure.get_config_path(\"general\")\n if not os.path.exists(config_file):\n return # nothing to do!\n with open(config_file, \"r\") as f:\n try:\n config_data = toml.loads(f.read())\n except Exception as ex:\n raise RuntimeError(f\"configuration file {config_file} is malformed: {ex}\")\n if \"auth\" in config_data:\n del config_data[\"auth\"]\n # only overwrite if we made a change\n with open(config_file, \"w\") as f:\n toml.dump(config_data, f)", "def clearSetting(self, name: unicode) -> None:\n ...", "def remove_stored_config(self):\n stored_config_filename = self.stored_config_filename\n if stored_config_filename.exists():\n stored_config_filename.remove()\n self._stored_cmake_generator = self._stored_config.cmake_generator", "def remove_setting(self, category, setting):\n category_instance = self.get_setting_category(category)\n if not category_instance:\n return\n if not setting in category_instance:\n return\n del category_instance[setting]\n settings = self.get_collection('_settings')\n\n if len(category_instance.keys()) == 1:\n settings.remove(category_instance['_id'])\n else:\n settings.update(category_instance)", "def close_preferences(self,event):\n self.Destroy()\n event.Skip()", "def remove_list_setting(self, category, setting, value):\n category_instance = self.get_setting_category(category)\n\n # To remove the value from the setting, the setting must exist\n if not category_instance:\n return\n if not setting in category_instance:\n return\n\n # Now lets try to remove the named setting\n try:\n category_instance[setting].remove(value)\n except ValueError:\n # It was not in the list.\n return\n\n settings = self.get_collection('_settings')\n settings.save(category_instance)\n return", "def remove_option(self, label):\n del self._options[label]\n index = self._menu.index(label)\n self._menu.delete(index, index)", "def remove(ctx, name, project_root):\n\n if name == 'logme':\n raise LogmeError(\"'logme' master logger configuration cannot be removed!\")\n\n with ensure_conf_exist(project_root) as logme_conf:\n\n config = read_config(logme_conf)\n config.remove_section(name)\n\n with logme_conf.open('w+') as conf:\n config.write(conf)", "def remove(self, key):\n key_str = self.optionxform(key)\n option_key = {\n 'product': self.product,\n 'section': self.name,\n 'option': key_str\n }\n try:\n setting = ProductSetting(self.env, keys=option_key)\n except ResourceNotFound:\n self.env.log.warning(\"No record for product option %s\", option_key)\n else:\n self._cache.pop(key, None)\n setting.delete()\n self.env.log.info(\"Removing product option %s\", option_key)", "def removeOption(self, *args):\n return _libsbml.ConversionProperties_removeOption(self, *args)" ]
[ "0.62867635", "0.6272085", "0.61597675", "0.5970315", "0.5912027", "0.5866205", "0.58238983", "0.5774526", "0.57573044", "0.5739794", "0.5707347", "0.56538785", "0.56532514", "0.56481534", "0.5622709", "0.5592793", "0.5589006", "0.5585213", "0.55792993", "0.55208313", "0.55136204", "0.5505198", "0.5454164", "0.5447728", "0.5412285", "0.5373313", "0.5368112", "0.53519946", "0.5349713", "0.53308403" ]
0.66292775
0
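The remove function in the entry above depends on a module-level PREFERENCES mapping and a write_config helper that are not shown. The sketch below assumes a JSON file as the backing store (CONFIG_PATH, load_prefs and remove_pref are illustrative names, not part of the original) and notes one behavioural detail: the original deletes only when PREFERENCES.get(key) is truthy, so a key stored with a falsy value would survive the call.

    import json
    from pathlib import Path

    CONFIG_PATH = Path("preferences.json")  # hypothetical path; the original's config location is not shown

    def load_prefs() -> dict:
        return json.loads(CONFIG_PATH.read_text()) if CONFIG_PATH.exists() else {}

    def remove_pref(key: str) -> None:
        # Drop the key if present, then persist the whole mapping again.
        # Unlike the original's PREFERENCES.get(key) guard, 'in' also removes
        # keys that happen to hold falsy values.
        prefs = load_prefs()
        if key in prefs:
            del prefs[key]
            CONFIG_PATH.write_text(json.dumps(prefs, indent=2))

    remove_pref("data_dir")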
Set the 'data_dir' preference to 'path'
def set_data_directory(path): if not os.path.exists(path): return False set("data_dir", path) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_data_directory(path):\n gdc19.DATA_DIRECTORY = path\n return gdc19.DATA_DIRECTORY", "def set_kale_data_directory(path):\n global KALE_DATA_DIRECTORY\n KALE_DATA_DIRECTORY = path\n # create dir if not exists\n if not os.path.isdir(KALE_DATA_DIRECTORY):\n os.makedirs(KALE_DATA_DIRECTORY, exist_ok=True)", "def changeDataPath(self,path):\n self.dataPath = path", "def setDataRoot(path):\n global dataRoot\n dataRoot = os.path.realpath(path)", "def set_data_dir(proj_data_dir):\n global _USER_PROJ_DATA\n global _VALIDATED_PROJ_DATA\n _USER_PROJ_DATA = proj_data_dir\n # set to none to re-validate\n _VALIDATED_PROJ_DATA = None", "def _set_cache_dir(self, path):\n assert path, 'Must input a directory path'\n self._cache_dir = path\n self.data['info']['root_cache_dir'] = self._cache_dir\n self.write_data_cache(self.data)", "def set_data_dir(directory=None, create=False, save=False):\n if directory is None:\n directory = _data_path\n if _data_path is None:\n raise IOError('default path cannot be determined, please '\n 'set it manually (directory != None)')\n if not op.isdir(directory):\n if not create:\n raise IOError('directory \"%s\" does not exist, perhaps try '\n 'create=True to create it?' % directory)\n os.mkdir(directory)\n config.update(data_path=directory)\n if save:\n save_config(data_path=directory)", "def data_dir(self, dd=None):\n self._data_dir = dd", "def set_data_dir(datadir, update_env=True):\n if os.path.isdir(datadir):\n _config.datadir = datadir\n\n if update_env:\n os.environ[\"TESSDATA_PREFIX\"] = _config.datadir\n else:\n _warn(\"set_data_dir: Invalid directory: '{0}'\".format(datadir))", "def _set_download_dir(self, path):\n assert path, 'Must input a non-empty path.'\n self.data['info']['root_downloads_dir'] = path\n self.write_data_cache(self.data)", "def set_data_directory(self, data_dir_path = '', verbose = False):\n try:\n if verbose: print(data_dir_path, self._default_data_dir_path)\n if os.path.isdir(os.path.abspath(data_dir_path)):\n self.data_directory = os.path.abspath(data_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(data_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_data_dir_path, UserWarning)\n self.data_directory = self._default_data_dir_path\n\n if not os.path.isdir(self.data_directory):\n if verbose: print(os.path.isdir(self.data_directory))\n raise errors.PathError(\"The default data directory '\" + self.data_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_data_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. Have\"\n + \" you messed with _default_data_dir_path?\")\n pass", "def set_data_directory(self, data_dir_path = '', verbose = False):\n try:\n if verbose: print(data_dir_path, self._default_data_dir_path)\n if os.path.isdir(os.path.abspath(data_dir_path)):\n self.data_directory = os.path.abspath(data_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(data_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_data_dir_path, UserWarning)\n self.data_directory = self._default_data_dir_path\n\n if not os.path.isdir(self.data_directory):\n if verbose: print(os.path.isdir(self.data_directory))\n raise errors.PathError(\"The default data directory '\" + self.data_directory\n + \"' doesn't exist. Or isn't a directory. 
Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_data_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. Have\"\n + \" you messed with _default_data_dir_path?\")\n pass", "def _get_data_directory(self, path=False):\n\n return self.data_directory", "def _set_cache_dir(self, path):\n assert path, 'Must input a directory path'\n self.manager.cache_dir = path", "def get_data_dir():\n return Path(current_app.config[\"USER_DIR\"]) / \"data\"", "def setDataDir(self, directory):\n if os.path.exists(directory):\n self.__datadir = directory\n print(\"Datadir setted to '%s'\" % directory)\n else:\n raise ValueError(\"Incorrect file path %s\" % directory)", "def setModelDataDir(self):\n datadir = QtGui.QFileDialog.getExistingDirectory(parent=self,\n caption=\"Where to save image data and statistics?\",\n dir=self.model.getDataDir())\n if len(datadir) > 0:\n self.model.setDataDir(datadir)\n else:\n self.cbxSaveImage.setCheckState(QtCore.Qt.Unchecked)", "def data_dir(self) -> Path:\n return self._data_dir", "def projectDir(self, path):\n logger.debug(\"Func: projectDir/setter\")\n self._pathsDict[\"projectDir\"] = path\n # self.init_paths()\n # self.init_database()", "def get_data_path():\n return os.getcwd() + \"/data/\"", "def data_dir():\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)", "def setDataPath(_path_data_bundle, _path_bin_data, preload=True, verbose=True):\n global path_bin_data\n global path_data_bundle\n path_data_bundle = _path_data_bundle\n path_bin_data = _path_bin_data\n if preload:\n loadExistent(verbose)", "def append_data_dir(proj_data_dir):\n set_data_dir(os.pathsep.join([get_data_dir(), proj_data_dir]))", "def data_dir():\n return _config.datadir", "def _set_download_dir(self, path):\n assert path, 'Must input a non-empty path.'\n self.manager.download_dir = path", "def data_dir(path=None, base=None, subdir=None, max_levels=100):\n path = path or _get_caller_path()\n return _data_science_dir(\n path=path, dirname='data', base=base,\n subdir=subdir, max_levels=max_levels)", "def data_path(path: str, createdir: bool = False) -> str:\n path_obj = Path(path)\n if not path_obj.is_absolute():\n if inside_project():\n path_obj = Path(project_data_dir(), path)\n else:\n path_obj = Path(\".scrapy\", path)\n if createdir and not path_obj.exists():\n path_obj.mkdir(parents=True)\n return str(path_obj)", "def get_data(path):\n return os.path.join(_ROOT, 'data', path)", "def setPath(*args):" ]
[ "0.800053", "0.76913375", "0.7490734", "0.7431895", "0.7141615", "0.70857567", "0.7005862", "0.695062", "0.69113535", "0.6862172", "0.6846355", "0.6846355", "0.678642", "0.668388", "0.66111124", "0.6585328", "0.6469561", "0.6453087", "0.64228076", "0.64039373", "0.63982743", "0.63716495", "0.6351841", "0.63253015", "0.6324374", "0.63188744", "0.63034314", "0.6262786", "0.62622666", "0.62526196" ]
0.82967794
0
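The companion setter in the entry above follows the same pattern with a path-existence guard in front. The self-contained sketch below (the prefs argument is an assumption standing in for the module-level preference store) only demonstrates the return values: True when the directory exists and the preference is recorded, False otherwise.

    import os
    import tempfile

    def set_data_directory(path, prefs):
        # Same guard as the positive document: refuse paths that do not exist.
        if not os.path.exists(path):
            return False
        prefs["data_dir"] = path
        return True

    prefs = {}
    assert set_data_directory(tempfile.gettempdir(), prefs) is True
    assert set_data_directory("/no/such/dir", prefs) is False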
Given a list of data, return the mean and standard deviation for the population.
def calc_mean_stdev(data): pop_stdev = pstdev(data) pop_mean = mean(data) return pop_mean, pop_stdev
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MeanAndStandardDeviation(data):\n n = len(data)\n if n == 0:\n return 0.0, 0.0\n mean = float(sum(data)) / n\n variance = sum([(element - mean)**2 for element in data]) / n\n return mean, math.sqrt(variance)", "def get_std_dev(self, data):\n mean = 0\n data_arr = []\n for i in data:\n data_arr.append(i[1])\n return statistics.stdev(data_arr)", "def stddev(self, num_list):\n try:\n mean = self.average(num_list)\n\n minus_mean = []\n\n for number in num_list:\n try:\n minus_mean.append((number - mean) ** 2)\n except Exception as e:\n print(\"Error: \", e)\n\n meany_mean = self.average(minus_mean)\n\n meany_mean = meany_mean ** .5\n\n except Exception as e:\n print(\"Error: \", e)\n\n return meany_mean", "def getMeanAndStd(dataset):\n meanAndStd = []\n for i in range(len(dataset[0])-1):\n column = [row[i] for row in dataset]\n mean = sum(column)/len(column)\n sigma = 0\n for datapoint in column:\n sigma += abs((datapoint - mean))**2\n \n std = sqrt(sigma/len(column))\n meanAndStd.append({\"mean\": mean, \"std\": std})\n\n return meanAndStd", "def stat(lst):\n n = float(len(lst))\n mean = sum(lst) / n\n stdev = sqrt((sum(x * x for x in lst) / n) - (mean * mean))\n return mean, stdev", "def meanstd(self):\n\t\tmean = [125.3, 123.0, 113.9] # R,G,B\n\t\tstddev = [63.0, 62.1, 66.7] # R,G,B\n\t\treturn [mean, stddev]", "def stdDev(data):\r\n sum = 0\r\n ave = average(data)\r\n for i in data:\r\n sum += (i-ave)**2\r\n return math.sqrt(sum/len(data))", "def _avg_sd_from_list(lst):\n arr = flex.double(lst)\n avg = round(flex.mean(arr), 5)\n std = round(arr.standard_deviation_of_the_sample(), 5)\n return avg, std", "def summarize(dataset):\n summaries = [(np.mean(attribute), np.std(attribute)) for attribute in zip(*dataset)]\n\n return summaries", "def GetMeansAndStdsFromList(lists):\n means = [np.mean(single_list) for single_list in lists]\n stds = [np.std(single_list) for single_list in lists]\n return means, stds", "def standardize( num_list):\n\n standard_dev = np.std(num_list)\n mean = np.mean(num_list)\n\n print(standard_dev)\n print(mean)\n\n result = list()\n\n for xx in num_list:\n result.append( (xx-mean)/standard_dev )\n\n return result", "def mean_list(data):\n return sum(data) / len(data)", "def stdev(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmean_values=column_matrix.std(0)\n\tstd_values=mean_values.tolist()\n\treturn std_values", "def standard_deviation(list):\n num_items = len(list)\n mean = sum(list) / num_items\n differences = [x - mean for x in list]\n sq_differences = [d ** 2 for d in differences]\n ssd = sum(sq_differences)\n\n\n variance = ssd / num_items\n\n sd = sqrt(variance)\n\n return sd", "def stddev(std_numbers):\n mean = sum(std_numbers) / float(len(std_numbers))\n sum_std = 0.0\n\n for x in std_numbers:\n sum_std += (mean - x) * (mean - x)\n\n variance = sum_std / float(len(std_numbers))\n stddev = math.sqrt(variance)\n\n return stddev", "def deviationAvg(xs):\n\treturn deviation(xs) / sqrt(len(xs))", "def mean_std_calc(dataloader):\n mean = 0\n std = 0\n samples = 0\n for data, _, _ in dataloader:\n batch_samples = data.size(0)\n data = data.view(batch_samples, data.size(1), -1)\n mean += data.mean(2).sum(0)\n std += data.std(2).sum(0)\n samples += batch_samples\n\n return (mean / samples),(std / samples)", "def find_mean_std(self, data):\n if self._data_mean is None:\n self._data_mean = np.mean(data)\n if self._data_std is None:\n self._data_std = np.std(data)", "def sampleStandardDeviation(numlist):\n\tv = sampleVariance(numlist)\n\t#print 
v\n\treturn math.sqrt(v)", "def pstdev(data):\n n = len(data)\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n pvar = ss/n # the population variance\n return round(pvar**0.5, 1)", "def calc_standard_deviation(data: list) -> float:\n mean = calc_mean(data)\n acc = 0.0\n for n in data:\n acc += (n - mean) ** 2\n acc /= len(data) - 1\n return math.sqrt(acc)", "def get_stdev(cls, data: tuple or list, is_population=False) -> float:\n cls._data_validation(data)\n from math import sqrt\n return sqrt(cls.get_var(data, is_population))", "def get_std_dev(data, n = -1):\n mean = get_mean(data, n =n)\n\n deviations = []\n\n for i in range(0,n):\n deviations.append( (data[i] - mean)**2 )\n\n std_dev = sqrt( sum(deviations)/n )\n\n return std_dev", "def ComputeStats(data):\r\n avg = Mean(data)\r\n stdev = math.sqrt(Mean([(d-avg)**2 for d in data]))\r\n avgSq = Mean([d*d for d in data])\r\n stdevSq = math.sqrt(Mean([(d*d - avgSq)**2 for d in data]))\r\n corrs = [1]\r\n for i in range(1,5):\r\n cov = sum([(a*a-avgSq)*(b*b-avgSq)\r\n for (a, b) in zip(data[0:-i],data[i:])]\r\n ) / float(len(data) - i)\r\n corrs.append(cov/stdevSq/stdevSq)\r\n return avg, stdev, corrs", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def stdev(items):\n return Series.std(Series(items))", "def deviation(xs):\n\ta = avg(xs)\n\treturn sqrt(sum([(x - a) ** 2 for x in xs]) / (len(xs) - 1))", "def get_mean_and_std(dataset):\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=1, shuffle=True, num_workers=2\n )\n mean = torch.zeros(3)\n std = torch.zeros(3)\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:, i, :, :].mean()\n std[i] += inputs[:, i, :, :].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std", "def GetStandardDeviation(vals_l, mean):\n\n\n sum_deviations_squared = 0\n\n for x in vals_l:\n sum_deviations_squared += (x - mean)**2\n\n return math.sqrt(float(sum_deviations_squared)/float(len(vals_l)))" ]
[ "0.787826", "0.7444271", "0.72712225", "0.72424185", "0.7205196", "0.72044545", "0.71979964", "0.71734655", "0.71105754", "0.6991602", "0.6858698", "0.68321496", "0.67950445", "0.6756606", "0.6726477", "0.6724127", "0.67069155", "0.66939294", "0.6664016", "0.66629595", "0.66580844", "0.6646659", "0.662641", "0.66143876", "0.65624404", "0.65624404", "0.6540738", "0.65249157", "0.65244544", "0.6520238" ]
0.7527847
1
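The positive document in the entry above returns population statistics, which is the detail that separates it from most of the negatives: statistics.pstdev divides by n, while statistics.stdev and the hand-rolled n-1 versions estimate the sample standard deviation. A small worked example, with illustrative data:

    from statistics import mean, pstdev, stdev

    def calc_mean_stdev(data):
        # Population mean and standard deviation, matching the positive document above.
        return mean(data), pstdev(data)

    data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
    pop_mean, pop_sd = calc_mean_stdev(data)
    print(pop_mean, pop_sd)   # 5.0 2.0   (population standard deviation, n denominator)
    print(stdev(data))        # ~2.138    (sample standard deviation, n-1 denominator)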
Given a 1d array (list) of data, return the median and median absolute deviation (MAD) for the population.
def calc_median_mad(data): pop_median = median(data) mad = median([abs(x - pop_median) for x in data]) return pop_median, mad
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mad(arr):\n dev = np.array(arr, copy=True)\n med = np.median(dev, overwrite_input=True)\n return np.abs(arr - med)", "def mad(array, axis=None, keepdims=False):\n ad = np.abs(array - np.median(array, axis, keepdims=True))\n mad = np.median(ad, axis, keepdims=keepdims)\n return mad", "def get_median(numlist):\n return np.median(numlist)", "def MAD(X):\n return np.median(np.abs(X - np.median(X)))", "def get_mad(a, med):\n\n\tdiff = (a - med[:, np.newaxis])**2\n\tsq_diff = np.sqrt(diff)\n\tmad = np.median(sq_diff, axis = 1)\n\treturn (mad)", "def calc_median(data: list) -> float:\n if len(data) == 0:\n return 0.0\n py = copy.copy(data)\n py.sort()\n if len(py) % 2 == 1:\n return float(py[len(py) / 2])\n return (py[len(py) // 2] + py[len(py) // 2 - 1]) / 2", "def mad(x):\n median = np.median(x, axis=0)\n diff = np.median(np.abs(median - x), axis=0)\n mad = 1.4826 * np.max(diff) * np.sqrt(\n x.shape[1]) * ((4./3.) / x.shape[0]) ** (.2)\n return mad", "def mad(\n data=None,\n as_sigma=True\n):\n\n if data is None:\n logger.error(\"No data supplied.\")\n\n this_med = np.median(data)\n this_dev = np.abs(data - this_med)\n this_mad = np.median(this_dev)\n if as_sigma:\n return (this_mad / 0.6745)\n else:\n return (this_mad)", "def mad(errors):\n med = np.median(errors)\n return np.median(np.abs(errors - med))", "def Median(data):\n return data.median()", "def median_list(data):\n length = len(data)\n if (length % 2 == 0):\n median = (data[(length)//2] + data[(length)//2-1]) / 2\n else:\n median = data[(length-1)//2]\n return median", "def median(data):\n data = sorted(data)\n data_len = len(data)\n if data_len == 0:\n raise StatisticsError('no median for empty data')\n if data_len % 2 == 1:\n return data[data_len // 2]\n if data_len % 2 == 0:\n i = data_len // 2\n return (data[i - 1] + data[i]) / 2", "def get_median(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n from math import floor\n # Sort the data\n sorted_data = sorted(list(data))\n n = len(sorted_data)\n # get the middle index\n odd_middle_index = floor(n / 2)\n upper_even_index = floor(n / 2)\n lower_even_index = floor(n / 2) - 1\n # print(f\"\\nodd_middle = {odd_middle_index}\")\n # print(f\"upper_even_middle = {upper_even_index}\")\n # print(f\"lower_even_middle = {lower_even_index}\")\n if n % 2 == 1:\n return float(sorted_data[odd_middle_index])\n # If n is even, gets the average of the middle two values\n else:\n median_lower = sorted_data[lower_even_index]\n median_upper = sorted_data[upper_even_index]\n return_median = (median_lower + median_upper) / 2\n return float(return_median)", "def mad(nparray):\n\n\treturn np.median(np.fabs(nparray - np.median(nparray)))", "def mad_median(y):\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n return np.mean(np.abs(y - np.median(y)))", "def ComputeMedY(data):\n NBINSY=data.shape[0]\n NBINSX=data.shape[1]\n the_medianY=np.zeros(NBINSX)\n the_y=np.zeros(NBINSY)\n for ix in np.arange(NBINSX):\n the_ysum=np.sum(data[:,ix])\n for iy in np.arange(NBINSY):\n the_y[iy]=iy*data[iy,ix]\n if(the_ysum>0):\n med=np.sum(the_y)/the_ysum\n the_medianY[ix]=med\n return the_medianY", "def median_absolute_deviation(x: np.ndarray) -> np.ndarray:\n return np.median(abs(x - np.median(x)))", "def med_mad(x, factor=1.4826):\n med = np.median(x)\n mad = np.median(np.absolute(x - med)) * factor\n return med, mad", "def medAbsDev(series, scale=False):\n #ensure input is numpy array (and make 1-D)\n series = (np.array(series, dtype=float)).ravel()\n #mask for NaNs\n series = 
np.ma.masked_where(np.isnan(series),series)\n #get median absolute deviation of unmasked elements\n perc50 = np.median(series.compressed())\n mad = np.median(abs(series.compressed()-perc50))\n if scale:\n mad *= 1.4826 #scale so that MAD is same as SD for normal distr.\n return mad", "def movingMedian(data,window):\n \n mvavg = []\n npoints = len(data)\n for i in range(npoints-1):\n j1 = max(0,i-window)\n j2 = min(i+window,npoints-1)\n mvavg.append(num.median(data[j1:j2]))\n \n return num.array(mvavg)", "def test_median_absolute_deviation(self):\r\n data = [0, 0, 0, 0, 0, 0]\r\n expected = (0, 0)\r\n self.assertEqual(median_absolute_deviation(data), expected)\r\n data = [1, 1, 1, 1, 1, 1]\r\n expected = (0, 1)\r\n self.assertEqual(median_absolute_deviation(data), expected)\r\n data = [6, 1, 2, 9, 4, 1, 2]\r\n expected = (1, 2)\r\n self.assertEqual(median_absolute_deviation(data), expected)\r\n data = [-6, -1, -2, -9, -4, -1, -2]\r\n expected = (1, -2)\r\n self.assertEqual(median_absolute_deviation(data), expected)", "def mad_std(x, axis=None):\r\n return 1.4826 * np.nanmedian(np.abs(x - np.nanmedian(x, axis)), axis)", "def samples_median(samples):\n return [np.median(s) for s in samples.T]", "def samples_median(samples):\n return [np.median(s) for s in samples.T]", "def mad(v):\n return np.median(np.abs(v-np.median(v)))", "def median(array):\n sorted = [x for x in array]\n sorted.sort()\n middle = len(sorted)/2 #Gets the middle element, if present\n if len(sorted) % 2 == 0: #Even, so need to average together the middle two values\n return float((sorted[middle]+sorted[middle-1]))/2\n else:\n return sorted[middle]", "def compute_medians(window_means):\n return np.median(window_means, axis=0)", "def meanMedianDiff(nums):\n pass", "def median(values):\n # Write the median() function\n midpoint = int(len(values) / 2)\n if len(values) % 2 == 0:\n median = (values[midpoint - 1] + values[midpoint]) / 2\n else:\n median = values[midpoint]\n\n return float(median)", "def median(arr):\n indices = []\n\n list_size = len(arr)\n median = 0\n if list_size % 2 == 0:\n indices.append(int(list_size / 2) - 1) # -1 because index starts from 0\n indices.append(int(list_size / 2))\n median = (arr[indices[0]] + arr[indices[1]]) / 2\n else:\n indices.append(int(list_size / 2))\n median = arr[indices[0]]\n\n return median, indices" ]
[ "0.74380946", "0.7176177", "0.7115197", "0.70874095", "0.7085121", "0.70457613", "0.7031764", "0.70199966", "0.69804996", "0.69363016", "0.6933873", "0.6841836", "0.68270075", "0.6824112", "0.67681855", "0.67631644", "0.67438763", "0.66553277", "0.6653686", "0.65766096", "0.65765184", "0.65752876", "0.6573816", "0.6573816", "0.6507896", "0.64825445", "0.64797133", "0.6438756", "0.6425086", "0.6407172" ]
0.83040637
0
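The MAD returned by the positive document in the entry above is the raw median of absolute deviations, with no consistency scaling; several negatives multiply by 1.4826 so the result matches the standard deviation for normally distributed data. A worked example on a small illustrative list:

    from statistics import median

    def calc_median_mad(data):
        # Median and unscaled median absolute deviation, as in the positive document above.
        pop_median = median(data)
        return pop_median, median(abs(x - pop_median) for x in data)

    data = [1, 1, 2, 2, 4, 6, 9]
    med, mad = calc_median_mad(data)
    print(med, mad)       # 2 1
    print(1.4826 * mad)   # ~1.48, the scaled MAD that some of the negatives return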
For a given sample, return the column index of the sample in the header.
def get_sample_idx(sample, header): for item in header: if sample in item: return header.index(item) print(sample + " not found in header, check input files.") sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None", "def _get_header_index(self, columnname):\n\n return self.headers.index(columnname)", "def _get_header_position(header_row: List[str], column_title: str) -> int:\n for pos, column in enumerate(header_row):\n if column_title.lower() in column.lower():\n return pos\n\n raise Exception(\"Expected column header not found for {}\".format(column_title))", "def column_index(self, column_label):\n return self.column_labels.index(column_label)", "def getIndividual2ColIndex(cls, header, col_name2index, sampleStartingColumn=9):\n\t\tsys.stderr.write(\"Finding all individuals ...\")\n\t\tno_of_cols = len(header)\n\t\tindividual_name2col_index = {}\t#individual's column name -> an opened file handler to store genetic data\n\t\tcounter = 0\n\t\tfor i in xrange(sampleStartingColumn, no_of_cols):\n\t\t\tindividualName = header[i]\n\t\t\tcol_index = col_name2index.get(individualName)\n\t\t\tif not individualName:\t#ignore empty column\n\t\t\t\tcontinue\n\t\t\tif individualName[:-4]=='.bam':\n\t\t\t\tindividualCode = individualName[:-4]\t#get rid of .bam\n\t\t\telse:\n\t\t\t\tindividualCode = individualName\n\t\t\tindividual_name2col_index[individualCode] = col_index\n\t\t\tcounter += 1\n\t\tsys.stderr.write(\"%s individuals added. Done.\\n\"%(counter))\n\t\treturn individual_name2col_index", "def test_get_sample_col():\n # GIVEN a line with commas as delimiter\n line = \"one two SampleID\"\n # WHEN finding the sample col\n col_nr = samplesheet.get_sample_col(line.split(\" \"))\n # THEN assert correct col nr is returned\n assert col_nr == 2", "def get_column(filename, column_name):\n with open(filename) as f:\n for header in f:\n columns = header.rstrip().split(\"\\t\")\n return columns.index(column_name)", "def column_index(input_file, name):\n col, com = find_columns(input_file)\n col_name = name\n contents = open(input_file, 'r').readlines()\n for line in contents:\n if com[col.index(col_name)] in line:\n line_index = contents.index(line)+1\n return line_index", "def test_get_sample_name():\n # GIVEN a line with commas as delimiter\n line = \"one two SampleName\"\n # WHEN finding the sample col\n col_nr = samplesheet.get_sample_name_col(line.split(\" \"))\n # THEN assert correct col nr is returned\n assert col_nr == 2", "def column_index(self, column_name: str) -> int:\n return self._column_indices[column_name]", "def GetColumn(self, column):\r\n \r\n return self._header_win.GetColumn(column)", "def get_index(self, column):\r\n\r\n\t\treturn self.columns.index(column)", "def getColIdx(self, col):\n try: \n return int(col)\n except:\n return ord(col)-ord('a')", "def getIndex(self,filt):\n indx = [i for i in xrange(len(self._header)) if filt == self._header[i]]\n return indx", "def getColIdx(self, col):\n try:\n return int(col)\n except:\n return ord(col)-ord('a')", "def get_index_in_table_column(self, table_locator, col, expected, loglevel='INFO'):\n has_head=0\n element = self._table_element_finder.find_by_header(self._current_browser(), table_locator, None)\n if element is not None:\n has_head = 1\n index = self._table_element_finder.find_in_col(self._current_browser(), table_locator, col, expected)\n if index <= 0:\n self.log_source(loglevel)\n raise AssertionError(\"Column #%s in table identified by '%s' \"\n \"should have contained text '%s'.\"\n % (col, table_locator, expected))\n return index+has_head", "def get_column_index(i, inputs):\n if isinstance(i, int):\n if i 
== 0:\n # Useful shortcut, skips the case when end is None\n # (unknown dimension)\n return 0, 0\n vi = 0\n pos = 0\n end = inputs[0][1].shape[1]\n if end is None:\n raise RuntimeError( # pragma: no cover\n \"Cannot extract a specific column %r when \"\n \"one input (%r) has unknown \"\n \"dimension.\" % (i, inputs[0]))\n while True:\n if pos <= i < end:\n return vi, i - pos\n vi += 1\n pos = end\n if vi >= len(inputs):\n raise RuntimeError( # pragma: no cover\n \"Input %r (i=%r, end=%r) is not available in\\n%r\" % (\n vi, i, end, pprint.pformat(inputs)))\n rel_end = inputs[vi][1].shape[1]\n if rel_end is None:\n raise RuntimeError( # pragma: no cover\n \"Cannot extract a specific column %r when \"\n \"one input (%r) has unknown \"\n \"dimension.\" % (i, inputs[vi]))\n end += rel_end\n else:\n for ind, inp in enumerate(inputs):\n if inp[0] == i:\n return ind, 0\n raise RuntimeError( # pragma: no cover\n \"Unable to find column name %r among names %r. \"\n \"Make sure the input names specified with parameter \"\n \"initial_types fits the column names specified in the \"\n \"pipeline to convert. This may happen because a \"\n \"ColumnTransformer follows a transformer without \"\n \"any mapped converter in a pipeline.\" % (\n i, [n[0] for n in inputs]))", "def getExcelColumnNumber(excel_header: str):\n col_number = 0\n for character in excel_header:\n if character == 'A':\n col_number += 1\n\n return 0", "def get_column_index(self, colName):\n\t\treturn self._columns[colName]", "def sample_name_colname(self):\n return SAMPLE_NAME_ATTR \\\n if SAMPLE_NAME_ATTR == self.st_index else self.st_index", "def _get_col(self, idx):\n return self.line[self._fwf.column_slices[idx]]", "def index_value(self):\r\n\t\tfor index, column_header in enumerate(self.header_row):\r\n\t\t\tprint(index, column_header)", "def getColumnIndex(boardName, bltName):\n boardDict = columnIndexDict[boardName]\n columnIndex = boardDict[bltName]\n\n return columnIndex", "def get_column_index(infile, column: str, sep: str=\",\"):\n return list_from_line(infile.readline()).index(column)", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def __get_column(self, index: int) -> int:\n return index % self.columns", "def get_rownumber(self, first_col_val):\n\n try:\n (col_name, col_contents) = self.data[0]\n col_data = [col_name] + col_contents\n return col_data.index(first_col_val)\n except ValueError:\n return None", "def loadHeaderSamplesFile(self, line):\n try:\n line.decode('ascii')\n except UnicodeDecodeError as err:\n raise\n tokens = line.rstrip(\"\\n\").split(\"\\t\")\n if len(tokens) < 12:\n msg = \"header should have at least 12 tab-separated columns\"\n raise ValueError(msg)\n for idx,tok in enumerate(tokens):\n if tok in self.samplesCol2idx:\n self.samplesCol2idx[tok] = idx\n for samplesCol,idx in self.samplesCol2idx.items():\n if idx is None:\n msg = \"column '%s' not found in samples file\" % samplesCol\n raise ValueError(msg)" ]
[ "0.76551473", "0.7203461", "0.66604096", "0.6617303", "0.64651644", "0.6426487", "0.6422889", "0.6335221", "0.6291486", "0.61781806", "0.6107511", "0.6099734", "0.60810536", "0.6061681", "0.60574603", "0.6048488", "0.5998556", "0.59846556", "0.59677076", "0.59497976", "0.5872348", "0.584866", "0.5838557", "0.5824815", "0.5808884", "0.5808884", "0.5808884", "0.5775262", "0.5771481", "0.5748239" ]
0.75644857
1
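The lookup in the entry above is a substring match ("if sample in item"), so a query like sampleA also hits sampleA.bam, and an ambiguous prefix such as sample1 would match sample10 first. The sketch below uses an invented VCF-style header and raises ValueError instead of calling sys.exit, a deliberate change so the example is testable:

    header = ["#CHROM", "POS", "REF", "ALT", "sampleA.bam", "sampleB.bam"]

    def get_sample_idx(sample, header):
        # Substring match, as in the positive document: the first column whose
        # name contains the sample string wins.
        for item in header:
            if sample in item:
                return header.index(item)
        raise ValueError(f"{sample} not found in header")

    print(get_sample_idx("sampleA", header))  # 4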
Call systematic commands without return.
def callSys(cmds): for c in cmds: print c try: os.system(c) except: print "ERROR for %s" % c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call_sys(cmds):\n for c in cmds:\n logger.info(c)\n try:\n os.system(c)\n except:\n logger.error(c)", "def execute(cmd) :\n return os.system( cmd )", "async def execute_system(self):\n return True", "def system(self,cmd):\n code = 'import os;f=os.popen(\"%s\");res = f.read(-1);f.close();' % cmd\n return self.exec_code(code,returns=['res'])", "def normal(self):\n self.run_command('normal')", "def os_system(command):\n os.system(command)", "def runCommand(command):\n None", "def do_command(self, args):\n pass", "def systemCommand(command):\n\n commStatus, commOut = commands.getstatusoutput(command)\n # If our command fails, abort entirely and notify CloudKick\n if commStatus != 0:\n sys.stderr.write('Error: Failure when executing the following ')\n sys.stderr.write(\"command: '%s'\\n\" % (command,))\n sys.stderr.write(\"Exit status: %d\\n\" % (commStatus,))\n sys.stderr.write(\"Output: %s\\n\\n\" % (commOut,))\n sys.stderr.write('status err System command failure: ')\n sys.stderr.write('%s\\n' % (command,))\n sys.exit(1)\n # If we get a 0 exit code, all is well. Return the data.\n else:\n return commOut", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def os_exec(self, cmd, **kwargs):\n pdb.set_trace()\n try:\n retv = os.system(cmd)\n print(\"Got retv: {}\".format(retv))\n if retv != 0:\n print(\"\\t{} |{}| Got incorrect retv {}!\".format(Timer.UNKN,self.tinfo['name'], retv))\n return\n else:\n print(\"\\t{} |{}| Executed system command successfully.\".format(Timer.OK, self.tinfo['name']))\n return True\n\n except PermissionError as e:\n print(\"{} Permission error in os_exec.\".format(Timer.FAIL, e))\n return False\n except Exception as e:\n print(\"{} Caught exception in os_exec: {}\".format(Timer.FAIL, e))\n return False", "def execute_command(self):\n raise Exception(\"Not implemented\")", "def system_call(command):\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n return process.communicate()[0]", "def system_command(cmd, logger, throw_exception=True, return_output=False, issue_error=True, timeout=None):\n\n # launch command\n status = 0\n try:\n logger.debug('SysCmd: '+cmd)\n s = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n except Exception as e:\n if issue_error:\n logger.error(e)\n logger.error('Problem with launching system command: '+cmd)\n status = -1\n\n # wait for command termination\n if not status:\n try:\n if timeout_supported and timeout:\n s.wait(timeout=int(timeout))\n else:\n s.wait()\n except subprocess.TimeoutExpired:\n logger.error('Timeout for system command: '+cmd)\n s.kill()\n s.wait()\n status = -2\n except Exception as e:\n logger.error(e)\n logger.error('Problem with waiting for system command: '+cmd)\n status = -3\n\n # wait for command termination and log output to logger\n lines=''\n if status != -1:\n while True:\n line = s.stdout.readline()\n if not line:\n break\n lines += line.decode('ascii')\n if not return_output:\n logger.debug(' Out: '+line.rstrip())\n if not status:\n status = s.returncode\n\n # trow exception if requested\n if status: \n if throw_exception:\n raise StopError('Error with system command: '+cmd)\n else:\n if issue_error:\n logger.error('Error with system command: '+cmd)\n\n if return_output:\n return (status,lines)\n else:\n return status", "def command():\n pass", "def system(cmds):\n if isinstance(cmds, six.string_types):\n cmds = [cmds]\n\n output = None\n if isinstance(cmds, (tuple, list)):\n for cmd in 
cmds:\n logger.debug(cmd)\n\n try:\n output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n if output:\n logger.info(output.decode('utf-8'))\n\n\n except subprocess.CalledProcessError as e:\n if e.returncode != 2:\n msg = \"Command failed: \\n {} \\n \\n Return code: {} \".format(cmd, e.returncode)\n logger.error(msg)\n logger.error(e.output.decode(\"utf-8\"))\n\n sys.exit(1)\n\n else:\n raise TypeError(\"cmd argument is wrong type\")\n\n return output", "def commands():", "def sys_cmd(cmd: list) -> str:\n\n out, err = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()\n # Checking return code\n if err != b\"\":\n log.error(err.decode())\n notify_owner(f\"Exited(1) for: {err.decode()}\")\n exit(1)\n else:\n return out.decode()", "def os_system(cmd):\n print cmd\n failure = os.system(cmd)\n if failure:\n print \"\"\"Command\n %s\nfailed\"\"\" % cmd\n sys.exit(1)\n unix_command_recorder.append(cmd) # record command for bash script", "def LaunchAndWait(cmd):\n call(cmd)", "def run_cmd(self, cmd, timeout,\n force_execution=False,\n wait_for_response=True,\n silent_mode=False):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def run(cmd):\n print(cmd)\n r = os.system(cmd)\n if r:\n print(\"ERROR: command returned {0}\".format(r))\n sys.exit(r)", "def system(command):\n print('[system] {}'.format(command))\n p = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n rc = p.returncode\n if PY3:\n output = output.decode(\"ascii\")\n err = err.decode(\"ascii\")\n return rc, output, err", "def send_command(self, cmd, shell=None, silent=False):", "def execute(cmd_string):\n pass", "def runCommand(self): \\\n # pylint: disable=no-self-use", "def do_sh(self, none):\n print(\"**** Not Implemented\")" ]
[ "0.753476", "0.7109839", "0.70645314", "0.6797796", "0.6696888", "0.66589797", "0.6594726", "0.6555924", "0.65522206", "0.6532648", "0.6532648", "0.6532648", "0.6532648", "0.6515407", "0.65128255", "0.64893115", "0.64536023", "0.6442296", "0.64369935", "0.64029336", "0.6389658", "0.637754", "0.6372708", "0.6342797", "0.6336674", "0.63225645", "0.63057446", "0.629258", "0.6285451", "0.62534785" ]
0.7443336
1
Adds a plugin's template path to the templating engine
def register(name, templating=True): if name in _plugins: return import template _plugins.append(name) if templating: path = os.path.normpath(os.path.join( os.path.dirname(monkey.__file__), '../plugins/%s/templates' % name)) template.add_template_path(path) template.add_template_path(path, prefix=name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_template_renderer(\n self, plugin, template_name, context=default_context\n ):\n self._renderers[plugin] = (template_name, context)", "def render_template(self, tmpl_name, **kwargs):\n tmpl = self.tplenv.get_template(tmpl_name)\n return tmpl.render(plugin_shortname=self.plugin.get_shortname(), plugin_version=self.plugin.get_version(),\n plugin_info=self.plugin.get_info(), p=self.plugin,\n **kwargs)", "def setup_templates(self):\n self.libs[\"template\"] = (\"#libs/templates/include\", None, \"\")\n self[\"CPPPATH\"].append(\"#libs/templates/include\")", "def generate_plugin_template(\n specs_path: str,\n caption: str,\n plugin_id: str,\n author_email: str,\n author_name: str,\n dst_path: Path,\n):\n hm_generator = HookManGenerator(hook_spec_file_path=specs_path)\n hm_generator.generate_plugin_template(\n caption=caption,\n plugin_id=plugin_id,\n author_email=author_email,\n author_name=author_name,\n dst_path=Path(dst_path),\n )\n return 0", "def include_abs_path_in_templates(file_path):\n template_path = get_abs_path(file_path, 'views')\n TEMPLATE_PATH.insert(0, template_path)", "def _add_template(self, alias, template):\n # Construct a function that will do substitution for any placeholders\n # in the template.\n def fname(**kwargs):\n return _substitute(template, self.files(), kwargs)\n\n # Bind the fname function to this instance of FileNames\n self.__dict__[alias] = fname", "def template_path(self):\n return self.get_config(\"templates\")", "def get_template_path(relative_path, **kwargs): # lint-amnesty, pylint: disable=unused-argument\n return relative_path", "def create_template_loader(self, template_path):\n raise NotImplementedError()", "def _update_template(template_path):\n template_definition = template_path\n\n # template output directory is output/templates, so need to create that location before pulling out the templates\n template_location = template_utilities.get_template_directory()\n\n # Install the template and get the path to the template directory for updating the configuration file.\n templates_path = template_utilities.install_template(template_location, template_definition)\n\n if templates_path:\n # Now need to find the templates definition of that zip file and locate it in the file system so that it can be\n settings = get_configuration()\n\n # Override the configuration details with the new template path. 
This should probably be handled by the\n # publishing plugin, but for now this will work\n settings.publishing.templates = str(templates_path.relative_to(get_configuration_root()))\n configuration_file_path = get_configuration_root() / 'config.yaml'\n\n dump_configuration(configuration_file_path, settings)", "def install(self, phase):\n # TODO: installation file ownership\n if not os.path.isdir(self._tplpath):\n self._logger.warning(\"{td} is not a directory, ignoring for templating\".format(td=self._tplpath))\n return\n\n self._logger.debug(\"Applying templates for phase {p}\".format(p=phase))\n\n for (root, dirs, files) in os.walk(self._tplpath):\n for tplfile in [file for file in files if file.endswith(\".tpl\")]:\n with open(os.path.join(root, tplfile), \"rb\") as tplfp:\n makot = Template(tplfp.read(), strict_undefined=True)\n\n install = {}\n makot.get_def(\"install\").render(i=install)\n install[\"dest\"] = os.path.join(self._image.path, \"origin\", *install[\"filename\"].split(os.sep))\n install[\"mode\"] = int(install.get(\"mode\", \"0644\"), 8)\n\n try:\n # Check the template is relevant to this phase of the build\n if phase not in install[\"phase\"]:\n continue\n except KeyError:\n pass\n\n self._logger.debug(\"Installing {filename} from template to {dest}\".format(**install))\n\n renderctx = {\n \"ymlcfg\": self._ymlcfg,\n \"vmyml\": self._vmyml,\n \"rootpath\": os.path.join(self._image.path, \"origin\"),\n \"phase\": phase\n }\n\n if os.path.isfile(install[\"dest\"]):\n # If there is an existing file at the location generate\n # a checksum for it. This can be used by a template to\n # a) decide how to render content based on current source\n # b) validate that the default file being replaced is the\n # the one template is relevant for, e.g. 
has upstream\n # made changes the template should account for.\n s256 = hashlib.sha256()\n with open(install[\"dest\"], \"rb\") as fp:\n while True:\n data = fp.read(16 * 4096)\n if not data:\n break\n s256.update(data)\n\n renderctx[\"sha256\"] = s256.hexdigest()\n self._logger.debug(\"Existing {filename} checksum {s}\".format(s=s256.hexdigest(), **install))\n else:\n renderctx[\"sha256\"] = None\n\n try:\n rendered = makot.render(**renderctx)\n except VMCPhaseError:\n continue\n\n try:\n # For some files we may be interested in the old\n # content to ensure that our template replaces\n # it with something compatible.\n if renderctx[\"sha256\"] not in install[\"sha256\"]:\n # The rendered file is not acceptable, check if\n # we already applied this template by examinging\n # the rendered digest before error.\n r256 = hashlib.sha256()\n r256.update(rendered.encode(\"utf-8\"))\n if renderctx[\"sha256\"] == r256.hexdigest():\n self._logger.warning(\"Template was already applied\")\n else:\n raise VMCTemplateChecksumError(\"unacceptable sha256: {c}\".format(c=renderctx[\"sha256\"]))\n except KeyError:\n pass\n\n try:\n # Create the installation path if it isn't already present\n insdir = os.path.join(os.sep, *install[\"dest\"].split(os.sep)[:-1])\n self._logger.debug(\"Creating {insdir} if necessary\".format(insdir=insdir))\n os.makedirs(insdir)\n except FileExistsError:\n if not os.path.isdir(insdir):\n raise\n\n with open(install[\"dest\"], \"wb\") as tplout:\n tplout.write(rendered.encode(\"utf-8\"))\n\n dstat = os.stat(install[\"dest\"])\n dmode = stat.S_IMODE(dstat.st_mode)\n if dmode != install[\"mode\"]:\n self._logger.debug(\"chmod {mode} {dest_oct}\".format(dest_oct=oct(install[\"mode\"]), **install))\n os.chmod(install[\"dest\"], install[\"mode\"])", "def template_path(name):\n template_dir = os.path.join(os.path.dirname(__file__), 'templates')\n return os.path.join(template_dir, (name + \".html\"))", "def get_template_path(self):\n raise NotImplementedError()", "def render_engine_or_search_template(template_name, **context):\n from indico_search.plugin import SearchPlugin\n assert current_plugin == SearchPlugin.instance\n\n templates = ('{}:{}'.format(SearchPlugin.instance.engine_plugin.name, template_name),\n template_name)\n return render_plugin_template(templates, **context)", "def test_register_template(self):\n pass", "def _generate_from_template(self, name, path, context):\n template = self._templates.get_template(name)\n with open(path, 'w') as f:\n f.write(template.render(context))", "def Template(self, *args, **kw):\n temp_wrapper = TemplateWrapper(*args, **kw)\n self.mTemplates.append(temp_wrapper)\n return temp_wrapper", "def template_loader(self):\n return None", "def templates_folder(self):\n return os.path.join(\n os.path.dirname(__file__), \"default_config\", \"divvy_templates\"\n )", "def django_template_include(file_name, mako_context):\r\n\r\n dictionary = dict(mako_context)\r\n return loader.render_to_string(file_name, dictionary=dictionary)", "def load_template_if_needed(self):\n\n class GeneratorProxy(object):\n \"\"\"\n An interface to templates and plugins for\n providing restricted access to the methods.\n \"\"\"\n\n def __init__(self, preprocessor=None, postprocessor=None,\n context_for_path=None):\n self.preprocessor = preprocessor\n self.postprocessor = postprocessor\n self.context_for_path = context_for_path\n\n if not self.template:\n logger.info(\"Generating site at [%s]\" % self.site.sitepath)\n self.template = Template.find_template(self.site)\n 
logger.debug(\"Using [%s] as the template\",\n self.template.__class__.__name__)\n\n logger.info(\"Configuring the template environment\")\n preprocessor = self.events.begin_text_resource\n postprocessor = self.events.text_resource_complete\n proxy = GeneratorProxy(context_for_path=self.context_for_path,\n preprocessor=preprocessor,\n postprocessor=postprocessor)\n self.template.configure(self.site,\n engine=proxy)\n self.events.template_loaded(self.template)", "def update():\n if Project.use_templates:\n defaults = _project_defaults()\n\n template = Template()\n\n for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))\n for herringlib in HerringFile.herringlib_paths]:\n\n info(\"template directory: %s\" % template_dir)\n # noinspection PyArgumentEqualDefault\n template.generate(template_dir, defaults, overwrite=False)", "def save_path(self):\n return self.template.manager.render_template_txt(self.path, self.template)", "def render_template():\n template_engine = engines['django']\n def func(template_string):\n load_tags_string = '{% load wagtailextensions_tags %}'\n return template_engine.from_string(load_tags_string + template_string).render()\n return func", "def render_legacy_template(template_path: str, context: Mapping[str, str]) -> str:\n\n # will be replaced by django templates in the future\n with open(template_path, 'r', encoding='utf-8') as template:\n template_str = template.read()\n return Template(template_str).substitute(**context)", "def init_templates( path=\"boilerplate\" ):\n global template_env\n template_loader = jinja2.FileSystemLoader(searchpath=\"boilerplate\" )\n template_env = jinja2.Environment(\n loader=template_loader,\n lstrip_blocks=True\n )", "def render(self, template_name, **kwargs):\n currentUser = self.current_user\n from_workspace_str = self.get_argument(\"from_workspace\", default=\"0\", strip=False)\n from_workspace = from_workspace_str == \"1\"\n html = self.render_string(template_name, currentUser=currentUser, from_workspace = from_workspace, **kwargs)\n if from_workspace :\n scriptName = self.__class__.__name__\n\n if scriptName.endswith('Handler') :\n scriptName = scriptName[:-7] \n\n path = self.static_url('scripts/' + scriptName + '.js')\n\n js = '<script src=\"' + escape.xhtml_escape(path) + '\" type=\"text/javascript\"></script>'\n html = html + utf8(js)\n self.finish(html)\n return\n\n # Insert the additional JS and CSS added by the modules on the page\n js_embed = []\n js_files = []\n css_embed = []\n css_files = []\n html_heads = []\n html_bodies = []\n for module in getattr(self, \"_active_modules\", {}).values():\n embed_part = module.embedded_javascript()\n if embed_part:\n js_embed.append(utf8(embed_part))\n file_part = module.javascript_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n js_files.append(file_part)\n else:\n js_files.extend(file_part)\n embed_part = module.embedded_css()\n if embed_part:\n css_embed.append(utf8(embed_part))\n file_part = module.css_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n css_files.append(file_part)\n else:\n css_files.extend(file_part)\n head_part = module.html_head()\n if head_part:\n html_heads.append(utf8(head_part))\n body_part = module.html_body()\n if body_part:\n html_bodies.append(utf8(body_part))\n\n def is_absolute(path):\n return any(path.startswith(x) for x in [\"/\", \"http:\", \"https:\"])\n if js_files:\n # Maintain order of JavaScript files given by modules\n paths = []\n 
unique_paths = set()\n for path in js_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n js = ''.join('<script src=\"' + escape.xhtml_escape(p) +\n '\" type=\"text/javascript\"></script>'\n for p in paths)\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + utf8(js) + b'\\n' + html[sloc:]\n if js_embed:\n js = b'<script type=\"text/javascript\">\\n//<![CDATA[\\n' + \\\n b'\\n'.join(js_embed) + b'\\n//]]>\\n</script>'\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + js + b'\\n' + html[sloc:]\n if css_files:\n paths = []\n unique_paths = set()\n for path in css_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n css = ''.join('<link href=\"' + escape.xhtml_escape(p) + '\" '\n 'type=\"text/css\" rel=\"stylesheet\"/>'\n for p in paths)\n hloc = html.index(b'</head>')\n html = html[:hloc] + utf8(css) + b'\\n' + html[hloc:]\n if css_embed:\n css = b'<style type=\"text/css\">\\n' + b'\\n'.join(css_embed) + \\\n b'\\n</style>'\n hloc = html.index(b'</head>')\n html = html[:hloc] + css + b'\\n' + html[hloc:]\n if html_heads:\n hloc = html.index(b'</head>')\n html = html[:hloc] + b''.join(html_heads) + b'\\n' + html[hloc:]\n if html_bodies:\n hloc = html.index(b'</body>')\n html = html[:hloc] + b''.join(html_bodies) + b'\\n' + html[hloc:]\n self.finish(html)", "def render_template(self, template_path, context={}):\n template_str = self.load_resource(template_path)\n return Template(template_str).render(Context(context))", "def render_plugin_in_context(self, plugin, context=None):\n if plugin.__class__ not in self._renderers:\n raise PluginNotRegistered(\n \"Plugin %s is not registered\" % plugin._meta.label_lower\n )\n template, local_context = self._renderers[plugin.__class__]\n\n if template is None:\n # Simple string renderer\n return local_context(plugin) if callable(local_context) else local_context\n\n if context is None:\n context = Context()\n\n if callable(template):\n template = template(plugin)\n if callable(local_context):\n local_context = local_context(plugin, context)\n\n return render_in_context(context, template, local_context)", "def register_template(self, name, template):\n key = name, len(template.args)\n existing = self.templates.get(key)\n if existing:\n raise mio.MIOException('The template \"%s/%d\" is already registered' % (name, len(template.args)))\n self.templates[key] = template" ]
[ "0.6625346", "0.6261574", "0.6255176", "0.6226953", "0.61784464", "0.61368513", "0.586182", "0.5861061", "0.5786961", "0.5755639", "0.5717798", "0.57148427", "0.5700025", "0.56565166", "0.5653578", "0.56479716", "0.55910724", "0.55575645", "0.5547902", "0.5537928", "0.5493132", "0.54625773", "0.54584694", "0.5413335", "0.5400329", "0.5390889", "0.5373658", "0.5359523", "0.53516483", "0.5337283" ]
0.68103945
0
Calculate the intensity of the profile on a line of Cartesian x coordinates. The input xvalues are translated to a coordinate system centred on the Gaussian, using its centre.
def profile_from_xvalues(self, xvalues): transformed_xvalues = xvalues - self.centre return np.multiply( np.divide(self.intensity, self.sigma * np.sqrt(2.0 * np.pi)), np.exp(-0.5 * np.square(np.divide(transformed_xvalues, self.sigma))), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray:\r\n transformed_xvalues = xvalues - self.centre\r\n\r\n return np.multiply(\r\n np.divide(self.normalization, self.sigma * np.sqrt(2.0 * np.pi)),\r\n np.exp(-0.5 * np.square(np.divide(transformed_xvalues, self.sigma))),\r\n )", "def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray):\r\n transformed_xvalues = np.subtract(xvalues, self.centre)\r\n return np.multiply(\r\n np.divide(self.normalization, self.sigma * np.sqrt(2.0 * np.pi)),\r\n np.exp(-0.5 * np.square(np.divide(transformed_xvalues, self.sigma))),\r\n )", "def gaussian(centre, k, intensity, xpos):\r\n\treturn intensity * np.exp(- np.power(k * (xpos - centre), 2))", "def line_gaussian_activity(self, x_loc):\n dist = np.abs(x_loc - self.pref_line_gaussian) # non_periodic boundary\n dist /= self.sd_gaussianline # standard deviation\n return np.exp(-dist ** 2 / 2)", "def line_gaussian_activity(self, x_loc):\n dist = np.abs(x_loc - self.pref_line_gaussian) # non_periodic boundary\n dist /= self.sd_gaussianline # standard deviation\n return np.exp(-dist ** 2 / 2)", "def make_lineprofile(npix,rstar,xc,vgrid,A,veq,linewidth):\n vc=(np.arange(npix)-xc)/rstar*veq\n vs=vgrid[np.newaxis,:]-vc[:,np.newaxis]\n profile=1.-A*np.exp( -(vs*vs)/2./linewidth**2)\n return profile", "def _mean_scale(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n\n return self.f(x, *self.theta_vals), self.g(x, *self.theta_vals)", "def evaluate(self,x,y):\n\n #below function comes from MATLAB Peaks function\n # return np.multiply(3*np.power((1-x), 2), np.exp(-np.power(x,2) - np.power((y+1), 2))) - np.multiply(10 * (x/5.0 - np.power(x,3) - np.power(y,5)), np.exp(-np.power(x,2)-np.power(y,2)))#- np.exp(-np.power(x+1,2)-np.power(y,2))/3.0\n # return -np.power((x-50),2) - np.power(y, 2)-3\n return 5- (np.multiply(np.multiply(np.sin(x), np.sin(y)), np.power(x,2)) + np.power(y,2))", "def gaussian_many(\n x: float,\n values: np.array,\n uncertainties: np.array\n) -> np.array:\n center = np.array(values)\n width = np.maximum(np.array(uncertainties), 1e-6)\n coefficient = 1 / np.sqrt(2.0 * math.pi * width * width)\n exponent = -0.5 * ((float(x) - center) ** 2) / (width * width)\n return coefficient * np.exp(exponent)", "def psfVal(ix, iy, x, y, sigma1, sigma2, b):\n return (math.exp (-0.5*((ix - x)**2 + (iy - y)**2)/sigma1**2) +\n b*math.exp (-0.5*((ix - x)**2 + (iy - y)**2)/sigma2**2))/(1 + b)", "def calc_distribution_centre_and_spread(xdata, use_iqr=False):\n\n xcentre = np.median(xdata)\n\n xmad = np.median(abs(xdata - xcentre))\n\n xiq_min = np.percentile(xdata,25.0)\n xiq_max = np.percentile(xdata,75.0)\n xiqr = (xiq_max - xiq_min)/2.0\n\n if use_iqr:\n sig_x = xiqr\n else:\n sig_x = xmad\n\n return xcentre, sig_x", "def gauss_spot(self, xy, sigma, center=None):\r\n\r\n x = np.arange(0, xy, 1.)\r\n\r\n y = x[:,np.newaxis]\r\n\r\n \r\n\r\n if center is None:\r\n\r\n x0 = y0 = xy // 2\r\n\r\n else:\r\n\r\n x0 = center[0]\r\n\r\n y0 = center[1]\r\n\r\n \r\n\r\n return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / sigma**2)", "def _prior_gaussian(self, x_start):\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.gaussian_q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)", "def Gauss(self, x, height, centre, width, b=0):\n\n return height * np.exp(-(x - centre)**2 / (2 * 
width**2)) - b", "def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):\n \n return (amplitude/(np.sqrt(2.*np.pi)*sigma)) * exp(-np.power((1.0*x-center)/(sigma), 2.)/2.)", "def x(self) -> int:\n return self.data.x_centre >> 4", "def __call__(self, x):\n img = self.house * x\n denom = self.psize-img[self.dim-1]\n if denom.is_zero():\n raise ValueError, 'Point cannot coincide with ' \\\n 'coordinate singularity at ' + repr(x)\n return vector(RDF, [img[i]/denom for i in range(self.dim-1)])", "def Gaussian(x, mu=0, sigma=26.4, A=1, y0=0):\r\n #width = sigma*(2*np.sqrt(2*np.log(2)))\r\n b = 1/(sigma*np.sqrt(2*np.pi))\r\n f = b*np.power(np.e, -(((x-mu)**2)/(2*sigma**2)))\r\n return A*f + y0", "def Gauss(self, center_x, center_y, width_x, width_y, height=1.0):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*math.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def center(x):\n return x - x.mean()", "def addGaussian(self, xwidth=100., ywidth=100., xcen=None, ycen=None, value=1.0):\n if xcen == None:\n xcen = self.nx/2.0\n if ycen == None:\n ycen = self.ny/2.0\n self.fimage = None\n gaussian = numpy.exp(-(self.xx-xcen)**2/(2.0*xwidth**2) - (self.yy-ycen)**2/(2.0*ywidth**2))\n self.image += gaussian * value / gaussian.max()\n return", "def evaluate(self, x):\n # Assign 'continuum'\n y = self[\"off\"] + self[\"lin\"] * x\n # Add Voigt lines\n for i in smo.range(self.n):\n p = str(i + 1)\n self._v1d.assignValues(\n {\"A\": self[\"A\" + p], \"al\": self[\"al\" + p], \"ad\": self[\"ad\" + p], \"mu\": self[\"mu\" + p]})\n y += self._v1d.evaluate(x)\n return y", "def Gauss2D(self, x, center_x, width_x, y, center_y, width_y, height=1.0):\n g = math.exp(-0.5*((center_x-x)/width_x)**2)/(width_x*(2.0*math.pi)**0.5)\n g *= math.exp(-0.5*((center_y-y)/width_y)**2)/(width_y*(2.0*math.pi)**0.5)\n g *= height\n return g", "def lin_scale( val, x1, y1, x2, y2 ):\r\n x_range = (x2 - x1)\r\n new_val = 0\r\n if x_range is 0:\r\n new_val = y1\r\n else:\r\n y_range = ( y2 - y1 )\r\n new_val = ( ( ( val - x1 ) * y_range ) / x_range ) + y1\r\n\r\n return new_val", "def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray):\r\n transformed_xvalues = np.subtract(xvalues, self.centre)\r\n return self.normalization * np.multiply(\r\n self.rate, np.exp(-1.0 * self.rate * abs(transformed_xvalues))\r\n )", "def intensity(self):\n LP = 1/np.sin(self.theta)**2/np.cos(self.theta)\n P = 1 + np.cos(2*self.theta)**2\n I = (np.abs(self.F))**2*LP*P\n self.xrd_intensity = I\n self.theta2 = 2*self.theta\n rank = np.argsort(self.theta2)\n self.theta2 = self.theta2[rank]\n self.hkl_list = self.hkl_list[rank]\n self.d_hkl = self.d_hkl[rank]\n self.xrd_intensity = self.xrd_intensity[rank]", "def __call__(self, x):\n f_beam_gaussian = self.i / (np.sqrt(2 * constants.pi) * constants.e * self.sigma * self.w_z) * \\\n np.exp(-(x - self.x_c) ** 2 / (2 * self.sigma ** 2))\n\n # Convert the flux density unit atoms/nm^2s to atoms/cm^2s by multiplying with factor 1e14\n return f_beam_gaussian * 1e14", "def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray:\r\n transformed_xvalues = np.subtract(xvalues, self.centre)\r\n return self.normalization * np.multiply(\r\n self.rate, np.exp(-1.0 * self.rate * abs(transformed_xvalues))\r\n )", "def __call__(self, x):\n return self.slope * x + self.ordinate", "def __call__( self, X, Y, Z):\n xb,yb,zb = self.transform( X,Y,Z)\n \n gauss = beam( xb,yb,zb, self.w[0], self.w[1], self.l)\n intensity = (2/np.pi)* self.mW/1000. 
/self.w[0]/self.w[1] *gauss # W um^-2\n \n return uL(self.l)*intensity" ]
[ "0.5817927", "0.58009154", "0.5758678", "0.56605244", "0.56605244", "0.56276804", "0.5445396", "0.5339149", "0.5312613", "0.5311276", "0.5290594", "0.5287766", "0.5266982", "0.52203554", "0.52199733", "0.5217667", "0.52032644", "0.51897854", "0.5178143", "0.51761794", "0.51544887", "0.51493776", "0.5147732", "0.5138608", "0.51112175", "0.5106138", "0.51025546", "0.50981367", "0.5097948", "0.5074795" ]
0.7832481
0
Data type of this field. Used by backend database engines to determine proper data type for the field to be used to store the value in database.
def data_type(self): return self._data_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_type(self):\n return self._data_type", "def data_type(self):\n return self._data_type", "def data_type(self):\n return self._data_type", "def datatype(self):\n return self._datatype", "def type(self):\n return self._field.type", "def type(self) -> DataType:\n return self._type", "def type(self):\n return self.data.type", "def data_type_str(self):\n return data_ref_type_str(self.data_type)", "def value_type(self) -> str:\n return pulumi.get(self, \"value_type\")", "def data_type(self) -> int:\n return self.data[\"args\"][\"dataType\"]", "def field_type(self):\n return \"\"", "def getDataType(self):\n\n return self._dataType", "def data_type_id(self) -> str:\n return self._data_type_id", "def required_data_type(self):\n return Data", "def data_type(self) -> pulumi.Input['AssetModelDataType']:\n return pulumi.get(self, \"data_type\")", "def type(self) -> 'Data_Type':\n return Data_Type(self._info.htype, self._info.ptype)", "def field_type_converter(self, old_type):\n\n if old_type == 'String':\n new_type = 'Text'\n elif old_type == 'Integer':\n new_type = 'Short'\n elif old_type == 'Date':\n new_type = 'Date'\n elif old_type == 'GlobalID':\n new_type = 'GUID'\n else:\n new_type = 'Double'\n return new_type", "def data_types(self):", "def data_type():\n return DataTypeUtil.getDTypeForName(DataTypeUtil.getDtypeFromContext())", "def datatype(self):\n hcell = self._get_hcell2()\n celltype = hcell[\"celltype\"]\n assert celltype == \"structured\"\n return hcell[\"datatype\"]", "def datatype(self):\n # datatype is type of first dataarg\n return self[self.dataargs()[0]].typename", "def db_cast(self):\n if self.is_int:\n return 'BIGINT'\n return 'TEXT'", "def dtype(self):\n return self.initial_value.dtype", "def dataType(self, data):\n if isinstance(data,str):\n return STRING\n elif isinstance(data,dict):\n return ASSOC\n elif isinstance(data,int) or isinstance(data,float):\n return STRING\n elif is_python2() and isinstance(data,long):\n return STRING\n elif isinstance(data, SpecArray.SpecArrayData):\n self.rows, self.cols = data.shape\n return data.type", "def type(self, value):\n return value", "def getType(self):\n return \"value\"", "def get_data_type(self, col):\n if ((self.data_df[col].dtype == np.int64) or (self.data_df[col].dtype == np.int32)):\n return 'int'\n elif ((self.data_df[col].dtype == np.float64) or (self.data_df[col].dtype == np.float32)):\n return 'float'\n else:\n raise ValueError(\"Unknown data type of feature %s: must be int or float\" % col)", "def value_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value_type\")", "def convert_data_type(self, datatype):\n converted = Engine.convert_data_type(self, datatype)\n if \"NUMERIC\" in converted:\n converted = \"NUMERIC\"\n elif \"VARCHAR\" in converted:\n try:\n length = int(converted.split('(')[1].split(')')[0].split(',')[0])\n if length > 255:\n converted = \"TEXT\"\n except BaseException:\n pass\n return converted", "def mongo_to_python_type(field, data):\n if isinstance(field, ObjectIdField):\n return str(data)\n elif isinstance(field, DecimalField):\n return data\n elif isinstance(field, BooleanField):\n return data\n else:\n return str(data)" ]
[ "0.83663696", "0.83663696", "0.83663696", "0.7956471", "0.763357", "0.7620257", "0.760874", "0.7598538", "0.7489782", "0.74279207", "0.7414981", "0.74113166", "0.7327989", "0.72937167", "0.7288935", "0.72805035", "0.7219784", "0.7165743", "0.7105825", "0.6995552", "0.69875944", "0.696842", "0.6960088", "0.69495577", "0.69371843", "0.692", "0.6855344", "0.68369704", "0.6829395", "0.6819742" ]
0.8423037
0
Whether this field is marked unique field or not.
def is_unique(self): return self._unique
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_unique(self, field):\n return field.scheme.is_unique", "def isUnique(self):\n if self.isPrimaryKey():\n return True\n else:\n return self._unique", "def is_unique(self, field):\n old_length = len(self.archive)\n self.archive.add(self.create_hash(field))\n return len(self.archive) > old_length", "def hasField(self) -> bool:\n return bool(self.__field)", "def check_unique(self):\n pass", "def unique_field_value(verifield, unique_to_check):\n from polyglot.pyapi.unique import value_combo_exists\n return not value_combo_exists(verifield, **unique_to_check)", "def _is_unique_key(self, key):\n return self._in_keys(key, self._unique_keys)", "def test_uid_is_unique(self):\n\n field = self.image._meta.get_field(\"uid\")\n self.assertTrue(field.unique)", "def enforce_unique_values(self):\n return self.properties.get('enforceUniqueValues', None)", "def isRepeated(self):\n return self._field.label == FieldDescriptor.LABEL_REPEATED", "def is_duplicate(self):\n return bool(self.duplicated)", "def is_one(self) -> bool:\n return self.field.one == self", "def unique_together(self):\n return self._unique_together", "def uni(self):\n return not self.nni", "def tied(self):\n for (x, y) in self.fields:\n if self.fields[x, y] == self.empty:\n return False\n return True", "def make_fields_unique(self, fields):\n ...", "def has_field(self, field_name: str) -> bool:\n return bool(self.try_get_field(field_name))", "def is_id_only(self):\n for key, value in self.items():\n if key not in {'names', 'labels', 'roles'} and value:\n return False\n if self.names or self.labels:\n return True\n return False", "def unique_together(self):\n if self._meta.unique_together:\n return self._meta.unique_together[0]\n return ()", "def creating_unique_field(cls):\n\n return cls.collection.create_index('username', unique=True)", "def validate_unique(self, exclude=None, **kwargs):\n return super().validate_unique(exclude=exclude, user=self.user)", "def has_field(self, field):\n return field in self.extra_fields", "def is_unseen(self):\r\n unseen = self.unseen\r\n if unseen:\r\n self.unseen = False\r\n self.save()\r\n return unseen", "def is_known_field(self, name):\n return (name in self.fields) or (name in self.collections) or (name == self.id_field_name) or (name == 'cid')", "def is_field(self, proof = True):\n return True", "def is_rule(self):\n return self._fields is not None", "def is_unique(x):\n return len(set(x)) == len(x)", "def unique_user_rank(self):\n return False", "def has_field(cls, field) -> bool:\n try:\n cls._meta.get_field(field)\n return True\n except models.FieldDoesNotExist:\n return False", "def has_attribute(self, name):\n return name in self.schema" ]
[ "0.8676528", "0.78448015", "0.71603197", "0.69896996", "0.6803769", "0.67469114", "0.6701074", "0.66103786", "0.65689653", "0.6457723", "0.6402424", "0.6388006", "0.63483936", "0.62289506", "0.6191974", "0.6106518", "0.6102066", "0.6072309", "0.60616606", "0.60573006", "0.6027255", "0.5975544", "0.59641963", "0.5930599", "0.59144455", "0.59119827", "0.5877826", "0.5847769", "0.58302116", "0.5801686" ]
0.8022057
1
Whether this field is indexed or not.
def is_indexed(self): return self._indexed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_indexed(self):\n return self._index is not UnindexedComponent_set", "def has_index(self):\n return self.index is not None", "def indexed(self):\n return self.properties.get('indexed', None)", "def is_indexed(self, is_indexed):\n\n self._is_indexed = is_indexed", "def has_index(self):\n\n if self._check_idx and self._index:\n return self._check_idx", "def is_searchable(self):\n return self._get_search_query() != ''", "def is_index(self, key):\n if key not in self:\n return False\n match = key.base.label if self[key].is_tensor else key\n for i in self.extract(key, readby=True):\n for e in retrieve_indexed(i):\n if any(match in idx.free_symbols for idx in e.indices):\n return True\n return False", "def is_indexed_or_named_property_operation(self):\n return self.is_getter or self.is_setter or self.is_deleter", "def _check_indexes(cls, document: dict) -> bool:\n criteria = [\n field_name\n for field_name in cls._get_index_fields(IndexType.Other, document, \"\")\n ]\n unique_criteria = [\n field_name\n for field_name in cls._get_index_fields(IndexType.Unique, document, \"\")\n ]\n index_name = f\"idx{cls.__collection_name__}\"\n unique_index_name = f\"uidx{cls.__collection_name__}\"\n indexes = cls.__collection__.list_indexes()\n cls.logger.debug(f\"Checking existing indexes: {indexes}\")\n indexes = {\n index[\"name\"]: index[\"key\"].keys()\n for index in indexes\n if \"name\" in index and \"key\" in index\n }\n return (\n (criteria and index_name not in indexes)\n or (not criteria and index_name in indexes)\n or (criteria and index_name in indexes and criteria != indexes[index_name])\n or (unique_criteria and unique_index_name not in indexes)\n or (not unique_criteria and unique_index_name in indexes)\n or (\n unique_criteria\n and unique_index_name in indexes\n and unique_criteria != indexes[unique_index_name]\n )\n )", "def has_index(self, index):\n return index in [s[0] for s in self.get_index_list()]", "def supports_index_feature(attr_name):\n return supports_indexes and hasattr(_test_index, attr_name)", "def hasField(self) -> bool:\n return bool(self.__field)", "def _isIndexedDataframe(self, dataframe):\n return len(dataframe.index.names) > 1 or not dataframe.index.names[0] is None", "def __contains__(self, term):\n\t\tfieldname, text = term\n\t\tquery = dict(fieldname=fieldname, text=text)\n\t\treturn bool(self.index.collection.find(query).count())", "def is_scalar(self, indexable, axis):\n index = self._obj.index\n complete_key = False\n partial_key = False\n duplicated_key = False\n if axis == 0 and self._has_fancy_index():\n try:\n if type(indexable) is tuple:\n complete_key = (len(indexable) == len(index.levshape) and\n indexable in index)\n partial_key = not complete_key and indexable in index\n except TypeError: # Unhashable type, no biggie\n pass\n if index.has_duplicates:\n duplicated_key = indexable in index.get_duplicates()\n return (not duplicated_key and\n ((np.isscalar(indexable) and not partial_key) or complete_key))", "def is_index_separate(self):\n if hasattr(self, '_m_is_index_separate'):\n return self._m_is_index_separate if hasattr(self, '_m_is_index_separate') else None\n\n self._m_is_index_separate = ((self.tag >= 32) and (self.tag <= 34)) \n return self._m_is_index_separate if hasattr(self, '_m_is_index_separate') else None", "def binary(self):\n for h in self.header:\n if h.startswith('index '):\n return True\n return False", "def __contains__(self, key):\n return (key in self.index)", "def index(self, model_name, field_name, force=False):\n model = 
self.get_model(model_name)\n field = model[field_name]\n if 'index' in field:\n if field['index'] is False and force is False:\n raise ModelIndexError(\n \"Field definition has index: False. \"\n \"Use force=True to override.\")\n collection = self.get_collection(model_name)\n collection.ensure_index(field_name)", "def __contains__(self, item):\n return item in self._index_map", "def has(self, index):\n raise NotImplementedError()", "def define_index_field(DomainName=None, IndexField=None):\n pass", "def __isSet1(self, index):\n self._checkIndex(index)\n return self._items[index].isSet", "def __contains__(self, record):\n with self.session as session:\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.did == record)\n\n return query.exists()", "def __contains__(self, fieldname):\r\n return fieldname in self._by_name", "def __contains__(self, idx):\n return idx in self._data", "def __contains__(self, key):\n return key in self._index", "def is_field(self, proof = True):\n return True", "def is_indexed(self, url):\n query = self.con.execute(\"select rowid from urllist where url='%s'\" % url).fetchone()\n if query is not None:\n # Check if it actually has been crawled\n crawled = self.con.execute('select * from wordlocation where urlid=%d'\n % query[0]).fetchone()\n if crawled is not None:\n return True\n return False", "def isKnown(self, index):\n return self._items.has_key(index)" ]
[ "0.7761809", "0.73047614", "0.704352", "0.6702681", "0.65726864", "0.62959194", "0.6294209", "0.62628496", "0.61360717", "0.6107155", "0.6089513", "0.60443485", "0.60149866", "0.5958906", "0.5924591", "0.5873752", "0.58698535", "0.5807403", "0.5796295", "0.5792685", "0.57676095", "0.5715433", "0.5686673", "0.5630237", "0.5613753", "0.56072265", "0.56039906", "0.5555361", "0.5546173", "0.54834527" ]
0.8129757
0
Returns current (now) datetime.
def now(): return datetime.datetime.now()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_datetime ( ) :\n return datetime.datetime.now( )", "def now(self):\n return datetime.datetime.now()", "def now(self):\n return datetime.datetime.now()", "def get_now():\n return datetime.now()", "def get_now():\n return datetime.now()", "def now():\n return datetime.datetime.now()", "def current_datetime(self):\n return DateAccessor().today()", "def get_current_time():\n return datetime.now()", "def now (self):\n return datetime.datetime.utcnow ()", "def now():\n return datetime.datetime.utcnow()", "def get_now():\n return dt.datetime.now(dt.timezone.utc)", "def get_current_time():\n return datetime.datetime.now()", "def _get_now():\n return datetime.now(tz=timezone.utc)", "def _Now():\n return datetime.datetime.utcnow()", "def get_now():\n\treturn datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "def _now():\n return datetime.datetime.utcnow().replace(tzinfo=pytz.utc)", "def _now():\n return datetime.now(timezone.utc).astimezone()", "def current_time():\n now = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S.%f\")\n return now", "def now():\n return datetime.datetime.now(pytz.utc)", "def now() -> datetime:\n now = datetime.now(tz=timezone.utc)\n return now.replace(microsecond=now.microsecond - now.microsecond % 1000)", "def get_date():\n return datetime.datetime.now()", "def now(cls):\n return DateTime(*time.localtime())", "def now_datetime():\n return datetime.utcnow().replace(tzinfo=timezone)", "def getdate():\r\n import datetime\r\n return datetime.datetime.now()", "def now():\n return utcfromtimestamp(time.time())", "def time_now():\n return datetime.datetime.now().time()", "def datetime_now():\n return datetime.datetime.now(GB_TZ)", "def _get_date():\n return datetime.datetime.now()", "def get_now():\n local_tz = timezone(os.getenv('TZ', settings.TIME_ZONE))\n return datetime.datetime.now(tz=local_tz)", "def get_today():\n return datetime.today()" ]
[ "0.89639217", "0.8777894", "0.8777894", "0.8758675", "0.8758675", "0.87452734", "0.86252713", "0.85248846", "0.8521654", "0.84886837", "0.8479638", "0.8391757", "0.8369979", "0.83554506", "0.8245115", "0.81694186", "0.816558", "0.8131463", "0.8125974", "0.8057586", "0.8038303", "0.796302", "0.7943851", "0.794151", "0.79276085", "0.7915299", "0.79053307", "0.7901151", "0.7861622", "0.78417253" ]
0.8958888
1
Convert a date to a datetime for datastore storage.
def _date_to_datetime(value): assert isinstance(value, datetime.date) return datetime.datetime(value.year, value.month, value.day)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_datetime(self,date):\n\n dt = datetime.datetime(date.year,date.month,date.day)\n return timezone.make_aware(dt, timezone.get_default_timezone())", "def convert_date_to_datetime(date_obj: date) -> datetime:\n # REF: https://stackoverflow.com/a/11619200\n assert isinstance(date_obj, date), \"Not a date object.\"\n # return the original value if the input is a datetime object\n if isinstance(date_obj, datetime):\n return date_obj\n return datetime.combine(date_obj, time())", "def convert(date):\n converted_date = datetime.datetime.strptime(date, \n \"%Y-%m-%d\").date()\n return converted_date", "def convert_date_to_datetime(date):\n return datetime.combine(date, dtime()) if date else None", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def modis_to_from_pydatetime(date):\n \n if isinstance(date, (str, unicode)): \n return dt.datetime.strptime(date[1:], '%Y%j').date()\n return dt.datetime.strftime(date, 'A%Y%j')", "def to_datetime(date: Union[dt.datetime, dt.date]) -> dt.datetime:\n if isinstance(date, dt.datetime):\n return dt.datetime(date.year, date.month, date.day, date.hour, date.minute, date.second)\n elif isinstance(date, dt.date):\n return dt.datetime(date.year, date.month, date.day)\n else:\n raise ValueError(\"<{0}>'s type is not recognized. Its type is <{1}>\".format(date, type(date)))", "def convert_date(date):\n date = get_nummeric_only(date) \n \n \n if len(date) == 8:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n \n date_time = dt.datetime(year,month,day)\n \n return date_time\n \n if len(date) == 12 or len(date) == 14:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n hour = int(date[8:10])\n minute = int(date[10:12])\n \n date_time = dt.datetime(year,month,day, hour, minute)\n \n return date_time\n else:\n return 0", "def convert_datetime_to_date(datetime_obj: datetime) -> date:\n assert isinstance(datetime_obj, datetime), \"Not a datetime object.\"\n return datetime_obj.date()", "def convert_date_time(self, dt):\n return datetime.fromtimestamp(dt).strftime(\"%Y-%m-%d\")", "def convert_str2date(date):\n import datetime\n date = str(date)\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n return datetime.datetime(year,month,day)", "def conv_int_to_date(date):\n year = date // 10000\n month = (date % 10000) // 100\n day = date % 100\n return datetime.datetime(year=year, month=month, day=day)", "def convert_date(self, date=None):\n if date is not None:\n format_str = '%d/%m/%Y'\n converted_date = datetime.strptime(date, format_str)\n return converted_date.date()", "def any2datetime_date(d):\n return datetime.date(d.year, d.month, d.day)", "def convert_date(self, dt: datetime) -> Union[datetime, Function]:\n return dt", "def date_to_datetime(self, cr, uid, userdate, context=None):\n # TODO: move to fields.datetime in server after 7.0\n user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATETIME_FORMAT)\n if context and context.get('tz'):\n tz_name = context['tz']\n else:\n tz_name = self.pool.get('res.users').read(cr, 1, uid, ['tz'])['tz']\n if tz_name:\n utc = pytz.timezone('UTC')\n context_tz = pytz.timezone(tz_name)\n # not need if you give default datetime into entry ;)\n user_datetime = user_date # + relativedelta(hours=24.0)\n local_timestamp = context_tz.localize(user_datetime, is_dst=False)\n user_datetime = local_timestamp.astimezone(utc)\n return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n return 
user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)", "def int_to_date(date):\n\n year = date / 10**4\n month = date % 10**4 / 10**2\n day = date % 10**2\n\n return datetime.date(year, month, day)", "def date_to_python(self, value):\r\n # this throws away fractions of a second\r\n return datetime(*strptime(value[:-5], \"%Y-%m-%dT%H:%M:%S\")[0:6])", "def DT2dt(dt):\n tz = gettz(dt.timezone())\n value = datetime.datetime(dt.year(), dt.month(), dt.day(),\n dt.hour(), dt.minute(), int(dt.second()),\n int(dt.second()*1000000) % 1000000, tzinfo=tz)\n return value", "def todate(self):\n return date(self.year, self.month, self.day)", "def trost2date(trost_date):\n year, month, day = (int(val) for val in trost_date.split('-'))\n return datetime.date(year, month, day)", "def to_datetime(date_string):\n return dt.strptime(date_string, '%Y-%m-%d')", "def convertDate(indate):\n a = datetime.datetime.fromtimestamp(indate / 1000.0)\n a_str = a.strftime('%m/%d/%y')\n return datetime.datetime.strptime(a_str, '%m/%d/%y').date()", "def CDateToUnoDateTime(date):\n unodate = uno.createUnoStruct('com.sun.star.util.DateTime')\n unodate.Year, unodate.Month, unodate.Day, unodate.Hours, unodate.Minutes, unodate.Seconds, \\\n unodate.NanoSeconds, unodate.IsUTC = \\\n 1899, 12, 30, 0, 0, 0, 0, False # Identical to Basic TimeSerial() function\n\n if isinstance(date, float):\n date = time.localtime(date)\n if isinstance(date, time.struct_time):\n if 1900 <= date[0] <= 32767:\n unodate.Year, unodate.Month, unodate.Day, unodate.Hours, unodate.Minutes, unodate.Seconds = \\\n date[0:6]\n else: # Copy only the time related part\n unodate.Hours, unodate.Minutes, unodate.Seconds = date[3:3]\n elif isinstance(date, (datetime.datetime, datetime.date, datetime.time)):\n if isinstance(date, (datetime.datetime, datetime.date)):\n if 1900 <= date.year <= 32767:\n unodate.Year, unodate.Month, unodate.Day = date.year, date.month, date.day\n if isinstance(date, (datetime.datetime, datetime.time)):\n unodate.Hours, unodate.Minutes, unodate.Seconds, unodate.NanoSeconds = \\\n date.hour, date.minute, date.second, date.microsecond * 1000\n else:\n return date # Not recognized as a date\n return unodate", "def todate(self):\n return self._date", "def to_date(value: Union[datetime.date, str]) -> datetime.date:\n if isinstance(value, datetime.date):\n return value\n return datetime.datetime.strptime(value, '%d%m%y').date()", "def to_mongo(self, value):\n\n try:\n if not isinstance(value, datetime) or not value:\n pdate = value.toPyDate()\n return datetime(pdate.year, pdate.month, pdate.day)\n else:\n return value\n except AttributeError:\n return value\n\n # pyValue = value.toPyDate()\n # return datetime(pyValue.year, pyValue.month, pyValue.day)", "def getDatetime(self, date):\n dt = datetime.datetime.strptime(date, \"%Y-%m-%d@%H:%M\")\n return dt", "def str2date(date):\n return datetime.datetime.strptime(date, \"%m/%d/%Y\").date()", "def str_to_date(date):\n time_format_one = \"%Y-%m-%d\"\n if date is None:\n return None\n try:\n date = datetime.strptime(date, time_format_one)\n except ValueError:\n date = datetime.now()\n return date" ]
[ "0.7139093", "0.7096733", "0.7047866", "0.70283645", "0.7017522", "0.68610114", "0.68505156", "0.6844209", "0.68437934", "0.6843128", "0.6796456", "0.67830783", "0.6672014", "0.66619533", "0.6587397", "0.6580303", "0.6538514", "0.653111", "0.6497565", "0.641356", "0.64057046", "0.6384538", "0.6368759", "0.63578695", "0.63450944", "0.63223714", "0.6246191", "0.62170714", "0.621279", "0.6193421" ]
0.75674117
0
Convert a time to a datetime for datastore storage.
def _time_to_datetime(value): assert isinstance(value, datetime.time) return datetime.datetime(1970, 1, 1, value.hour, value.minute, value.second, value.microsecond)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_to_datetime(time):\n split_time = time.split(':')\n hour = int(split_time[0])\n minutes = int(split_time[1])\n now = dt.datetime.now()\n time_as_datetime = dt.datetime(now.year, now.month, now.day,\n hour=hour, minute=minutes)\n\n # Need to change the day to tommorow if time has already passed\n if time_as_datetime < now:\n day = now.day + 1\n time_as_datetime = dt.datetime(now.year, now.month, day,\n hour=hour, minute=minutes)\n\n return time_as_datetime", "def convert_to_time(value):\n if isinstance(value, datetime.time):\n return value\n elif isinstance(value, str):\n return datetime.time.fromisoformat(value)\n else:\n return datetime.time(value)", "def _time_to_date(parsed_time):\n if not parsed_time:\n return parsed_time\n return datetime.fromtimestamp(calendar.timegm(parsed_time), tz=timezone.utc)", "def datetime_from_time(time: datetime.time, date: datetime.date = datetime.date.today()):\n if type(time) == datetime.time:\n return datetime.datetime.combine(date, time)\n else:\n return time", "def date_to_datetime(date, time=None):\n if time is None:\n time = dt.datetime.min.time()\n return dt.datetime.combine(date, time)", "def convert_datetime(date, time):\n return datetime.datetime.strptime(date + \" \" + time, '%Y-%m-%d %H:%M:%S')", "def get_datetime(time):\n year = int(time[0:4])\n month = int(time[5:7])\n day = int(time[8:10])\n hour = int(time[11:13])\n minute = int(time[14:16])\n second = int(time[17:19])\n return datetime(year, month, day, hour, minute, second)", "def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)", "def convert_time(cls, time_str):\n if cls.date_ignore_pattern:\n time_str = re.sub(cls.date_ignore_pattern, '', time_str)\n return datetime.strptime(time_str, cls.date_format)", "def make_datetime_obj(date, time):\n\n conv_date = datetime.strptime(date, \"%Y-%m-%d\").date()\n conv_time = datetime.strptime(time, \"%H:%M\").time()\n\n return datetime.combine(conv_date, conv_time)", "def time_convert(time):\n try:\n time_data = str(time)\n if time_data:\n try:\n time_data = datetime.strptime(time_data, '%Y%m%d')\n except Exception:\n time_data = datetime.strptime(time_data, '%Y%m%d%H%M%S')\n time_data = time_data.strftime('%Y-%m-%d')\n return time_data\n except Exception:\n return False", "def datetime_from_string(time):\n try:\n if type(time) == datetime.datetime:\n return time\n else:\n try:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S.%f')\n except ValueError:\n return time\n except TypeError:\n return time", "def str_to_time(my_time):\n time_format = \"%H:%M\"\n try:\n my_time = datetime.strptime(my_time, time_format)\n except:\n my_time = datetime.now()\n\n return my_time", "def _astropy_time(time):\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))", "def convertTime(time_string):\n if (not isinstance(time_string, str)) or len(time_string) != 10 or not time_string.startswith('-'):\n print('There was an issue with the passed timestring: ', time_string)\n if time_string == '0':\n return timezone.now()\n else:\n raise ValueError('Date in import file is not valid')\n ## !!! 
NOTE: Unix using different epoch (1970 start rather than 1900->2036) so this library method is giving the wrong date from our timestamp\n timestamp = int(time_string) + 2085935295\n dt = datetime.fromtimestamp(timestamp, timezone.utc ) # 70 year adjustment for unix library\n print('timestamp (UTC): ', timestamp, 'type: ', type(timestamp))\n print('returning: ', dt, 'type: ', type(dt))\n return dt", "def get_time(time):\n regtime = re.compile(r'^([0-1][0-9]|[2][0-3]):([0-5][0-9])$')\n if not regtime.match(time):\n return None\n time_group = regtime.match(time).groups()\n time_final = datetime.time(int(time_group[0]), int(time_group[1]))\n return time_final", "def unmarshall_time(tyme):\r\n return datetime.datetime(day=tyme['day'],\r\n month=tyme['month'],\r\n year=tyme['year'],\r\n hour=tyme['hour'],\r\n minute=tyme['minute'],\r\n second=tyme['second'],\r\n microsecond=tyme['microsecond'])", "def adapt_timefield_value(self, value):\n if value is None:\n return None\n \n # Expression values are adapted by the database.\n if hasattr(value, 'resolve_expression'):\n return value\n # SQL Server doesn't support microseconds\n if isinstance(value, string_types):\n return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))\n if timezone.is_aware(value):\n raise ValueError(\"DBMaker backend does not support timezone-aware times.\")\n return datetime.time(value.hour, value.minute, value.second)", "def convert_datetime(t):\r\n try:\r\n if isinstance(t, str):\r\n converted = datetime.strptime(t, '%d-%m-%Y')\r\n # to get time in seconds:\r\n t = int(time.mktime(converted.timetuple()))\r\n return t\r\n except Exception as e:\r\n print(e)\r\n return None", "def datetime(self):\n\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + timedelta(hours=time)\n return d\n except:\n return", "def get_datetime_from_time(value: datetime | time) -> datetime:\n if isinstance(value, time):\n value = datetime.combine(dt_util.now().date(), value, dt_util.DEFAULT_TIME_ZONE)\n if isinstance(value, datetime):\n value = value.replace(tzinfo=dt_util.DEFAULT_TIME_ZONE)\n if value > dt_util.now():\n raise ValidationError(\"Time cannot be in the future.\")\n return value", "def asdatetime(self):\n tznaive = self.timezoneNaive()\n if tznaive:\n tzinfo = None\n else:\n tzinfo = _TZINFO[self._tz].tzinfo\n second = int(self._second)\n microsec = self.micros() % 1000000\n dt = datetime(self._year, self._month, self._day, self._hour,\n self._minute, second, microsec, tzinfo)\n return dt", "def datetime(self):\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + \\\n timedelta(hours=time)\n return d\n except:\n return", "def timeConvert(time):\n\n FMTin = '%Y-%m-%d %H:%M:%S'\n FMTout = '%m/%d/%y'\n\n return datetime.strftime(datetime.strptime(time, FMTin), FMTout)", "def time_string2dt(time_string: str)-> datetime:\n return parse(time_string, fuzzy=True)", "def from_datetime_time(cls, dtime_obj, time_obj):\n offset = ((time_obj.seconds - dtime_obj.seconds + SECONDS_PER_DAY // 2)\n % SECONDS_PER_DAY - SECONDS_PER_DAY // 2)\n day_offset, seconds = divmod(dtime_obj.seconds + offset, SECONDS_PER_DAY)\n # pylint: disable=protected-access\n return cls.from_daynum_secs_nanos(dtime_obj._days + day_offset, seconds,\n time_obj.nanosecond)", "def normalize_datetime(raw_date_time):\n return datetime.datetime(*[int(s) for s in raw_date_time.split(\"-\")])", "def 
DT2dt(dt):\n tz = gettz(dt.timezone())\n value = datetime.datetime(dt.year(), dt.month(), dt.day(),\n dt.hour(), dt.minute(), int(dt.second()),\n int(dt.second()*1000000) % 1000000, tzinfo=tz)\n return value", "def dt(day: date, time: str) -> datetime:\n t = parser.parse_time(time)\n return datetime.combine(day, t)", "def datetime_to_time(day, time):\n try:\n n_day = _day_map.index(day) * 60 * 24\n except KeyError as e:\n raise Exception(\"Invalid date string '{}'\".format(day))\n hour = int(time[:2])\n if hour > 23:\n raise Exception(\"Invalid hour {}\".format(hour))\n minutes = int(time[3:])\n n_time = hour*60 + minutes\n return n_day + n_time" ]
[ "0.76862586", "0.71638316", "0.7140985", "0.70503753", "0.7001265", "0.69803745", "0.68884057", "0.6877281", "0.67336893", "0.67133236", "0.66956383", "0.65040106", "0.64000684", "0.63857096", "0.63808364", "0.6339152", "0.63245136", "0.6307302", "0.6298653", "0.6283557", "0.62594986", "0.6245707", "0.62353116", "0.6170768", "0.6135481", "0.6134607", "0.6110612", "0.61096287", "0.60989726", "0.6093777" ]
0.77680534
0