Dataset columns:
  query            string, 9 to 9.05k characters
  document         string, 10 to 222k characters
  metadata         dict
  negatives        sequence of 30 strings
  negative_scores  sequence of 30 values
  document_score   string, 4 to 10 characters
  document_rank    string, 2 distinct values
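Each row below pairs a natural-language query (typically a docstring) with a matching code snippet in `document`, 30 hard-negative snippets in `negatives` with per-negative similarity scores in `negative_scores`, the positive snippet's own score in `document_score`, and `document_rank`. The `metadata` field declares a (query, document, negatives) triplet objective. The sketch below is illustrative only and not part of the dataset card: it assumes the rows are available as Python dicts with these fields, and the rank check merely mirrors what the preview rows suggest (the number of negatives that outscore the document).

```python
# Illustrative sketch only -- the loader and field handling are assumptions,
# not something specified by this dataset card.

def iter_triplets(rows):
    """Yield (query, positive, negative) triplets from rows shaped like the preview."""
    for row in rows:
        query = row["query"]                # natural-language description / docstring
        positive = row["document"]          # code snippet matching the query
        for negative in row["negatives"]:   # 30 hard-negative code snippets
            yield query, positive, negative

def negatives_outscoring_document(row):
    """Count negatives whose score exceeds the document's own score.

    In the preview rows this count coincides with `document_rank`,
    but that reading is an observation, not a documented guarantee.
    """
    doc_score = float(row["document_score"])
    return sum(1 for s in row["negative_scores"] if float(s) > doc_score)

# Example with a single hand-written row (values abridged):
rows = [{
    "query": "Check if the current view is OCaml source code.",
    "document": "def is_ocaml(view): ...",
    "negatives": ["def is_js_source(view): ...", "def is_python(self): ..."],
    "negative_scores": [0.67141247, 0.61287296],
    "document_score": "0.69519085",
    "document_rank": "0",
}]
for q, pos, neg in iter_triplets(rows):
    print(q, "->", pos[:24], "| vs |", neg[:24])
print(negatives_outscoring_document(rows[0]))  # 0, matching document_rank above
```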
Check if the current view is OCaml source code.
def is_ocaml(view):
    ocaml = 'source.ocaml'
    mlfi = 'source.mlfi'
    location = view.sel()[0].begin()
    return view.match_selector(location, ocaml) or view.match_selector(location, mlfi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_non_js_source(self):\n self.view.set_syntax_file(\"Packages/Python/Python.tmLanguage\")\n\n actual = is_js_source(self.view)\n\n self.assertFalse(actual)", "def test_js_source(self):\n actual = is_js_source(self.view)\n\n self.assertTrue(actual)", "def is_authoring_source(view):\n if view.match_selector(0, \"text.hyperhelp.help\"):\n return not view.is_read_only()\n\n return False", "def test_source(self):\n with open(__file__, 'r') as f:\n contents = f.read()\n\n lexer = syntax_highlighting.fetch_lexer(contents)\n self.assertIn(lexer.__class__.__name__, PYTHON_LEXER_CLASS_NAMES)", "def has_code_module(self) -> bool:\n return self.module_info is not None", "def has_source(self):\n return any(map(utils.assert_package_is_source, self.pkg_arguments))", "def has_source_file( self ):\n return self._source_file is not None", "def iscode(object):\r\n return isinstance(object, types.CodeType)", "def menu_check_source_with_pychecker(self, event=None):\n if self.app.children:\n self.app.childActive.check_source_with_pychecker()", "def only_ocaml(func):\n\n @functools.wraps(func)\n def wrapper(self, view, *args, **kwargs):\n if is_ocaml(view):\n return func(self, view, *args, **kwargs)\n\n return wrapper", "def detect_rust(src):\n lines = []\n in_code_block = False\n start_of_code_block = 0\n\n for i, line in enumerate(src.splitlines()):\n if '```rust' in line:\n start_of_code_block = i\n in_code_block = True\n elif '```' in line and in_code_block:\n lines.append((start_of_code_block + 1, i - 1))\n in_code_block = False\n\n return lines", "def is_local(self) -> bool:\n if not self.source:\n return False\n\n if self.source.master_name.startswith(MODULE_NAME):\n return True\n\n if self.is_type_defs():\n return True\n\n return False", "def is_code(self) -> bool:\n return any(seg.is_code for seg in self.segments)", "def is_scala(self):\r\n return self.has_label('scala')", "def has_debug_view(name=None):\r\n for view in sublime.active_window().views():\r\n if is_debug_view(view):\r\n if name is not None:\r\n if view.name() == name:\r\n return True\r\n else:\r\n return True\r\n return False", "def isrst(filename):\n return filename[-4:] == '.rst'", "def is_builtins(self) -> bool:\n return self.source.startswith(self.builtins_import_string)", "def is_codegen(self):\r\n return self.has_label('codegen')", "def test_link_to_source(\n self,\n _needs_unindent,\n _is_source_requested,\n _get_source_code_from_object,\n ):\n _needs_unindent.return_value = False\n _is_source_requested.return_value = True\n _get_source_code_from_object.return_value = \"\"\n\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"basic.html\",\n ),\n \"MyKlass.get_method\",\n )\n content = self._get_fake_project_method()\n nodes = self._get_nodes(data, content) # pylint: disable=no-value-for-parameter\n\n self.assertEqual(2, len(nodes))\n self.assertTrue(any(node for node in nodes if isinstance(\n node,\n extension._SourceCodeHyperlink, # pylint: disable=protected-access\n )))", "def is_module_object(self, obj):\n if not isinstance(obj, BaseException):\n try:\n c = obj.__class__\n source_file = inspect.getsourcefile(c)\n except (TypeError, AttributeError):\n pass\n else:\n if source_file and source_file.startswith(self.path):\n return True\n\n return False", "def can_compile(src):\n src = src if src.endswith(\"\\n\") else src + \"\\n\"\n src = transform_command(src, show_diff=False)\n src = src.lstrip()\n try:\n XSH.execer.compile(src, mode=\"single\", glbs=None, 
locs=XSH.ctx)\n rtn = True\n except SyntaxError:\n rtn = False\n except Exception:\n rtn = True\n return rtn", "def is_debug_view(view):\r\n return view.name() == TITLE_WINDOW_BREAKPOINT or view.name() == TITLE_WINDOW_CONTEXT or view.name() == TITLE_WINDOW_STACK or view.name() == TITLE_WINDOW_WATCH", "def test_readme_text_directly(readme_path):\n code = []\n code_block = False\n\n with open(readme_path, 'r') as f:\n for line in f:\n if line.endswith(\"```\\n\"):\n code_block = False\n\n # Add all code lines except for the viz function.\n if code_block and not line.startswith('viz_neighbors_imgs'):\n code.append(line)\n\n if line.startswith(\"```python\"):\n code_block = True\n\n exec(('\\n').join(code))", "def is_snippet(abbr, doc_type = 'html'):\n\treturn get_snippet(doc_type, abbr) and True or False", "def isSource(self):\n return (len(self.parents()) == 0)", "def can_trace_source(self, filename: str) -> bool:\n path = Path(filename)\n if not path.is_file():\n return False\n\n extension = path.suffix\n if extension in self.registered_compilers:\n compiler = self.registered_compilers[extension]\n if compiler.supports_source_tracing:\n return True\n\n # We are not able to get coverage for this file.\n return False", "def is_python(self):\r\n return self.has_label('python')", "def is_viewable(miscobj):\n return misctype_byname(miscobj.filetype).viewable", "def _is_vim_object(self, module):\n return isinstance(module, vim.Vim)", "def in_function_code(self):\n return self.lscope is not None and self.sscope is None" ]
[ "0.67141247", "0.61287296", "0.59936684", "0.59469485", "0.59078604", "0.5851658", "0.5787282", "0.5709476", "0.5676872", "0.5662873", "0.56603956", "0.55736876", "0.5511895", "0.5484196", "0.5424797", "0.54031396", "0.53450185", "0.53424245", "0.52996314", "0.5279858", "0.525761", "0.5245247", "0.52412075", "0.52340233", "0.51985896", "0.51962364", "0.51931393", "0.5138563", "0.51350427", "0.5131118" ]
0.69519085
0
Execute the given function only if we are in OCaml source code.
def only_ocaml(func):

    @functools.wraps(func)
    def wrapper(self, view, *args, **kwargs):
        if is_ocaml(view):
            return func(self, view, *args, **kwargs)

    return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main_code():\n pass", "def in_function_code(self):\n return self.lscope is not None and self.sscope is None", "def runsource(self, source, filename=\"<input>\", symbol=\"single\"):\n try:\n code = self.compile(source, filename, symbol)\n except (OverflowError, SyntaxError, ValueError):\n # Case 1\n self.showsyntaxerror(filename)\n return None\n\n if code is None:\n # Case 2\n return True\n\n # Case 3\n # We store the code source and object so that threaded shells and\n # custom exception handlers can access all this info if needed.\n self.code_to_run_src = source\n self.code_to_run = code\n # now actually execute the code object\n if self.runcode(code) == 0:\n return False\n else:\n return None", "def aFunction():\n return True", "def main(source):\n pass", "def code():", "def Exec_Python(code):\n # pylint: disable=exec-used\n try:\n exec(code, globals())\n # pylint: disable=broad-except\n # pylint: disable=bare-except\n except:\n _LOGGER.error('Execution of following code has failed %s', code)\n return False\n return True", "def _test():\n if sys.argv[1:]:\n if sys.argv[2:]:\n sys.stderr.write(\"usage: python dis.py [-|file]\\n\")\n sys.exit(2)\n fn = sys.argv[1]\n if not fn or fn == \"-\":\n fn = None\n else:\n fn = None\n if fn is None:\n f = sys.stdin\n else:\n f = open(fn)\n source = f.read()\n if fn is not None:\n f.close()\n else:\n fn = \"<stdin>\"\n code = compile(source, fn, \"exec\")\n dis(code)", "def gen_function(self, function):\n if function.body:\n self.gen_function_def(function)", "def test_readme_text_directly(readme_path):\n code = []\n code_block = False\n\n with open(readme_path, 'r') as f:\n for line in f:\n if line.endswith(\"```\\n\"):\n code_block = False\n\n # Add all code lines except for the viz function.\n if code_block and not line.startswith('viz_neighbors_imgs'):\n code.append(line)\n\n if line.startswith(\"```python\"):\n code_block = True\n\n exec(('\\n').join(code))", "def _func_only(func):\n if inspect.isfunction(func):\n return\n else:\n raise Exception(\"Only functions can be tasks\")", "def test_blockly_callback(code):\n #\n module = types.ModuleType(\"module\")\n # compile into ast (use <string> as a filename)\n ast = compile(code, filename=\"<string>\", mode=\"exec\")\n # execute the code in the context of the module\n exec(ast, module.__dict__)\n # call the function\n result = module.callback()\n assert result == \"callback called\"", "def visit_Python(self, node):\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bp_code = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.code_ops.extend(bp_code.code[1:-2])", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def is_function(self):\n line = self.line.strip()\n if line.startswith('fu'):\n if line.startswith('function') is False:\n return True", "def get_function_code(f):\n assert isinstance(f, types.FunctionType)\n function_name = f.__code__.co_name\n assert isinstance(function_name, str)\n\n if in_jupyter_notebook() or in_google_colab():\n return extract_function_code(function_name, get_jupyter_raw_code(function_name))\n else:\n return extract_function_code(function_name, dill.source.getsource(f))", "def my_func():\n pass", "def ignore_builtin_verification():\n return not current_space().skip_builtin_verification", "def get_code(func):\n import inspect\n\n raw = 
\"\".join(inspect.getsource(func))\n found = re.findall(\"(k = .*)\", raw)\n\n if any(found):\n code = found[0]\n return code\n else:\n return \"\"", "def my_function():\n\tpass", "def print_code(func):\n print(inspect.getsource(func))", "def automain(self, function):\n captured = self.main(function)\n if function.__module__ == '__main__':\n self.run_commandline()\n return captured", "def python_code_markdown(func: Callable) -> str:\n return \"\"\"\n ```python\n \"\"\" + inspect.getsource(func) + \"\"\"\n ```\n \"\"\"" ]
[ "0.61175025", "0.59589654", "0.5907236", "0.57341886", "0.57255304", "0.5646994", "0.5632915", "0.55879843", "0.55784357", "0.5551663", "0.55419976", "0.5538157", "0.5525979", "0.54997593", "0.54997593", "0.54997593", "0.54997593", "0.54997593", "0.54997593", "0.54997593", "0.54997593", "0.5472104", "0.5470877", "0.5450619", "0.5448971", "0.5446835", "0.5441624", "0.5441549", "0.5398603", "0.5385526" ]
0.6008382
1
Convert a position returned by Merlin to a Sublime text point. Sublime uses character positions and starts each file at line 0.
def merlin_pos(view, pos):
    return view.text_point(pos['line'] - 1, pos['col'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_position(tu, file, line, column):\r\n return conf.lib.clang_getLocation(tu, file, line, column)", "def point2pos(self, point):\n row = self._vim.eval('byte2line({})'.format(point))\n col = self._vim.eval('{} - line2byte({})'.format(point, row))\n return (int(row), int(col))", "def getPos(level):\n return str(STARTING_POS[level-1][0]) + \", \" + str(STARTING_POS[level-1][1])", "def updatePosition(char, position):\n line, col = position\n return (line + 1, 1) if (char == '\\n') else (line, col + 1)", "def get_cursor_pos(self):\n return (self.text_maker.pos[0] + 9, self.text_maker.pos[1] + 120 + 8)", "def set_position():\n\n global character\n return character['Position']", "def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)", "def parsenwspt(text):\n lat = int(text[0:4]) / 100\n lon = int(text[4:])\n if lon < 1000:\n lon += 10000\n return (lon / -100, lat)", "def GetSelection(self):\n # STC HELL\n # Translate the UTF8 byte offsets to unicode\n start, end = super(EditraBaseStc, self).GetSelection()\n utf8_txt = self.GetTextUTF8()\n if start != 0:\n start = len(ed_txt.DecodeString(utf8_txt[0:start], 'utf-8'))\n if end != 0:\n end = len(ed_txt.DecodeString(utf8_txt[0:end], 'utf-8'))\n del utf8_txt\n return start, end", "def subl(filepath = None, line_number = 1, dirpath_lwc_root = None):\n if dirpath_lwc_root is None:\n dirpath_lwc_root = da.lwc.discover.path(key = 'root')\n\n filepath_subl = da.lwc.env.cli_path(\n dependency_id = 'subl',\n application_name = 'sublime_text',\n dirpath_lwc_root = dirpath_lwc_root)\n\n if filepath is None:\n logging.debug('Run sublime text')\n return _subprocess_call([filepath_subl])\n\n # The stack trace that is retrieved during the\n # handling of an Exception thrown from within\n # one of PyRun's built-in libraries may have\n # a stack trace that contains filenames of the\n # form \"<pyrun>/filename.py\". It is not possible\n # to open such files in the editor.\n #\n # Although this is an anomalous condition, we\n # do not expect the developer to take any\n # remedial action when it is encountered. 
We\n # therefore refrain from throwing an exception\n # and instead simply log the fact that it has\n # occurred and return normally.\n #\n # It is conceivable that other similar conditions\n # may be encountered, so as a piece of defensive\n # programming, we also take the same action if\n # the filepath parameter does not indicate a\n # valid file.\n if filepath.startswith('<pyrun>') or not os.path.isfile(filepath):\n logging.warning('Cannot open file: \"%s\"', filepath)\n return 1\n\n argument = '{filepath}:{line_number}'.format(\n filepath = filepath,\n line_number = line_number)\n logging.debug('Open file in sublime text: %s', argument)\n return _subprocess_call([filepath_subl, '-a', argument])", "def get_position(self, position):", "def sunpos(*args):\n return _sunpos.sunpos(*args)", "def location_to_pos(self,row, col):\r\n\r\n pos_row = str(row + 1)\r\n pos_col = chr(col + 97)\r\n return pos_col + pos_row", "def get_single_location(chrom, pos):\n return CHROMOSOME_TO_CODE[chrom] * int(1e9) + pos", "def get_position():\n\n return character['Position']", "def _map_extlit(self, l):\n\n v = abs(l)\n\n if v in self.vmap.e2i:\n return int(copysign(self.vmap.e2i[v], l))\n else:\n self.topv += 1\n\n self.vmap.e2i[v] = self.topv\n self.vmap.i2e[self.topv] = v\n\n return int(copysign(self.topv, l))", "def _format_point(self, point):\n return (point + self.draw_offset).intify()", "def pos(self, x, y):\n\n if isinstance(x, float):\n x = int(x)\n\n self.screen.write(colorama.Cursor.POS(x, y), ansi=True)\n self.x = x\n self.y = y\n\n return x, y", "def position(self):\n return pm.datatypes.Point(self.transform.getTranslation(ws=True))", "def _get_point_source_location(element):\n pos = element.find('%s/%s/%s' %\n (NRML04_POINT_GEOMETRY, gml.GML_POINT, gml.GML_POS))\n pos = pos.text.split()\n\n return float(pos[0]), float(pos[1])", "def __convert_position(self, row_position: int = None, col_position: int = None) -> int:\n if row_position is None or col_position is None:\n return self.__row_position * len(self.__labyrinth[0]) + self.__col_position\n\n return row_position * len(self.__labyrinth[0]) + col_position", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):" ]
[ "0.59632206", "0.5889241", "0.56616706", "0.56562984", "0.54250014", "0.5352155", "0.524134", "0.52322143", "0.5227591", "0.52061164", "0.5179411", "0.5161949", "0.51547635", "0.507839", "0.5063059", "0.5035226", "0.5024355", "0.50153136", "0.5005417", "0.50020736", "0.4999571", "0.4988853", "0.4988853", "0.4988853", "0.4988853", "0.4988853", "0.4988853", "0.4988853", "0.4988853", "0.4988853" ]
0.6376156
0
.get_recommendation_display() will return the correct value of the recommendation choice
def test_recommendation_value(self):
    john_starks = Athlete(first_name="John", last_name="Starks", sport="NBA", recommendation="a")
    self.assertEqual(john_starks.get_recommendation_display(), "Hire Joe IMMEDIATELY!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n if self.recommend:\n review = 'recommended by {}: {}'.format(self.reviewer, self.comments)\n else:\n review = 'not recommended by {}: {}'.format(self.reviewer, self.comments)\n\n return review", "def handle_recommendation(request):\n ticker = request.get_slot_value(slot_name=\"stockTicker\").upper()\n recommendation = Analyst(ticker).recommendation()\n\n logger.info(\n f\"recommendationMean for {ticker} is {recommendation}\")\n\n # pick right response based on recommendation mean rating\n if recommendation is None:\n message = strings.INTENT_RCMD_NO_RCMD\n elif _in_interval(recommendation, 1, 1.8):\n message = strings.INTENT_RCMD_STRONG_BUY\n elif _in_interval(recommendation, 1.8, 2.2):\n message = strings.INTENT_RCMD_BUY\n elif _in_interval(recommendation, 2.2, 2.8):\n message = strings.INTENT_RCMD_OPT_HOLD\n elif _in_interval(recommendation, 2.8, 3.2):\n message = strings.INTENT_RCMD_HOLD\n elif _in_interval(recommendation, 3.2, 3.8):\n message = strings.INTENT_RCMD_PES_HOLD\n elif _in_interval(recommendation, 3.8, 4.2):\n message = strings.INTENT_RCMD_SELL\n elif _in_interval(recommendation, 4.2, 5):\n message = strings.INTENT_RCMD_STRONG_SELL\n\n response = ResponseBuilder.create_response(request, message=message)\n reprompt_message = strings.INTENT_GENERAL_REPROMPT\n\n return response.with_reprompt(reprompt_message)", "def show_recommendation_pool(self, top_n=None):\n i = 0\n if top_n is None:\n top_n = self.number_of_recommendations\n\n for _, rdata in self.recommendation_pool.items():\n print(\"\\n{R.movie_id} - {R.title} - {R.genres}\".format(\n R=rdata['movie_obj']))\n\n if 'title_similarity' in rdata:\n print(\" Title Similarity: {} - ({})\".format(\n rdata['title_similarity'], rdata['movie_obj'].title))\n\n if 'genres_similarity' in rdata:\n print(\" Genres Similarity: {} - ({})\".format(\n rdata['genres_similarity'], rdata['movie_obj'].genres))\n\n if 'tags_similarity' in rdata:\n print(\" Tags Similarity: {} - ({})\".format(\n rdata['tags_similarity'], rdata['tags']))\n\n if 'final_similarity' in rdata:\n print(\" -> Final Similarity: {}\".format(\n rdata['final_similarity']))\n\n i += 1\n if top_n and i >= top_n:\n break", "def _get_recommend(self, user):\n return self.user_cf.calculate(target_user_id=user, user_n=self.user_n,\n item_n=self.item_n, type=2)", "def mainRecommendation(user_preferences: dict, matcher: NodeMatcher):\r\n print(\"***************************************************\\n Recomendaciones principales\\n\"\r\n \"***************************************************\")\r\n pareja = ParejaRecommendation(dict, matcher)\r\n apps = likingdatingapps(dict, matcher)\r\n dificultad = difficultydates(dict, matcher)\r\n importancia = samehobbies(dict, matcher)\r\n imp = study(dict, matcher)\r\n gusto = musictaste(dict, matcher)\r\n region = sameregion(dict, matcher)\r\n gustoDif = different(dict, matcher)\r\n Habits = habits(dict, matcher)\r\n goals = goalsRecommendation(dict, matcher)\r\n prof = profesionalRecommendation(dict, matcher)\r\n similar = similarRecommendation(dict, matcher)\r\n\r\n listaopciones = [pareja, apps, dificultad, importancia, imp, gusto, region, gustoDif, Habits, goals, prof, similar]\r\n\r\n Prospectos = {}\r\n for option in listaopciones:\r\n for element in option:\r\n if Prospectos.has_key(element[\"nombre\"]):\r\n Prospectos[element[\"nombre\"]] = 1\r\n else:\r\n Prospectos[element[\"nombre\"]] = Prospectos[element[\"nombre\"]] + 1", "def get_recommendations(soup_recipe):\n ratings = soup_recipe.find(\"meta\", 
{\"itemprop\": \"ratingValue\"})[\"content\"]\n ratings_count = soup_recipe.find(\"meta\", {\"itemprop\": \"ratingCount\"})[\"content\"]\n if ratings == 0:\n return None, None\n return ratings, ratings_count", "def make_second_recommendation() -> str:\r\n growth_rate_info = highest_growth_rate()\r\n rate_strings = growth_rate_info[0]\r\n most_demand_string = growth_rate_info[1]\r\n return render_template(\"make_second_recommendation.html\",\r\n display_rates=rate_strings,\r\n most_demand_display=most_demand_string)", "def get_recommendations(name, data):\r\n #sorts preferences in alphabetical order\r\n #do this to make it easier to compare\r\n for key in data:\r\n data[key] = selection_sort(data[key])\r\n most_similar_key = \"\"\r\n max_matches = 0\r\n for key in data:\r\n if not(key[-1] == \"$\" or data[key] == data[name]):\r\n \"\"\"if the person is not private or does not have the same data\"\"\"\r\n matches = num_matches(data[key], data[name])\r\n if matches > max_matches:\r\n most_similar_key = key\r\n max_matches = matches\r\n if most_similar_key == \"\":\r\n print(\"No recommendations available at this time\")\r\n return 1\r\n else:\r\n final_recommendations = []\r\n for x in data[most_similar_key]:\r\n if x not in data[name]:\r\n final_recommendations += [x]\r\n return final_recommendations", "def print_recommendations(self):\n\n rec_vector = self.generate_recommendation()\n\n print(\"Recommendations for user {} \".format(self.username))\n\n for ranking, subreddit_name in enumerate(rec_vector, 1):\n print(\"{}.: {}\".format(ranking, subreddit_name))\n\n if ranking%10 == 0 and ranking!=0:\n check_if_move_on = True\n print(\"\\nType c and press enter for the next 10 subreddits.\\n\")\n print(\"Type q and press enter to return to main menu.\\n\")\n\n while check_if_move_on:\n choice = input()\n\n if choice == 'c':\n break\n\n elif choice == 'q':\n break\n\n else:\n print(\"Not a valid entry, please enter again.\")\n\n # break the whole thing if they want to quit\n if choice == 'q':\n break", "def _choose_best_option(self):", "def get_recommendations(self):\n endpoints = '/user/recs'\n return self.get_request(endpoints)", "def make_third_recommendation() -> str:\r\n requirement_mappings = {\r\n \"Organic Red Helles\": 0,\r\n \"Organic Pilsner\": 0,\r\n \"Organic Dunkel\": 0}\r\n helles_info = bottles_required(\"Organic Red Helles\")\r\n pilsner_info = bottles_required(\"Organic Pilsner\")\r\n dunkel_info = bottles_required(\"Organic Dunkel\")\r\n\r\n requirement_mappings[\"Organic Red Helles\"] = helles_info[2]\r\n requirement_mappings[\"Organic Pilsner\"] = pilsner_info[2]\r\n requirement_mappings[\"Organic Dunkel\"] = dunkel_info[2]\r\n most_needed_recipe = max(requirement_mappings, key=requirement_mappings.get)\r\n\r\n return render_template(\"make_third_recommendation.html\",\r\n helles_display=helles_info,\r\n pilsner_display=pilsner_info,\r\n dunkel_display=dunkel_info,\r\n most_needed_display=most_needed_recipe)", "def graphlab_recommendations(aData, user, needed_param, n = 10, cv_ratio = 0.7):\n # change the data into SFrame and the user data into SArray\n import preprocessing\n aData.rename(columns = {needed_param['user_id']:'user_id', needed_param['item_id']: 'item_id', \n needed_param['ratings']: 'ratings'}, inplace = True)\n aData = gl.SFrame(aData)\n train, test= preprocessing.graphlab_split_data(aData, cv_ratio)\n user = gl.SArray([user])\n \n # make models\n methods = ['matrix_factorization', 'linear_model', 'item_similarity', 'popularity', 'item_means']\n sim_type = 
['jaccard', 'cosine', 'pearson']\n models = []\n for aMethod in methods:\n print aMethod\n if(aMethod != 'item_similarity'):\n model = gl.recommender.create(observation_data = train, user_id = 'user_id', \n item_id = 'item_id', target = 'ratings', method = aMethod)\n models.append(model)\n else:\n for aSim in sim_type:\n print aSim\n sim_model = gl.recommender.create(observation_data = train, user_id = 'user_id', \n item_id = 'item_id', target = 'ratings', method = aMethod, similarity_type = aSim)\n models.append(sim_model)\n \n # generate results for models as well as the rmse results\n recommended = []\n rmse = []\n for model in models:\n aResult = model.recommend(users = user, k = n)\n recommended.append(aResult)\n aRMSE = gl.evaluation.rmse(test['ratings'], model.predict(test))\n rmse.append(aRMSE)\n \n # create DataFrame\n df = pd.DataFrame({'models':models, 'recommended':recommended, 'rmse':rmse})\n # find the model that gives k least square errors\n df = df.sort('rmse', ascending = True).iloc[0:2]\n df.index = range(0,2)\n \n colnames = df['recommended'].loc[0].column_names()\n results = pd.DataFrame(columns = colnames)\n \n for aResult in df['recommended']:\n aResult = aResult.to_dataframe()\n results = results.append(aResult)\n \n results = results.sort('score', ascending = False)\n\n return results.sort('score', ascending=False), 'item_id'", "def test_recommendation_evaluation_6(model):\n assert recommendation_evaluation(model, cn_test_users=None, k=2, n_pos_interactions=None, novelty=True) == \\\n {'HitRatio@2': 0.0, 'NDCG@2': 0.0, 'Precision@2': 0.0, 'Recall@2': 0.0}", "def make_first_recommendation() -> str:\r\n available_tanks = tank_search()\r\n fermentation_tanks = available_tanks[0]\r\n conditioning_tanks = available_tanks[1]\r\n fermentation_advice = available_tanks[2]\r\n conditioning_advice = available_tanks[3]\r\n more_beer = available_tanks[4]\r\n return render_template(\"make_first_recommendation.html\",\r\n fermentation_string=fermentation_tanks,\r\n conditioning_string=conditioning_tanks,\r\n first_advice=fermentation_advice,\r\n second_advice=conditioning_advice,\r\n next_page=more_beer)", "def check_recommendation_part(self, result):\n assert \"recommendation\" in result, \"Can not find the 'recommendation' node.\"\n recommendation = result[\"recommendation\"]\n assert recommendation == {} or \"component-analyses\" in recommendation, \\\n \"Wrong content of recommendation node\"\n if \"component_analyses\" in recommendation:\n self.check_component_analyses_recommendation(recommendation)", "def recommend(self, u):\n\n sims = {} #similarities\n recommendation = \"\"\n topScore = None\n start = time.time()\n for movie_id, rating in enumerate(u):\n if rating != 0:\n sims[movie_id] = {}\n for r_id, movie in enumerate(self.ratings):\n sims[movie_id][r_id] = self.distance(movie,self.ratings[movie_id])\n # print time.time() - start, \"distance time\"\n\n start = time.time()\n for i, movieRating in enumerate(self.ratings):\n iPrediction = 0\n for movieName in self.ratedMovieList:\n j = self.titlesOnly.index(movieName)\n iPrediction += sims[j][i]*1.0 * self.userRatingVector[j]\n if topScore is None or iPrediction > topScore:\n movie = self.titlesOnly[i]\n if movie not in self.ratedMovieList and movie not in self.recommendedMovies:\n # print(\"prediction score for %s is %.5f\" % (movie, iPrediction))\n topScore = iPrediction\n recommendation = movie\n # print time.time() - start, \"recommendation time\"\n self.recommendedMovies.append(recommendation)\n\n articlePattern = 
re.match('(.*), (the|a|an|el|la)', recommendation)\n if articlePattern is not None:\n recommendation = articlePattern.group(2) + \" \" + articlePattern.group(1)\n\n return recommendation", "def check_recommendation_in_result(context):\n json_data = context.response.json()\n result = json_data[\"recommendation\"]\n assert result == {}", "def popular_recommend(row):\n actual = new_purchase_row(row)\n return f1(actual, popular_products)", "def next_choice(self, opponent: 'Player') -> str:\n\n if self.adaptive_ai:\n # this is an adaptive_ai player, so see if it has collected\n # enough stats about the current opponent yet:\n if sum(self.opponent_choices[opponent.name].values()) > 5:\n # has enough samples to start adapting to the opponent\n print(' {} is trying to guess the opponent\\'s choice...'.format(self.name))\n\n # AI algorithm 1:\n # simply find the most-frequent selection by the opponent and\n # choose its killer.\n\n guess = self.opponent_choices[opponent.name].most_common(1)[0][0]\n ai_choice = weapon_to_beat(guess)\n print(' ', opponent.name, 'most often chose', guess, 'so he/she chose', ai_choice)\n return ai_choice\n\n # use the standard tendency distribution to choose a weapon:\n n = randint(1, self.randmax)\n if n <= self.tendency[0]:\n return 'rock'\n elif n <= self.tendency[0] + self.tendency[1]:\n return 'paper'\n else:\n return 'scissors'", "def __str__(self):\n return str(self.get_rating())", "def test_get_scored_recommendations_post(self):\n pass", "def measure(self, recommender):\n similarity = 0\n items_shown = recommender.items_shown\n if items_shown.size == 0:\n # at the beginning of the simulation, there are no recommendations yet\n self.observe(None)\n return\n\n for pair in self.pairs:\n itemset_1 = set(items_shown[pair[0], :])\n itemset_2 = set(items_shown[pair[1], :])\n common = len(itemset_1.intersection(itemset_2))\n union = len(itemset_1.union(itemset_2))\n similarity += common / union / len(self.pairs)\n self.observe(similarity)", "def measure(self, recommender):", "def recommended_action(self) -> str:\n return pulumi.get(self, \"recommended_action\")", "def svd_recommend_new(row):\n actual = new_purchase_row(row)\n recommended = svd_rec.recommend_new(u_dict[row[\"user_id\"]], N=10)\n recommended = [p_dict[r[0]] for r in recommended]\n return f1(actual, recommended)", "def GetRecommendation(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def classical_recommendation(self, user, rank, quantum_format=True):\n # Make sure the user and rank are ok\n self._validate_user(user)\n self._validate_rank(rank)\n\n # Do the classical SVD\n _, _, vmat = np.linalg.svd(self.matrix, full_matrices=True)\n\n # Do the projection\n recommendation = np.zeros_like(user, dtype=np.float64)\n for ii in range(rank):\n recommendation += np.dot(np.conj(vmat[ii]), user) * vmat[ii]\n\n if np.allclose(recommendation, np.zeros_like(recommendation)):\n raise RankError(\"Given rank is smaller than the rank of the preference matrix. 
Recommendations \"\n \"cannot be made for all users.\")\n\n # Return the squared values for probabilities\n probabilities = (recommendation / np.linalg.norm(recommendation, ord=2))**2\n\n # Return the vector if quantum_format is False\n if not quantum_format:\n return probabilities\n\n # Format the same as the quantum recommendation\n prods = []\n probs = []\n for (ii, p) in enumerate(probabilities):\n if p > 0:\n prods.append(ii)\n probs.append(p)\n return prods, probs", "def get_optimal_term_and_amortization_type(self):\n assert self.instance.ownership_time, \"ownership_time missing value.\"\n logger.info('SCENARIO-RECOMMENDATION: %s', self.instance.ownership_time)\n\n return self.SCENARIO_RECOMMENDATION[self.instance.ownership_time]", "def sorted_recommended_products(self):\n return [\n r.recommendation\n for r in self.primary_recommendations.select_related(\"recommendation\").all()\n ]" ]
[ "0.64533204", "0.62321275", "0.6155811", "0.6037975", "0.59529597", "0.58630055", "0.57988244", "0.5778563", "0.5745785", "0.5703222", "0.56989217", "0.56337386", "0.56248695", "0.557974", "0.55621016", "0.5547041", "0.55275047", "0.55242544", "0.5524008", "0.55168205", "0.5512141", "0.55120516", "0.5480696", "0.5479235", "0.54697424", "0.54624045", "0.5445806", "0.5445535", "0.54224813", "0.5404911" ]
0.6441277
1
Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC command.
def on_exchange_declareok(self, _unused_frame):
    self._channel_ctrl.queue_declare(
        '',
        exclusive=True,
        auto_delete=True,
        callback=self.on_queue_declareok
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_exchange_declareok(self, unused_frame):\n self.logger.info('exchange declared')\n self.setup_queue()", "def on_exchange_declareok(self, unused_frame):\n LOGGER.debug('Exchange declared')\n\n for queue in self._handlers.keys():\n self._channel.queue_declare(self.on_queue_declareok, queue)\n\n RabbitMQ.on_exchange_declareok(self, unused_frame)", "async def declare_exchange(self) -> asyncio.Future:\n # pylint: disable=protected-access\n future = self._backend._create_future()\n\n def on_declare_exchange(frame):\n future.set_result(frame)\n self.log.debug('Exchange `%s` declared ok', self.exchange)\n\n self._channel.exchange_declare(\n on_declare_exchange,\n self.exchange,\n self.exchange_type\n )\n\n return future", "def declare_queue(self):\n\n self._channel.queue_declare(queue=self._queue_name, durable=True)\n print(\"Queue declared....\")", "def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)", "def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)", "def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)", "async def declare(self) -> 'Queue':\n # we are relying to this in other functions\n self._channel = await self._backend.channel()\n self.log.debug(\"Channel acquired CHANNEL%i\",\n self._channel.channel_number)\n\n if self.exchange:\n await self.declare_exchange()\n\n if self.name is not None:\n await self.declare_queue()\n\n if self.exchange:\n await self.bind_queue()\n\n return self", "def on_queue_declareok(self, method_frame):\n self.logger.info('binding %s and %s together with %s', self.exchange, self.queue, self.routing_key)\n self._channel.queue_bind(self.on_bindok, self.queue, self.exchange, self.routing_key)", "def _declare(self, passive=False):\n arguments = dict(self._arguments)\n if self._expires:\n arguments['x-expires'] = self._expires\n if self._message_ttl:\n arguments['x-message-ttl'] = self._message_ttl\n if self._max_length:\n arguments['x-max-length'] = self._max_length\n if self._dlx:\n arguments['x-dead-letter-exchange'] = self._dlx\n if self._dlr:\n arguments['x-dead-letter-routing-key'] = self._dlr\n return specification.Queue.Declare(queue=self.name,\n durable=self._durable,\n passive=passive,\n exclusive=self._exclusive,\n auto_delete=self._auto_delete,\n arguments=arguments)", "def on_queue_declared(frame):\n channel.basic_consume(handle_delivery, 
queue='test')", "def declareDone(self, cmd):\n pass", "def setup_exchange(self):\n self.logger.info('declaring exchange %s', self.exchange)\n self._channel.exchange_declare(self.on_exchange_declareok, self.exchange, self.exchange_type)", "def on_queue_declareok(self, method_frame):\n # LOGGER.info('Binding %s to %s with %s',\n # self.EXCHANGE, self.QUEUE, self.ROUTING_KEY)\n # self._channel.queue_bind(self.on_bindok, self.QUEUE,\n # self.EXCHANGE, self.ROUTING_KEY)\n logger.info(\n \"[{}] Binding to {} with queue {} and routing key \\\"\\\"\".format(self.bot_id, self.exchange,\n self.queue_name))\n\n self._channel.queue_bind(self.on_bindok,\n queue=self.queue_name,\n exchange=self.exchange,\n routing_key=\"\")", "def setup_exchange(self, channel, exchange_name, exchange_type):\n logger.info('Declaring exchange : %s', exchange_name)\n # Note: using functools.partial is not required, it is demonstrating\n # how arbitrary data can be passed to the callback when it is called\n channel.exchange_declare(exchange=exchange_name,\n exchange_type=exchange_type,\n durable = True)", "def declare(self):\n self.channel.queue_declare(queue='files_to_database')", "def perform_setup():\n global credentials, connection, channel\n credentials = pika.PlainCredentials('guest', 'guest') # AUTH via Default guest user on RabbitMQ\n connection = pika.BlockingConnection(pika.ConnectionParameters(\"127.0.0.1\", 5672, '/', credentials)) # Using rabbit-mq container name to access the RabbitMQ container from other containers\n channel = connection.channel()\n channel.queue_declare(queue='poll', durable=True)", "def on_queue_declared(self, frame):\n\t\tself.channel.basic_qos(prefetch_count=1)\n\t\tself.channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\t\tself.consumer_tag = self.channel.basic_consume(\n\t\t\tself.handle_delivery, \n\t\t\tframe.method.queue\n\t\t)", "def on_channel_open(new_channel):\n global channel\n channel = new_channel\n channel.queue_declare(queue=\"test\", durable=True, exclusive=False, auto_delete=False, callback=on_queue_declared)", "def setup_queue(self, method_frame):\n logger.info('Declaring queue %s', self.queue_name)\n # self._channel.queue_declare(self.on_queue_declareok, queue_name)\n\n self._channel.queue_declare(self.on_queue_declareok, exclusive=False, durable=True, queue=self.queue_name)", "def task_done(self):\n if self.message is None:\n raise Exception('no message to acknowledge')\n self.handle.delete_message(self.message)\n self.message = None", "def on_queue_declareok(self, method_frame):\n\n for queue in self._handlers.keys():\n LOGGER.debug('Binding %s to %s with %s',\n self.EXCHANGE, queue, self.ROUTING_KEY)\n self._channel.queue_bind(self.on_bindok, queue,\n self.EXCHANGE, self.ROUTING_KEY)", "def acq_done(self, pvname=None, **kws):\n if kws['value'] == 0:\n self.eventq.put('finish')", "def acknowledged(self):\n ...", "async def declare_and_consume(self, handler):\n try:\n await self.declare()\n self.consume(handler)\n except pika.exceptions.ChannelClosed: # pragma: no cover\n self.reconnect()", "def setup_exchange(self):\n LOGGER.info('Declaring exchange: %s', self.topic_ctrl)\n # Note: using functools.partial is not required, it is demonstrating\n # how arbitrary data can be passed to the callback when it is called\n\n self._channel_ctrl.exchange_declare(\n exchange=self.topic_ctrl,\n exchange_type='topic',\n callback=self.on_exchange_declareok)", "def on_bindok(self, unused_frame):\n\n self.logger.info('queue bound')\n if self.acked:\n # if we wish to care about 
the servers replies, this is were we set up things\n self.logger.info('issuing confirm.select RPC')\n self._channel.confirm_delivery(self.on_delivery_confirmation)\n\n if self.sender:\n pass\n self.send()\n else:\n self.start_consuming()", "async def _connect(self):\n self._connection = await connect_robust(self._connection_string)\n self._channel = await self._connection.channel()\n await self._channel.declare_queue(self._queue, durable=True, arguments={'x-max-priority': 10})", "def rpc_sendback(rpc_flag):\n credential = pika.PlainCredentials('guest', 'guest')\n rpc_connection = pika.BlockingConnection(pika.ConnectionParameters(\n host='localhost', port=5672, virtual_host='/', credentials=credential))\n rpc_channel = rpc_connection.channel()\n rpc_channel.queue_declare(queue=str(rpc_flag))\n #send message to the command center using basic_publish\n if rpc_flag == \"c02\":\n rpc_channel.basic_publish(exchange='', routing_key=str(\n rpc_flag), body='Drone has reached the delivery address')\n elif rpc_flag == \"c03\":\n rpc_channel.basic_publish(exchange='', routing_key=str(rpc_flag),\n body='Drone has unloaded the item')\n elif rpc_flag == \"c04\":\n rpc_channel.basic_publish(exchange='', routing_key=str(rpc_flag),\n body='Drone has reached the parking spot and available for next instruction')", "def process_create_q(self):\n while not self.create_req.empty():\n [new_name, new_type, mvt] = self.create_req.get()\n self.create_new_peer(new_type, new_name, mvt)\n self.create_req.task_done()" ]
[ "0.715761", "0.6964966", "0.64156866", "0.61320156", "0.6064579", "0.6064579", "0.6064579", "0.6040164", "0.60303724", "0.6024567", "0.6008154", "0.59611946", "0.5960115", "0.5942857", "0.59143645", "0.5890094", "0.58587295", "0.58443904", "0.5769042", "0.57334083", "0.56965804", "0.56940883", "0.5686021", "0.56322503", "0.5595592", "0.5518226", "0.55104", "0.54856807", "0.5483979", "0.54575163" ]
0.7317034
0
Get next report ID or False if not available
def next_id(self):
    try:
        return Report.objects.filter(id__gt=self.id).order_by("id").first().id
    except Exception:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nextId(self):\r\n \r\n nextId = -1\r\n if self._wizard.currentId() == SOURCE_PAGE_ID:\r\n nextId = TARGET_PAGE_ID\r\n elif self._wizard.currentId() == TARGET_PAGE_ID:\r\n nextId = DATASTORE_PAGE_ID\r\n elif self._wizard.currentId() == DATASTORE_PAGE_ID:\r\n nextId = PROPERTY_PAGE_ID\r\n return nextId", "def get_report_id(self, reports):\n matching_reports = [\n report for report in reports if report.get('title') in [\n self.api_report_id,\n self.api_test_report_id\n ]\n ]\n\n if self.electiondate: # Can also use the explicit 'if is not none'.\n matching_reports = [\n report for report in matching_reports\n if report.get('electionDate') == self.electiondate\n ]\n\n if matching_reports:\n id = matching_reports[0].get('id').rsplit('/', 1)[-1]\n return id\n\n return None", "def _get_report_id(self):\n report_id = ''\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_BODY_REPORT) \\\n and 'payment' in self._report_data:\n report_id = self._report_data['payment']['invoiceId']\n elif self._report_key == ReportTypes.MHR_REGISTRATION and self._report_data.get('mhrNumber'):\n report_id = self._report_data.get('mhrNumber')\n return report_id", "def previous_id(self):\n try:\n return Report.objects.filter(id__lt=self.id).order_by(\"-id\").first().id\n except Exception:\n return False", "def _next_id(self, identifier: Identifier) -> Optional['Identifier']:\n next_id = None\n if identifier.year is not None and \\\n identifier.month is not None and \\\n identifier.num is not None:\n new_year = identifier.year\n new_month = identifier.month\n new_num = identifier.num + 1\n if (identifier.is_old_id and new_num > 999) \\\n or (not identifier.is_old_id\n and identifier.year < 2015\n and new_num > 9999) \\\n or (not identifier.is_old_id\n and identifier.year >= 2015 and new_num > 99999):\n new_num = 1\n new_month = new_month + 1\n if new_month > 12:\n new_month = 1\n new_year = new_year + 1\n\n if identifier.is_old_id:\n next_id = '{}/{:02d}{:02d}{:03d}'.format(\n identifier.archive, new_year % 100, new_month, new_num)\n else:\n if new_year >= 2015:\n next_id = '{:02d}{:02d}.{:05d}'.format(\n new_year % 100, new_month, new_num)\n else:\n next_id = '{:02d}{:02d}.{:04d}'.format(\n new_year % 100, new_month, new_num)\n try:\n return Identifier(arxiv_id=next_id)\n except IdentifierException:\n return None\n else:\n return None", "def latest_report_id(self) -> str:\n return pulumi.get(self, \"latest_report_id\")", "def _GetNextId(self):\r\n ret = self.next_id\r\n self.next_id += 1\r\n return str(self.next_id)", "def get_next_account_id():\n conn = get_connect()\n cursor = conn.execute(\"SELECT accountId FROM account WHERE isSearched = 0 LIMIT 1\")\n result_list = cursor.fetchone()\n conn.close()\n if result_list is None:\n print(\"no more accountId to be searched\")\n return None\n else:\n account_id = result_list[0]\n return account_id", "def has_next(self):\n if self._count is not None:\n # If count is available, use it\n return bool(self._count)\n else:\n # otherwise we have no idea\n return True", "def get_next_submission(self):\r\n success, _next_submission = self.peer_grading.get_next_submission({'location': 'blah'})\r\n self.assertEqual(success, True)", "def get_next_id(identifier: Identifier) -> Optional[Identifier]:\n return current_session().get_next_id(identifier)", "def next_id(self):\n return self.max_id + 1", "def next_id(self):\n next_id = self._nextid\n self._nextid += 1\n return next_id", "def next_jid(self):\n return self._next_jid", "def 
reserve_next_run_id(self):\n query = \"SELECT NEXTVAL(pg_get_serial_sequence('task_history', 'run_id'))\"\n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n return cur.fetchone()[0]", "def next_link(self) -> Optional[str]:\n return pulumi.get(self, \"next_link\")", "def has_next():\n\n return True", "def next_id(self):\n self.id_counter += 1\n return self.id_counter - 1", "def describeNextReport(self, simulation): \n steps = self._reportInterval - simulation.currentStep%self._reportInterval\n return (steps, True, False, False, False)", "def get_next_id(self, identifier: Identifier) -> Optional['Identifier']:\n next_id = self._next_id(identifier)\n if not next_id:\n return None\n\n path = self._get_parent_path(identifier=next_id)\n file_path = os.path.join(path, f'{next_id.filename}.abs')\n if os.path.isfile(file_path):\n return next_id\n\n next_yymm_id = self._next_yymm_id(identifier)\n if not next_yymm_id:\n return None\n\n path = self._get_parent_path(identifier=next_yymm_id)\n file_path = os.path.join(path, f'{next_yymm_id.filename}.abs')\n if os.path.isfile(file_path):\n return next_yymm_id\n\n return None", "def get_next_match_id():\n conn = get_connect()\n cursor = conn.execute(\"SELECT matchId FROM match WHERE isSearched = 0 LIMIT 1\")\n result_list = cursor.fetchone()\n conn.close()\n if result_list is None:\n print(\"no more matchId to be searched\")\n return None\n else:\n match_id = result_list[0]\n return match_id", "def _get_next_friendly_id(context):\n from indico.modules.events.surveys.models.surveys import Survey\n survey_id = context.current_parameters['survey_id']\n assert survey_id is not None\n return increment_and_get(Survey._last_friendly_submission_id, Survey.id == survey_id)", "def nextId(self):\n if len(ExportDialog.exportSubtypes[ExportDialog.currentType]) > 1:\n return ExportDialog.subtypePage\n return ExportDialog.optionPage", "def has_next():", "def _determine_next_ott_id(self):\n if self._doc_counter_lock is None:\n self._doc_counter_lock = Lock()\n with self._doc_counter_lock:\n _LOG.debug('Reading \"{}\"'.format(self._id_minting_file))\n noi_contents = self._read_master_branch_resource(self._id_minting_file, is_json=True)\n if noi_contents:\n self._next_ott_id = noi_contents['next_ott_id']\n else:\n raise RuntimeError('Stored ottid minting file not found (or invalid)!')", "def reserve_next_agent_id(self):\n query = \"SELECT NEXTVAL(pg_get_serial_sequence('agents', 'agent_id'))\"\n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n return cur.fetchone()[0]", "def get_next_id(self):\n con = self.c._connect()\n last_id = self.c.get_last_id(con.cursor())\n con.close()\n return last_id + 1", "def has_report(self):\n return self.report is not None", "def get_next_position(self):\n return self.record_manager.get_max_record_id() or 0", "def get_next_id():\n with open(WORK_LOG_FILENAME, 'r') as work_log:\n work_log_reader = csv.DictReader(work_log)\n entry_id = 0\n for entry in work_log_reader:\n if int(entry['id']) > entry_id:\n entry_id = int(entry['id'])\n entry_id += 1\n return entry_id" ]
[ "0.62095916", "0.6153769", "0.5885677", "0.5764439", "0.573282", "0.56805384", "0.5647685", "0.5569167", "0.5534752", "0.55111814", "0.5475925", "0.5458879", "0.5441728", "0.5426077", "0.5425727", "0.5425234", "0.5410581", "0.5389253", "0.5372817", "0.5361491", "0.5344166", "0.5321446", "0.5319763", "0.5315735", "0.5304939", "0.528354", "0.5280848", "0.5275618", "0.52754176", "0.52722824" ]
0.8004583
0
Get previous report ID or False if not available
def previous_id(self):
    try:
        return Report.objects.filter(id__lt=self.id).order_by("-id").first().id
    except Exception:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_id(self):\n try:\n return Report.objects.filter(id__gt=self.id).order_by(\"id\").first().id\n except Exception:\n return False", "def get_report_id(self, reports):\n matching_reports = [\n report for report in reports if report.get('title') in [\n self.api_report_id,\n self.api_test_report_id\n ]\n ]\n\n if self.electiondate: # Can also use the explicit 'if is not none'.\n matching_reports = [\n report for report in matching_reports\n if report.get('electionDate') == self.electiondate\n ]\n\n if matching_reports:\n id = matching_reports[0].get('id').rsplit('/', 1)[-1]\n return id\n\n return None", "def latest_report_id(self) -> str:\n return pulumi.get(self, \"latest_report_id\")", "def _get_report_id(self):\n report_id = ''\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_BODY_REPORT) \\\n and 'payment' in self._report_data:\n report_id = self._report_data['payment']['invoiceId']\n elif self._report_key == ReportTypes.MHR_REGISTRATION and self._report_data.get('mhrNumber'):\n report_id = self._report_data.get('mhrNumber')\n return report_id", "def has_report(self):\n return self.report is not None", "def ParentReportNumber(self, default=None):\n return self.data.get('parent_report_number', default)", "def ParentReportNumber(self, default=None):\n return self.data.get('parent_report_number', default)", "def _previous_id(self, identifier: Identifier) -> Optional['Identifier']:\n previous_id = None\n if identifier.year is not None and \\\n identifier.month is not None and \\\n identifier.num is not None:\n new_year = identifier.year\n new_month = identifier.month\n new_num = identifier.num - 1\n if new_num == 0:\n new_month = new_month - 1\n if new_month == 0:\n new_month = 12\n new_year = new_year - 1\n\n if identifier.is_old_id:\n if new_num == 0:\n new_num = 999\n previous_id = '{}/{:02d}{:02d}{:03d}'.format(\n identifier.archive, new_year % 100, new_month, new_num)\n else:\n if new_year >= 2015:\n if new_num == 0:\n new_num = 99999\n previous_id = '{:02d}{:02d}.{:05d}'.format(\n new_year % 100, new_month, new_num)\n else:\n if new_num == 0:\n new_num = 9999\n previous_id = '{:02d}{:02d}.{:04d}'.format(\n new_year % 100, new_month, new_num)\n try:\n return Identifier(arxiv_id=previous_id)\n except IdentifierException:\n return None\n else:\n return None", "def _get_existing_report(self, mask, report):\r\n for existing_report in self._reports:\r\n if existing_report['namespace'] == report['namespace']:\r\n if mask == existing_report['queryMask']:\r\n return existing_report\r\n return None", "def _get_existing_report(self, mask, report):\n for existing_report in self._reports:\n if existing_report['namespace'] == report['namespace']:\n if mask == existing_report['queryMask']:\n return existing_report\n return None", "def PAID(self):\n if self.session.get('last_bill_result', None) is None:\n return False\n return self.session['last_bill_result'] == \"\"", "def get_report_file_name(self):\n if os.path.isfile(self.REPORT_FILE_PATH):\n print(\"'{}' is already exist!\".format(self.REPORT_FILE_PATH))\n report_file = self.prompt_report_file_name()\n else:\n report_file = self.REPORT_FILE_PATH\n return report_file", "def last_known_position(self):\n try:\n last_filed = self.report_set.filter(zombies_only=False)\n last_filed = last_filed.order_by('-reported_date')[0]\n except IndexError:\n last_filed = None\n try:\n last_spotted = self.reported_at.order_by('-reported_date')[0]\n except IndexError:\n last_spotted = None\n if last_filed is None and 
last_spotted is None:\n return u\"Never seen\"\n else:\n if last_filed is None:\n return last_spotted\n elif last_spotted is None:\n return last_filed\n else:\n if last_filed.reported_date >= last_spotted.reported_date:\n return last_filed\n else:\n return last_spotted", "def has_stockrecords(self):\n try:\n a=self.stockrecords.pk\n return True\n except:\n return False", "def has_previous(self):\n return self.current_page > 1", "def has_previous(self):\n return self.page > 1", "def IsLocalRerun(self):\n return self.prev_test_run_key is not None", "def get_previous_id(identifier: Identifier) -> Optional[Identifier]:\n return current_session().get_previous_id(identifier)", "def report_shared(self, reportid):\r\n return reports.ReportsShared(self, reportid)", "def __test_gen_report_id_check():\n # all fresh\n report = dp.Report(md_block, md_block, md_block)\n assert_report(report) # expected_id_count=5)\n # 2 fresh\n report = dp.Report(md_block, md_block_id, md_block)\n assert_report(report) # expected_id_count=4)\n # 0 fresh\n report = dp.Report(md_block_id, dp.Text(\"test\", name=\"test-2\"))\n assert_report(report) # expected_id_count=2)", "def test_get_report_file_id(self):\n vt_analyses = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses.get_report('test_object_id')\n http_err = vt_analyses.get_last_http_error()\n self.assertEqual(http_err, vt_analyses.HTTP_OK)", "def update_report_history(self, request_result):\n report_info = request_result.get('ReportInfo', {})\n report_request_info = request_result.get('ReportRequestInfo', {})\n request_id = report_state = report_id = False\n if report_request_info:\n request_id = str(report_request_info.get('ReportRequestId', {}).get('value', ''))\n report_state = report_request_info.get('ReportProcessingStatus', {}).get('value',\n '_SUBMITTED_')\n report_id = report_request_info.get('GeneratedReportId', {}).get('value', False)\n elif report_info:\n report_id = report_info.get('ReportId', {}).get('value', False)\n request_id = report_info.get('ReportRequestId', {}).get('value', False)\n\n if report_state == '_DONE_' and not report_id:\n self.get_report_list()\n vals = {}\n if not self.report_request_id and request_id:\n vals.update({'report_request_id': request_id})\n if report_state:\n vals.update({'state': report_state})\n if report_id:\n vals.update({'report_id': report_id})\n self.write(vals)\n return True", "def generate_report(self):\n if self.submission_metadata:\n return self._submission_allowed()[1]", "def find_issue_id(self):", "def _is_last_dataset_id(self, instance_id):\n res = self._db.Query(\"\"\"SELECT report_data_set_instance_id\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if not res:\n return False\n last_data_set_instance = self._db.record[0]\n if last_data_set_instance['report_data_set_instance_id'] == instance_id:\n return True\n\n return False", "def ReporterReference(pidofreporter):\n try:\n pid_list = []\n Mcafee_Reporter_pid = getpid(pidofreporter)\n print \"Now\",Mcafee_Reporter_pid\n listofpid = list(Mcafee_Reporter_pid)\n pid_list.append(listofpid[1])\n split_pids_by_space = [words for segments in pid_list for words in segments.split()]\n print \"split_pids_by_space\", split_pids_by_space\n reporter_current_pid = int(''.join(map(str,split_pids_by_space[1])))\n print \"reporter_current_pid\", reporter_current_pid\n Mcafee_Reporter_Reference = getAppRefByPidofapp(reporter_current_pid)\n 
#print \"Mcafee_Reporter_Reference\", Mcafee_Reporter_Reference\n except Exception as er:\n return False\n print \"Not able to get Reporter details\"\n print Mcafee_Reporter_Reference\n return Mcafee_Reporter_Reference", "def get_last_worked_on_step_id(self):\n logger.debug(\"Searching for ID of the step last worked on.\")\n last_id = None\n for step in self.steps:\n if any((task for task in step.tasks if task.status == \"DONE\")) and (not last_id or step.id > last_id):\n last_id = step.id\n if not last_id:\n raise ValueError(\"No ID is found for last worked on step for ticket {}\".format(self.id))\n return last_id", "def is_retired(self):\n if str.__str__(self) in UID_dictionary:\n return bool(UID_dictionary[self][3])\n\n return False", "def get_already_raised(self):\r\n return self.already_raised", "def reported_by(self, user):\n return Report.objects.filter(recipe=self, chef=user).exists()" ]
[ "0.69261867", "0.6367051", "0.6234315", "0.61751926", "0.600833", "0.5732639", "0.5732639", "0.5496686", "0.54920655", "0.5474516", "0.5460659", "0.5322554", "0.5255835", "0.52374005", "0.519327", "0.5184205", "0.516578", "0.5152253", "0.51465", "0.51069415", "0.5098686", "0.5068081", "0.5060786", "0.5055873", "0.5053999", "0.5050696", "0.50506514", "0.50493175", "0.5011197", "0.50044596" ]
0.76356864
0
Constructs an HttpRequest from a string containing an entire HTTP request
def deserialize(cls, data: bytes) -> HttpRequest: try: raw = data.decode("utf-8") raw_headers, raw_body = raw.split("\r\n\r\n") header_lines = raw_headers.split("\r\n") method, path, protocol = header_lines[0].split() headers = HttpRequest._parse_headers(header_lines[1:]) if "content-length" in headers: body = raw_body.encode("utf-8") else: body = b"" return HttpRequest(method, path, headers, body) except Exception as err: raise exceptions.HttpRequestParsingException( f"Failed to parse {data.decode('utf-8')}" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_from_text(text):\n lines = text.splitlines()\n match = re.search('^([a-z]+) (.*) (http/[0-9]\\.[0-9])$', lines[0], re.I)\n method, path, version = match.groups()\n headers = {}\n for idx, line in enumerate(lines[1:], start=1):\n if not line:\n break\n hdr, val = [item.strip() for item in line.split(':', 1)]\n hdr = hdr.lower()\n vals = headers.setdefault(hdr, [])\n vals.append(val)\n headers = {hdr: ','.join(sorted(vals)) for hdr, vals in headers.items()}\n check_url = urlparse(path)\n if check_url.scheme and check_url.netloc:\n # absolute URL in path\n url = path\n else:\n # otherwise need to try to construct url from path and host header\n url = ''.join(['http://' if 'host' in headers else '',\n headers.get('host', ''),\n path])\n body = '\\n'.join(lines[idx+1:])\n req = requests.Request(method, url, headers=headers, data=body)\n return req.prepare()", "def load_request_string(string, format=FORMAT_PEM):\n bio = BIO.MemoryBuffer(string)\n return load_request_bio(bio, format)", "def parse_request(msg):\n start_line, header, body = _parse_message(msg)\n request, path = _parse_request_line(start_line)\n return Request(request, path, header, body)", "def to_httpx_request(cls, **kwargs):\n request = kwargs[\"request\"]\n raw_url = (\n request.url.scheme,\n request.url.host,\n request.url.port,\n request.url.target,\n )\n return httpx.Request(\n request.method,\n parse_url(raw_url),\n headers=request.headers,\n stream=request.stream,\n extensions=request.extensions,\n )", "def build_request(uri='/'):\r\n path, _, querystring = uri.partition('?')\r\n return WSGIRequest({\r\n 'CONTENT_TYPE': 'text/html; charset=utf-8',\r\n 'PATH_INFO': path,\r\n 'QUERY_STRING': querystring,\r\n 'REMOTE_ADDR': '127.0.0.1',\r\n 'REQUEST_METHOD': 'GET',\r\n 'SCRIPT_NAME': '',\r\n 'SERVER_NAME': 'testserver',\r\n 'SERVER_PORT': '80',\r\n 'SERVER_PROTOCOL': 'HTTP/1.1',\r\n 'wsgi.version': (1, 0),\r\n 'wsgi.url_scheme': 'http',\r\n 'wsgi.input': FakePayload(b''),\r\n 'wsgi.errors': six.StringIO(),\r\n 'wsgi.multiprocess': True,\r\n 'wsgi.multithread': False,\r\n 'wsgi.run_once': False,\r\n })", "def request_factory(environ):\n request = Request(environ)\n _LOG.debug('trunctated request body: {b}'.format(b=request.body[:1000]))\n return request", "def urllib_req_to_req(urllib_request):\n from ..networking import Request\n from ..utils.networking import HTTPHeaderDict\n return Request(\n urllib_request.get_full_url(), data=urllib_request.data, method=urllib_request.get_method(),\n headers=HTTPHeaderDict(urllib_request.headers, urllib_request.unredirected_hdrs),\n extensions={'timeout': urllib_request.timeout} if hasattr(urllib_request, 'timeout') else None)", "def _GenHttpRequestProto(self):\n request = jobs_pb2.HttpRequest()\n request.source_ip = \"127.0.0.1\"\n request.user_agent = \"Firefox or something\"\n request.url = \"http://test.com/test?omg=11%45x%20%20\"\n request.user = \"anonymous\"\n request.timestamp = int(time.time() * 1e6)\n request.size = 1000\n return request", "def make_request_message(request):\n url = urlparse(request.url)\n request_headers = dict(request.headers)\n if 'Host' not in request_headers:\n request_headers['Host'] = url.netloc\n return HTTPMessage(\n line='{method} {path} HTTP/1.1'.format(\n method=request.method,\n path=url.path or '/'),\n headers=NEW_LINE.join(str('%s: %s') % (name, value)\n for name, value\n in request_headers.items()),\n body=request._enc_data,\n content_type=request_headers.get('Content-Type')\n )", "def build_request(url, headers, body, 
initial_request: Request) -> Request:\n updated_request = Request(\n method=initial_request.method,\n url=url,\n headers=headers,\n content=body\n )\n\n if hasattr(initial_request, 'extensions'):\n updated_request.extensions = initial_request.extensions\n\n return updated_request", "def createRequest(test, url, headers=None):\n request = HTTPRequest(url=url)\n if headers: request.headers=headers\n test.record(request, HTTPRequest.getHttpMethodFilter())\n return request", "def __call__(self, requestStr):\n return self.connection.Request(requestStr)", "def parse_request(first_line):\n command = None # set in case of error on the first line\n request_version = version = default_request_version\n close_connection = 1\n path = \"\"\n requestline = first_line.rstrip('\\r\\n')\n words = requestline.split()\n if len(words) == 3:\n command, path, version = words\n if version[:5] != 'HTTP/':\n easyHandler.send_error(400, \"Bad request version (%r)\" % version)\n return False\n try:\n base_version_number = version.split('/', 1)[1]\n version_number = base_version_number.split(\".\")\n # RFC 2145 section 3.1 says there can be only one \".\" and\n # - major and minor numbers MUST be treated as\n # separate integers;\n # - HTTP/2.4 is a lower version than HTTP/2.13, which in\n # turn is lower than HTTP/12.3;\n # - Leading zeros MUST be ignored by recipients.\n if len(version_number) != 2:\n raise ValueError\n version_number = int(version_number[0]), int(version_number[1])\n except (ValueError, IndexError):\n easyHandler.send_error(400, \"Bad request version (%r)\" % version)\n return False\n if version_number >= (1, 1) and protocol_version >= \"HTTP/1.1\":\n close_connection = 0\n if version_number >= (2, 0):\n easyHandler.send_error(505,\n \"Invalid HTTP Version (%s)\" % base_version_number)\n return False\n elif len(words) == 2:\n command, path = words\n close_connection = 1\n if command != 'GET':\n easyHandler.send_error(400, \"Bad HTTP/0.9 request type (%r)\" % command)\n return False\n elif not words:\n return False\n else:\n easyHandler.send_error(400, \"Bad request syntax (%r)\" % requestline)\n return easyRequest(command, path, version)", "def parse_http_request(source_addr, http_raw_data):\n r1 = http_raw_data.split('\\n')[0]\n method = r1.split()[0]\n path = r1.split()[1]\n if path == \"/\":\n r2 = http_raw_data.split('\\n')[1]\n host = r2.split()[0]\n if host == \"Host:\":\n host = re.sub(\"[:]\", \"\", host)\n r3 = r2.split(':')\n url = r2.split()[1]\n headers = []\n r3 = ' '.join(r3).replace('\\r', '').split()\n headers.append(r3)\n headers.append(url)\n headers\n requested_host = headers[0:]\n requested_path = path\n portno = re.findall(r'[0-9]+', r2)\n if portno == []:\n portno = \"80\"\n requested_port = portno\n requested_host = url\n print(\"*\" * 50)\n print(\"[parse_http_request] Implement me!\")\n print(\"*\" * 50)\n # Replace this line with the correct values.\n request_info = HttpRequestInfo(source_addr, method, requested_host, requested_port, requested_path, headers)\n return request_info", "def parse_request(request: bytes) -> Tuple[RequestLineHeader, str]:\r\n\r\n request = request.decode('ascii')\r\n print(request)\r\n split_request = request.split('\\r\\n')\r\n method, path, http_version = split_request[0].split(' ')\r\n path = ROOT_DIR + ('index.html' if path == '/' else path[1:])\r\n args = split_request[-1] if method == 'POST' else ''\r\n\r\n return RequestLineHeader(method, path, http_version), args", "def create(app, client_stream, client_addr, client_sock=None):\n # 
request line\n line = Request._safe_readline(client_stream).strip().decode()\n if not line:\n return None\n method, url, http_version = line.split()\n http_version = http_version.split('/', 1)[1]\n\n # headers\n headers = NoCaseDict()\n while True:\n line = Request._safe_readline(client_stream).strip().decode()\n if line == '':\n break\n header, value = line.split(':', 1)\n value = value.strip()\n headers[header] = value\n\n return Request(app, client_addr, method, url, http_version, headers,\n stream=client_stream, sock=client_sock)", "def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = HTTP_1_1,\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)", "def _CreateRequest(self, url, data=None):\r\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\r\n req = urllib2.Request(url, data=data, headers={\"Accept\": \"text/plain\"})\r\n if self.host_override:\r\n req.add_header(\"Host\", self.host_override)\r\n for key, value in self.extra_headers.iteritems():\r\n req.add_header(key, value)\r\n return req", "def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = b'HTTP/1.1',\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)", "def from_json(json_string: str) -> AnalysisRequest:\n dict_obj = json.loads(json_string)\n\n # make sure the required parameters are present\n required_fields = [\"request_id\"]\n\n for field in required_fields:\n if field not in dict_obj:\n raise Exception(\"JSON string does not represent a DatasetRequest object. 
Missing \" + field)\n\n # create the object\n request_obj = AnalysisRequest(request_id=dict_obj[\"request_id\"])\n\n return request_obj", "def parse_request(self):\r\n self.command = None # set in case of error on the first line\r\n self.request_version = version = self.default_request_version\r\n self.close_connection = 1\r\n requestline = self.raw_requestline\r\n # hack: quick and dirty fix for doubled request with bad data\r\n ok = 0\r\n if requestline.startswith(\"GET\"):\r\n ok += 1\r\n if requestline.startswith(\"POST\"):\r\n ok += 1\r\n if requestline.startswith(\"QUIT\"):\r\n ok += 1\r\n if ok == 0:\r\n return False\r\n # hack ends here\r\n requestline = requestline.rstrip('\\r\\n')\r\n self.requestline = requestline\r\n words = requestline.split()\r\n if len(words) == 3:\r\n command, path, version = words\r\n if version[:5] != 'HTTP/':\r\n self.send_error(400, \"Bad request version (%r)\" % version)\r\n return False\r\n try:\r\n base_version_number = version.split('/', 1)[1]\r\n version_number = base_version_number.split(\".\")\r\n # RFC 2145 section 3.1 says there can be only one \".\" and\r\n # - major and minor numbers MUST be treated as\r\n # separate integers;\r\n # - HTTP/2.4 is a lower version than HTTP/2.13, which in\r\n # turn is lower than HTTP/12.3;\r\n # - Leading zeros MUST be ignored by recipients.\r\n if len(version_number) != 2:\r\n raise ValueError\r\n version_number = int(version_number[0]), int(version_number[1])\r\n except (ValueError, IndexError):\r\n self.send_error(400, \"Bad request version (%r)\" % version)\r\n return False\r\n if version_number >= (1, 1) and self.protocol_version >= \"HTTP/1.1\":\r\n self.close_connection = 0\r\n if version_number >= (2, 0):\r\n self.send_error(505,\r\n \"Invalid HTTP Version (%s)\" % base_version_number)\r\n return False\r\n elif len(words) == 2:\r\n command, path = words\r\n self.close_connection = 1\r\n if command != 'GET':\r\n self.send_error(400,\r\n \"Bad HTTP/0.9 request type (%r)\" % command)\r\n return False\r\n elif not words:\r\n return False\r\n else:\r\n self.send_error(400, \"Bad request syntax (%r)\" % requestline)\r\n return False\r\n self.command, self.path, self.request_version = command, path, version\r\n\r\n # Examine the http_request_headers and look for a Connection directive\r\n self.headers = self.MessageClass(self.rfile, 0)\r\n\r\n conntype = self.headers.get('Connection', \"\")\r\n if conntype.lower() == 'close':\r\n self.close_connection = 1\r\n elif conntype.lower() == 'keep-alive' and self.protocol_version >= \"HTTP/1.1\":\r\n self.close_connection = 0\r\n return True", "def parseRequest(req):\n\treqHeaders = {}\n\treqLine = ''\n\tlineNum = 0\n\n\tfor line in req.splitlines():\n\t\tif line == '':\n\t\t\tbreak\n\t\telif lineNum == 0:\n\t\t\treqLine = line\n\t\t\tlineNum = 1\n\t\telse:\n\t\t\tsplitLine = line.split(' ', 1)\n\t\t\treqHeaders[splitLine[0]] = splitLine[1]\n\n\tsplitReqLine = reqLine.split(' ')\n\tmethod = splitReqLine[0]\n\tpath = splitReqLine[1]\n\tversion = splitReqLine[2]\n\n\treturn method, path, version, reqHeaders", "def _get_request(self, line, delete_sending=False):\n line = line.replace('\\\\r', '\\r')\n line = line.replace('\\\\n', '\\n')\n # Remove the trailing \\n\n line = line[:-1]\n if delete_sending:\n return ParsedRequest(line.split(SENDING)[1].strip(\"'\"), ignore_dynamic_objects=True)\n return ParsedRequest(line, ignore_dynamic_objects=True)", "def req():\n return Request()", "def create_request(params={}, path='/', method='POST'):\n request = DummyRequest(path)\n 
request.method = method\n request.args = params\n return request", "def _make_request(self, payload, headers=None):\n pathparts = REQ_PATH.split(b\"/\")\n if pathparts[0] == b\"\":\n pathparts = pathparts[1:]\n dreq = DummyRequest(pathparts)\n dreq.requestHeaders = Headers(headers or {})\n dreq.responseCode = 200 # default to 200\n\n if isinstance(payload, dict):\n payload = json.dumps(payload)\n\n dreq.content = BytesIO(payload.encode())\n dreq.method = \"POST\"\n\n return dreq", "def create_request(path, environ=None):\n # setup the environ\n if environ is None:\n environ = {}\n\n # create a \"blank\" WebOb Request object\n # using TG Request which is a webob Request plus\n # some compatibility methods\n req = request_local.Request.blank(path, environ)\n\n # setup a Registry\n reg = environ.setdefault('paste.registry', Registry())\n reg.prepare()\n\n # Setup turbogears context with request, url and tmpl_context\n tgl = RequestLocals()\n tgl.tmpl_context = ContextObj()\n tgl.request = req\n\n request_local.context._push_object(tgl)\n\n return req", "def _get_request(args):\n input_request = args.input_request\n request = None\n if input_request:\n from pathlib import Path\n req_file = Path(input_request)\n if req_file.is_file():\n request = load_certificate_request(req_file)\n\n if not request:\n request = req_handler(args)\n\n return request", "def buildRequest(self, uri):\r\n req = urllib2.Request(uri)\r\n req.add_header('X-CSRFToken', self.token)\r\n req.add_header('Referer', 'http://www.ingress.com/intel')\r\n req.add_header('Accept-Charset', 'utf-8')\r\n req.add_header('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.64 Safari/537.31')\r\n return req", "def parse (self, request):\n\n data = {}\n body_start = request.find('\\r\\n\\r\\n')\n if body_start == -1:\n data['body'] = None\n else:\n data['body'] = request[body_start+4:]\n parts = request.split(' ', 2)\n data['method'] = parts[0]\n data['resource'] = parts[1]\n return (data)" ]
[ "0.66163635", "0.64092165", "0.6310418", "0.6307847", "0.62922215", "0.6205955", "0.6139031", "0.61217684", "0.60365695", "0.60207903", "0.5994213", "0.5978753", "0.5958118", "0.5937699", "0.5780297", "0.5706186", "0.5587631", "0.5573185", "0.55512106", "0.5533943", "0.5532476", "0.5530401", "0.55293185", "0.55147636", "0.55001533", "0.5458124", "0.5457386", "0.5437374", "0.5419851", "0.54007363" ]
0.64779276
1
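Not part of the dataset row above — a minimal standalone sketch of the parsing flow that record describes (split the raw request on the blank line, read the request line, lower-case the header names, keep the body only when Content-Length is present). The function name `parse_raw_request` and the sample bytes are illustrative assumptions, not identifiers from the dataset's source project.

```python
from typing import Dict, Tuple

def parse_raw_request(data: bytes) -> Tuple[str, str, Dict[str, str], bytes]:
    """Split a raw HTTP/1.x request into method, path, headers and body."""
    head, _, body = data.decode("utf-8").partition("\r\n\r\n")
    request_line, *header_lines = head.split("\r\n")
    method, path, _protocol = request_line.split()
    headers: Dict[str, str] = {}
    for line in header_lines:
        name, _, value = line.partition(":")
        headers[name.strip().lower()] = value.strip()
    # Keep the body only when a Content-Length header is present, mirroring the record above.
    body_bytes = body.encode("utf-8") if "content-length" in headers else b""
    return method, path, headers, body_bytes

raw = b"POST /submit HTTP/1.1\r\nHost: example.test\r\nContent-Length: 2\r\n\r\nhi"
print(parse_raw_request(raw))
# ('POST', '/submit', {'host': 'example.test', 'content-length': '2'}, b'hi')
```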
Parses headers to a dictionary from a list of strings
def _parse_headers(raw_headers: List[str]) -> Dict[str, str]: headers: Dict[str, str] = {} for header in raw_headers: name = header[: header.find(":")].strip() value = header[header.find(":") + 1 :].strip() headers[name.lower()] = value return headers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_headers(headers):\n try:\n return dict(header.split(\":\") for header in headers)\n except:\n raise ValueError(\"Invalid headers %s\" % headers)", "def generate_header_dic(self, header_strings):\n headers = dict()\n\n for header_values in header_strings:\n header_list = header_values.split(':')\n headers[header_list[0]] = header_list[1]\n return headers", "def headers_raw_to_dict(headers_raw):\n\n if headers_raw is None:\n return None\n headers = headers_raw.splitlines()\n headers_tuples = [header.split(':', 1) for header in headers]\n\n result_dict = {}\n for header_item in headers_tuples:\n if not len(header_item) == 2:\n continue\n\n item_key = header_item[0].strip()\n item_value = header_item[1].strip()\n result_dict[item_key] = item_value\n\n return result_dict", "def get_headers(headers: HTTPHeaders) -> Mapping[str, List[str]]:\r\n return {header.lower(): headers.get_list(header) for header in headers.keys()}", "def readHeaders(lines):\n\n result = {}\n for line in lines:\n if line[0] == \"#\":\n continue\n if line.strip() == \"\":\n continue\n key, value = line.split(\":\", 1)\n result[key.strip()] = parseSloppily(value)\n return result", "def updateheader(self, headerlist=[], http_s_obj=None):\n header = {}\n for headerparam in headerlist:\n key_value = headerparam.split(\":\", 1)\n if len(key_value) == 2:\n try:\n key = key_value[0]\n value = key_value[1].strip()\n header.update({key: value})\n if http_s_obj:\n if http_s_obj.header.get(key):\n http_s_obj.header.update({key: value})\n except Exception:\n continue\n return header", "def _parse_header(lines):\n # The dict into which we will store header fields.\n header = {}\n # Loop over lines in the header.\n for line in lines:\n # Find the first colon.\n index = line.index(COLON)\n # Up to the colon is the field name.\n name = line[: index]\n # After the colon is the field value.\n value = line[index + 1 :]\n # The field value may begin or end with extra space, which is not \n # significant. 
Remove it.\n value = value.strip()\n # Store the field.\n header[name] = value\n # All done.\n return header", "def _unpack_headers(self, headers):\n return dict((k,v[0]) for (k,v) in headers.getAllRawHeaders())", "def get_headers(s, sep=': ', strip_cookie=False, strip_cl=True, strip_headers: list = []) -> dict():\n d = dict()\n for kv in s.split('\\n'):\n kv = kv.strip()\n if kv and sep in kv:\n v=''\n k = kv.split(sep)[0]\n if len(kv.split(sep)) == 1:\n v = ''\n else:\n v = kv.split(sep)[1]\n if v == '\\'\\'':\n v =''\n # v = kv.split(sep)[1]\n if strip_cookie and k.lower() == 'cookie': continue\n if strip_cl and k.lower() == 'content-length': continue\n if k in strip_headers: continue\n d[k] = v\n return d", "def get_headers(s, sep=': ', strip_cookie=False, strip_cl=True, strip_headers: list = []) -> dict():\n d = dict()\n for kv in s.split('\\n'):\n kv = kv.strip()\n if kv and sep in kv:\n v=''\n k = kv.split(sep)[0]\n if len(kv.split(sep)) == 1:\n v = ''\n else:\n v = kv.split(sep)[1]\n if v == '\\'\\'':\n v =''\n # v = kv.split(sep)[1]\n if strip_cookie and k.lower() == 'cookie': continue\n if strip_cl and k.lower() == 'content-length': continue\n if k in strip_headers: continue\n d[k] = v\n return d", "def decode_header(header):\n new_header = {}\n\n for item in header:\n split = item.split('\\t')\n new_header[split[0].replace(':', '')] = split[1].replace(\"\\r\\n\", \"\")\n\n return new_header", "def parse_headers(file_contents: str) -> dict:\n\n match = re.search(r'#HEADER#(.*?)#', file_contents, re.MULTILINE | re.DOTALL)\n\n if match is None:\n raise Exception('No #HEADER# provided')\n\n headers = {}\n lines = match.group(1).split(\"\\n\")\n\n for line in lines:\n if line.strip() != '':\n parts = line.split(' : ')\n value = re.sub(r'(^[\\'\"]|[\\'\"]$)', '', parts[1].strip())\n headers[parts[0].strip()] = value\n\n return headers", "def __parseHeaders(headers):\n global __all_headers\n if headers and len(headers) > 0:\n for header in headers:\n name = header.getElementsByTagName(\"name\")[0].childNodes[0].data\n value = header.getElementsByTagName(\"value\")[0].childNodes[0].data\n __addHeader(name, value)\n #print(__all_headers)", "def fill_headers(self, headers):\n self.headers = {h[0]: h[1] for h in headers}", "def unpack_header(header):\n header_values = {}\n for line in header.split('\\n'):\n tokens = line.split('=')\n if len(tokens) > 1:\n header_values[tokens[0].strip()] = tokens[1].split(';')[0].strip()\n return header_values", "def _split_headers(headers):\n amz_headers = {}\n reg_headers = {}\n for cur in headers:\n if cur.lower().startswith('x-amz-'):\n amz_headers[cur] = headers[cur]\n else:\n reg_headers[cur] = headers[cur]\n return (amz_headers, reg_headers)", "def extract_header(self, string): \n\n header_list = re.findall(r\"\\$\\*(.*)\\*\\$\", string)[0].split(\",\")\n header = {}\n for i in header_list:\n spl = i.split(\":\")\n header[spl[0]] = spl[1]\n\n return header", "def _parse_header(self, line):\n if self._regex_helper.search_compiled(W._re_header, line):\n if not self.headers:\n for value in re.findall(W._re_header, line):\n self.headers.append(value[0])\n raise ParsingDone\n else:\n # Dictionary which is going to be appended to the returned list\n ret = dict()\n # List of entries\n _entries = list()\n # List of values in WHAT entry\n _what_entry = list()\n for value in re.findall(W._re_header, line):\n _entries.append(value[0])\n for what_index in range(len(self.headers) - 1, len(_entries)):\n _what_entry.append(_entries[what_index])\n 
_what_entry_string = ' '.join(_what_entry)\n for index in range(len(self.headers)):\n if index < len(self.headers) - 1:\n ret.update({self.headers[index]: _entries[index]})\n else:\n ret.update({self.headers[index]: _what_entry_string})\n self.current_ret['RESULT'].append(ret)\n raise ParsingDone", "def parse_header(header_lines):\n info = {}\n for line in header_lines:\n if line.startswith('Citation'):\n info['Citation'] = line.split()[-1].strip()\n elif ':' in line:\n try:\n field, value = map(strip,line.split(':',1))\n info[field] = value\n except ValueError:\n #no interesting header line\n continue\n else:\n continue\n return Info(info)", "def _convert_list_tuples_to_dict(self, headers_list):\n # type: (List[Tuple[str, str]]) -> Dict[str, str]\n headers_dict = {} # type: Dict\n if headers_list is not None:\n for header_tuple in headers_list:\n key, value = header_tuple[0], header_tuple[1]\n if key in headers_dict:\n headers_dict[key] = \"{}, {}\".format(\n headers_dict[key], value)\n else:\n headers_dict[header_tuple[0]] = value\n return headers_dict", "def _parse_raw_header_entries(header_entries):\n\n def __check_key(key):\n return not(\"_\" in key or \" \" in key or \":\" in key or not len(key))\n\n result = {}\n if (len(header_entries) < 1):\n return result\n\n # Remove leading '--'\n header_entries = header_entries[1:]\n if (not len(header_entries) % 2 == 0):\n raise ValueError(\"last key does not have a value\")\n\n while (len(header_entries)):\n # Retrieve raw key\n logging.debug(\"current header content \" + str(header_entries))\n word = header_entries[0]\n header_entries = header_entries[1:]\n\n # Try to trim equal\n if (word[-1] == ':'):\n word = word[:-1]\n\n if(not __check_key(word)):\n raise ValueError(\"invalid key '{}' in key value list\".format(word))\n\n result[word] = header_entries[0]\n header_entries = header_entries[1:]\n\n return result", "def parseHeader(header):\n tokens = [t for t in header.split(' ') if t]\n result = {}\n for i in range(len(tokens)):\n result[tokens[i]] = i \n\n return result", "def parse_header(self, out):\n self.headers = {}\n for h in out.split(\"\\r\\n\\r\\n\", 1)[0].split(\"\\r\\n\"):\n x = h.split(\":\")\n self.headers[x[0]] = \":\".join(x[1:]).lstrip()\n return True", "def read_prism_hdr(hdr_path): \n with open(hdr_path, 'r') as input_f:\n header_list = input_f.readlines()\n \n return dict(item.strip().split() for item in header_list)", "def trim_headers(all_headers, relevant_headers=[\"From\", \"To\", \"Subject\", \"Date\"]):\n data = {}\n for header in all_headers:\n if header['name'] in relevant_headers:\n data[header['name']] = header['value']\n\n return data", "def _parse_header(path):\n with open(path) as f:\n text = f.read().splitlines()\n raw_segs = [line.split() for line in text if ':' in line]\n\n # convert the content into a giant dict of all key, values\n return dict((i[0][:-1], i[1:]) for i in raw_segs)", "def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous column header to it\n if j > 0 and \"Unc\" in hd:\n hd = headers[j - 1] + \" \" + hd\n if \"Unc\" in hd and \"Unc.\" not in hd:\n hd = hd.replace(\"Unc\", \"Unc.\")\n # expand abbreviated headers\n if \"Energy\" in hd and \"Energy Level\" not in hd:\n hd = hd.replace(\"Energy\", \"Energy Level\")\n if \"Par. Elevel\" in hd:\n hd = hd.replace(\"Par. 
Elevel\", \"Parent Energy Level\")\n if \"Abund.\" in hd:\n hd = hd.replace(\"Abund.\", \"Abundance (%)\")\n if \"Ene.\" in hd:\n hd = hd.replace(\"Ene.\", \"Energy\")\n if \"Int.\" in hd:\n hd = hd.replace(\"Int.\", \"Intensity (%)\")\n if \"Dec\" in hd and \"Decay\" not in hd:\n hd = hd.replace(\"Dec\", \"Decay\")\n if \"Rad\" in hd and \"Radiation\" not in hd:\n hd = hd.replace(\"Rad\", \"Radiation\")\n if \"EP\" in hd:\n hd = hd.replace(\"EP\", \"Endpoint\")\n if \"Mass Exc\" in hd and \"Mass Excess\" not in hd:\n hd = hd.replace(\"Mass Exc\", \"Mass Excess\")\n headers_new.append(hd)\n if len(set(headers_new)) != len(headers_new):\n raise NNDCRequestError(\n \"Duplicate headers after parsing\\n\"\n + f' Original headers: \"{headers}\"\\n'\n + f' Parsed headers: \"{headers_new}\"'\n )\n return headers_new", "def _headers(self) -> Mapping[str, str]:\n return {}", "def parse_header(self):", "def parse_list_header(value):\n result = []\n for item in urllib2.parse_http_list(value):\n if item[:1] == item[-1:] == '\"':\n item = unquote_header_value(item[1:-1])\n result.append(item)\n return result" ]
[ "0.78225195", "0.74253625", "0.73871934", "0.7215364", "0.7209012", "0.71768695", "0.7095645", "0.70905596", "0.70813763", "0.70813763", "0.70154387", "0.6994542", "0.6965097", "0.69445574", "0.6913696", "0.6835682", "0.6808548", "0.6762677", "0.6754819", "0.6710498", "0.6692153", "0.6673916", "0.6601852", "0.6580222", "0.64974713", "0.64949113", "0.6489278", "0.64567626", "0.64103353", "0.64019674" ]
0.806986
0
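A small self-contained sketch of the header-list-to-dict idea in the record above; `headers_to_dict` and the sample headers are hypothetical names, not taken from the dataset. Splitting at the first colon only (here via `str.partition`, equivalent to the record's use of `find(":")`) is what keeps values that themselves contain colons, such as URLs, intact.

```python
from typing import Dict, List

def headers_to_dict(raw_headers: List[str]) -> Dict[str, str]:
    """Map 'Name: value' strings to a dict keyed by lower-cased header names."""
    parsed: Dict[str, str] = {}
    for header in raw_headers:
        name, _, value = header.partition(":")  # split at the first colon only
        parsed[name.strip().lower()] = value.strip()
    return parsed

print(headers_to_dict(["Host: example.test", "Referer: http://example.test/a"]))
# {'host': 'example.test', 'referer': 'http://example.test/a'}
```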
Replaces the occurrence(s) of tag in the file with nstr
def rep(self,tag,nstr): tmp = [] for line in self.content: if tag in line: tmp.append(line.replace(tag,nstr)) else: tmp.append(line) self.content = tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ler_arquivo_xml(self, diretorio):\r\n with open(diretorio, 'r') as fxml:\r\n\t strfx = fxml.readlines()\r\n\t string = \"\".join(strfx).replace(\"&\",\" e \")\r\n return string", "def archivoXl(archivo):\r\n return ow(archivo)", "def SV_tag_length(tag_file, outPrefix):\r\n outdir = \"/\".join(outPrefix.split(\"/\")[:-1])\r\n outdir = outdir + \"/\"\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n ins_h = open(outPrefix + \"_INS_DEL.txt\", \"w\")\r\n inv_h = open(outPrefix + \"_INV_DUP.txt\", \"w\")\r\n ins_h.write(\"Tag\\tSVType\\tSVLength\\n\")\r\n inv_h.write(\"Tag\\tSVType\\tSVLength\\n\")\r\n \r\n tag_h = open(tag_file, \"r\")\r\n header = tag_h.readline().strip()\r\n for line in tag_h:\r\n lines = line.strip().split(\"\\t\")\r\n tag = lines[0]\r\n tags = tag.split(\"-\")\r\n length = tags[2]\r\n SVType = tags[3]\r\n if SVType == \"INS\" or SVType == \"DEL\":\r\n ins_h.write(\"%s\\t%s\\t%s\\n\" % (tag, SVType, length))\r\n elif SVType == \"INV\" or SVType == \"DUP\":\r\n inv_h.write(\"%s\\t%s\\t%s\\n\" % (tag, SVType, length))\r\n else:\r\n print(\"Please ckeck whether INS, DEL, INV or DUP is in description %s.\" % tag)\r\n tag_h.close()\r\n inv_h.close()\r\n ins_h.close()", "def make_tag_data_raw_fast(mdp,filename):\n #\n fin = open(filename,'r')\n iter = 0\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"comb_path\":\n update_params(mdp,lsp)\n if not mdp.flag_out_open: ## -- try to open output file\n try:\n if mdp.flag_overwrite == \"True\": ## check string value!\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ## -- try open output file\n for file in glob.glob(mdp.input_path):\n # get sign which corrects for boundary condition\n tvals = file.split('/')[-1].split('_')[3].split('t')\n try:\n ## flip sign if requested\n bcsign = ((int(tvals[1])+int(tvals[2])) != (int(tvals[1])+int(tvals[2])) % mdp.corr_len)\n except IndexError:\n ## 2-point function\n bcsign = False\n try:\n # open correlator file\n mdp.corr_file = open(file,'r')\n except IOError:\n print \"Could not open file \",file\n continue\n ## -- get tag\n ## baryons:\n #mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_r'+file.split('/')[-1].split('_')[4][-1]\n ## with time source tag\n #mdp.tag = file.split('/')[-1].split('_')[3][:3]\\\n # +'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n # +file.split('/')[-1].split('_')[4][3:]\n ## no time source tag\n mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n +file.split('/')[-1].split('_')[4][3:]\n #print file,',',mdp.tag\n iter+=1\n ##endif ! 
flag_out_open\n\n #save_data_fast(mdp)\n save_data_fast_bc(mdp,bcsign)\n mdp.corr_file.close()\n if iter%400 == 0:\n print \"file\",iter\n max_iter = None\n if not(max_iter is None) and iter==max_iter:\n print \"reached max file iterations, ending loop...\"\n break\n ## end comb_path\n pass\n\n elif lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def save_to_fileobj(self, fileobj):\n writetags(fileobj, self.__dxftags__(), self.ENCODING)", "def tokenize_tag_with_unks(self, path, fname):\n assert os.path.exists(path)\n fpath = os.path.join(path, fname)\n with open(fpath, 'r') as f:\n lines = f.read().split('\\n')\n total_num = len(lines)\n \n # Tokenize file content\n tag_ids = torch.zeros((total_num, self.seq_len), dtype=torch.long)\n for i, line in enumerate(lines):\n if line.strip() != \"\":\n tags = line.strip().split()\n tag_ids[i, 0] = self.tag2idx['<SOS>']\n for j, tag in enumerate(tags[:self.seq_len-1]):\n if tag not in self.tag2idx:\n tag_ids[i, j+1] = self.tag2idx[\"<UNK>\"]\n else:\n tag_ids[i, j+1] = self.tag2idx[tag]\n if j+1 < self.seq_len-1:\n tag_ids[i, j+2] = self.tag2idx['<EOS>']\n return tag_ids", "def geneA(nombreA,listaPGA): #Esta sección fue hecha por Ángel\n with open(nombreA + \".txt\", \"w\") as archivo:\n archivo.writelines(listaPGA)", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la 
palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def tag(referencefile):\n dirpath = path.abspath(referencefile)\n\n if path.isdir(dirpath):\n dircontents = listdir(dirpath)\n else:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n\n while not 'tag' in dircontents:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n if len(dircontents) == 0 or path.split(dirpath)[1] == 'chemistry':\n print(\"tag file not found\")\n return None\n\n return path.join(dirpath, 'tag')", "def ins(self,tag,nstr,jumpline=True):\n tmp = []\n strj = '\\n' if jumpline else ''\n for line in self.content:\n tmp.append(line)\n if tag in line:\n tmp.append(nstr + strj)\n self.content = tmp", "def quran_words_frequences_data(fileName):\n\n # Computing unique words\n unique_words = get_unique_words()\n comma_separated_unique_words = ''\n for word in unique_words:\n comma_separated_unique_words += word + ','\n\n # Removing the extra commas\n comma_separated_unique_words = comma_separated_unique_words.strip(',')\n\n\n\n # * Creating quran_words_frequences_data -- the root tag\n root = Element('quran_words_frequences')\n root.set('unique_words', comma_separated_unique_words)\n\n 
# * Add root to the tree\n tree = ElementTree(root)\n\n\n for suraNumber in range(1, 114 +1):\n\n sura = quran.get_sura(suraNumber)\n\n # * Creating sura Tag\n suraTag = Element('sura')\n\n # * set number attribute\n suraTag.set('number', str(suraNumber))\n\n # * set sura unique words\n # ??? update get_unique_words\n # suraTag.set('sura_unique_words', suraUniquewords)\n\n ayaCounter = 1\n for aya in sura:\n\n # Create aya Tag\n ayaTag = Element('aya')\n ayaTag.set('number', str(ayaCounter))\n\n # * Computes the words frequency for aya\n ayaWordsDict = get_frequency(aya)\n\n words_comma_separated = ''\n occurrence_comma_separated = ''\n\n for word in ayaWordsDict:\n words_comma_separated += word + ','\n occurrence_comma_separated += str(ayaWordsDict[word]) + ','\n\n # * The same order\n words_comma_separated = words_comma_separated.strip(',')\n occurrence_comma_separated = occurrence_comma_separated.strip(',')\n\n # * Add words & frequencies attributes\n ayaTag.set('unique_words', words_comma_separated)\n ayaTag.set('unique_words_frequencies', occurrence_comma_separated)\n\n\n # * Add aya tag to sura tag\n suraTag.append(ayaTag)\n\n ayaCounter += 1\n\n # * add suraTag to the root\n root.append(suraTag)\n\n\n # print(prettify(root))\n\n file = open(fileName, 'w')\n file.write(prettify(root))\n file.close()", "def okoo_merge_label(file_name):\n labels_dic = {}\n label = 0\n with open(\"label_doc_3\", encoding='utf-8') as f:\n for line in f:\n if len(line) < 2:\n continue\n for key in re.findall('(\\d+)', line):\n labels_dic[''.join(key)] = label\n label += 1\n cur_true_label = label + 1\n with open(file_name, encoding='utf-8') as f1:\n texts = []\n data = json.load(f1)['all']\n for text_ in data:\n label = text_['label']\n if label in labels_dic:\n text_['merged_label'] = labels_dic[label]\n else:\n print(text_)\n text_['merged_label'] = cur_true_label\n # text_['text'] = ' '.join([c[0] for c in thu0.fast_cut(text_['text'])])\n texts.append(text_)\n\n with open('okoo-merged-3-label.json', 'w', encoding='utf-8') as f:\n json.dump(texts, f, ensure_ascii=False, indent=4, separators=(',', ': '))", "def ner_nltk(filepath):\n\n out = \"\"\n\n with codecs.open(filepath,'r','utf-8') as current_file:\n\n text = current_file.readlines()\n\n with codecs.open(filepath+\".ner\",'w','utf-8') as outfile:\n\n for line in text:\n\n tokenized = line.split()\n tagged = pos_tag(tokenized)\n ne = ne_chunk(tagged)\n\n for index,token in enumerate(ne):\n if type(token) != tuple:\n outfile.write(' '.join([tok[0]+\"|\"+token.label() for tok in token])+' ')\n else:\n outfile.write(token[0]+' ')\n outfile.write('\\n')", "def extract_tags_to_file(data, filename):\n data.sort(key=lambda tag: tag[1], reverse=True)\n with open(filename, 'w') as f:\n # first four lines for metadata\n f.write(filename + '\\n')\n f.write('tags: %d\\n\\n\\n' % len(data))\n for tag in data:\n f.write('%s\\t\\t\\t%d\\n' % (tag[0], tag[1]))", "def tags():", "def tag_file_process(self, multiple_files):\n # the path is now becoming a string since it goes through the UI\n # text entry box, not a list or tuple any more, so we turn it to a\n # list of paths\n file_list = multiple_files.split(' ')\n # the main dictionary to store all tags\n tag_dict = dict()\n rows = []\n # now for all the tag file under the folder(root directory), we load\n # the data into the dictionary\n if len(file_list) == 0:\n tk.messagebox.showwarning('warning', 'no files chosen')\n else:\n for file_path in file_list:\n if os.path.isfile(file_path):\n with open(file_path, 'r', 
encoding='utf-8') as \\\n current_tag_file:\n # initialize the dictionary and the inner dictionary\n reader = csv.reader(current_tag_file)\n for row in reader:\n # the encode, decode is use to resolve the \"\\ueffa\"\n # BOM-utf8 problem\n row[0] = row[0].encode('utf-8').decode('utf-8-sig')\n tag_dict[row[0]] = dict()\n rows.append(row)\n # store the tag into the dictionary\n for row in rows:\n # the 1st column is the main key(mob fact col name)\n # the 2nd column is the tag id\n # the 3rd column is the tag with real meaning\n tag_dict[row[0]][row[1]] = row[2]\n\n else:\n tk.messagebox.showinfo('warning', 'can not obtain: ' +\n file_path)\n return tag_dict", "def loeschen(self):\r\n loeschen=self.REQUEST['loeschen']\r\n tit=''\r\n i=0\r\n j=0\r\n index=[]\r\n cursor=[]\r\n for x in self.objectValues('Image'):\r\n if str(x.id())[0:6] not in index:\r\n index.append(str(x.id())[0:6]) \r\n cursor.append([str(x.id())[0:6],str(x.title),[str(x.id())]])\r\n if str(x.id())[0:6]==loeschen:\r\n tit=str(x.title)\r\n j=i\r\n i=i+1\r\n else:\r\n cursor[-1][2].append(str(x.id()))\r\n #for val in cursor[j][2]:\r\n #self._delOb(self, id=str(val))\r\n #delet=delet+str(val)+' '\r\n self.manage_delObjects(ids=cursor[j][2])\r\n return tit+' gel&ouml;scht !'", "def tagger():", "def cleanup(segment):\n cnt = ''.join(segment.file_content)\n index = cnt.find('\\\\annotate')\n if index < 0:\n return\n while index >= 0:\n cnt, new_ind = parse_annotation(cnt, index)\n index = cnt.find('\\\\annotate', new_ind)\n f = codecs.open(segment.filename, 'w', 'utf-8')\n f.write(cnt)\n f.close()\n info('Updated: {} {}'.format(segment.voice_name, segment.name))", "def write_file(self, file_path, acc, dict_tags):\n logging.info('Escrevendo arquivo em {0}'.format(file_path))\n file_write = open(file_path, \"w\")\n file_write.write(\"Taxa de acerto geral: {0:.2f}%\\n\".format(np.mean(acc)*100))\n for key in dict_tags.keys():\n if dict_tags[key]['right'] > 0:\n file_write.write(\"Taxas de acerto para a classe '{0}': {1:.2f}% Total da classe '{0}': {2:.2f}%\\n\".format(key, \n (dict_tags[key]['pred']/dict_tags[key]['right'])*100, \n (dict_tags[key]['right']/dict_tags[key]['pres'])*100))\n else:\n file_write.write(\"Taxas de acerto para a classe '{0}': Nao presente no corpus de teste\\n\".format(key))\n\n file_write.close()", "def archivoXlFormateado(archivo):\r\n return ow(archivo, formatting_info=True)", "def replace_tag(tag, value, file):\r\n with open(file, \"r\") as origin:\r\n with open(file+\".replaced\", \"w\") as dest:\r\n dest.write(origin.read().replace(tag, str(value)))\r\n return file+\".replaced\"", "def escribir_indir(self, FILESYS, id,name_file=\"Xx.xXx.xXx.xXx.\",\n size_file=\"\",inicluster=\"\",cdate=\"\",mdate=\"\",no_use=\"\"):\n byte = 512\n tamanno_indir = 64\n id = int(id)\n try:\n FILESYS[byte+(tamanno_indir*id):byte+(tamanno_indir*id)+15] =\\\n ((\" \"*(15-len(str(name_file))))+str(name_file)).encode('ascii')\n except:\n print(\"Nombre no valido\")\n return False\n FILESYS[byte+(tamanno_indir*id)+16:byte+(tamanno_indir*id)+24] =\\\n (\"0\"*(8-len(str(size_file)))+str(size_file)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+25:byte+(tamanno_indir*id)+30] =\\\n (\"0\"*(5-len(str(inicluster)))+str(inicluster)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+31:byte+(tamanno_indir*id)+45] =\\\n (\"0\"*(14 - len(str(cdate)))+str(cdate)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+46:byte+(tamanno_indir*id)+60] =\\\n (\"0\"*(14 - len(str(mdate)))+str(mdate)).encode('ascii')\n 
FILESYS[byte+(tamanno_indir*id)+61:byte+(tamanno_indir*id)+64] =\\\n (\"\\x00\"*(3 - len(str(no_use)))+str(no_use)).encode('ascii')\n return True", "def getArchivoVotacion():", "def carrega_endereco_tag_daruma(self, tag):\r\n daruma_dict = {\"LOCALARQUIVOS\":['START','LocalArquivos'],\r\n \"LOCALARQUIVOSRELATORIOS\":['START','LocalArquivosRelatorios'],\r\n \"LOGTAMMAXMB\":['START','LogTamMaxMB'],\r\n\t\t \"MODOOBSERVER\":['START','ModoObserver'],\r\n \"PATHBIBLIOTECASAUXILIARES\":['START','PathBibliotecasAuxiliares'],\r\n \"PRODUTO\":['START','Produto'],\r\n \"THREADAOINICIAR\":['START','ThreadAoIniciar'],\r\n \"TIPOREGISTRO\":['START','TipoRegistro'],\r\n \"TERMICA\":['DUAL','Termica'],\r\n \"DUALTAMANHOBOBINA\":['DUAL','TamanhoBobina'],\r\n \"DUALPORTACOMUNICACAO\":['DUAL','PortaComunicacao'],\r\n \"DUALVELOCIDADE\":['DUAL','Velocidade'],\r\n \"ROTA1\":['DUAL','Rota1'],\r\n \"ROTA2\":['DUAL','Rota2'],\r\n \"ROTA3\":['DUAL','Rota3'],\r\n \"ROTA4\":['DUAL','Rota4'],\r\n \"ROTA5\":['DUAL','Rota5'],\r\n \"ATIVAROTA\":['DUAL','AtivaRota'],\r\n \"AJUSTARDATAHORA\":['NFCE','AjustarDataHora'],\r\n \"AVISOCONTINGENCIA\":['NFCE','AvisoContingencia'],\r\n \"AUDITORIA\":['NFCE','Auditoria'],\r\n \"ENCONTRARIMPRESSORA\":['NFCE','EncontrarImpressora'],\r\n\t\t \"PATHARQUIVOSCTGOFFLINE\":['NFCE','PathArquivosCtgOffline'],\r\n \"MARCAIMPRESSORA\":['NFCE','IMPRESSORA\\MarcaImpressora'],\r\n \"NFCETAMANHOBOBINA\":['NFCE','IMPRESSORA\\TamanhoBobina'], \r\n \"NFCEPORTACOMUNICACAO\":['NFCE','IMPRESSORA\\PortaComunicacao'], \r\n \"NFCEVELOCIDADE\":['NFCE','IMPRESSORA\\Velocidade']\r\n }\r\n #\"ENDERECOSERVIDOR\":['NFCE','EnderecoServidor'],\r\n\r\n #if tag.upper() not in [x.upper() for x in daruma_dict.keys()]:\r\n if tag.upper() not in daruma_dict:\r\n raise Exception(\"-40: Tag XML DarumaFramework nao encontrada.\")\r\n return daruma_dict[tag.upper()]", "def file(self):\n\n dlos_filename = super(DlosPhotoz, self).file()\n\n photoz_str = 'DLOS_photoz_'\n \n file_name = photoz_str.join( \n dlos_filename.split('DLOS_')\n ) \n\n return file_name", "def createNew(f1):\n file1=open(f1)\n file2=open(r\"C:\\Users\\Devansh\\Desktop\\Projects\\img\\test.txt\",\"w+\")\n count=0\n text=\"\"\n chars=list(file1.read())\n prevChar=chars[0]\n for i in range(1,len(chars)):\n char=chars[i]\n #print(char,prevChar)\n if(char==prevChar):\n count+=1\n elif(char!=prevChar):\n if(count==1):\n text=text+char\n else:\n text=text+str(count)+char\n prevChar=char\n count=1\n file2.write(text)", "def convert(src, dst):\n with open(dst, 'w', encoding = 'utf-8') as myFile:\n records = read(src)\n for tag in sorted(records.keys()):\n myFile.write('%s %s\\n' %(tag, records[tag]))", "def filtraFileDiAnn(fileInput, geneNames):\n\n\t#---------------------\n\t# Creazione di una lista dove ogni elemento e' una riga del file \n\t# Ogni elem e' una lista di informazioni divise per colonne \n\t#\n\t# formato di un elemento di lines:\n\t#\n\t#\tPOSIZIONE \t\t\tCONTENUTO\n\t#\t\t0\t\t\t\t\tcromosoma\n\t#\t\t3\t\t\t\t\tstart\n\t#\t\t4\t\t\t\t\tend\n\t#\t\t6\t\t\t\t\tstrand\n\t#\t\t8\t\t\t\t\tgene_id\n\t#\t\t9\t\t\t\t\ttranscript_id\n\t#\t\t10\t\t\t\t\texon_number\n\t#\t\t11\t\t\t\t\tgene_name\n\t#\t\t12\t\t\t\t\ttranscript_name\t\n\t#\n\n\n\tstringa \t= '\\texon\\t'\n\tlines \t\t= []\n\tdictGeneChr = {}\n\t\n\t# Indici per il file di annotazione\n\t#\n\tidx_cromosoma = 0\n\tidx_geneName = 11\n\tidx_start = 3\n\tidx_end = 4\n\t\n\tfor x in open(fileInput):\n\t\triga = x.strip(';\\n').replace('; ','\\t').split('\\t')\n\n\t\tif not 
geneNames.has_key(riga[idx_geneName]):\n\t\t\tcontinue\n\t\t\t\t\n\t\t# Creazione del dizionario dei gene_name per ogni cromosoma\n\t\t#\n\t\tkey_geneChr = riga[idx_geneName] + '\\t' + riga[idx_cromosoma]\n\t\tif not dictGeneChr.has_key(key_geneChr):\n\t\t\tdictGeneChr[key_geneChr] = [riga[idx_start], riga[idx_end]]\n\t\telse:\n\t\t\t\n\t\t\t# Si aggiona il valore dello start del gene se si trova un \n\t\t\t# valore piu' piccolo\n\t\t\t#\n\t\t\tif int(dictGeneChr[key_geneChr][0]) > int(riga[idx_start]):\n\t\t\t\tdictGeneChr[key_geneChr][0] = riga[idx_start]\n\t\t\t\t\n\t\t\t# Si aggiorna il valore dell'end del gene se si trova un\n\t\t\t# valore piu' grande\n\t\t\t#\n\t\t\tif int(dictGeneChr[key_geneChr][1]) < int(riga[idx_end]):\t\n\t\t\t\tdictGeneChr[key_geneChr][1] = riga[idx_end]\n\t\t\n\t\t# Si filtra il file considerando solamente le regioni di tipo \"exon\"\n\t\t#\n\t\tif stringa in x:\n\t\t\tlines.append(riga)\n\n\treturn [lines, dictGeneChr]", "def lesFraFil(self, filnavn): \r\n self._sanger.clear()\r\n \r\n # Les en fil med musikk\r\n innfil = open(filnavn, mode='r')\r\n \r\n \r\n for i, linje in enumerate(innfil):\r\n biter = linje.strip().split(';')\r\n # sang = \"sang\" + str(i+1)\r\n # Opprett et objekt for hver sang\r\n # print(sang)\r\n sang = Sang(biter[0], biter[1])\r\n \r\n # Legg objektet(sangen) til i spillelisten\r\n self._sanger.append(sang)\r\n \r\n # Lukk filen\r\n innfil.close()\r\n # print(self._sanger)\r\n # print(allMusikk) NO\r\n print()" ]
[ "0.56019914", "0.55601597", "0.5558506", "0.55486673", "0.5365312", "0.5310506", "0.52868605", "0.5284282", "0.5265695", "0.5260303", "0.52433544", "0.523879", "0.52328277", "0.52186084", "0.52185684", "0.5216578", "0.51640344", "0.5162988", "0.51482356", "0.5122066", "0.51198757", "0.5108365", "0.5107683", "0.51070845", "0.50937676", "0.5070779", "0.506947", "0.5064073", "0.50607663", "0.5050024" ]
0.56634676
0
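A standalone sketch of the same line-by-line tag substitution as the record above, with illustrative names (`replace_tag`, the `@TITLE@` placeholder). The original method mutates an object's `content` list in place; this version returns a new list, and the `if tag in line` guard is kept only to mirror the record's structure — `str.replace` already leaves lines without the tag unchanged.

```python
from typing import List

def replace_tag(lines: List[str], tag: str, nstr: str) -> List[str]:
    """Return a copy of `lines` with every occurrence of `tag` replaced by `nstr`."""
    return [line.replace(tag, nstr) if tag in line else line for line in lines]

content = ["title = @TITLE@\n", "# untouched line\n"]
print(replace_tag(content, "@TITLE@", "Report"))
# ['title = Report\n', '# untouched line\n']
```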
Saves the changes to the file specified in newfile, or makes a copy of the original file (filename+'~') and saves the contents to "filename"
def saveFile(self,newfile=None): if newfile == None: shutil.move(self.filename,self.filename+'~') self.handler = open(self.filename,'w') else: self.handler = open(newfile,'w') self.handler.writelines(self.content) self.handler.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newfile(filename):\n # Open the new file for writing\n with open(filename, \"w\") as file:\n pass", "def renewFile(filename):\n\n\tfileRepo = repertoire + filename + extension # Position du fichier\n\n\t# Ouvre en ecriture et l'ecrase\n\t# La methode with ferme le fichier automatiquement\n\twith open(fileRepo, \"w\") as robFile:\n\t\trobFile.write(filename + \"\\n\") # Ecrit le nom du fichier au debut", "def overwrite_file(self):\n\n new_file = open(self.temp_filename, 'r')\n file = open(self.filename, 'w')\n file.writelines(new_file.readlines())\n new_file.close()\n file.close()\n os.remove(self.temp_filename)", "def copy_file(file_name, new_file_name):\n\n import os\n\n if not os.path.exists(file_name):\n raise FileNotFoundError\n\n with open(str(file_name), 'rb') as infile:\n with open(str(new_file_name), 'wb') as outfile:\n while True:\n buff = infile.read(10240)\n if buff:\n outfile.write(buff)\n else:\n break\n\n return", "def write_output_file(updated_file, file_path):\n orig_file = file_path + \".orig\"\n # remove an existion .orig file\n if os.path.isfile(orig_file):\n os.remove(orig_file)\n # rename the current file\n os.rename(file_path, orig_file)\n # write the new file\n with open(file_path, mode='w', encoding='utf-8', newline='') as file_out:\n for line in updated_file:\n file_out.write(line)", "def write_to_file(original_path, new_path):\n print(f\"[INFO]: Transform data from binary to text file {new_path}\")\n with open(new_path, mode='wt', encoding='utf-8') as new_file:\n with open(original_path, mode='rb') as original_file:\n for line in original_file:\n new_file.write(line.decode())", "def make_backup(file_name):\n copy2(file_name, file_name + '.bak')", "def encrypt_file(filename, key):\n # init fermet\n f = Fernet(key)\n with open(filename, \"rb\") as file:\n # read all file data\n file_data = file.read()\n # encrypt data\n encrypted_data = f.encrypt(file_data)\n # delete file\n remove(filename)\n # generate new filename\n new_filename = generate_new_filename(filename, key, True)\n # write the encrypted file\n with open(new_filename, \"wb\") as file:\n print(\"Encrypted: \" + new_filename)\n file.write(encrypted_data)\n\n return new_filename", "def replace_file(filename, contents):\n filename = path.join(PATH_ROOT, filename)\n filename_bak = \"%s.release.bak\" % filename\n os.rename(filename, filename_bak)\n with open(filename, \"w\") as out_file:\n out_file.write(\"\".join(contents))\n shutil.copymode(filename_bak, filename)\n os.remove(filename_bak)", "def save_uploaded_file(self, file, new_file_name):\n\n root_path = pathlib.Path(__file__).resolve().parents[1]\n\n filepath = os.path.join(root_path, FILE_DIR, new_file_name)\n\n data = file.read()\n\n with open(filepath, 'wb') as f:\n f.write(bytes(data))\n\n relative_filepath = os.path.join(\"/\", FILE_DIR, new_file_name)\n\n return relative_filepath", "def createBackup(self, filename):\n if (not os.path.isfile(filename + '.bak')) and os.path.isfile(filename):\n with open(filename + '.bak', 'wb') as bakf:\n with open(filename, 'rb') as oldf:\n bakf.write(oldf.read())\n print(filename + \" backed up\")", "def single_file_write(self, file_pointer, filename):\n temp_file = \"resources/temp_file\"\n\n file_pointer.seek(0)\n with open(temp_file, \"wb\") as output_file:\n shutil.copyfileobj(file_pointer, output_file)\n\n os.rename(temp_file, filename)\n log.info(\"Saved file: %s\", filename)", "def put_file(self, src_fname, dst_fname):\n dst_fname = os.path.normpath(dst_fname)\n 
self.mkdirs(os.path.dirname(dst_fname))\n self._put(src_fname, dst_fname)", "def write_file(self, filehandle, filename):\n filehandle.seek(0)\n backuppath = os.path.join(self.FTP_PATH, filename)\n self.ftp.storbinary('STOR ' + backuppath, filehandle)", "def putFile(self, filename):\n basename = os.path.basename(filename)\n fp = open(filename, 'rb')\n self.ftp.storbinary('stor ' + basename, fp)\n fp.close();", "def writeFile(self, filename):\n\n s = self.asString()\n if os.access(filename, os.F_OK):\n raise RuntimeError(\"file %s already exists -- not overwritten.\" % (filename))\n \n f = file(filename, \"w\")\n f.write(s)\n f.close()", "def cp_to_file(fn0, fn):\n\n # keep rewriting attributes\n shutil.copyfile(fn0, fn)", "def overwrite_original_file(self, value):\n self.__overwrite_original_file = value", "def rename_file(original, content_type, condo_name):\n condo_name = sanitize_filename(condo_name)\n original_file = os.path.join(DOWNLOAD_PATH, original)\n new_name = os.path.join(DOWNLOAD_PATH, content_type + \"\\\\\" + condo_name + \".txt\")\n extracted_file = os.path.join(DOWNLOAD_PATH, unzip_file(original_file))\n if os.path.exists(new_name):\n os.remove(new_name)\n os.renames(extracted_file, new_name)\n os.remove(original_file)", "def rename_file (self):\n\t\tassert self.__filename, \"Renaming could not complete because the new filename could not be determined, one or more needed arguments is empty!\"\n\t\tos.rename( self._file.path, self.__filename )\n\t\t\n\t\tif self.verbose and self.log :\tself.log.info( 'File renamed from %s to %s' % (self._file.path, self.__filename))", "def rename_file(path, old_name, new_name):\n \n old_file = os.path.join(path, old_name)\n new_file = os.path.join(path, new_name)\n os.rename(old_file, new_file)", "def copy_source(self, filename, new_filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n new_file_path = os.path.join(self.storage_path, new_filename)\n shutil.copyfile(file_path, new_file_path)", "def write(self, filename, text):\r\n self._call(\"-rm\", filename)\r\n with temporary_file() as fp:\r\n fp.write(text)\r\n fp.flush()\r\n return self._call('-copyFromLocal', fp.name, filename)", "def newfile(self) :\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\t\tglobal configurer\n\n\t\tfd,name = mkstemp(suffix='.blend')\n\t\tos.close(fd)\n\t\tself.name = name\n\t\tfd = open(name,'wb', configurer.get('ServerBufferSize'))\n\t\tself.fd = fd\n\t\tprint name\n\t\treturn 1", "def _set_filename(self, filename):\n tmp_file = '_'.join(filename.split())\n# new_file = new_file.replace(\"'\",\n# '_').replace('-',\n# '_').replace(' ',\n# '_').replace('(', '_').replace(')', '_')\n new_file = ''\n pathsep = os.path.sep \n if sys.platform == 'win32':\n pathsep = '/'\n for char in tmp_file:\n if char.isalnum() or char in ['.', '_', ':', pathsep, '-']:\n new_file += char\n try:\n shutil.copy(filename, new_file)\n except shutil.Error, err:\n msg = \"`%s` and `%s` are the same file\" % (filename, new_file)\n if str(err) == msg:\n pass\n else:\n raise err\n utils.ensure_file_exists(new_file)\n self._filename = new_file\n self._basename, self._ext = os.path.splitext(self._filename)", "def write_to_file(self, filename: str) -> None:", "def save_to_file(self, filename, overwrite=False):\n arg = \"x\"\n if overwrite:\n arg = \"w\"\n\n with open(filename, arg) as fd:\n self.save(fd)", "def copy_file(file, destination):\n with open(file, 'rb') as infile, open(destination, 'wb') as outfile:\n outfile.write(infile.read())", "def RenameFile(self, 
oldname: str, newname: str) -> None:\n ...", "def trash_file(file_to_trash, document_name) :\n dtpo_log('debug', \"trash_file file -> %s\", file_to_trash)\n\n source = Config.config.get_source_directory() + '/' + file_to_trash\n destination = Config.config.get_trash_directory() + '/' + document_name\n\n os.rename(source, destination)" ]
[ "0.68023175", "0.6601903", "0.6470307", "0.6256966", "0.60828876", "0.60267633", "0.59504694", "0.5871682", "0.5786605", "0.5738598", "0.57240206", "0.5673847", "0.5622948", "0.5617441", "0.5615462", "0.55986905", "0.55614066", "0.55208856", "0.551383", "0.55105835", "0.55076134", "0.55026776", "0.55018353", "0.5500804", "0.549015", "0.5486434", "0.54844224", "0.54761565", "0.5466248", "0.54579103" ]
0.68237215
0
``summary'' is a system-generated summary. ``references'' is a list of human-made reference summaries
def score_summary(self, summary, references, summary_id='A'):
    try:
        self._write_config(references, Doc(summary_id, summary))
        output = self._run_rouge()
        output = output.decode("utf-8")
        return self._parse_output(output)
    except CalledProcessError as e:
        print("Rouge returned a non-zero error code. Output was: ", file=sys.stderr)
        print("BEGIN OUTPUT ", file=sys.stderr)
        print(e.output, file=sys.stderr)
        print("END OUTPUT", file=sys.stderr)
        raise e
    finally:
        self._cleanup()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary: str):\n return self.swag({\n 'summary': summary\n })", "def print_summary_and_genomes(summary, genome):\n for sample in summary:\n for ref in summary[sample]:\n if ref == \"metadata\":\n continue\n final = {\n \"sample_identifier\": sample,\n \"reference_organism\": ref,\n \"metadata\": summary[sample][\"metadata\"]\n }\n if genome.get(sample) and genome[sample].get(ref):\n final.update({\"status\": \"complete\"})\n # Add summary statistics\n final.update(summary[sample][ref])\n # Add genomic sequences\n final.update(genome[sample][ref])\n else:\n final.update({\"status\": \"notMapped\"})\n print(json.dumps(final))", "def orders_summary(self, orders_summary):\n\n self._orders_summary = orders_summary", "def errors_summary(self, errors_summary):\n\n self._errors_summary = errors_summary", "def summaries(self, summaries):\n if summaries is None:\n raise ValueError(\"Invalid value for `summaries`, must not be `None`\")\n\n self._summaries = summaries", "def _summary(obj):\n return obj.summary", "def summary(self, summary):\n if summary is None:\n raise ValueError(\"Invalid value for `summary`, must not be `None`\") # noqa: E501\n if summary is not None and len(summary) < 1:\n raise ValueError(\"Invalid value for `summary`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._summary = summary", "def summary_lines(self, summary_lines):\n\n self._summary_lines = summary_lines", "def _process_references0(self, references):\n if \"zarr_consolidated_format\" in references:\n # special case for Ike prototype\n references = _unmodel_hdf5(references)\n self.references = references", "def test_get_brief_summary_output(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert expected results \n self.assertEqual(resource.get_brief_summary(), \"Delillo's White \"\\\n \"Noise follows narrator Jack Gladney, a professor \"\\\n \"at a \\nsmall Liberal Arts college and describes an \"\\\n \"academic year. 
Jack teaches \\nat ...\")", "def show_summary(self, lang):\n return self.summary % self.vars", "def _forward_summary(self, summaries):\n p = self.params\n for summary_key, summary_value in summaries.items():\n logging.info((summary_key, summary_value))\n summary_type = base_layer.get_summary_type_from_key(summary_key)\n assert summary_value.shape[0] == p.num_stages\n if p.unpack_summaries:\n # unstack summary_value\n unstacked_values = jnp.split(summary_value, p.num_stages)\n for i, v in enumerate(unstacked_values):\n base_layer.add_summary(f'{summary_key}/{i}', v, summary_type)\n else:\n base_layer.add_summary('{summary_key}', summary_value, summary_type)", "def printSummary(self):\n pass", "def references(self, references):\n\n self._references = references", "def add_summary(self, collections=None):\n\n name = self.group + '/' + self.name\n print(\"Add summary for \"+name)\n\n if self.stype == 0:\n self._placeholder = tf.placeholder(tf.float32, name=name)\n tf.summary.scalar(name, self._placeholder, collections=[collections])\n elif self.stype == 1:\n self._placeholder = tf.placeholder(\n tf.float32, shape=[None, None], name=name)\n tf.summary.image(name, self._placeholder, collections=[collections])\n elif self.stype == 2:\n self._placeholder = tf.placeholder(tf.float32, shape=[None], name=name)\n tf.summary.histogram(name, self._placeholder, collections=[collections])\n elif self.stype == 3:\n self._placeholder = tf.placeholder(tf.float32, name=name)\n tf.summary.scalar(name, self._placeholder, collections=[collections])\n if self._log:\n self._plot_summary = PlotSummaryLog(\n self.name, self.group, collections=[collections])\n else:\n self._plot_summary = PlotSummaryStandard(\n self.name, self.group, collections=[collections])\n elif self.stype == 4:\n self._plot_summary = PlotSummaryPlot(\n self.name, self.group, collections=[collections])\n elif self.stype == 5:\n self._placeholder = tf.placeholder(tf.float32, name=name)\n self._plot_summary = PlotSummaryImages(self.name, self.group, collections=[collections])\n else:\n raise ValueError('Wrong summary type')", "def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)", "def _forward_summary(self, summaries):\n p = self.params\n for summary_key, summary_value in summaries.items():\n logging.info((summary_key, summary_value))\n summary_type = base_layer.get_summary_type_from_key(summary_key)\n assert summary_value.shape[0] == p.x_times\n if p.unpack_summaries:\n # unstack summary_value\n unstacked_values = jnp.split(summary_value, p.x_times)\n for i, v in enumerate(unstacked_values):\n base_layer.add_summary(f'{summary_key}/{i}', v, summary_type)\n else:\n base_layer.add_summary('{summary_key}', summary_value, summary_type)", "def add_ref_tag(basicSeqs):\r\n\r\n formattedBasicSeqs=list(basicSeqs) \r\n for record in formattedBasicSeqs:\r\n record.id=record.id+'_Ref'\r\n record.name=record.name+'_Ref'\r\n record.description=record.description+'_Ref'\r\n return 
formattedBasicSeqs", "def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)", "def summary(self, i):\n return self.__summaries[i]", "def summary(self):\r\n return '%s%s: %s%s %s%s' % (BLUE, self.title,\r\n GREEN, self.description,\r\n NORMAL, self.link)", "def _print_summary(case, summary):\n for dof, data in summary.items():\n b4b = data[\"Bit for Bit\"]\n conf = data[\"Configurations\"]\n stdout = data[\"Std. Out Files\"]\n print(\" \" + case + \" \" + str(dof))\n print(\" --------------------\")\n print(\" Bit for bit matches : \" + str(b4b[0]) + \" of \" + str(b4b[1]))\n print(\" Configuration matches : \" + str(conf[0]) + \" of \" + str(conf[1]))\n print(\" Std. Out files parsed : \" + str(stdout))\n print(\"\")", "def summarize(self):\n\n def increment_summary(summary_obj, case_obj):\n \"\"\"increment ReportSummary count was ReportCase status\n\n Whatever the status of the case object, the corresponding property\n will be incremented by 1 in the summary object\n\n Args:\n summary_obj (ReportSummary): summary object to increment\n case_obj (ReportCase): case object\n \"\"\"\n summary_obj.increment(case_obj.get_status())\n\n summary = ReportSummary()\n [increment_summary(summary, case) for case in self.cases]\n self.summary = summary", "def replaces_summary(self):\n return False", "def get_summary(self, s, base=None):\n summary = summary_patt.search(s).group()\n if base is not None:\n self.params[base + \".summary\"] = summary\n return summary", "def get_summary(self, s, base=None):\n summary = summary_patt.search(s).group()\n if base is not None:\n self.params[base + '.summary'] = summary\n return summary", "def publish_summary(self, jobs):\n pass" ]
[ "0.58870405", "0.58870405", "0.58870405", "0.5803425", "0.5781204", "0.5522118", "0.5503257", "0.54215056", "0.53986794", "0.53744864", "0.53230405", "0.5302629", "0.52970827", "0.5282875", "0.5266578", "0.52372295", "0.5206945", "0.5153626", "0.5151684", "0.5148715", "0.51414406", "0.513884", "0.5113788", "0.5096503", "0.5089631", "0.5086856", "0.5086806", "0.508191", "0.5079586", "0.5074926" ]
0.6722548
0
Display unpublished Draft Entries
def drafts():
    query = Entry.drafts().order_by(Entry.last_mod_date.desc())
    return object_list('index.html', query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drafts_view(self, request, object_id, extra_context=None):\n opts = self.model._meta\n action_list = [{\"revision\": version.revision,\n \"url\": reverse(\"admin:%s_%s_draft\" % (opts.app_label, opts.module_name), args=(version.object_id, version.revision.id))}\n for version in self.get_draft_versions(object_id).select_related(\"revision\")]\n context = {\n \"action_list\": action_list, \n \"title\": _(\"Unpublished items\"), \n 'draft_view':True, \n 'has_draft':self.has_draft(object_id)\n }\n context.update(extra_context or {})\n return super(EasyPublisher, self).history_view(request, object_id, context)", "def get_drafts(self):\n return self.filter(status=\"D\")", "def post_list(request):\n # Only show the posts that have been published\n posts = Post.objects.filter(date_published__isnull=False)\n return render(request,\n 'blog/post_list.html',\n {'posts': posts}\n )", "def get_drafts(self, **kwargs):\n default_kwargs = { \"order\": \"updated_at desc\" }\n default_kwargs.update(kwargs)\n return self.get_messages(statuses=[\"draft\"], **default_kwargs)", "def draft(page):\r\n return app_index(page, cached_apps.get_draft, 'draft',\r\n False, True)", "def render_archives():\n\n\tq = \"SELECT title, text, id, project FROM entries WHERE archived=1 ORDER BY id desc\"\n\tcur = g.db.execute(q)\n\trows = cur.fetchall()\n\tentries = [dict(\n\t\t\ttitle=row[0], \n\t\t\ttext=row[1], \n\t\t\tid=row[2], \n\t\t\tproject=row[3]) for row in rows]\n\n\t\"\"\" filter catagories as to not repeat \"\"\"\n\tfiltered_catagories = set([ x[3] for x in rows ])\n\n\treturn render_template('show_entries.html', \n\t\tentries=entries, \n\t\tcatagories=filtered_catagories,\n\t\tfiltered=False,\n\t\tarchived=True,\n\t\t)", "def list_drafts(self, account):\n account = Account(account, hive_instance=self.hive)\n return self._conveyor_method(account, None,\n \"conveyor.list_drafts\",\n [account['name']])", "def get_draft_revisions(self, object_id):\n content_type = ContentType.objects.get_for_model(self.model)\n return Revision.objects.filter(\n version__object_id=object_id, \n version__content_type=content_type,\n easypublishermetadata__status='draft',\n easypublishermetadata__language=get_language()\n ).select_related().distinct()", "def draft_message(request):\n query = models.Message.query(\n models.Message.sender == request.user.email(),\n models.Message.draft == True,\n ancestor=request.issue.key)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return HttpTextResponse(draft_message.text if draft_message else '')\n return HttpTextResponse('An error occurred.', status=500)", "def test_home_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Praesent sollicitudin.', status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertContains(response, \"No posts are available.\")\n self.assertQuerysetEqual(response.context['posts'], [])", "def list_drafts(self) -> PagingList[Draft]:\n return PagingList(self._generate_drafts, 128)", "def history_view(self, request, object_id, extra_context=None):\n defaults = {\n 'has_draft': self.has_draft(object_id)\n }\n defaults.update(extra_context or {})\n return super(EasyPublisher, self).history_view(request, object_id, defaults)", "def isDraft(self): #$NON-NLS-1$\r", "def get_haikus_unposted(cls, db_session) -> list:\n q = (\n db_session.query(cls)\n .filter(cls.date_posted == None) # noqa: E711\n .filter(cls.date_deleted == None) # noqa: E711\n )\n return q.all()", "def drafts(self):\n if self._drafts is None:\n if self._initialize_drafts():\n self._save_drafts()\n return self._drafts", "def public_timeline():\n return render_template('timeline.html', messages=query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE]))", "def public_timeline():\n return render_template('timeline.html', messages=query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE]))", "def test_show_post_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n draft_post = create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n url = reverse('blog.post', args=(draft_post.id,))\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)", "def education_post_list(request):\n posts = EducationBlogPost.objects.filter(published_date__lte=timezone.now()\n ).order_by('-published_date')\n return render(request, \"education_center/education_blogposts.html\", {'posts': posts})", "def draft_message(request):\n query = models.Message.query(\n models.Message.issue_key == request.issue.key,\n models.Message.sender == request.user.email(),\n models.Message.draft == True)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return _get_draft_message(draft_message)\n elif request.method == 'POST':\n return _post_draft_message(request, draft_message)\n elif request.method == 'DELETE':\n return _delete_draft_message(draft_message)\n return HttpTextResponse('An error occurred.', status=500)", "def get_published(self):\n return self.filter(status=\"P\")", "def list_unresolved(self): # new\n feed = self.get_feed(limit=999999)\n posts = feed.get(\"threads\")\n\n for s in posts:\n if (\n s.get(\"approved_status\", \"approved\") != \"rejected\"\n and (\n s.get(\"type\", \"question\") != \"post\" or s.get(\"is_megathread\", True)\n )\n and not s.get(\"is_answered\", True)\n and s.get(\"unresolved_count\", 1)\n ):\n yield s", "def show_entries():\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, \\\n url from entries order by id asc')\n entries = cur.fetchall()\n return render_template('show_entries.html', entries=entries)", "def _get_draft_message(draft):\n return HttpTextResponse(draft.text if draft else '')", "def 
test_get_drafts(self):\n r1 = Recipes.objects.create(chef=self.user, name=\"Recipe 1\", draft=True)\n r2 = Recipes.objects.create(chef=self.user, name=\"Recipe 2\", draft=False)\n\n url = '/0/chefs/%i/drafts' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('drafts', resp.data)\n self.assertEqual(1, len(resp.data['drafts']))\n keys = (\"liked\", \"public_url\", \"edit_date\", \"ingredients\", \"shared\", \"tags\", \"commented\",\n \"private\", \"id\", \"chef\", \"reported\", \"nb_shares\", \"added\", \"nb_added\",\n \"nb_comments\", \"draft\", \"commensals\", \"creation_date\", \"nb_likes\", \"name\",\n \"products\", \"prep_time\", \"serves\", \"bought\", \"book_for_sale\", \"description\")\n self.assertEqual(set(keys), set(resp.data['drafts'][0].keys()))\n self.assertEqual(r1.pk, resp.data['drafts'][0]['id'])", "def test_home_view_with_draft_post_and_published_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Published Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Published')\n create_post(category=category, author=author, name='Draft Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertQuerysetEqual(\n response.context['posts'],\n ['<Post: Published Post>']\n )", "def show_entries_stream():\n pass", "def draft_messages(self):\n return self._get_messages_from_folder_name('Drafts')", "def entries_index(request):\n blog_entries = Entry.objects.filter(status=2).order_by('-pub_date')\n paginator = Paginator(blog_entries, 4)#4 posts/page\n try:\n page = int(request.GET.get('page','1'))\n except ValueError:\n page = 1\n try:\n entries = paginator.page(page)\n except (EmptyPage, InvalidPage):\n entries = paginator.page(paginator.num_pages)\n return render_to_response('blog/blog.html', {'entries':entries}, RequestContext(request))", "def full_listing(request, urlname):\n\tif request.user.is_authenticated():\n\t\tblog = Blog.qa_objects.get(urlname=urlname)\n\t\tposts = BlogEntry.qa_objects.filter(blog=blog, posting_time__lte=datetime.now()).order_by('-posting_time')\n\telse:\n\t\tblog = Blog.objects.get(urlname=urlname)\n\t\tposts = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')\n\treturn render_to_response('blogs/full.html', {'blog': blog, 'posts': posts}, context_instance=RequestContext(request))" ]
[ "0.7508209", "0.6815543", "0.6162445", "0.6106849", "0.60844666", "0.6059696", "0.60429025", "0.60228574", "0.6012785", "0.59416395", "0.5828027", "0.5751404", "0.5730366", "0.57115114", "0.5708828", "0.5697832", "0.5697832", "0.5679159", "0.5660224", "0.5650022", "0.56475645", "0.5633808", "0.56066024", "0.55717516", "0.5559834", "0.55557215", "0.5546395", "0.5537633", "0.550518", "0.5502599" ]
0.73618597
1
Create new blog Entry
def create():
    if request.method == 'POST':
        if request.form.get('title') and request.form.get('content'):
            entry = Entry.create(
                title = request.form.get('title'),
                content = request.form.get('content'),
                published = request.form.get('published') or False)
            flash('Entry created successfully!', 'success')
            if entry.published:
                return redirect(url_for('detail', slug=entry.slug))
            else:
                return redirect(url_for('edit', slug=entry.slug))
        else:
            flash('Title and Content are required!', 'danger')
    return render_template('create.html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createNewBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createNewEntryDocument()\r\n self._initNewEntryDocument(atomdoc)\r\n return ZAtomNewBlogEntry(atomdoc)", "def blog_create(request):\n entry = BlogRecord()\n form = BlogCreateForm(request.POST)\n if request.method == 'POST' and form.validate():\n form.populate_obj(entry)\n request.dbsession.add(entry)\n return HTTPFound(location=request.route_url('home'))\n return {'form': form, 'action': request.matchdict.get('action')}", "def newPost(self, postLink, zserverBlogEntry): #$NON-NLS-1$\r\n atomEntry = self.createNewBlogEntry()\r\n self._populateAtomEntry(atomEntry, zserverBlogEntry)\r\n # publish entry\r\n atomRespEntry = self.createAtomEntry(postLink, atomEntry)\r\n return atomRespEntry", "def new(request):\n\n if request.method == 'POST':\n data = request.POST\n form = BlogEntryForm(creator=request.user, data=data)\n if form.is_valid():\n form.save()\n # Should we redirect to single entry view or to all?\n return HttpResponseRedirect(reverse('blog.list_all'))\n else:\n form = BlogEntryForm(creator=request.user)\n\n data = {'form': form, 'blog_info': get_blog_info()}\n data.update(csrf(request))\n return render_to_response('blog/new_blog.html', data,\n context_instance=get_rq(request))", "def post(self):\n data = request.json\n return create_new_blog(data=data)", "def addBlogEntry(self, space, title, content = ''):\n return BlogEntry.create(self.pm_getSpaceManager().addBlogEntry(self._unbox(space), title, content), self._modelDataManager)", "def create(cls, headline, text, blog):\n post = cls()\n post.headline = headline\n post.text = text\n post.blog = blog\n post.posted_date = timezone.now()\n try:\n post.save()\n return post\n except(ValueError, IntegrityError, OperationalError):\n return None", "def post(self):\n\n title = self.request.get(\"title\")\n blogPost = self.request.get(\"blogPost\")\n author = self.request.cookies.get('name')\n\n if title and blogPost:\n\n bp = Blogposts(parent=blog_key(), title=title,\n blogPost=blogPost, author=check_secure_val(author))\n\n bp.put()\n\n self.redirect('/%s' % str(bp.key.integer_id()))\n else:\n error = \"Please submit both a title and a blogpost!\"\n self.render(\"newpost.html\", title=title,\n blogPost=blogPost, error=error)", "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def createEditBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createEditEntryDocument()\r\n self._initEditEntryDocument(atomdoc)\r\n return ZAtomEditBlogEntry(atomdoc)", "def create():\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO post (title, body, author_id)'\n ' VALUES (?, ?, ?)',\n (title, body, g.user['id'])\n )\n db.commit()\n return redirect(url_for('blog.index'))\n\n return render_template('blog/create.html')", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def post(self):\n title = self.request.get(\"title\")\n body = self.request.get(\"body\")\n\n if title and body:\n\n # create a new Post object and store it in the database\n post = Post(\n title=title,\n body=body\n )\n post.put()\n\n # get the id of the new post, so we can render the post's page (via the permalink)\n id = post.key().id()\n self.redirect(\"/blog/%s\" % id)\n else:\n error = \"we need both a title and a body!\"\n #self.render_form(title, body, error)\n 
self.render(\"newpost.html\", title, body, error)", "def new_blog(blog, template):\n path = '/'.join([POSTS, blog])\n with open(path, 'w') as blg:\n blg.write(template)", "def create(\n\t\trequest: schemas.Blog, db: Session = Depends(get_db),\n\t\tcurrent_user: schemas.User = Depends(oauth2.get_current_user)\n):\n\treturn blog.create(request, db)", "def getBlogEntry(self, id):\n return BlogEntry.create(self.pm_getSpaceManager().getBlogEntry(self._unbox(id)),self._modelDataManager)", "def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response", "def create_entry():\n new_entry = DB_Entry() # Create instance of entry to add the info to\n print('Eratosthenes is ready to add your new entry.\\n')\n new_entry.set_id()\n title = input('Enter the title:\\n')\n new_entry.set_title(title)\n authors = input('Enter the authors as list of surname, firstname separated by semicolons:\\n')\n new_entry.set_authors(authors)\n try:\n year = int(input('Enter the year:\\n'))\n except ValueError:\n try:\n year = int(input('Enter the year as an integer:\\n'))\n except ValueError:\n print('You failed to follow basic instructions. The year is set to 2000\\n')\n year = 2000\n new_entry.set_year(year)\n pub_type = input('Enter the publication type as article/review/book/other:\\n')\n try:\n new_entry.set_type(pub_type)\n except ValueError:\n try:\n pub_type = input('Type must be one of article/review/book/other:\\n')\n new_entry.set_type(pub_type)\n except ValueError:\n print('You failed to follow basic instructions. Type is now set to \\'other\\'\\n')\n pub_type = 'other'\n new_entry.set_type(pub_type)\n keywords = input('Enter list of keywords separated by semicolons:\\n')\n new_entry.set_keywords(keywords.split(';'))\n current_path = input('Enter the current path to the file\\n')\n current_path = current_path.replace('~', '/Users/marcus')\n if not os.path.isfile(current_path):\n print('File not found. 
Please try again')\n current_path = input('Enter the current path to the file\\n')\n if not os.path.isfile(current_path):\n print('File not found')\n new_entry.set_new_path()\n db_actions.copy_file(new_entry.get_path(), current_path)\n return new_entry", "def new_post(self, content):\n return self.proxy.wp.newPost(self.blog_id, self.username, self.password,\n content)", "def post(self):\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n # if user enter good subject and content, redirect them to new post page\n if subject and content:\n p = Post(parent = blog_key(), subject = subject, content = content)\n p.put() # store the post element into database\n self.redirect('/blog/%s' % str(p.key().id()))\n # otherwise, render an error page \n else:\n error = \"subject and content, please!\"\n self.render(\"newpost.html\", subject=subject, content=content, error=error)", "def new(request):\n assert isinstance(request, HttpRequest)\n if request.method == 'POST': # フォームが提出された\n form = EntryForm(request.POST) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n entry = form.save(commit=False)\n entry.member = request.user\n entry.save()\n return HttpResponseRedirect(reverse('entry_list')) # POST 後のリダイレクト\n else:\n form = EntryForm() # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n auth_form = AuthenticationForm(None, request.POST or None)\n return render(request, 'app/entry_edit.html', { \n 'form': form,\n 'title':'ブログ記事の新規登録',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'登録する',\n 'auth_form':auth_form,\n 'current_user':request.user,\n })", "def newPost(self, useRawHTML):\n print\n content, publish = self._fillPost(useRawHTML)\n\n # Upload to server\n try :\n postid = self.server.metaWeblog.newPost(\n self.blogid, self.username, self.password,\n content, publish\n )\n except xmlrpclib.Fault as fault:\n display_XMLRPC_errors(\"post the new entry\", fault)\n import pdb\n pdb.set_trace()\n else :\n self._setCategorie(postid)\n print \"New post created with ID =\", postid", "def add_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n introduction = request.form['introduction']\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n new_entry = Entries(title, category, buydate, introduction)\n db.session.add(new_entry)\n\n try :\n db.session.commit()\n except IntegrityError as e :\n flash(e.message)\n return redirect(url_for('show_entries_admin'))\n\n flash(u'成功添加新的条目')\n return redirect(url_for('show_entries_admin'))", "def post(self):\n\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n have_errors = False\n\n if not subject:\n error_subject = \"Please write down the subject\"\n have_errors = True\n if not content:\n error_content = \"Content is required\"\n have_errors = True\n\n if have_errors:\n self.render(\"newpost.html\",\n subject=subject,\n content=content,\n error_subject=error_subject,\n error_content=error_content,\n user=self.user)\n else:\n post = Post(parent=blog_key(),\n subject=subject,\n content=content,\n user=self.user)\n post.put()\n self.redirect('/blog/%s' % str(post.key().id()))", "def create_blog_post(user_id):\n \n data = request.get_json()\n\n # Check if the user is in the database\n user = 
User.query.filter_by(id=user_id).first()\n if not user:\n return jsonify({\"message\": \"user does not exist!\"}), 400\n\n # Create an instance of a HashTable\n ht = hash_table.HashTable(10)\n\n # Create a blog post\n ht.add_key_value(\"title\", data[\"title\"])\n ht.add_key_value(\"body\", data[\"body\"])\n ht.add_key_value(\"date\", now)\n ht.add_key_value(\"user_id\", user_id)\n\n # Add a blog post to the database\n new_blog_post = BlogPost(\n title=ht.get_value(\"title\"),\n body=ht.get_value(\"body\"),\n date=ht.get_value(\"date\"),\n user_id=ht.get_value(\"user_id\"),\n )\n db.session.add(new_blog_post)\n db.session.commit()\n return jsonify({\"message\": \"new blog post created\"}), 200", "def new_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(pub_date=datetime.date.today())\n post.title = form.title.data\n post.content = form.content.data\n post.slug = slugify(post.title)\n db.session.add(post)\n db.session.commit()\n return flask.redirect(flask.url_for(\n 'view_post',\n year=post.pub_date.year,\n month=post.pub_date.month,\n day=post.pub_date.day,\n slug=post.slug\n ))\n return flask.render_template('new.html', form=form)", "def add_entry():\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n cur = db.execute('insert into entries (title, ingredients, steps, \\\n tags, url) values (?, ?, ?, ?, ?)',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url']])\n db.commit()\n flash('Recipe, ' + escape(request.form['title'])\n + ', was successfully added', 'success')\n return view_entry(str(cur.lastrowid))\n else:\n return render_template('add_entry.html')", "def new_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n return False\n default_storage.save(filename, ContentFile(content))\n return True", "def main(blog, date):\n template = front_matter({\n \"title\": blog,\n \"date\": get_date(\"%Y-%m-%d %H:%M:%S %z\"),\n })\n new_blog(date + '-' + blog + '.markdown', template)", "def post(self, request):\n\n # crear el formulario con los datos del POST\n blog_with_user = Blog(owner=request.user)\n form = BlogForm(request.POST, instance=blog_with_user)\n\n if form.is_valid():\n #crea el post\n blog = form.save()\n\n #generar mensaje de exito\n msg = \"Blog creado con éxito\"\n\n # limpiamos el formulario creando uno vacío para pasar a la plantilla\n form = BlogForm()\n else:\n msg = \"Ha ocurrido un error al guardar el blog\" \\\n\n\n # renderiza la plantilla con el formulario\n context = {\n \"form\": form,\n \"msg\": msg\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/new-blog.html', context)" ]
[ "0.8232406", "0.7668556", "0.7641279", "0.7607913", "0.73694056", "0.7322824", "0.73227113", "0.7250803", "0.7247709", "0.72167945", "0.7215931", "0.6994082", "0.69597936", "0.69173276", "0.68832356", "0.6816936", "0.68142015", "0.67737234", "0.6718318", "0.670351", "0.66975707", "0.6672287", "0.66480535", "0.66211003", "0.6609822", "0.65995824", "0.6567572", "0.6552912", "0.65434533", "0.6512855" ]
0.77166563
1
A function converting CSV output files from operational_sep_quantities to JSON files for observations
def obs_csv2json(input_file,output_file,example_path,instrument):

    obs_path = Path(cfg.obs_path)

    with open(example_path,'r') as e:
        example = js.load(e)

    #deleting unused categories
    del(example['sep_forecast_submission']['forecasts'])
    del(example['sep_forecast_submission']['triggers'][2])
    del(example['sep_forecast_submission']['triggers'][1])
    del(example['sep_forecast_submission']['triggers'][0])
    del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])
    del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])
    del(example['sep_forecast_submission']['contacts'])
    del(example['sep_forecast_submission']['model'])
    del(example['sep_forecast_submission']['issue_time'])

    example['sep_forecast_submission']['mode'] = 'observation'

    #json template for observations
    obs_json = example

    fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',
                  'peak_time','rise_time','end_time','duration','fluence>10',
                  'fluence>100')

    #extracting data from csv file
    with open(input_file,'r') as f:
        reader = csv.DictReader(f, fieldnames)
        out = js.dumps( [ row for row in reader ] )

    obs_data = js.loads(out)

    data={}
    (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']
     ['observatory']) = instrument

    #creating data for all energy levels forecast
    for j in range(1,len(obs_data)):
        data[j-1]=obs_data[j]

    #recording start and end times for all events
    for i in range(len(data)):
        data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')
        data[i]['start_time'] = data[i]['start_time'].isoformat()
        data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')
        data[i]['end_time'] = data[i]['end_time'].isoformat()
        data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')
        data[i]['peak_time'] = data[i]['peak_time'].isoformat()

        #recording observed values for all events
        if i > 0:
            (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']
             ['ongoing_events']).append({})

        event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']
                 ['ongoing_events'][i])

        #start and end times
        event['start_time']=data[i]['start_time']
        event['threshold'] = data[i]['flux_threshold']
        event['energy_min'] = float(data[i]['energy_threshold'][1:])
        event['energy_max'] = -1
        event['end_time']=data[i]['end_time']

        #peak values
        event['peak_intensity']=data[i]['intensity']
        event['peak_time'] = data[i]['peak_time']
        event['intensity_units']='pfu'

        #fluence values
        event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',
                             'units' : 'MeV [cm^-2]'},
                            {'energy_min' : '100', 'fluence_value' : 'fluence_value',
                             'units' : 'MeV [cm^-2]'}]
        event['fluence'][0]['fluence']=data[i]['fluence>10']
        event['fluence'][1]['fluence']=data[i]['fluence>100']

        if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index
                                                               (int(event['energy_min']))]:
            event['all_clear_boolean'] = 'false'
        else:
            event['all_clear_boolean'] = 'true'

    #building json file
    with open(obs_path / output_file, 'w') as s:
        js.dump(obs_json,s,indent=1)
        print('json file %s created' %output_file)

    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. there was an error), write the error id and error message\n # note : msg is the short message (eg. 
PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". 
Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def metrics_to_json(metrics_csv_int, region, coords, metrics_filename):\n data = {}\n with open(metrics_csv_int,'r') as f:\n reader = csv.reader(f)\n fields = next(reader)\n for row in reader:\n data[row[0]] = {\"Temporal intermittency\": {},\n \"Spatial intermittency\": {}}\n # skip the first key in fields, clean up field name\n for i,field in enumerate(fields[1:6]):\n data[row[0]][\"Temporal intermittency\"].update({field[5:]:float(row[i+1])})\n for i,field in enumerate(fields[6:]):\n data[row[0]][\"Spatial intermittency\"].update({field[3:]:float(row[i+6])})\n with open(metrics_filename, 'r') as fname:\n metrics = json.load(fname)\n\n # Add region to dimensions information\n metrics['DIMENSIONS']['dimensions']['region'].update({region: coords})\n\n # Update model statistics\n for model in data:\n if not (model in metrics['RESULTS']):\n metrics['RESULTS'][model] = {}\n metrics['DIMENSIONS']['dimensions']['dataset'].update({model: {}})\n metrics['RESULTS'][model][region] = data[model]\n\n # Write new metrics to same file\n with open(metrics_filename, 'w') as fname:\n json.dump(metrics,fname,indent = 2)", "def _json_export(self, exppath):\n # TODO: Settle on JSON format for colortable\n pass", "def test_csv_to_json():\r\n json_dict = {\r\n \"covariates\":{ \r\n \"value\":{\r\n \"subject0\": {\r\n \"attribute0\": 3.0,\r\n \"attribute1\": 12.0\r\n },\r\n \"subject1\": {\r\n \"attribute0\": 1.2,\r\n \"attribute1\": 10.9\r\n }\r\n }\r\n },\r\n \"data\":{\r\n \"fulfilled\": True,\r\n \"value\": {\r\n \"type\": [\"float\"],\r\n \"value\": [\r\n \"attribute0\",\r\n \"attribute1\"\r\n ]\r\n }\r\n },\r\n \"lambda\":{\r\n \"fulfilled\": True,\r\n \"value\": 0\r\n }\r\n }\r\n json_string = \"[\" + json.dumps(json_dict).replace(' ', '').replace('\\n', '') + \"]\"\r\n directory = os.path.join(os.getcwd(), \"test/\")\r\n lambda_ = \"0\"\r\n data_type = [\"float\"]\r\n data_vars = [\"attribute0\", \"attribute1\"]\r\n assert csv_to_json_(directory, lambda_, data_type, data_vars).replace(' ', '').replace('\\n', '') == json_string", "def main(input_filepath, output_filepath):\n productsDict = dataToDict(input_filepath)\n productsList = dictToCSV(productsDict)\n toCSV(productsList, output_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def output_to_cwl_json(\n galaxy_output, get_metadata, get_dataset, get_extra_files, pseduo_location=False,\n):\n def element_to_cwl_json(element):\n element_output = GalaxyOutput(\n galaxy_output.history_id,\n element[\"object\"][\"history_content_type\"],\n element[\"object\"][\"id\"],\n )\n return output_to_cwl_json(element_output, get_metadata, get_dataset, get_extra_files)\n\n 
output_metadata = get_metadata(galaxy_output.history_content_type, galaxy_output.history_content_id)\n\n def dataset_dict_to_json_content(dataset_dict):\n if \"content\" in dataset_dict:\n return json.loads(dataset_dict[\"content\"])\n else:\n with open(dataset_dict[\"path\"]) as f:\n return json.load(f)\n\n if output_metadata[\"history_content_type\"] == \"dataset\":\n ext = output_metadata[\"file_ext\"]\n assert output_metadata[\"state\"] == \"ok\"\n if ext == \"expression.json\":\n dataset_dict = get_dataset(output_metadata)\n return dataset_dict_to_json_content(dataset_dict)\n else:\n file_or_directory = \"Directory\" if ext == \"directory\" else \"File\"\n if file_or_directory == \"File\":\n dataset_dict = get_dataset(output_metadata)\n properties = output_properties(pseduo_location=pseduo_location, **dataset_dict)\n basename = properties[\"basename\"]\n extra_files = get_extra_files(output_metadata)\n found_index = False\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == SECONDARY_FILES_INDEX_PATH:\n found_index = True\n\n if found_index:\n ec = get_dataset(output_metadata, filename=SECONDARY_FILES_INDEX_PATH)\n index = dataset_dict_to_json_content(ec)\n for basename in index[\"order\"]:\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == os.path.join(SECONDARY_FILES_EXTRA_PREFIX, basename):\n ec = get_dataset(output_metadata, filename=path)\n if not STORE_SECONDARY_FILES_WITH_BASENAME:\n ec[\"basename\"] = basename + os.path.basename(path)\n else:\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n if \"secondaryFiles\" not in properties:\n properties[\"secondaryFiles\"] = []\n\n properties[\"secondaryFiles\"].append(ec_properties)\n else:\n basename = output_metadata.get(\"cwl_file_name\")\n if not basename:\n basename = output_metadata.get(\"name\")\n\n listing = []\n properties = {\n \"class\": \"Directory\",\n \"basename\": basename,\n \"listing\": listing,\n }\n\n extra_files = get_extra_files(output_metadata)\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n ec = get_dataset(output_metadata, filename=path)\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n listing.append(ec_properties)\n\n return properties\n\n elif output_metadata[\"history_content_type\"] == \"dataset_collection\":\n if output_metadata[\"collection_type\"] == \"list\":\n rval = []\n for element in output_metadata[\"elements\"]:\n rval.append(element_to_cwl_json(element))\n elif output_metadata[\"collection_type\"] == \"record\":\n rval = {}\n for element in output_metadata[\"elements\"]:\n rval[element[\"element_identifier\"]] = element_to_cwl_json(element)\n return rval\n else:\n raise NotImplementedError(\"Unknown history content type encountered\")", "def convert_to_dict_then_json(row, sep,feature_list):\n feature_values = row.decode('utf-8').replace('\\n', '').replace('\\r', '').split(sep)\n feature_values_clean = [float(x) if is_number(x) else 0 for x in feature_values]\n feat_dict = dict(zip(feature_list, feature_values_clean))\n feat_json = json.dumps(feat_dict).encode('utf-8')\n return(feat_json)", "def _export_jql_items(items, output_file, format='json', compress=False):\n if format == 'json':\n Mixpanel.export_data(items, output_file, format=format, compress=compress)\n elif format 
== 'csv':\n with open(output_file, 'w') as f:\n f.write(items)\n if compress:\n Mixpanel._gzip_file(output_file)\n os.remove(output_file)\n else:\n Mixpanel.LOGGER.warning('Invalid format must be either json or csv, got: ' + format)\n return", "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return", "def df_to_json(complete_dataset, output_path, static_columns):\n megajson = {}\n\n static_columns = [\"continent\", \"location\"] + list(static_columns)\n\n complete_dataset = complete_dataset.dropna(axis=\"rows\", subset=[\"iso_code\"])\n\n for _, row in complete_dataset.iterrows():\n\n row_iso = row[\"iso_code\"]\n row_dict_static = row.drop(\"iso_code\")[static_columns].dropna().to_dict()\n row_dict_dynamic = row.drop(\"iso_code\").drop(static_columns).dropna().to_dict()\n\n if row_iso not in megajson:\n megajson[row_iso] = row_dict_static\n megajson[row_iso][\"data\"] = [row_dict_dynamic]\n else:\n megajson[row_iso][\"data\"].append(row_dict_dynamic)\n\n with open(output_path, \"w\") as file:\n file.write(json.dumps(megajson, indent=4))", "def export_sampleStorage_csv(self, sample_ids_I, filename_O):\n\n data_O = [];\n for sample_id in sample_ids_I:\n data_tmp =[];\n data_tmp = self.get_rows_sampleID_limsSampleStorage(sample_id);\n data_O.extend(data_tmp);\n if data_O:\n io = base_exportData(data_O);\n io.write_dict2csv(filename_O);", "def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added", "def csv_to_json(name: str) -> str:\n with open(name) as file: # type: IO[str]\n result: Dict[str, Any] = {}\n for row in DictReader(file):\n item = row[\"Item\"].split(\"(\")[0].strip()\n size = row[\"Serving Size\"].rstrip(\")\").replace(\"(\", \"/ \")\n result[f\"{item} [{size}]\"] = row[\"Calories\"]\n return dumps(result)", "def transform2json(source, target):\n behaviors = pd.read_table(\n 
source,\n header=None,\n names=['uid', 'time', 'clicked_news', 'impression'])\n f = open(target, \"w\")\n with tqdm(total=len(behaviors), desc=\"Transforming tsv to json\") as pbar:\n for row in behaviors.itertuples(index=False):\n item = {}\n item['uid'] = row.uid[1:]\n item['time'] = row.time\n item['impression'] = {\n x.split('-')[0][1:]: int(x.split('-')[1])\n for x in row.impression.split()\n }\n f.write(json.dumps(item) + '\\n')\n\n pbar.update(1)\n\n f.close()", "def build_csv_write(api):\n\n write_rows = []\n for info in api:\n write_rows.append([info[\"number\"], info[\"status\"], info[\"available_bike_stands\"],\n info[\"available_bikes\"], time])\n\n return write_rows", "def Export_in_files(COVID_data, COVID_data_reconstructed):\r\n F_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')\r\n FR_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')\r\n \r\n COVID_data_lists = [COVID_data, COVID_data_reconstructed]\r\n Data_file_list = [F_data_file, FR_data_file]\r\n Countries_list = list(COVID_data.keys())[1:]\r\n \r\n for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file\r\n COVID_data_temp = COVID_data_lists[Data_set_inc]\r\n Data_file_temp = Data_file_list[Data_set_inc]\r\n \r\n Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\\n')\r\n \r\n for Country in Countries_list:\r\n COVID_data_single_country = COVID_data_temp[Country]\r\n \r\n Date_list = list(COVID_data[Country].keys())\r\n for Date in Date_list:\r\n COVID_data_single_country_single_date = COVID_data_single_country[Date]\r\n Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None\r\n \r\n Data_file_temp.write('%s;%s;' % (Country, Date))\r\n Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))\r\n Data_file_temp.write('\\n')\r\n \r\n Data_file_temp.close()", "def write_to_csv(self, name_suffix = ''):\n f_path = os.path.join(self.root_dir, 'res' + name_suffix + '.csv')\n field_names = [] # the first field in CSV is 'obj_val'\n\n # put the keys in the cost, prim_var_change, dual_var_change and fea_conditions as field names if any\n for key in self.cost.keys():\n field_names.append(key)\n for key in self.cost_change.keys():\n field_names.append(key)\n for key in self.prim_var_change.keys():\n field_names.append(key)\n for key in self.dual_var_change.keys():\n field_names.append(key)\n for key in self.fea_conditions.keys():\n field_names.append(key)\n\n\tprint f_path\n\n with open(f_path, mode = 'wb') as csv_file: # open the file, if not exist, create it\n writer = csv.DictWriter(csv_file, fieldnames = field_names) # create a writer which maps the dictionaries onto output rows in CSV\n writer.writeheader() # write the field names to the header\n temp_dict = {} # create a temporary dict used to output rows\n row_max = self.get_iter_num() # get the max iters which indicates the number of rows in CSV\n print ('number of rows: ' + str(row_max))\n #print (field_names)\n for row in range(row_max + 1):\n temp_dict.clear() # clear all items\n start_idx = 0\n for i in range(len(self.cost)):\n field = field_names[start_idx + i]\n\t\t if row > len(self.cost[field]) - 1:\n\t\t\ttemp_dict[field] = 
''\n\t\t else: temp_dict[field] = self.get_cost_val(field, row)\n\n start_idx = start_idx + len(self.cost) # the start pos of fields in field_names for prim_var_change\n for i in range(len(self.cost_change)): # for each cost_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n elif row > len(self.cost_change[field]) - 1:\n\t\t\t temp_dict[field] = ''\n\t\t else:\n temp_dict[field] = self.get_cost_change_value(field, row - 1)\n\n\n start_idx = start_idx + len(self.cost_change)\n for i in range(len(self.prim_var_change)): # for each prim_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n\t\t elif row > len(self.prim_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else:\n temp_dict[field] = self.get_prim_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.prim_var_change) # go to the start pos of fields in field_names for dual_var_change\n for i in range(len(self.dual_var_change)): # for each dual_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of dual variables\n temp_dict[field] = '/'\n elif row > len(self.dual_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = '' \n\t\t else:\n temp_dict[field] = self.get_dual_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.dual_var_change) # go the the start pos of fields in field_names for fea_conditions\n for i in range(len(self.fea_conditions)): # for each fea_condition\n field = field_names[start_idx + i]\n\t\t if row > len(self.fea_conditions[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else: temp_dict[field] = self.get_fea_condition_value(field, row)\n\n writer.writerow(temp_dict)\n\n # we also save the value of primal values if not saved\n if not self.pdv_to_csv:\n self.save_last_prims()", "def main(input_filepath, output_filepath, data_type):\n a = jsonCSV(input_filepath, os.path.join(output_filepath, data_type+'.csv'))\n column_names = a.get_superset_column_names()\n a.read_write(column_names)\n\n logger = logging.getLogger(__name__)\n logger.info('transform log files into csv')", "def opf2json(opf_path: str, output_file: Optional[str] = None) -> None:\n\n logger.info(\"Converting file: %s ...\", opf_path)\n\n header_format = \"<iii\"\n header_size = struct.calcsize(header_format)\n\n with open(opf_path, \"rb\") as f:\n header_data = struct.unpack(header_format, f.read(header_size))\n\n n_samples = header_data[0]\n n_features = header_data[2]\n\n file_format = \"<ii\"\n for _ in range(n_features):\n file_format += \"f\"\n\n data_size = struct.calcsize(file_format)\n\n json = {\"data\": []}\n for _ in range(n_samples):\n data = struct.unpack(file_format, f.read(data_size))\n\n # Note that we subtract 1 from `labels` column\n json[\"data\"].append(\n {\"id\": data[0], \"label\": data[1] - 1, \"features\": list(data[2:])}\n )\n\n if not output_file:\n output_file = opf_path.split(\".\")[0] + \".json\"\n\n with open(output_file, \"w\") as f:\n j.dump(json, f)\n\n logger.info(\"File converted to %s.\", output_file)", "def encode_to_raw_json(self, feature_collection, csv_f):\n clean_name = str(path.splitext(csv_f)[0]) + \".json\"\n with open(path.join(self.uk_postcodes, clean_name), \"wb\") as json_outfile:\n dump(feature_collection, json_outfile)", "def result2json(ifilename, poiname, ofilename):\n nameMap = {\n \"SysWeight1\" : 
\"mc\",\n \"SysWeight2\" : \"FSR\",\n \"SysWeight3\" : \"bkg\",\n \"SysWeight4\" : \"tagpt\",\n \"SysWeight6\" : \"Prefire\",\n \"SysRecoil2\" : \"recoil_eta\",\n \"SysRecoil3\" : \"recoil_keys\",\n \"SysRecoil6\" : \"recoil_stat0\",\n \"SysRecoil7\" : \"recoil_stat1\",\n \"SysRecoil8\" : \"recoil_stat2\",\n \"SysRecoil9\" : \"recoil_stat3\",\n \"SysRecoil10\": \"recoil_stat4\",\n \"SysRecoil11\": \"recoil_stat5\",\n \"SysRecoil12\": \"recoil_stat6\",\n \"SysRecoil13\": \"recoil_stat7\",\n \"SysRecoil14\": \"recoil_stat8\",\n \"SysRecoil15\": \"recoil_stat9\",\n }\n\n def getNuisName(nuis):\n if nuis in nameMap.keys():\n return nameMap[nuis]\n elif bool(re.match(r\"\\w*bin\\d+shape\", nuis)):\n return \"QCD_\" + nuis\n else:\n return nuis\n\n ifile = ROOT.TFile(ifilename)\n himpact = ifile.Get(\"nuisance_impact_mu\")\n himpact_grouped = ifile.Get(\"nuisance_group_impact_mu\")\n tree = ifile.Get(\"fitresults\")\n tree.GetEntry(0)\n\n # find the POI bin for poiname\n ibinX = -1\n for binX in range(1, himpact.GetNbinsX()+1):\n poi = himpact.GetXaxis().GetBinLabel(binX)\n if poi == poiname:\n ibinX = binX\n continue\n assert ibinX >=0, \"Can not find the POI {} in the postfit file {}. Please check.\".format(poiname, ifilename)\n\n results = OrderedDict()\n results['POIs'] = []\n val = getattr(tree, poiname)\n err = abs(getattr(tree, poiname+\"_err\"))\n poi = OrderedDict()\n poi['fit'] = [val-err, val, val+err]\n poi['name'] = poiname\n results['POIs'].append(poi)\n\n results['method'] = 'default'\n results['params'] = []\n\n # dump impacts\n impacts = OrderedDict()\n for ibinY in range(1, himpact.GetNbinsY()+1):\n nuis = himpact.GetYaxis().GetBinLabel(ibinY)\n impacts[nuis] = himpact.GetBinContent(ibinX, ibinY)\n\n # add the grouped QCD and Recoil systematic\n groupnames = []\n for ibinY in range(1, himpact_grouped.GetNbinsY()+1):\n tmpY = himpact_grouped.GetYaxis().GetBinLabel(ibinY)\n if tmpY == 'stat':\n continue\n impacts[tmpY] = himpact_grouped.GetBinContent(ibinX, ibinY)\n groupnames.append(tmpY)\n\n # sort impacts, descending\n impacts = OrderedDict(sorted(impacts.items(), key=lambda x: abs(x[1]), reverse=True))\n\n pulls = OrderedDict()\n for nuis in impacts.keys():\n if nuis not in groupnames:\n val = getattr(tree, nuis)\n err = getattr(tree, nuis+\"_err\")\n err = abs(err)\n else:\n # manually set the postfit of the grouped sys to [-1,1], and pulled at 0,\n # since only the impacts are useful to us\n val = 0.\n err = 1.\n pulls[nuis] = [val - err, val, val + err]\n\n # save to results\n for nuis in impacts.keys():\n systematic = OrderedDict()\n systematic['fit'] = pulls[nuis]\n systematic['groups'] = []\n systematic['impact_' + poiname] = impacts[nuis]\n systematic['name'] = getNuisName(nuis)\n systematic['prefit'] = [-1.0, 0., 1.0]\n systematic[poiname] = [poi['fit'][1] - impacts[nuis], poi['fit'][1], poi['fit'][1] + impacts[nuis]]\n systematic['type'] = \"Gaussian\"\n print(getNuisName(nuis), pulls[nuis][1], pulls[nuis][1]-pulls[nuis][0], impacts[nuis])\n\n results['params'].append(systematic)\n\n with open(ofilename, 'w') as fp:\n json.dump(results, fp, indent=2)", "def write2json(output, in_data):\n print(\"Writeing \" + output)\n with open(output, 'w') as f:\n json.dump(in_data, f, indent=4, sort_keys=True)", "def out_put_data(OOS_result: dir, category: str) -> pandas.core.frame.DataFrame:\n \n header = ['SKU', 'Store', 'category', 'OOS_days', 'date_list', 'OOS_lastDay','avg_loss_sale_quantity',\n 'avg_loss_net_sale','avg_loss_mergin', 
'total_loss_sale_quantity','total_loss_net_sale','total_loss_mergin']\n output_data = pd.DataFrame(columns = header)\n new_row = {}\n \n for key, value in OOS_result.items():\n new_row['Store'] = key[1]\n new_row['SKU'] = key[0]\n new_row['Category'] = category\n new_row['OOS_days'] = value[0]\n new_row['date_list'] = value[5]\n new_row['OOS_lastDay'] = value[4]\n new_row['avg_loss_sale_quantity'] = value[3]\n new_row['avg_loss_net_sale'] = value[2]\n new_row['avg_loss_mergin'] = value[1]\n new_row['total_loss_sale_quantity'] = value[3] *value[0]\n new_row['total_loss_net_sale'] = value[2] *value[0]\n new_row['total_loss_mergin'] = value[1] *value[0]\n \n ## insert the new row \n output_data = output_data.append(new_row, ignore_index=True) \n return output_data", "def main():\n try:\n kerbals_csv = pd.read_csv(\"kerbals.csv\")\n except FileNotFoundError:\n print(\"Kerbals csv file not found in current directory!\")\n sys.exit(1)\n kerbals_csv.to_json(\"kerbals.json\", orient=\"records\")\n kerbals_json = open(\"kerbals.json\")\n print(kerbals_json.read())\n return 0", "def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return", "def put_str_repr_on_csv():\n for file_name in tqdm(os.listdir(\"./raw_data\")):\n file = pathlib.Path(f\"./raw_data/{file_name}\")\n if file.suffix == \".csv\" and \"str\" not in file.stem:\n df = pd.read_csv(\"./raw_data/\"+file_name)\n df['jones_str'] = df['Jones_polynomial'].apply(lambda x: eval(x)).transform(lambda x: poly_to_str(x))\n df['alexander_str'] = df['Alexander_polynomial'].apply(lambda x: eval(x)).transform(lambda x: poly_to_str(x))\n df.to_csv(f\"{file.stem}_str.csv\", index=False)", "def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]" ]
[ "0.60670656", "0.5917598", "0.5867013", "0.577265", "0.5737042", "0.5713435", "0.56811106", "0.56413877", "0.56257904", "0.5622406", "0.5606425", "0.5585183", "0.55736536", "0.55720776", "0.5546977", "0.554406", "0.55393744", "0.5519248", "0.5511894", "0.5495278", "0.5494552", "0.5441304", "0.5411523", "0.53953856", "0.53934216", "0.5385327", "0.53577816", "0.53548515", "0.53504646", "0.5348617" ]
0.7022942
0
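The negative snippets above all convert tabular observation data to JSON in project-specific ways. As a generic illustration only — the file paths, column handling, and function name below are assumptions, not taken from any snippet in this dump — a minimal CSV-to-JSON-lines conversion can be sketched as:

```python
# Minimal sketch: write one JSON object per CSV row, keyed by the header names.
# File paths and the function name are hypothetical placeholders.
import csv
import json

def csv_to_jsonl(csv_path, jsonl_path):
    with open(csv_path, newline="") as src, open(jsonl_path, "w") as dst:
        for row in csv.DictReader(src):
            dst.write(json.dumps(row) + "\n")

# Hypothetical usage:
# csv_to_jsonl("observations.csv", "observations.jsonl")
```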
Choose the correct instrument to use for observations for a given date range. Inputs must be date objects from the datetime module. Used when there is no information about which instrument was primary.
def choose_inst(given_start_date,given_end_date): #INPUTS MUST BE DATE OBJECTS inst_start_dates=[] inst_end_dates=[] good_instruments = [] good_end_dates = [] bad_inst = [] #extracting dates where instruments are active from csv file inst_dates = pd.read_csv(ref_path / 'instrument_dates.csv') for s in inst_dates['start']: inst_start_dates.append(datetime.strptime(str(s),'%Y-%m').date()) for e in inst_dates['end']: if str(e) == 'nan': inst_end_dates.append(datetime.today().date()) else: inst_end_dates.append(datetime.strptime(str(e),'%Y-%m').date()) #checking which instruments are active during given time period and #choosing the correct ones print('checking which instruments are active for given dates') for i in range(len(inst_start_dates)): if (inst_start_dates[i] < given_start_date) and (given_end_date < inst_end_dates[i]): print('%s works' %inst_dates['Instrument'][i]) good_instruments.append(inst_dates['Instrument'][i]) good_end_dates.append(inst_end_dates[i]) else: print('outside of %s range' %inst_dates['Instrument'][i]) #checking if active instruments actually have data for that date for inst in good_instruments: inst_str = inst.replace('-','').lower() year = str(given_start_date).split('-')[0] month = str(given_start_date).split('-')[1] url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' + month + '/' + inst_str) try: request.urlopen(url) print('%s data available' %inst) except: print('%s data NOT available' %inst) bad_inst.append(inst) #not choosing instrument if it doesn't have data for binst in bad_inst: good_instruments.remove(binst) #if more than one instrument is available, choose which one to use if len(good_instruments) > 1: print('Please choose which instrument you would like to use.') for j in range(len(good_instruments)): print('Type ' + str(j) + ' for ' + str(good_instruments[j])) inst_choice = input('Answer:' ) instrument = good_instruments[int(inst_choice)] end_date = good_end_dates[int(inst_choice)] print('we are using %s as our instrument for observations' %instrument) else: instrument = good_instruments[0] end_date = good_end_dates[0] print('we are using %s as our instrument for observations' %instrument) return([instrument,end_date])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_prime_inst(given_start_date,given_end_date):\r\n\r\n #extracting primary dates where instruments are active from csv file\r\n inst_prime_dates = pd.read_csv(ref_path / 'GOES_primary_assignments.csv', header=3)\r\n\r\n #figuring out which instrument is primary for given start date\r\n for d in range(len(inst_prime_dates['Start Date'])):\r\n change_date = parse(inst_prime_dates['Start Date'][d])\r\n if given_start_date >= change_date.date():\r\n prime_inst = inst_prime_dates['EPEAD Primary'][d]\r\n backup_inst = inst_prime_dates['EPEAD Secondary'][d]\r\n end_date = parse(inst_prime_dates['Start Date'][d+1]).date()\r\n\r\n #if no prime instrument available, have to choose which instrument\r\n #to use based on which instruments have data for this date\r\n if str(prime_inst) == 'nan':\r\n if str(backup_inst) == 'nan':\r\n print('no information about primary instrument available.'\r\n 'Choosing instrument based on active date ranges')\r\n alternate_output = choose_inst(given_start_date,given_end_date)\r\n\r\n return(alternate_output)\r\n else:\r\n prime_inst = backup_inst\r\n\r\n break\r\n\r\n prime_inst = str(prime_inst).split('.')[0]\r\n\r\n #reformatting instrument name\r\n if len(prime_inst) == 2:\r\n inst_str = str(prime_inst)\r\n elif len(prime_inst) == 1:\r\n inst_str = '0' + str(prime_inst)\r\n\r\n print('GOES-%s is the primary instrument for given start time' %inst_str)\r\n\r\n #checking to make sure this primary instrument actually has data\r\n year = str(given_start_date).split('-')[0]\r\n month = str(given_start_date).split('-')[1]\r\n url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' +\r\n month + '/goes' + inst_str)\r\n\r\n try:\r\n request.urlopen(url)\r\n print('GOES-%s has data available' %inst_str)\r\n instrument = 'GOES-' + inst_str\r\n print('we are using %s as our instrument for observations' %instrument)\r\n\r\n except request.HTTPError:\r\n #if primary instrument doesn't have data for this date, using backup instrument\r\n print('GOES-%s does NOT have data available' %inst_str)\r\n\r\n #reformatting backup instrument\r\n if len(str(backup_inst)) == 2:\r\n inst_str = str(backup_inst)\r\n elif len(str(backup_inst)) ==1:\r\n inst_str = '0' + str(backup_inst)\r\n\r\n print('checking for data from backup instrument GOES-%s' %inst_str)\r\n\r\n url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/'\r\n + month + '/goes' + inst_str)\r\n\r\n #checking to see if backup instrument has data for this date, if not have\r\n #to manually choose which instrument to use based off which instruments\r\n #have data available\r\n try:\r\n request.urlopen(url)\r\n print('backup instrument data found - using backup instrument')\r\n instrument = 'GOES-' + inst_str\r\n print('we are using %s as our instrument for observations'\r\n %instrument)\r\n\r\n except request.HTTPError:\r\n print('no knowledge of backup or primary instrument - choosing '\r\n 'instrument based on available data')\r\n alternate_output = choose_inst(given_start_date,given_end_date)\r\n\r\n return(alternate_output)\r\n\r\n return([instrument,end_date])", "def test_date_interval(self, init_date, end_date):\n self.calc_earning(self.security[(self.security['Date'] > init_date) &\n (self.security['Date'] < end_date)])", "def database_extraction(mod_start_time,mod_end_time,instrument_chosen,subevent_bool,\r\n detect_previous_event = False,thresholds='100,1',\r\n one_thresh = False):\r\n obs_file_created = False\r\n\r\n #extending time window\r\n window_end_time = (mod_end_time + 
timedelta(days=2))\r\n window_start_time = (mod_start_time - timedelta(days=2))\r\n \r\n #making a list of all dates within window\r\n day_list=[]\r\n for d in range(10):\r\n day_list.append((window_start_time + timedelta(days=d)).date())\r\n print('day list = %s' %day_list)\r\n \r\n print('determining if an instrument has been chosen')\r\n\r\n if instrument_chosen:\r\n #if an instrument has been chosen, checking to make sure it still works for this date\r\n if inst_end < window_end_time:\r\n instrument_chosen = False\r\n else:\r\n #if insturment hasn't been chosen, figuring out what it should be for given date\r\n try:\r\n #if instrument is specified in cfg using that\r\n instrument = cfg.instrument\r\n inst_end = datetime.today()\r\n print('using %s as our instrument for observations' %instrument)\r\n instrument_chosen = True\r\n\r\n except:\r\n #choosing instrument using function if not given in cfg\r\n instrument_stuff = choose_prime_inst(window_start_time.date(),\r\n window_end_time.date())\r\n instrument = instrument_stuff[0]\r\n #figuring out how long we can use this instrument\r\n inst_end = instrument_stuff[1]\r\n instrument_chosen = True\r\n \r\n #running katie's code to extract data using chosen instrument and dates\r\n print('extracting data from GOES website')\r\n \r\n #running for only one threshold if one_thresh is true, otherwise running for default\r\n #thresholds as well as any additional threshold given\r\n if one_thresh:\r\n one_sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds) \r\n print('ran for threshold %s' %thresholds)\r\n else:\r\n if subevent_bool:\r\n thresholds = '10,1'\r\n #if event is a subevent, changing the threshold in katie's code to\r\n #10 MeV > 1pfu so that it will be recorded\r\n print('********************SUBEVENT**************************')\r\n sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n print('ran for subevent')\r\n else:\r\n #if an event, running with usual thresholds\r\n print('********************EVENT*****************************')\r\n sep.run_all(str(window_start_time), str(window_end_time),str(instrument), \r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n \r\n #reloading function so it doesn't keep old data \r\n reload(sep)\r\n \r\n #reformatting csv created from katie's code to json\r\n print('extracted - reformatting') \r\n for day in day_list: \r\n if not obs_file_created:\r\n #checking each day within the window to find the csv file if it hasn't\r\n #already been found\r\n print('thresholds: %s' %thresholds)\r\n \r\n if one_thresh:\r\n #name includes threshold if only ran for one threshold\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_gt' +\r\n str(thresholds).split(',')[0] + '_' + str(thresholds).split(',')[1] + 'pfu_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n else:\r\n #otherwise only includes date ran for\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n \r\n print('new_os_name %s' %new_obs_name) \r\n \r\n #checking if that file exists\r\n if os.path.exists(katies_path / new_obs_name):\r\n #if a file with this date exists, creating the corresponding json file\r\n \r\n #json name\r\n if one_thresh:\r\n obs_name = (str(instrument) + '_' + str(day) + 'only_' + str(thresholds).split(',')[0] + 'MeV_event.json')\r\n 
else:\r\n obs_name = (str(instrument) + '_' +\r\n str(day) + '.json')\r\n #creating json file\r\n obs_csv2json((katies_path / new_obs_name), obs_name,\r\n (ref_path/'example_sepscoreboard_json_file_v20190228.json'),\r\n instrument)\r\n \r\n print('obs file created')\r\n #file is created - will not run for anymore dates within window\r\n obs_file_created = True\r\n \r\n return(obs_name)\r\n else:\r\n print('no csv file found with this date, checking next one')", "def __init__(self, code, start_date=\"1900-01-01\", end_date=\"2020-01-01\"):\n base = Base()\n self.datas = base.getData(\n code=code, start_date=start_date, end_date=end_date)\n self._index = 0\n self.period = 14", "def rate_between(self, from_date, to_date):\n print(\"override the above\")", "def date_match(self,dateRange, input_frame):\n # find match with a exact date, output one element\n if dateRange['Start Date'] == dateRange['End Date']:\n # convert dtype to datetime64, match the data in dataframe\n exact_date = np.datetime64(dateRange['Start Date'])\n # key = column name, value = keyword\n target_time = input_frame[input_frame['Start Date'] == exact_date]\n # if search a range\n else:\n # only a start date or an end date entered\n if dateRange['Start Date'] == '' or dateRange['End Date'] == '':\n # only a start date input, then return the data from entered date to most recent\n if dateRange['End Date'] == '':\n start = np.datetime64(dateRange['Start Date'])\n target_time = input_frame[input_frame['Start Date'] >= start]\n # only an ende date input, then return the data before the entered date\n else:\n end = np.datetime64(dateRange['End Date'])\n target_time = input_frame[input_frame['Start Date'] <= end]\n # convert datatype to datetime64, match the data in dataframe\n else:\n start = np.datetime64(dateRange['Start Date'])\n end = np.datetime64(dateRange['End Date'])\n # mask target_time\n target_time = input_frame[(input_frame['Start Date'] <= end) & (input_frame['Start Date'] >= start)]\n # return filtered dataframe\n return target_time", "def returnDatesAndRegions(start=None, end=None, theRegs=None, isWeekly=False, isViral=False):\r\n\t# Default values\r\n\tregions = [\"global\", \"ad\", \"ar\", \"at\", \"au\", \"be\", \"bg\", \"bo\", \"br\", \"ca\", \"ch\", \"cl\", \"co\", \"cr\", \"cy\", \"cz\", \"de\", \"dk\", \"do\", \"ec\", \"ee\", \"es\", \"fi\", \"fr\", \"gb\", \"gr\", \"gt\", \"hk\", \"hn\", \"hu\", \"id\", \"ie\", \"il\", \"is\", \"it\", \"jp\", \"lt\", \"lu\", \"lv\", \"mc\", \"mt\", \"mx\",\"my\", \"ni\", \"nl\", \"no\", \"nz\", \"pa\", \"pe\", \"ph\", \"pl\", \"pt\", \"py\", \"ro\", \"se\", \"sg\", \"sk\", \"sv\", \"th\", \"tr\", \"tw\", \"us\", \"uy\", \"vn\"]\r\n\tviralWeeklyStart = \"2017-01-05\"\r\n\ttopWeeklyStart = \"2016-12-22\"\r\n\tallDailyStart = \"2017-01-01\"\r\n\r\n\t#Required since dates taken are very specific\r\n\tdefaultList = defaultListOfDates(isWeekly, isViral)\r\n\t#--------------------------------------------\r\n\r\n\t# Helper for Exception handling\r\n\tif(isWeekly and isViral):\r\n\t\tfunc = \"viral50Weekly\"\r\n\telif(isWeekly and not isViral):\r\n\t\tfunc = \"top200Weekly\"\r\n\telif(not isWeekly and isViral):\r\n\t\tfunc = \"viral50Daily\"\r\n\telif(not isWeekly and not isViral):\r\n\t\tfunc = \"top200Daily\"\r\n\t# \r\n\r\n\t# Start dates\r\n\tif(start is None): #From the beginning\r\n\t\tif(isWeekly):\r\n\t\t\tif(isViral):\r\n\t\t\t\tstart = datetime.datetime.strptime(viralWeeklyStart, \"%Y-%m-%d\")\r\n\t\t\telse:\r\n\t\t\t\tstart = datetime.datetime.strptime(topWeeklyStart, 
\"%Y-%m-%d\") \r\n\t\telse:\r\n\t\t\tstart = datetime.datetime.strptime(allDailyStart, \"%Y-%m-%d\")\r\n\telse:\r\n\t\tif(start in defaultList):\r\n\t\t\tstart = datetime.datetime.strptime(start, \"%Y-%m-%d\")\r\n\t\telse:\r\n\t\t\torderedList = sorted(defaultList, key=lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\") - datetime.datetime.strptime(start, \"%Y-%m-%d\"))\r\n\t\t\tclosest = [d for d in orderedList if d >= start]\r\n\t\t\tsuggest = closest[0:5]\r\n\t\t\tlogger.info(f\"The start date {start} provided for {func} is invalid. Wanna give one these a try? {suggest}\")\r\n\t\t\tchoice = input(\"Enter (1) to use the first suggestion, or (2) to quit and set yourself: \")\r\n\t\t\tif(int(choice) == 1):\r\n\t\t\t\tstart = datetime.datetime.strptime(suggest[0], \"%Y-%m-%d\")\r\n\t\t\telif(int(choice) == 2):\r\n\t\t\t\tsys.exit()\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(\"Invalid Choice.\")\r\n\r\n\r\n\t# End dates\r\n\tif(end is None): #Up to now\r\n\t\tend = datetime.datetime.now()\r\n\telse:\r\n\t\tend = datetime.datetime.strptime(end, \"%Y-%m-%d\")\r\n\t\t\r\n\r\n\t# Region\r\n\tregion = []\r\n\tif(theRegs is None):\r\n\t\tregion = regions\r\n\telse:\r\n\t\tif(type(theRegs) is not list):\r\n\t\t\tregs = []\r\n\t\t\tregs.append(theRegs)\r\n\t\t\ttheRegs = regs\r\n\t\t\t\r\n\t\tfor aReg in theRegs:\r\n\t\t\tif(aReg in regions):\r\n\t\t\t\tregion.append(aReg)\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(f\"Data for the region --> {aReg} <-- requested for {func} does not exist. Please try another region\")\r\n\r\n\t#Generate list of dates\r\n\tdates = [] \r\n\tif(isWeekly): \r\n\t\tif(isViral):\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\t\telse:\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\r\n\telse:\r\n\t\tgen = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days+1)]\r\n\t\tfor date in gen:\r\n\t\t\tif(date<=end):\r\n\t\t\t\tdates.append(date.strftime(\"%Y-%m-%d\"))\r\n\r\n\tvar = {\"dates\": dates, \"region\": region}\r\n\treturn var", "def valuation(self, from_date=None):\n import pandas_datareader.data as pdr\n import datetime\n to_date = datetime.date.today()\n if not from_date: from_date = to_date - datetime.timedelta(days=1)\n px = pdr.DataReader(self.ticker, 'yahoo', from_date, to_date)\n\n f = self.Fundamentals\n\n print(\"OF COURSE \", 7, f, px)\n # for i in set(f.perod_end_date):", "def get_values_between_dates(self, date_start=None, date_end=None, dt_max=0.0, start_strict=False, end_strict=True):\n \n if start_strict:\n start_diff_operator = '>'\n else:\n start_diff_operator = '>='\n if end_strict:\n end_diff_operator = '<'\n else:\n end_diff_operator = '<='\n \n if dt_max < 0.:\n raise Exception('dt_max must be > 0')\n \n if (date_start is not None) and (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) AND datetime(date_data) %s datetime(?) 
ORDER BY datetime(date_data)\"%(start_diff_operator, end_diff_operator), \\\n params=[self.date2str(date_start-timedelta(dt_max)), self.date2str(date_end+timedelta(dt_max))])\n elif (date_start is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%start_diff_operator, \\\n params=[self.date2str(date_start-timedelta(dt_max))])\n elif (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%end_diff_operator, \\\n params=[self.date2str(date_end+timedelta(dt_max))])\n else:\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO ORDER BY datetime(date_data)\")", "def to_stock_data_range(self, start_date=None, end_date=None):\n # standardize dates\n if end_date is None:\n end_date = self.dates[-2]\n if type(end_date) is pd.tslib.Timestamp:\n end_date = end_date.strftime(\"%Y-%m-%d\")\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n try:\n end_date = self.dates[list(self.dates).index(end_date) + 1]\n except:\n end_date = \"Last\"\n\n if start_date is None:\n start_date = self.dates[0]\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n if end_date is \"Last\":\n dates = list(self.dates)[list(self.dates).index(start_date):]\n else:\n dates = list(self.dates)[list(self.dates).index(start_date):list(self.dates).index(end_date)]\n\n # find functions to set\n dataframes = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is pd.DataFrame]\n dictionaries = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is dict]\n constant_values = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and getattr(self, i) is not None and i not in dataframes and i not in dictionaries]\n\n # transfer new data\n new_stock_data = StockData()\n\n for i in constant_values:\n setattr(new_stock_data, i, getattr(self, i))\n\n for i in dataframes:\n if end_date is not \"Last\":\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:end_date])\n else:\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:])\n\n for i in dictionaries:\n new_dict = {}\n for d in dates:\n new_dict[d] = getattr(self, i)[d]\n setattr(new_stock_data, i, new_dict)\n\n new_stock_data.dates = dates\n new_stock_data.str_dates = [str(d)[:USEFUL_TIMESTAMP_CHARS] for d in dates]\n\n return new_stock_data", "def __init__(self, start_date_str: str, end_date_str: str):\r\n start_date, end_date = create_date_from_string(start_date_str, end_date_str)\r\n if is_date_valid(start_date, end_date):\r\n self.days_range_array = create_days_range(start_date, end_date)\r\n self.months_range_array = create_months_range(self.days_range_array)\r\n else:\r\n raise Exception", "def get_interest_variable(\n in_dataset, sensor_var, date_col, hr_col, numeric_var, target_sensor=\"A620\"\n):\n dataset_pproc = in_dataset.loc[\n in_dataset[sensor_var] == target_sensor, [date_col, hr_col] + [numeric_var]\n ]\n hrs_str = dataset_pproc[hr_col].to_string()\n dates_str = dataset_pproc[date_col]\n\n dataset_pproc[date_col] = pd.to_datetime(dataset_pproc[date_col])\n 
dataset_pproc.set_index([date_col, hr_col], inplace=True)\n dataset_pproc.fillna(method=\"ffill\", inplace=True)\n dataset_pproc.interpolate(method=\"linear\", axis=0)\n\n return dataset_pproc", "def __getQuerysetGivenInterval(model, start_date, end_date):\n cur_model = {\n 'donor': Donor,\n 'donation': Donation,\n 'item': Item\n }.get(model, Donor.objects.none())\n\n # might need following lines when changing back to created_at:\n # date_format = \"%Y-%m-%d\"\n # if start_date is not None:\n # timezone_unaware_start_date = datetime.strptime(start_date, date_format)\n # timezone_aware_start_date = pytz.utc.localize(timezone_unaware_start_date)\n #\n # if end_date is not None:\n # timezone_unaware_end_date = datetime.strptime(end_date, date_format)\n # timezone_aware_end_date = pytz.utc.localize(timezone_unaware_end_date).date()\n\n if start_date is not None and end_date is not None:\n return cur_model.objects.filter(documented_at__range=(start_date, end_date))\n elif start_date is not None and end_date is None:\n return cur_model.objects.filter(documented_at__gte=start_date)\n elif start_date is None and end_date is not None:\n return cur_model.objects.filter(documented_at__lte=end_date)\n else:\n return cur_model.objects.all()", "def __init__(__self__, *,\n end_date: str,\n start_date: str,\n time: str):\n pulumi.set(__self__, \"end_date\", end_date)\n pulumi.set(__self__, \"start_date\", start_date)\n pulumi.set(__self__, \"time\", time)", "def get_scns_for_date(self, date_of_interest, valid=True, ard_prod=True, platform=None):\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n if platform is None:\n if valid and ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False, EDDSentinel1ASF.ARDProduct == True).all()\n elif valid:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False).all()\n elif ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.ARDProduct == True).all()\n else:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest).all()\n else:\n if valid and ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False, EDDSentinel1ASF.ARDProduct == True,\n EDDSentinel1ASF.Platform == platform).all()\n elif valid:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False, EDDSentinel1ASF.Platform == platform).all()\n elif ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.ARDProduct == True, EDDSentinel1ASF.Platform == platform).all()\n else:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Platform == platform).all()\n return scns", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n 
orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += '-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)", "def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)", "def filter_data_by_date(df, ticker, start_date, end_date):\n if start_date is None:\n start_date = MIN_DATE\n\n if end_date is None:\n end_date = MAX_DATE\n\n filtered = df[\n (df[\"ticker\"] == ticker) & (df[\"date\"] >= start_date) & (df[\"date\"] <= end_date)\n ]\n return filtered", "def visitRange(self, date):\n raise NotImplementedError()", "def resampleDataSet(dailyData, resampleString, resampleMethod, customFunction = None):\n\n # Make sure the index is sorted\n dailyData.sort_index(level='Datetime', inplace=True)\n\n # Get today's date\n today = datetime.now()\n\n # Create a new empty series\n resampleData = pd.Series([], index = pd.DatetimeIndex([]))\n\n # Get information about the daily data\n firstDate = dailyData.index[0][0]\n\n # Parse the resample string\n resampleList = resampleString.split('/') # Converts 'R/1978-10-01/P1M/F1Y' into ['R', '1978-10-01', 'P1M', 'F1Y', 'S1Y']\n\n # Validate the list\n if resampleList[0] != 'R' or len(resampleList[1]) != 10 or resampleList[2][0] != 'P' or resampleList[3][0] != 'F': #or len(resampleList) != 4\n return resampleData, 1, 'Invalid Resample String. Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y'\n \n # Validate the resample method\n if resampleMethod not in ['accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median']:\n return resampleData, 1, \"Invalid resampling method. 
Provide one of 'accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median'\"\n\n # Parse into values\n startDate = datetime.strptime(resampleList[1], '%Y-%m-%d') # >>> datetime.date(1978, 10, 1)\n period = isodate.parse_duration(resampleList[2]) # >>> isodate.duration.Duration(0, 0, 0, years=0, months=1)\n # Change the period to 1 day if the resample method is 'first'\n if resampleMethod == 'first':\n period = isodate.parse_duration(\"P1D\")\n frequency = isodate.parse_duration(resampleList[3].replace('F', 'P')) # >>> isodate.duration.Duration(0, 0, 0, years=1, months=1)\n\n # Create all the periods\n periods = []\n tracker = startDate\n while tracker <= today: # >>> periods = [(datetime.datetime(1978-10-01), datetime.datetime(1978-11-01))]\n periods.append((tracker, tracker+period))\n tracker += frequency\n\n # Parse the function\n func = lambda x: np.nan if x.isnull().all() else (np.nanmean(x) if resampleMethod == 'average' else (\n np.nansum(x) if resampleMethod == 'accumulation' else (\n 86400*(1/43560000)*np.nansum(x) if resampleMethod == 'accumulation_cfs_kaf' else (\n x.iloc[0] if resampleMethod == 'first' else (\n x.iloc[-1] if resampleMethod == 'last' else (\n np.nanmedian(x) if resampleMethod == 'median' else (\n np.nanmax(x) if resampleMethod == 'max' else (\n np.nanmin(x) if resampleMethod == 'min' else eval(customFunction)))))))))\n\n # Resample the data\n for idx in pd.IntervalIndex.from_tuples(periods):\n data = dailyData.loc[idx.left : idx.right]\n if resampleMethod != 'first' and resampleMethod != 'last':\n data.isMostlyThere = len(data) > int(0.95*(idx.right-idx.left).days) # Check to make sure 95% of data is there!\n else:\n data.isMostlyThere = True\n resampleData.loc[idx.left] = ( func(data) if (idx.right >= firstDate and today >= idx.right and (data.isMostlyThere)) else np.nan )\n\n if len(resampleList) == 5:\n shiftStrings = list(resampleList[4])\n if shiftStrings[1].isdigit():\n resampleData.index = resampleData.index + pd.offsets.DateOffset(years=int(shiftStrings[1]))\n else:\n return resampleData, 1, \"Invalid Resample String. 
Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y\"\n\n\n # Name the dataframe\n resampleData.name = dailyData.name + '_' + resampleList[1] + '_' + resampleList[2] + '_' + resampleList[3] + '_' + resampleMethod + '_' + str(customFunction)\n\n return resampleData", "def date_range(start, end):\n \"\"\"between the start and end date inclusive.\"\"\"\n # Create a link to the session\n session = Session(engine)\n \n # Get the start and end date of the data\n final_date = session.query(Measurements.date).order_by(Measurements.date.desc()).first()[0]\n first_date = session.query(Measurements.date).order_by(Measurements.date.asc()).first()[0]\n \n # Make sure dates are in range of available data\n if (start > final_date) or (start < first_date) or (end > final_date) or (end < first_date) or (start>end):\n return f\"{start} - {end} is not a proper date range.</br>Try dates between {first_date} - {final_date}\"\n\n # Query the min, avg, and max temps for the given timeframe\n results = []\n while start <= end:\n min_temp = session.query(func.min(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n avg_temp = session.query(func.avg(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n max_temp = session.query(func.max(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n \n # Store the information retrieved\n results.append([start, min_temp, avg_temp, max_temp])\n \n # Update the date to check the next record\n date1 = start.split(\"-\")\n date1 = dt.date(int(date1[0]), int(date1[1]), int(date1[2])) + dt.timedelta(days=1)\n start = date1.strftime(\"%Y-%m-%d\")\n\n session.close()\n\n # Create a dictionary from the query results\n date_temps = []\n for date, min_temp, avg_temp, max_temp in results:\n date_temps_dict = {}\n date_temps_dict[\"date\"] = date\n date_temps_dict[\"min_temp\"] = min_temp\n date_temps_dict[\"avg_temp\"] = round(avg_temp, 2)\n date_temps_dict[\"max_temp\"] = max_temp\n date_temps.append(date_temps_dict)\n \n return jsonify(date_temps)", "def test_new_items_have_increasing_dates(self):\n input_ = [\n self.indicator_record(date=datetime.date(2004, 11, 1), value=0.69),\n self.indicator_record(date=datetime.date(2004, 12, 1), value=0.86),\n self.indicator_record(date=datetime.date(2005, 1, 1), value=0.58),\n ]\n records = self.expander._ipca_from_15_expander(input_)\n\n self.assertTrue(records[-1].date > input_[-1].date)", "def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date", "def test_time_series_intraday_date_integer(self, mock_request):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='integer')\n url = \"http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json\"\n path_file = self.get_file_from_url(\"mock_time_series\")\n with open(path_file) as f:\n mock_request.get(url, text=f.read())\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n assert type(data.index[0]) == int", "def _get_output_date_range_for(self, from_input_dt, to_input_dt):\n return from_input_dt, to_input_dt", "def date_range_filter(dr):\n assert IDateRange.providedBy(dr) or IDateRangeFactory.providedBy(dr)\n if IDateRangeFactory.providedBy(dr):\n dr = dr(datetime.now())\n factory = queryUtility(IFactory, dottedname(IQueryFilter))\n if factory is None:\n return 
ComponentLookupError('cannot find factory for query filter')\n return factory(value=(dr.start, dr.end), query_range=dr.query_range)", "def test_fill_data_with_one_date(self):\n # date = pd.to_datetime('2015-06-30')\n date = pd.to_datetime('2011-05-09')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n\n # df_date = self.full_iv.df_all.query('date == %r' % date)\n # df_date = df_date[['date', 'dte', 'mark', 'strike', 'impl_vol']]\n # print df_date.sort_values(['dte', 'strike']).to_string(line_width=1000)\n\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n\n self.assertTrue(len(df_iv))", "def test_rise_timeseries_with_expert_model_for_correct_max_and_min():\n hot_day_index = 6\n cold_day_index = 12\n temperature_timeseries = average_temperature_timeseries_with_1_cold_and_1_hot_day(cold_day_index, hot_day_index)\n\n summer_explanation, winter_explanation = dianna.explain_timeseries(run_expert_model,\n timeseries_data=temperature_timeseries,\n method='rise',\n labels=[0, 1],\n p_keep=0.1, n_masks=10000,\n mask_type=input_train_mean)\n\n assert np.argmax(summer_explanation) == hot_day_index\n assert np.argmin(summer_explanation) == cold_day_index\n assert np.argmax(winter_explanation) == cold_day_index\n assert np.argmin(winter_explanation) == hot_day_index", "def test_output_day(self):\n input_ = [\n self.indicator_record(date=datetime.date(2011, 1, 1), value=0.83),\n self.indicator_record(date=datetime.date(2011, 2, 1), value=0.80),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n\n self.assertEqual(output[-1].date.day, 1)" ]
[ "0.6696412", "0.5244132", "0.5244106", "0.5234904", "0.523354", "0.5202343", "0.50902385", "0.49597186", "0.4926985", "0.49259138", "0.4911848", "0.48814812", "0.4860115", "0.48596224", "0.48520213", "0.48501316", "0.4843819", "0.4843819", "0.48264506", "0.48252285", "0.4812543", "0.4811632", "0.48099044", "0.48043033", "0.4801722", "0.47961712", "0.4783006", "0.47822672", "0.47802654", "0.47764415" ]
0.707812
0
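The record above selects a GOES instrument whose active date range fully covers the requested observation window, asking the user to choose when several qualify. A minimal sketch of just the date-range check follows; the instrument names and active ranges are illustrative assumptions, not the real GOES assignments, and the strict comparisons mirror the logic in the record.

```python
# Sketch of the coverage test: keep instruments whose active interval
# strictly contains the requested window. The ranges below are made up
# for illustration and are NOT the actual GOES active dates.
from datetime import date

ACTIVE_RANGES = {
    "GOES-13": (date(2010, 5, 1), date(2017, 12, 1)),
    "GOES-15": (date(2011, 1, 1), date(2020, 3, 1)),
}

def instruments_covering(window_start, window_end, ranges=ACTIVE_RANGES):
    return [name for name, (start, end) in ranges.items()
            if start < window_start and window_end < end]

# Example with the assumed ranges:
# instruments_covering(date(2012, 3, 1), date(2012, 3, 10))
# -> ['GOES-13', 'GOES-15']
```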
Choose the correct instrument to use for observations for a given date range, based on the primary instrument for that time period. Inputs must be date objects from the datetime module.
def choose_prime_inst(given_start_date,given_end_date): #extracting primary dates where instruments are active from csv file inst_prime_dates = pd.read_csv(ref_path / 'GOES_primary_assignments.csv', header=3) #figuring out which instrument is primary for given start date for d in range(len(inst_prime_dates['Start Date'])): change_date = parse(inst_prime_dates['Start Date'][d]) if given_start_date >= change_date.date(): prime_inst = inst_prime_dates['EPEAD Primary'][d] backup_inst = inst_prime_dates['EPEAD Secondary'][d] end_date = parse(inst_prime_dates['Start Date'][d+1]).date() #if no prime instrument available, have to choose which instrument #to use based on which instruments have data for this date if str(prime_inst) == 'nan': if str(backup_inst) == 'nan': print('no information about primary instrument available.' 'Choosing instrument based on active date ranges') alternate_output = choose_inst(given_start_date,given_end_date) return(alternate_output) else: prime_inst = backup_inst break prime_inst = str(prime_inst).split('.')[0] #reformatting instrument name if len(prime_inst) == 2: inst_str = str(prime_inst) elif len(prime_inst) == 1: inst_str = '0' + str(prime_inst) print('GOES-%s is the primary instrument for given start time' %inst_str) #checking to make sure this primary instrument actually has data year = str(given_start_date).split('-')[0] month = str(given_start_date).split('-')[1] url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' + month + '/goes' + inst_str) try: request.urlopen(url) print('GOES-%s has data available' %inst_str) instrument = 'GOES-' + inst_str print('we are using %s as our instrument for observations' %instrument) except request.HTTPError: #if primary instrument doesn't have data for this date, using backup instrument print('GOES-%s does NOT have data available' %inst_str) #reformatting backup instrument if len(str(backup_inst)) == 2: inst_str = str(backup_inst) elif len(str(backup_inst)) ==1: inst_str = '0' + str(backup_inst) print('checking for data from backup instrument GOES-%s' %inst_str) url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' + month + '/goes' + inst_str) #checking to see if backup instrument has data for this date, if not have #to manually choose which instrument to use based off which instruments #have data available try: request.urlopen(url) print('backup instrument data found - using backup instrument') instrument = 'GOES-' + inst_str print('we are using %s as our instrument for observations' %instrument) except request.HTTPError: print('no knowledge of backup or primary instrument - choosing ' 'instrument based on available data') alternate_output = choose_inst(given_start_date,given_end_date) return(alternate_output) return([instrument,end_date])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_inst(given_start_date,given_end_date): #INPUTS MUST BE DATE OBJECTS\r\n\r\n inst_start_dates=[]\r\n inst_end_dates=[]\r\n good_instruments = []\r\n good_end_dates = []\r\n bad_inst = []\r\n\r\n #extracting dates where instruments are active from csv file\r\n inst_dates = pd.read_csv(ref_path / 'instrument_dates.csv')\r\n\r\n for s in inst_dates['start']:\r\n inst_start_dates.append(datetime.strptime(str(s),'%Y-%m').date())\r\n\r\n for e in inst_dates['end']:\r\n if str(e) == 'nan':\r\n inst_end_dates.append(datetime.today().date())\r\n else:\r\n inst_end_dates.append(datetime.strptime(str(e),'%Y-%m').date())\r\n\r\n #checking which instruments are active during given time period and\r\n #choosing the correct ones\r\n print('checking which instruments are active for given dates')\r\n\r\n for i in range(len(inst_start_dates)):\r\n if (inst_start_dates[i] < given_start_date) and (given_end_date <\r\n inst_end_dates[i]):\r\n print('%s works' %inst_dates['Instrument'][i])\r\n good_instruments.append(inst_dates['Instrument'][i])\r\n good_end_dates.append(inst_end_dates[i])\r\n else:\r\n print('outside of %s range' %inst_dates['Instrument'][i])\r\n\r\n #checking if active instruments actually have data for that date\r\n for inst in good_instruments:\r\n inst_str = inst.replace('-','').lower()\r\n year = str(given_start_date).split('-')[0]\r\n month = str(given_start_date).split('-')[1]\r\n url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' +\r\n month + '/' + inst_str)\r\n\r\n try:\r\n request.urlopen(url)\r\n print('%s data available' %inst)\r\n\r\n except:\r\n print('%s data NOT available' %inst)\r\n bad_inst.append(inst)\r\n\r\n #not choosing instrument if it doesn't have data\r\n for binst in bad_inst:\r\n good_instruments.remove(binst)\r\n\r\n #if more than one instrument is available, choose which one to use\r\n if len(good_instruments) > 1:\r\n print('Please choose which instrument you would like to use.')\r\n\r\n for j in range(len(good_instruments)):\r\n print('Type ' + str(j) + ' for ' + str(good_instruments[j]))\r\n\r\n inst_choice = input('Answer:' )\r\n\r\n instrument = good_instruments[int(inst_choice)]\r\n end_date = good_end_dates[int(inst_choice)]\r\n\r\n print('we are using %s as our instrument for observations' %instrument)\r\n\r\n else:\r\n\r\n instrument = good_instruments[0]\r\n end_date = good_end_dates[0]\r\n print('we are using %s as our instrument for observations' %instrument)\r\n\r\n return([instrument,end_date])", "def __init__(self, code, start_date=\"1900-01-01\", end_date=\"2020-01-01\"):\n base = Base()\n self.datas = base.getData(\n code=code, start_date=start_date, end_date=end_date)\n self._index = 0\n self.period = 14", "def database_extraction(mod_start_time,mod_end_time,instrument_chosen,subevent_bool,\r\n detect_previous_event = False,thresholds='100,1',\r\n one_thresh = False):\r\n obs_file_created = False\r\n\r\n #extending time window\r\n window_end_time = (mod_end_time + timedelta(days=2))\r\n window_start_time = (mod_start_time - timedelta(days=2))\r\n \r\n #making a list of all dates within window\r\n day_list=[]\r\n for d in range(10):\r\n day_list.append((window_start_time + timedelta(days=d)).date())\r\n print('day list = %s' %day_list)\r\n \r\n print('determining if an instrument has been chosen')\r\n\r\n if instrument_chosen:\r\n #if an instrument has been chosen, checking to make sure it still works for this date\r\n if inst_end < window_end_time:\r\n instrument_chosen = False\r\n else:\r\n #if insturment hasn't 
been chosen, figuring out what it should be for given date\r\n try:\r\n #if instrument is specified in cfg using that\r\n instrument = cfg.instrument\r\n inst_end = datetime.today()\r\n print('using %s as our instrument for observations' %instrument)\r\n instrument_chosen = True\r\n\r\n except:\r\n #choosing instrument using function if not given in cfg\r\n instrument_stuff = choose_prime_inst(window_start_time.date(),\r\n window_end_time.date())\r\n instrument = instrument_stuff[0]\r\n #figuring out how long we can use this instrument\r\n inst_end = instrument_stuff[1]\r\n instrument_chosen = True\r\n \r\n #running katie's code to extract data using chosen instrument and dates\r\n print('extracting data from GOES website')\r\n \r\n #running for only one threshold if one_thresh is true, otherwise running for default\r\n #thresholds as well as any additional threshold given\r\n if one_thresh:\r\n one_sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds) \r\n print('ran for threshold %s' %thresholds)\r\n else:\r\n if subevent_bool:\r\n thresholds = '10,1'\r\n #if event is a subevent, changing the threshold in katie's code to\r\n #10 MeV > 1pfu so that it will be recorded\r\n print('********************SUBEVENT**************************')\r\n sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n print('ran for subevent')\r\n else:\r\n #if an event, running with usual thresholds\r\n print('********************EVENT*****************************')\r\n sep.run_all(str(window_start_time), str(window_end_time),str(instrument), \r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n \r\n #reloading function so it doesn't keep old data \r\n reload(sep)\r\n \r\n #reformatting csv created from katie's code to json\r\n print('extracted - reformatting') \r\n for day in day_list: \r\n if not obs_file_created:\r\n #checking each day within the window to find the csv file if it hasn't\r\n #already been found\r\n print('thresholds: %s' %thresholds)\r\n \r\n if one_thresh:\r\n #name includes threshold if only ran for one threshold\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_gt' +\r\n str(thresholds).split(',')[0] + '_' + str(thresholds).split(',')[1] + 'pfu_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n else:\r\n #otherwise only includes date ran for\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n \r\n print('new_os_name %s' %new_obs_name) \r\n \r\n #checking if that file exists\r\n if os.path.exists(katies_path / new_obs_name):\r\n #if a file with this date exists, creating the corresponding json file\r\n \r\n #json name\r\n if one_thresh:\r\n obs_name = (str(instrument) + '_' + str(day) + 'only_' + str(thresholds).split(',')[0] + 'MeV_event.json')\r\n else:\r\n obs_name = (str(instrument) + '_' +\r\n str(day) + '.json')\r\n #creating json file\r\n obs_csv2json((katies_path / new_obs_name), obs_name,\r\n (ref_path/'example_sepscoreboard_json_file_v20190228.json'),\r\n instrument)\r\n \r\n print('obs file created')\r\n #file is created - will not run for anymore dates within window\r\n obs_file_created = True\r\n \r\n return(obs_name)\r\n else:\r\n print('no csv file found with this date, checking next one')", "def test_date_interval(self, init_date, end_date):\n 
self.calc_earning(self.security[(self.security['Date'] > init_date) &\n (self.security['Date'] < end_date)])", "def date_match(self,dateRange, input_frame):\n # find match with a exact date, output one element\n if dateRange['Start Date'] == dateRange['End Date']:\n # convert dtype to datetime64, match the data in dataframe\n exact_date = np.datetime64(dateRange['Start Date'])\n # key = column name, value = keyword\n target_time = input_frame[input_frame['Start Date'] == exact_date]\n # if search a range\n else:\n # only a start date or an end date entered\n if dateRange['Start Date'] == '' or dateRange['End Date'] == '':\n # only a start date input, then return the data from entered date to most recent\n if dateRange['End Date'] == '':\n start = np.datetime64(dateRange['Start Date'])\n target_time = input_frame[input_frame['Start Date'] >= start]\n # only an ende date input, then return the data before the entered date\n else:\n end = np.datetime64(dateRange['End Date'])\n target_time = input_frame[input_frame['Start Date'] <= end]\n # convert datatype to datetime64, match the data in dataframe\n else:\n start = np.datetime64(dateRange['Start Date'])\n end = np.datetime64(dateRange['End Date'])\n # mask target_time\n target_time = input_frame[(input_frame['Start Date'] <= end) & (input_frame['Start Date'] >= start)]\n # return filtered dataframe\n return target_time", "def rate_between(self, from_date, to_date):\n print(\"override the above\")", "def returnDatesAndRegions(start=None, end=None, theRegs=None, isWeekly=False, isViral=False):\r\n\t# Default values\r\n\tregions = [\"global\", \"ad\", \"ar\", \"at\", \"au\", \"be\", \"bg\", \"bo\", \"br\", \"ca\", \"ch\", \"cl\", \"co\", \"cr\", \"cy\", \"cz\", \"de\", \"dk\", \"do\", \"ec\", \"ee\", \"es\", \"fi\", \"fr\", \"gb\", \"gr\", \"gt\", \"hk\", \"hn\", \"hu\", \"id\", \"ie\", \"il\", \"is\", \"it\", \"jp\", \"lt\", \"lu\", \"lv\", \"mc\", \"mt\", \"mx\",\"my\", \"ni\", \"nl\", \"no\", \"nz\", \"pa\", \"pe\", \"ph\", \"pl\", \"pt\", \"py\", \"ro\", \"se\", \"sg\", \"sk\", \"sv\", \"th\", \"tr\", \"tw\", \"us\", \"uy\", \"vn\"]\r\n\tviralWeeklyStart = \"2017-01-05\"\r\n\ttopWeeklyStart = \"2016-12-22\"\r\n\tallDailyStart = \"2017-01-01\"\r\n\r\n\t#Required since dates taken are very specific\r\n\tdefaultList = defaultListOfDates(isWeekly, isViral)\r\n\t#--------------------------------------------\r\n\r\n\t# Helper for Exception handling\r\n\tif(isWeekly and isViral):\r\n\t\tfunc = \"viral50Weekly\"\r\n\telif(isWeekly and not isViral):\r\n\t\tfunc = \"top200Weekly\"\r\n\telif(not isWeekly and isViral):\r\n\t\tfunc = \"viral50Daily\"\r\n\telif(not isWeekly and not isViral):\r\n\t\tfunc = \"top200Daily\"\r\n\t# \r\n\r\n\t# Start dates\r\n\tif(start is None): #From the beginning\r\n\t\tif(isWeekly):\r\n\t\t\tif(isViral):\r\n\t\t\t\tstart = datetime.datetime.strptime(viralWeeklyStart, \"%Y-%m-%d\")\r\n\t\t\telse:\r\n\t\t\t\tstart = datetime.datetime.strptime(topWeeklyStart, \"%Y-%m-%d\") \r\n\t\telse:\r\n\t\t\tstart = datetime.datetime.strptime(allDailyStart, \"%Y-%m-%d\")\r\n\telse:\r\n\t\tif(start in defaultList):\r\n\t\t\tstart = datetime.datetime.strptime(start, \"%Y-%m-%d\")\r\n\t\telse:\r\n\t\t\torderedList = sorted(defaultList, key=lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\") - datetime.datetime.strptime(start, \"%Y-%m-%d\"))\r\n\t\t\tclosest = [d for d in orderedList if d >= start]\r\n\t\t\tsuggest = closest[0:5]\r\n\t\t\tlogger.info(f\"The start date {start} provided for {func} is invalid. Wanna give one these a try? 
{suggest}\")\r\n\t\t\tchoice = input(\"Enter (1) to use the first suggestion, or (2) to quit and set yourself: \")\r\n\t\t\tif(int(choice) == 1):\r\n\t\t\t\tstart = datetime.datetime.strptime(suggest[0], \"%Y-%m-%d\")\r\n\t\t\telif(int(choice) == 2):\r\n\t\t\t\tsys.exit()\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(\"Invalid Choice.\")\r\n\r\n\r\n\t# End dates\r\n\tif(end is None): #Up to now\r\n\t\tend = datetime.datetime.now()\r\n\telse:\r\n\t\tend = datetime.datetime.strptime(end, \"%Y-%m-%d\")\r\n\t\t\r\n\r\n\t# Region\r\n\tregion = []\r\n\tif(theRegs is None):\r\n\t\tregion = regions\r\n\telse:\r\n\t\tif(type(theRegs) is not list):\r\n\t\t\tregs = []\r\n\t\t\tregs.append(theRegs)\r\n\t\t\ttheRegs = regs\r\n\t\t\t\r\n\t\tfor aReg in theRegs:\r\n\t\t\tif(aReg in regions):\r\n\t\t\t\tregion.append(aReg)\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(f\"Data for the region --> {aReg} <-- requested for {func} does not exist. Please try another region\")\r\n\r\n\t#Generate list of dates\r\n\tdates = [] \r\n\tif(isWeekly): \r\n\t\tif(isViral):\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\t\telse:\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\r\n\telse:\r\n\t\tgen = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days+1)]\r\n\t\tfor date in gen:\r\n\t\t\tif(date<=end):\r\n\t\t\t\tdates.append(date.strftime(\"%Y-%m-%d\"))\r\n\r\n\tvar = {\"dates\": dates, \"region\": region}\r\n\treturn var", "def _get_output_date_range_for(self, from_input_dt, to_input_dt):\n return from_input_dt, to_input_dt", "def _get_input_date_range_for(self, from_output_dt, to_output_dt):\n # If comb is adaptive, the required input date range needs to account for the time window\n if self.is_adaptive:\n if from_output_dt is None:\n return from_output_dt, to_output_dt\n return from_output_dt-timedelta(days=self.time_window), to_output_dt\n # Otherwise, the comb is already trained and does not need to fill up the time window first\n return from_output_dt, to_output_dt", "def get_interest_variable(\n in_dataset, sensor_var, date_col, hr_col, numeric_var, target_sensor=\"A620\"\n):\n dataset_pproc = in_dataset.loc[\n in_dataset[sensor_var] == target_sensor, [date_col, hr_col] + [numeric_var]\n ]\n hrs_str = dataset_pproc[hr_col].to_string()\n dates_str = dataset_pproc[date_col]\n\n dataset_pproc[date_col] = pd.to_datetime(dataset_pproc[date_col])\n dataset_pproc.set_index([date_col, hr_col], inplace=True)\n dataset_pproc.fillna(method=\"ffill\", inplace=True)\n dataset_pproc.interpolate(method=\"linear\", axis=0)\n\n return dataset_pproc", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += '-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return 
date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def define_secdate(self):\r\n \r\n # Since 2017\r\n self.start_date = datetime.datetime(2017,1,1) + (datetime.datetime(2017,12,31) - datetime.datetime(2017,1,1))/2 \r\n self.end_date = datetime.datetime(2050,1,1)\r\n self.ktime = (self.end_date - self.start_date).days + 1\r\n self.date = np.zeros(self.ktime,dtype=datetime.datetime)\r\n self.t = np.zeros(self.ktime)\r\n self.dt = 1/365.25\r\n \r\n for k in range(0,self.ktime):\r\n \r\n self.date[k] = self.start_date + datetime.timedelta(days=self.t[k]*365.25)\r\n\r\n if k < self.ktime-1:\r\n \r\n self.t[k+1] = self.t[k] + self.dt\r\n \r\n # Since 1990\r\n self.start_date_hist = datetime.datetime(1990,1,1) + (datetime.datetime(1990,12,31) - datetime.datetime(1990,1,1))/2 \r\n self.ktime_1990_2050 = (self.end_date - self.start_date_hist).days + 1\r\n self.date_1990_2050 = np.zeros(self.ktime_1990_2050,dtype=datetime.datetime)\r\n self.t_1990_2050 = np.zeros(self.ktime_1990_2050)\r\n \r\n for k in range(0,self.ktime_1990_2050):\r\n \r\n self.date_1990_2050[k] = self.start_date_hist + datetime.timedelta(days=self.t_1990_2050[k]*365.25)\r\n \r\n if (self.date_1990_2050[k].year == self.start_date.year and self.date_1990_2050[k].month == self.start_date.month and self.date_1990_2050[k].day == self.start_date.day):\r\n \r\n self.ktime_proj_crossing = k\r\n \r\n \r\n if k < self.ktime-1:\r\n \r\n self.t_1990_2050[k+1] = self.t_1990_2050[k] + self.dt \r\n \r\n return", "def update_dates(start_date, end_date, freq):\n if (freq == \"MS\") or (freq == \"M\"):\n try:\n start_date = start_date.split(\"/\")\n end_date = end_date.split(\"/\")\n except AttributeError:\n start_date = [start_date.month, start_date.day, start_date.year]\n end_date = [end_date.month, end_date.day, end_date.year]\n if int(end_date[1]) < 22:\n\n if int(end_date[0]) == 1:\n end_month = 12\n end_year = int(end_date[2]) - 1\n else:\n end_month = int(end_date[0]) - 1\n end_year = end_date[2]\n\n end_date[0] = end_month\n end_date[2] = end_year\n\n start_date = pd.to_datetime(f\"{start_date[0]}/01/{start_date[2]}\")\n\n end_date = pd.to_datetime(\n f\"{end_date[0]}/{calendar.monthrange(int(end_date[2]),int(end_date[0]))[1]}/{end_date[2]}\"\n )\n\n if (freq == \"QS\") or (freq == \"Q\"):\n start_date = (pd.to_datetime(start_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterBegin(\n startingMonth=1\n )\n end_date = (pd.to_datetime(end_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterEnd()\n\n return (start_date, end_date)", "def date_range(start, end):\n \"\"\"between the start and end date inclusive.\"\"\"\n # Create a link to the session\n session = Session(engine)\n \n # Get the start and end date of the data\n final_date = 
session.query(Measurements.date).order_by(Measurements.date.desc()).first()[0]\n first_date = session.query(Measurements.date).order_by(Measurements.date.asc()).first()[0]\n \n # Make sure dates are in range of available data\n if (start > final_date) or (start < first_date) or (end > final_date) or (end < first_date) or (start>end):\n return f\"{start} - {end} is not a proper date range.</br>Try dates between {first_date} - {final_date}\"\n\n # Query the min, avg, and max temps for the given timeframe\n results = []\n while start <= end:\n min_temp = session.query(func.min(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n avg_temp = session.query(func.avg(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n max_temp = session.query(func.max(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n \n # Store the information retrieved\n results.append([start, min_temp, avg_temp, max_temp])\n \n # Update the date to check the next record\n date1 = start.split(\"-\")\n date1 = dt.date(int(date1[0]), int(date1[1]), int(date1[2])) + dt.timedelta(days=1)\n start = date1.strftime(\"%Y-%m-%d\")\n\n session.close()\n\n # Create a dictionary from the query results\n date_temps = []\n for date, min_temp, avg_temp, max_temp in results:\n date_temps_dict = {}\n date_temps_dict[\"date\"] = date\n date_temps_dict[\"min_temp\"] = min_temp\n date_temps_dict[\"avg_temp\"] = round(avg_temp, 2)\n date_temps_dict[\"max_temp\"] = max_temp\n date_temps.append(date_temps_dict)\n \n return jsonify(date_temps)", "def to_stock_data_range(self, start_date=None, end_date=None):\n # standardize dates\n if end_date is None:\n end_date = self.dates[-2]\n if type(end_date) is pd.tslib.Timestamp:\n end_date = end_date.strftime(\"%Y-%m-%d\")\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n try:\n end_date = self.dates[list(self.dates).index(end_date) + 1]\n except:\n end_date = \"Last\"\n\n if start_date is None:\n start_date = self.dates[0]\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n if end_date is \"Last\":\n dates = list(self.dates)[list(self.dates).index(start_date):]\n else:\n dates = list(self.dates)[list(self.dates).index(start_date):list(self.dates).index(end_date)]\n\n # find functions to set\n dataframes = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is pd.DataFrame]\n dictionaries = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is dict]\n constant_values = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and getattr(self, i) is not None and i not in dataframes and i not in dictionaries]\n\n # transfer new data\n new_stock_data = StockData()\n\n for i in constant_values:\n setattr(new_stock_data, i, getattr(self, i))\n\n for i in dataframes:\n if end_date is not \"Last\":\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:end_date])\n else:\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:])\n\n for i in dictionaries:\n new_dict = {}\n for d in dates:\n new_dict[d] = getattr(self, i)[d]\n setattr(new_stock_data, i, new_dict)\n\n new_stock_data.dates = dates\n new_stock_data.str_dates = [str(d)[:USEFUL_TIMESTAMP_CHARS] for d in dates]\n\n return 
new_stock_data", "def valuation(self, from_date=None):\n import pandas_datareader.data as pdr\n import datetime\n to_date = datetime.date.today()\n if not from_date: from_date = to_date - datetime.timedelta(days=1)\n px = pdr.DataReader(self.ticker, 'yahoo', from_date, to_date)\n\n f = self.Fundamentals\n\n print(\"OF COURSE \", 7, f, px)\n # for i in set(f.perod_end_date):", "def resampleDataSet(dailyData, resampleString, resampleMethod, customFunction = None):\n\n # Make sure the index is sorted\n dailyData.sort_index(level='Datetime', inplace=True)\n\n # Get today's date\n today = datetime.now()\n\n # Create a new empty series\n resampleData = pd.Series([], index = pd.DatetimeIndex([]))\n\n # Get information about the daily data\n firstDate = dailyData.index[0][0]\n\n # Parse the resample string\n resampleList = resampleString.split('/') # Converts 'R/1978-10-01/P1M/F1Y' into ['R', '1978-10-01', 'P1M', 'F1Y', 'S1Y']\n\n # Validate the list\n if resampleList[0] != 'R' or len(resampleList[1]) != 10 or resampleList[2][0] != 'P' or resampleList[3][0] != 'F': #or len(resampleList) != 4\n return resampleData, 1, 'Invalid Resample String. Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y'\n \n # Validate the resample method\n if resampleMethod not in ['accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median']:\n return resampleData, 1, \"Invalid resampling method. Provide one of 'accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median'\"\n\n # Parse into values\n startDate = datetime.strptime(resampleList[1], '%Y-%m-%d') # >>> datetime.date(1978, 10, 1)\n period = isodate.parse_duration(resampleList[2]) # >>> isodate.duration.Duration(0, 0, 0, years=0, months=1)\n # Change the period to 1 day if the resample method is 'first'\n if resampleMethod == 'first':\n period = isodate.parse_duration(\"P1D\")\n frequency = isodate.parse_duration(resampleList[3].replace('F', 'P')) # >>> isodate.duration.Duration(0, 0, 0, years=1, months=1)\n\n # Create all the periods\n periods = []\n tracker = startDate\n while tracker <= today: # >>> periods = [(datetime.datetime(1978-10-01), datetime.datetime(1978-11-01))]\n periods.append((tracker, tracker+period))\n tracker += frequency\n\n # Parse the function\n func = lambda x: np.nan if x.isnull().all() else (np.nanmean(x) if resampleMethod == 'average' else (\n np.nansum(x) if resampleMethod == 'accumulation' else (\n 86400*(1/43560000)*np.nansum(x) if resampleMethod == 'accumulation_cfs_kaf' else (\n x.iloc[0] if resampleMethod == 'first' else (\n x.iloc[-1] if resampleMethod == 'last' else (\n np.nanmedian(x) if resampleMethod == 'median' else (\n np.nanmax(x) if resampleMethod == 'max' else (\n np.nanmin(x) if resampleMethod == 'min' else eval(customFunction)))))))))\n\n # Resample the data\n for idx in pd.IntervalIndex.from_tuples(periods):\n data = dailyData.loc[idx.left : idx.right]\n if resampleMethod != 'first' and resampleMethod != 'last':\n data.isMostlyThere = len(data) > int(0.95*(idx.right-idx.left).days) # Check to make sure 95% of data is there!\n else:\n data.isMostlyThere = True\n resampleData.loc[idx.left] = ( func(data) if (idx.right >= firstDate and today >= idx.right and (data.isMostlyThere)) else np.nan )\n\n if len(resampleList) == 5:\n shiftStrings = list(resampleList[4])\n if shiftStrings[1].isdigit():\n resampleData.index = resampleData.index + pd.offsets.DateOffset(years=int(shiftStrings[1]))\n else:\n return resampleData, 
1, \"Invalid Resample String. Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y\"\n\n\n # Name the dataframe\n resampleData.name = dailyData.name + '_' + resampleList[1] + '_' + resampleList[2] + '_' + resampleList[3] + '_' + resampleMethod + '_' + str(customFunction)\n\n return resampleData", "def <start>/<end>(<start>/<end>)\ndef calc_temps(start_date, end_date):", "def get_values_between_dates(self, date_start=None, date_end=None, dt_max=0.0, start_strict=False, end_strict=True):\n \n if start_strict:\n start_diff_operator = '>'\n else:\n start_diff_operator = '>='\n if end_strict:\n end_diff_operator = '<'\n else:\n end_diff_operator = '<='\n \n if dt_max < 0.:\n raise Exception('dt_max must be > 0')\n \n if (date_start is not None) and (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) AND datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%(start_diff_operator, end_diff_operator), \\\n params=[self.date2str(date_start-timedelta(dt_max)), self.date2str(date_end+timedelta(dt_max))])\n elif (date_start is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%start_diff_operator, \\\n params=[self.date2str(date_start-timedelta(dt_max))])\n elif (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%end_diff_operator, \\\n params=[self.date2str(date_end+timedelta(dt_max))])\n else:\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO ORDER BY datetime(date_data)\")", "def __getQuerysetGivenInterval(model, start_date, end_date):\n cur_model = {\n 'donor': Donor,\n 'donation': Donation,\n 'item': Item\n }.get(model, Donor.objects.none())\n\n # might need following lines when changing back to created_at:\n # date_format = \"%Y-%m-%d\"\n # if start_date is not None:\n # timezone_unaware_start_date = datetime.strptime(start_date, date_format)\n # timezone_aware_start_date = pytz.utc.localize(timezone_unaware_start_date)\n #\n # if end_date is not None:\n # timezone_unaware_end_date = datetime.strptime(end_date, date_format)\n # timezone_aware_end_date = pytz.utc.localize(timezone_unaware_end_date).date()\n\n if start_date is not None and end_date is not None:\n return cur_model.objects.filter(documented_at__range=(start_date, end_date))\n elif start_date is not None and end_date is None:\n return cur_model.objects.filter(documented_at__gte=start_date)\n elif start_date is None and end_date is not None:\n return cur_model.objects.filter(documented_at__lte=end_date)\n else:\n return cur_model.objects.all()", "def IRIS_ARC_IC(input, clients):\n \n if input[clients + '_ic_auto'] == 'Y':\n global events \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n address = eventpath\n elif input[clients + '_ic'] != 'N':\n address = input[clients + '_ic']\n \n events, address_events = quake_info(address, 'info')\n \n for i in range(0, len(events)):\n sta_ev = read_station_event(address_events[i])\n ls_saved_stas = []\n \n for j in range(0, len(sta_ev[0])):\n if clients == sta_ev[0][j][13]:\n station_id = sta_ev[0][j][0] + '.' + sta_ev[0][j][1] + '.' + \\\n sta_ev[0][j][2] + '.' 
+ sta_ev[0][j][3]\n ls_saved_stas.append(os.path.join(address_events[i], 'BH_RAW',\\\n station_id))\n \n print 'event: ' + str(i+1) + '/' + str(len(events)) + \\\n ' -- ' + clients\n print '------------------------------------'\n inst_correct(input, ls_saved_stas, address_events[i], clients) \n \n print \"**********************************\"\n print clients.upper() + ' Instrument Correction is DONE'\n print \"**********************************\"", "def compute(self, today, asset_ids, out, low):\n today_day = today.weekday()\n current_end_week_idx = today_day\n current_start_week_idx = 4 + today_day\n # current_week_high = high[current_end_week_idx:current_start_week_idx, :].max(axis=0)\n current_week_low = low[current_end_week_idx:current_start_week_idx, :].min(\n axis=0)\n out[:] = current_week_low", "def test_new_items_have_increasing_dates(self):\n input_ = [\n self.indicator_record(date=datetime.date(2004, 11, 1), value=0.69),\n self.indicator_record(date=datetime.date(2004, 12, 1), value=0.86),\n self.indicator_record(date=datetime.date(2005, 1, 1), value=0.58),\n ]\n records = self.expander._ipca_from_15_expander(input_)\n\n self.assertTrue(records[-1].date > input_[-1].date)", "def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)", "def _setup_account_general(insid, start_date, rate_dict, counterparty,\n prf_name, account_name, reinvest,\n funding_instype, external_id=None):\n calendar = acm.FCalendar['ZAR Johannesburg']\n next_bus_day = calendar.AdjustBankingDays(acm.Time.DateToday(), 1)\n day_after_start_date = calendar.AdjustBankingDays(start_date, 1)\n # Make sure that two conditions are met:\n # 1. End date doesn't lie in the past.\n # 2. 
Start date predates end date.\n end_date = max(next_bus_day, day_after_start_date)\n\n deposit = acm.FInstrument[insid]\n if deposit:\n LOGGER.info(\"The instrument {} already exists\".format(insid))\n if deposit.ExternalId1():\n LOGGER.info(\"Updating the external id from {} to {}\".format(\n deposit.ExternalId1(), external_id))\n deposit.ExternalId1(external_id)\n deposit.Commit()\n return None\n\n LOGGER.info('Creating %s...', insid)\n acm.BeginTransaction()\n try:\n # Instrument\n deposit = acm.FDeposit()\n deposit.Currency(CURRENCY)\n deposit.Name(insid)\n deposit.DayCountMethod(DAY_COUNT_METHOD)\n deposit.SpotBankingDaysOffset(0)\n # this sets the exp_time, which has a higher priority over exp_day,\n # which is set when calling re_rate(...) from ael. If the exp_time\n # is not set, acm (trading manager) uses the exp_day.\n # deposit.ExpiryDate(end_date)\n deposit.ContractSize(1)\n deposit.Quotation('Clean')\n deposit.QuoteType('Clean')\n deposit.OpenEnd('Open End')\n deposit.MinimumPiece(MINIMUM_PIECE)\n deposit.PayOffsetMethod('Business Days')\n if external_id:\n deposit.ExternalId1(external_id)\n\n # Leg\n leg = deposit.CreateLeg(1)\n leg.LegType('Call Fixed Adjustable')\n leg.Decimals(11)\n leg.StartDate(start_date)\n leg.EndDate(end_date)\n leg.EndPeriodUnit('Days')\n leg.DayCountMethod(DAY_COUNT_METHOD)\n if rate_dict['type'] == 'fixed':\n leg.FixedRate(rate_dict['rate'])\n leg.ResetDayOffset(0)\n leg.ResetType('Weighted')\n leg.ResetPeriod('1d')\n leg.ResetDayMethod('Following')\n leg.Currency(CURRENCY)\n leg.NominalFactor(1)\n leg.Rounding('Normal')\n leg.RollingPeriod('1m')\n leg.RollingPeriodBase(acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(\n start_date, 0, 1, 0)))\n leg.PayDayMethod('Following')\n leg.PayCalendar(calendar)\n leg.FixedCoupon(True)\n leg.NominalAtEnd(True)\n leg.FloatRateFactor(1)\n leg.FixedCoupon(True)\n leg.StartPeriod('-1d')\n leg.Reinvest(reinvest)\n if rate_dict['type'] == 'float':\n deposit.AddInfoValue('CallFloatRef', rate_dict['ref'])\n deposit.AddInfoValue('CallFloatSpread', rate_dict['spread'])\n deposit.Commit() # Commits both the instrument and the leg.\n\n # Trade\n trade = acm.FTrade()\n trade.Instrument(deposit)\n trade.Counterparty(counterparty)\n trade.Acquirer('PRIME SERVICES DESK')\n trade.AcquireDay(start_date)\n trade.ValueDay(start_date)\n trade.Quantity(1)\n trade.TradeTime(start_date)\n trade.Currency(CURRENCY)\n trade.Price(0)\n trade.Portfolio(acm.FPhysicalPortfolio[prf_name])\n trade.Type('Normal')\n trade.TradeTime(start_date)\n trade.Status('Simulated') # To allow for delete in case of rollback.\n trade.AddInfoValue('Funding Instype', funding_instype)\n trade.AddInfoValue('Call_Region', 'BB SANDTON')\n trade.AddInfoValue('Account_Name', account_name)\n trade.Commit()\n \n acm.CommitTransaction()\n except Exception as e:\n acm.AbortTransaction()\n LOGGER.exception(\"Could not create call/loan account {}\".format(insid))\n raise e\n\n deposit = acm.FInstrument[insid]\n if deposit:\n trades = deposit.Trades()\n if trades:\n LOGGER.info('The following trade has been created:{}\\n'.format(trades[0].Oid()))\n else:\n raise RuntimeError('Could not create trade!')\n else:\n raise RuntimeError('Could not create deposit!')", "def __init__(__self__, *,\n end_date: str,\n start_date: str,\n time: str):\n pulumi.set(__self__, \"end_date\", end_date)\n pulumi.set(__self__, \"start_date\", start_date)\n pulumi.set(__self__, \"time\", time)", "def billing_choose_dates(self):\n number_of_dates_to_be_generated_per_patient = (\n 
self.number_of_dates_to_be_generated_per_patient\n )\n dunning_cycle_length = self.dunning_cycle_length\n dates = self.dates\n first_date = random.choice(\n dates\n ) # randomly choose a start date from the list of possible start dates\n last_possible_date = first_date + datetime.timedelta(\n days=dunning_cycle_length\n ) # calculate the last date possible based on Dunnin Cycle\n time_between_dates = last_possible_date - first_date\n subsequent_events = random.sample(\n list(np.arange(0, time_between_dates.days)),\n number_of_dates_to_be_generated_per_patient,\n )\n subsequent_events.sort()\n dates = [\n first_date + datetime.timedelta(days=np.int(subsequent_event))\n for subsequent_event in subsequent_events\n ]\n event_list = pd.DataFrame(dates)\n return event_list", "def select_data(data=pd.DataFrame(), date_initial=\"2005-01-01\", date_final=\"2019-12-31\"):\n data = data[data.index >= date_initial]\n data = data[data.index <= date_final]\n return data", "def filter_data_by_date(df, ticker, start_date, end_date):\n if start_date is None:\n start_date = MIN_DATE\n\n if end_date is None:\n end_date = MAX_DATE\n\n filtered = df[\n (df[\"ticker\"] == ticker) & (df[\"date\"] >= start_date) & (df[\"date\"] <= end_date)\n ]\n return filtered", "def __init__(self, start_date_str: str, end_date_str: str):\r\n start_date, end_date = create_date_from_string(start_date_str, end_date_str)\r\n if is_date_valid(start_date, end_date):\r\n self.days_range_array = create_days_range(start_date, end_date)\r\n self.months_range_array = create_months_range(self.days_range_array)\r\n else:\r\n raise Exception" ]
[ "0.7161972", "0.5350023", "0.5343824", "0.52754986", "0.52716273", "0.5243731", "0.5240982", "0.5149819", "0.5139513", "0.50795436", "0.50315154", "0.5018969", "0.5007179", "0.50046945", "0.49793446", "0.4966744", "0.49542493", "0.4934821", "0.4922149", "0.49216446", "0.48913866", "0.4866242", "0.48629916", "0.4860951", "0.48603606", "0.4838361", "0.48369712", "0.481251", "0.4802354", "0.48008588" ]
0.68636316
1
Will create JSON output files if there are two events (for each threshold) in one time window. I.e., if there are two >10MeV >10pfu events as well as two >100MeV >1pfu events, will create files for all four events, but if there are three >100MeV >1pfu events, will only generate JSON files for the first two. Second events for different thresholds are written to separate files rather than grouped together.
def two_in_one(obs_file,et,subevent): #in this function, the "original time window" talked about in the comments #refers to the start and end times that were input to create the file obs_file, #which will likely have been created using the database_extraction function #opening first output file created by operational_sep_quantities with open(obs_file, 'r') as o: out = js.load(o) #all events recorded in that output file ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity'] ['ongoing_events']) #creating lists for values from each event end_times = [] start_times = [] energy_thresholds = [] flux_thresholds = [] out_names = [] #appending values to lists for each event for i in range(len(ongoing_events)): start_times.append(parse(ongoing_events[i]['start_time'])) end_times.append(parse(ongoing_events[i]['end_time'])) energy_thresholds.append(ongoing_events[i]['energy_min']) flux_thresholds.append(float(ongoing_events[i]['threshold'])) #checking if there was a second event for each threshold for i in range(len(end_times)): end = end_times[i] #if the end time of an event for any threshold was a day before the last day #in the original time window given, will check if ONLY THAT THRESHOLD #had another event after the first one, using the end time of the first #event of that threshold as the new start time of the event window if end.date() < et.date(): print('end time to use as new start time: %s' %end) #figuring out which threshold this end time was for flux_thresh = int(flux_thresholds[i]) energy_thresh = int(energy_thresholds[i]) print('extracting second event for threshold ' + str(flux_thresh) + ' MeV ' + str(energy_thresh) + ' pfu') #new start time (2 days in advance bc the database_extraction function #makes the start time 2 days prior, so will cancel that out) st = end + timedelta(days=2) #thresholds in correct format thresholds = str(energy_thresh) + ',' + str(flux_thresh) print('thresholds: %s' %thresholds) #creating observation data for second event for thresholds given out_names.append(Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent, thresholds = thresholds, one_thresh = True)) #returns list of all new files created by this function return(out_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_event(st,et,instrument_chosen,subevent):\r\n print('checking for multiple events within given time window')\r\n \r\n #creating file for time window with first events for all thresholds\r\n out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)\r\n\r\n #creating files for all second events for all thresholds\r\n new_files = two_in_one(out_name,et,subevent)\r\n \r\n #creating files for any third events for all thresholds that had a second event\r\n for file in new_files:\r\n two_in_one(file,et,subevent) \r\n \r\n return", "def output_files(self):\n o = []\n if 'unweighted' in self.event_types:\n o.append(self.name + \"_unweighted_events.lhe.gz\")\n if 'weighted' in self.event_types:\n o.append(self.name + \"_events.lhe.gz\")\n return o", "def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n \"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")", "def get_metrics_files(project, MIN_DIFFERENCE):\n print(\"LOG: Starting with\", project)\n\n # Get the latest two metrics for this project which are MIN_DIFFERENCE days apart\n re_metrics = re.compile(r\"METRICS-\\d{4}-\\d{2}-\\d{2}.json\")\n all_metrics = []\n\n for filename in os.listdir(project):\n if re_metrics.match(filename):\n all_metrics.append(filename)\n\n all_metrics.sort()\n\n # Come back later when there are atleast two generated metrics files\n if len(all_metrics) < 2:\n return False, {}, {}\n\n current_metrics_json_file = all_metrics.pop()\n print(\"LOG: Current metrics json file\", current_metrics_json_file)\n\n # If the latest Metrics is older than MIN_DIFFERENCE, then don't generate report\n # This is possible in cases of repo turning private or moving out\n today_datestamp = datetime.datetime.now()\n latest_datestamp = datetime.datetime.strptime(current_metrics_json_file, \"METRICS-%Y-%m-%d.json\")\n datetime_delta = today_datestamp - latest_datestamp\n if datetime_delta.days > MIN_DIFFERENCE:\n print(\"Skipping report for\", project, \"Latest metrics file is older than MIN_DIFFERENCE\")\n return False, {}, {}\n\n previous_metrics_json_file = None\n previous_metrics_index_index = len(all_metrics) - 1\n while(previous_metrics_index_index 
>= 0):\n # Calculate difference between last two metrics\n d1 = datetime.datetime.strptime(current_metrics_json_file, \"METRICS-%Y-%m-%d.json\")\n d2 = datetime.datetime.strptime(all_metrics[previous_metrics_index_index], \"METRICS-%Y-%m-%d.json\")\n if (d1 - d2).days > MIN_DIFFERENCE:\n previous_metrics_json_file = all_metrics[previous_metrics_index_index]\n print(\"LOG: Previous metrics json\", previous_metrics_json_file)\n break\n else:\n previous_metrics_index_index -= 1\n\n # Metrics are not older than MIN_DIFFERENCE days\n if previous_metrics_json_file is None:\n return False, {}, {}\n\n return True, current_metrics_json_file, previous_metrics_json_file", "def publish_burst(burst, num_events_counter, fp):\n for event_dict in burst:\n json_str = json.dumps(event_dict)\n num_events_counter.value += 1\n fp.write(json_str + '\\n')", "def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")", "def export_json(self):\r\n export_timestamp = datetime.now().timestamp()\r\n\r\n # Application data\r\n data_dict = { # Dictionary to be exported to json file\r\n \"Monitor App\": {\r\n \"Export Timestamp\": export_timestamp,\r\n \"Servers\": list(self.get_servers()),\r\n \"Event Logs\": {\r\n server: {} for server in self.get_servers()\r\n }\r\n }\r\n }\r\n \r\n # Thread data\r\n for thread in self.get_all_threads():\r\n data_dict[\"Monitor App\"][\"Event Logs\"][thread.get_server_name()][thread.get_log_type()] = {\r\n \"Thread Start Timestamp\": thread.latest_start.timestamp(),\r\n \"Total Processed Events\": thread.get_total_processed_events(),\r\n \"Total Thread Failures\": thread.get_failure_total(),\r\n \"Event IDs\": { # Value built below\r\n # 1111: {\r\n # \"Total\": int,\r\n # \"Description\": str or None,\r\n # \"Timestamps\": [floats] or None\r\n # }\r\n }\r\n }\r\n event_ID_key = data_dict[\"Monitor App\"][\"Event Logs\"][thread.get_server_name()][thread.get_log_type()][\"Event IDs\"]\r\n try: # Build Event IDs dictionary value for data_dict\r\n for event_ID in thread.event_IDs:\r\n event_ID_key[event_ID] = {\r\n \"Total\": thread.get_total_event_occurrences(event_ID),\r\n \"Description\": thread.get_event_description(event_ID),\r\n \"Timestamps\": thread.get_event_occurrence_times(event_ID)\r\n }\r\n except KeyError as err:\r\n print(err)\r\n\r\n # Create log directory\r\n if not 
os.path.exists(os.path.join(\"windowseventmonitor\", \"eventlogs\")):\r\n os.mkdir(os.path.join(\"windowseventmonitor\", \"eventlogs\"))\r\n \r\n event_log_json_file = os.path.join(\"windowseventmonitor\", \"eventlogs\", f\"{export_timestamp}.json\")\r\n try: # Write to json\r\n with open(event_log_json_file, \"w\") as f:\r\n data = json.dumps(data_dict, indent = 4)\r\n f.write(data)\r\n print(\"Exported logs\")\r\n except PermissionError as err:\r\n print(err)", "def create_foders_files(events, eventpath):\n \n len_events = len(events)\n \n for i in range(0, len_events):\n if os.path.exists(os.path.join(eventpath, events[i]['event_id'])) == True:\n \n if raw_input('Folder for -- the requested Period (min/max) ' + \\\n 'and Magnitude (min/max) -- exists in your directory.' + '\\n\\n' + \\\n 'You could either close the program and try updating your ' + \\\n 'folder OR remove the tree, continue the program and download again.' + \\\n '\\n' + 'Do you want to continue? (Y/N)' + '\\n') == 'Y':\n print '-------------------------------------------------------------'\n shutil.rmtree(os.path.join(eventpath, events[i]['event_id']))\n \n else:\n print '------------------------------------------------'\n print 'So...you decided to update your folder...Ciao'\n print '------------------------------------------------'\n sys.exit()\n\n for i in range(0, len_events):\n try:\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'BH_RAW'))\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'Resp'))\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'info'))\n except Exception, e:\n pass\n \n for i in range(0, len_events):\n Report = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'report_st'), 'a+')\n Report.close()\n \n \n for i in range(0, len_events):\n Exception_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'exception'), 'a+')\n eventsID = events[i]['event_id']\n Exception_file.writelines('\\n' + eventsID + '\\n')\n \n Syn_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'station_event'), 'a+')\n Syn_file.close()\n \n if input['time_iris'] == 'Y':\n for i in range(0, len_events):\n time_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'iris_time'), 'a+')\n time_file.close()\n \n \n for i in range(0, len_events):\n quake_file = open(os.path.join(eventpath, events[i]['event_id'],\\\n 'info', 'quake'), 'a+')\n \n quake_file.writelines(repr(events[i]['datetime'].year).rjust(15)\\\n + repr(events[i]['datetime'].julday).rjust(15) \\\n + repr(events[i]['datetime'].month).rjust(15) \\\n + repr(events[i]['datetime'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['datetime'].hour).rjust(15)\\\n + repr(events[i]['datetime'].minute).rjust(15) + \\\n repr(events[i]['datetime'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')\n \n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % events[i]['latitude'])) + '%.5f' \\\n % events[i]['latitude'] + \\\n ' '*(15 - len('%.5f' % events[i]['longitude'])) + '%.5f' \\\n % events[i]['longitude'] + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % abs(events[i]['depth']))) + '%.5f' \\\n % abs(events[i]['depth']) + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % abs(events[i]['magnitude']))) + '%.5f' \\\n % abs(events[i]['magnitude']) + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len(events[i]['event_id'])) + \\\n events[i]['event_id'] + '-' + '\\n')\n \n quake_file.writelines(repr(events[i]['t1'].year).rjust(15)\\\n + 
repr(events[i]['t1'].julday).rjust(15) \\\n + repr(events[i]['t1'].month).rjust(15) \\\n + repr(events[i]['t1'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['t1'].hour).rjust(15)\\\n + repr(events[i]['t1'].minute).rjust(15) + \\\n repr(events[i]['t1'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')\n \n quake_file.writelines(repr(events[i]['t2'].year).rjust(15)\\\n + repr(events[i]['t2'].julday).rjust(15) \\\n + repr(events[i]['t2'].month).rjust(15) \\\n + repr(events[i]['t2'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['t2'].hour).rjust(15)\\\n + repr(events[i]['t2'].minute).rjust(15) + \\\n repr(events[i]['t2'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')", "def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))", "def generate_no_time_two_files():\n fname = {'stress': 'resources/simple_stress_no_time.json',\n 'strain': 'resources/simple_strain_no_time.json'}\n expected = [ # makes an array of two pif systems\n pif.System(\n properties=[\n pif.Property(name='stress',\n scalars=list(np.linspace(0, 100))\n )]),\n\n pif.System(\n properties=[\n pif.Property(name='strain',\n scalars=list(np.linspace(0, 1))\n )])\n 
]\n # dump the pifs into two seperate files\n with open(fname['stress'], 'w') as stress_file:\n pif.dump(expected[0], stress_file)\n with open(fname['strain'], 'w') as strain_file:\n pif.dump(expected[1], strain_file)\n\n return {\n 'file_names': fname,\n 'expected': {\n 'stress': expected[0],\n 'strain': expected[1]\n }\n }", "def write_logs():\n global log_queue\n global maxcount\n\n # Process the first set\n for count, msg in enumerate(log_queue):\n\n loginfo = {}\n print msg\n for entry in msg['log'].keys():\n\n loginfo[entry] = {}\n\n for key in msg['log'][entry].keys():\n loginfo[entry][key] = msg['log'][entry][key]\n\n with open(msg['info'], 'r') as f:\n metadata = json.load(f)\n\n try:\n metadata[msg['run']]\n\n except(KeyError):\n metadata[msg['run']] = {}\n\n if msg['cmd'] == 'write':\n for key in loginfo.keys():\n metadata[msg['run']][key] = loginfo[key]\n\n elif msg['cmd'] == 'reset':\n metadata[msg['run']] = {}\n\n with open(msg['info'], 'w') as f:\n f.write(json.dumps(metadata, indent=2, sort_keys=True))\n\n log_queue.remove(msg)\n\n if count > maxcount:\n break", "def get_result_json(filename, folder, score, threshold):\n assert(isinstance(filename, str))\n assert(isinstance(folder, str))\n assert(isinstance(score, float))\n assert(isinstance(threshold, float))\n \n return {\n 'filename': filename,\n 'folder': folder,\n 'score': score,\n 'decision': 1 if score <= threshold else 0\n }", "def _events_json(fname, overwrite=False):\n new_data = {\n \"sample\": {\"Description\": \"The event onset time in number of sampling points.\"},\n \"value\": {\n \"Description\": (\n \"The event code (also known as trigger code or event ID) \"\n \"associated with the event.\"\n )\n },\n \"trial_type\": {\"Description\": \"The type, category, or name of the event.\"},\n }\n\n # make sure to append any JSON fields added by the user\n fname = Path(fname)\n if fname.exists():\n orig_data = json.loads(\n fname.read_text(encoding=\"utf-8\"), object_pairs_hook=OrderedDict\n )\n new_data = {**orig_data, **new_data}\n\n _write_json(fname, new_data, overwrite)", "def historize_log_values(write_file_to: str, values: str):\n if path.isfile(write_file_to ):\n historic = read_json_file(write_file_to)\n historic += values\n unique = []\n [unique.append(elem) for elem in historic if elem not in unique]\n unique.sort(key=lambda event: datetime.strptime(event[\"eventTime\"], \"%Y-%m-%d %H:%M:%S\"))\n write_to_json_file(write_file_to, unique)\n else:\n write_to_json_file(write_file_to, values)", "def create_file(output_json):\n folder = \"data/\"\n filename = datetime.now().strftime(\"%d-%m-%Y\") + \"-moisture-read.json\"\n filepath = folder+filename\n\n # Create Local folder\n try:\n os.mkdir(folder)\n except OSError:\n pass\n #print(\"Directory already created or a failure occured on directory (%s)\" % folder)\n\n # Create Empty Json file if it doesnt exists\n if(Path(filepath)).exists():\n pass\n else:\n try:\n f = open(filepath, \"a\")\n f.write('{\\n\"moisture_iot_project\":[]\\n}')\n f.close()\n except Exception as e:\n print(\"Failure occured creating the JSON file (%s)\" % e)\n\n # Open Json file to append current structure\n with open(filepath) as outfile:\n data = json.load(outfile)\n\n # Get list with all dictionaries\n temp = data['moisture_iot_project']\n\n # Append current structure\n temp.append(output_json)\n\n # Reorganize List values and re-write to JSON file\n data['moisture_iot_project'] = temp\n write_json(data, filepath)", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n 
ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def to_multiple_jsons(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('multiple_jsons')\n else:\n self.output('multiple_jsons')", "def dump_to_json(fileinfos, out):\n jsonarray = json.dumps(fileinfos)\n json_filename = \"all_elements_used.json\"\n text_file = open(os.path.join(out,out_dir_name,json_filename), \"w\")\n text_file.write(jsonarray)\n text_file.close()\n stdout.write(\"... \"+json_filename+\" created\\n\")", "def event_json_to_csv(self, outfileName, data):\n event_raw = data.split('\\n')\n try:\n result = '\\nAPI ERROR! 
- ' + json.loads(event_raw[0])['error'] + '\\n'\n print result\n return\n except KeyError:\n pass\n\n '''remove the lost line, which is a newline'''\n event_raw.pop()\n\n event_list = []\n jsonfile = outfileName[:-4] + '.json'\n with open(jsonfile,'w') as j:\n j.write('[')\n i = 0\n event_count = len(event_raw)\n for event in event_raw:\n j.write(event)\n i += 1\n if i != event_count:\n j.write(',')\n else:\n j.write(']')\n event_json = json.loads(event)\n event_list.append(event_json)\n print 'JSON saved to ' + j.name\n j.close()\n\n subkeys = get_sub_keys(event_list)\n\n #open the file\n f = open(outfileName, 'w')\n writer = UnicodeWriter(f)\n\n #write the file header\n f.write(codecs.BOM_UTF8)\n\n #writer the top row\n header = [u'event']\n for key in subkeys:\n header.append(key)\n writer.writerow(header)\n\n #write all the data rows\n for event in event_list:\n line = []\n #get the event name\n try:\n line.append(event[u'event'])\n except KeyError:\n line.append(\"\")\n #get each property value\n for subkey in subkeys:\n try:\n line.append(unicode(event[u'properties'][subkey]))\n except KeyError:\n line.append(\"\")\n #write the line\n writer.writerow(line)\n\n print 'CSV saved to ' + f.name\n f.close()", "def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def annotations_to_json(eaf_dir, json_dir):\n for file in os.listdir(eaf_dir):\n if file.endswith(\".eaf\"):\n print(\"converting\", file, \"to json\")\n file_name = os.path.join(json_dir, file[:-4]) + \".json\"\n file = os.path.join(eaf_dir, file)\n file_elan = Elan.Eaf(file)\n\n # Get all the data under the engagement_tier tier\n annotation_data = file_elan.get_annotation_data_for_tier(\"engagement_tier\")\n labels_for_annotation = elan_annotation_to_binary(annotation_data)\n\n # Create a json file storing the dictionary of {\"timeslot1,timeslot2\": 0/1(engaged/disengaged)}\n j = json.dumps(labels_for_annotation)\n f = open(file_name, \"w\")\n f.write(j)\n f.close()", "def generate_expected_two_files():\n fname = {'stress': 'resources/simple_stress.json',\n 'strain': 'resources/simple_strain.json'}\n expected = [ # makes an array of two pif systems\n pif.System(\n properties=[\n pif.Property(name='stress',\n scalars=list(np.linspace(0, 100)),\n conditions=pif.Value(\n name='time',\n scalars=list(np.linspace(0, 100))))]),\n\n pif.System(\n properties=[\n pif.Property(name='strain',\n scalars=list(np.linspace(0, 1)),\n conditions=pif.Value(\n name='time',\n scalars=list(np.linspace(0, 100))))])\n ]\n # dump the pifs into two seperate files\n with open(fname['stress'], 'w') as stress_file:\n pif.dump(expected[0], stress_file)\n with open(fname['strain'], 'w') as strain_file:\n pif.dump(expected[1], strain_file)\n\n return {\n 'file_names': fname,\n 'expected': {\n 'stress': expected[0],\n 'strain': expected[1]\n }\n }", "def detection_algorithm(f_blacklist, f_seconds, f_spikes):\n blacklist = create_blacklist_dict()\n filtered_traces_user_dict = defaultdict(list)\n\n file_type = get_file_type(f_blacklist, f_seconds, f_spikes)\n\n 
inspection_interval = 60*5\n\n bucket_list = [1, 5, 10, 30, 60]\n traces_file_1 = open('final_files/user_packets_1_%s'%(file_type), 'w')\n traces_file_5 = open('final_files/user_packets_5_%s'%(file_type), 'w')\n traces_file_10 = open('final_files/user_packets_10_%s'%(file_type), 'w')\n traces_file_30 = open('final_files/user_packets_30_%s'%(file_type), 'w')\n traces_file_60 = open('final_files/user_packets_bucket_60_%s'%(file_type), 'w')\n packets_file = open('final_files/user_packets_true_false_%s'%(file_type), 'w') \n\n for user in users:\n devids = []\n for d in user.devices:\n devids.append(str(d.id))\n\n devs = {}\n for d in user.devices:\n devs[d.id] = d.platform\n\n for elem_id in devids:\n sql_userid = \"\"\"SELECT login FROM devices WHERE id =:d_id\"\"\"\n user_id = ses.execute(text(sql_userid).bindparams(d_id = elem_id)).fetchone()\n idt = user_id[0]\n\n print idt\n packets_file.write(str(idt)+'\\n')\n\n if idt != 'bowen.laptop':\n continue\n\n #list contains Traces -> timestamp, url\n http_traces_list, dns_traces_list = get_test_data(elem_id)\n print len(http_traces_list)\n print len(dns_traces_list)\n\n cont = 0\n packets_true = defaultdict(list)\n packets_false = defaultdict(list)\n for packet in http_traces_list:\n print cont\n packets_list = get_packets_in_interval(packet, http_traces_list, inspection_interval)\n pkt_user_gen = filter_packet(packet, packets_list, blacklist, f_blacklist, f_seconds, f_spikes, packets_true, packets_false)\n packets_file.write(str(packet.timst) + ' ' + str(pkt_user_gen) + '\\n')\n if pkt_user_gen:\n filtered_traces_user_dict[idt].append(packet.timst)\n cont+=1\n\n packets_true = defaultdict(list)\n packets_false = defaultdict(list)\n for packet in dns_traces_list:\n packets_list = get_packets_in_interval(packet, dns_traces_list, inspection_interval)\n pkt_user_gen = filter_packet(packet, packets_list, blacklist, f_blacklist, f_seconds, f_spikes, packets_true, packets_false)\n packets_file.write(str(packet.timst) + ' ' + str(pkt_user_gen) + '\\n')\n if pkt_user_gen:\n filtered_traces_user_dict[idt].append(packet.timst)\n\n for bucket in bucket_list:\n print bucket\n traces_bucket = []\n traces_bucket = get_interval_list_predefined_gap(sorted(filtered_traces_user_dict[idt]), bucket)\n if bucket == 1:\n traces_file_1.write(idt + '\\n')\n elif bucket == 5:\n traces_file_5.write(idt + '\\n')\n elif bucket == 10:\n traces_file_10.write(idt + '\\n')\n elif bucket == 30:\n traces_file_30.write(idt + '\\n')\n elif bucket == 60:\n traces_file_60.write(idt + '\\n')\n\n print len(traces_bucket)\n for timst in traces_bucket:\n if bucket == 1:\n traces_file_1.write(str(timst) + '\\n')\n elif bucket == 5:\n traces_file_5.write(str(timst) + '\\n')\n elif bucket == 10:\n traces_file_10.write(str(timst) + '\\n')\n elif bucket == 30:\n traces_file_30.write(str(timst) + '\\n')\n elif bucket == 60:\n traces_file_60.write(str(timst) + '\\n')\n\n traces_file_1.close()\n traces_file_5.close()\n traces_file_10.close()\n traces_file_30.close()\n traces_file_60.close()", "def export_string_events(self):\n\n # ask user observations to analyze\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=True,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n fn = QFileDialog(self).getSaveFileName(self, \"Export 
events as strings\", \"\",\n \"Events file (*.txt *.tsv);;All files (*)\")\n fileName = fn[0] if type(fn) is tuple else fn\n\n if fileName:\n\n response = dialog.MessageDialog(programName, \"Include observation(s) information?\", [YES, NO])\n\n try:\n with open(fileName, \"w\", encoding=\"utf-8\") as outFile:\n for obsId in selectedObservations:\n # observation id\n outFile.write(\"\\n# observation id: {}\\n\".format(obsId))\n # observation descrition\n outFile.write(\"# observation description: {}\\n\".format(\n self.pj[OBSERVATIONS][obsId][\"description\"].replace(os.linesep, \" \")))\n # media file name\n if self.pj[OBSERVATIONS][obsId][TYPE] in [MEDIA]:\n outFile.write(\"# Media file name: {0}{1}{1}\".format(\", \".join([os.path.basename(x)\n for x in\n self.pj[OBSERVATIONS]\n [obsId]\n [FILE][PLAYER1]]),\n os.linesep))\n if self.pj[OBSERVATIONS][obsId][TYPE] in [LIVE]:\n outFile.write(\"# Live observation{0}{0}\".format(os.linesep))\n\n # independent variables\n if \"independent_variables\" in self.pj[OBSERVATIONS][obsId]:\n outFile.write(\"# Independent variables\\n\")\n\n # rows.append([\"variable\", \"value\"])\n for variable in self.pj[OBSERVATIONS][obsId][\"independent_variables\"]:\n outFile.write(\"# {0}: {1}\\n\".format(variable,\n self.pj[OBSERVATIONS][obsId][\n \"independent_variables\"][variable]))\n outFile.write(\"\\n\")\n\n # selected subjects\n for subj in plot_parameters[\"selected subjects\"]:\n if subj:\n subj_str = \"\\n# {}:\\n\".format(subj)\n else:\n subj_str = \"\\n# No focal subject:\\n\"\n outFile.write(subj_str)\n\n out = self.create_behavioral_strings(obsId, subj, plot_parameters)\n if out:\n outFile.write(out + \"\\n\")\n\n except:\n logging.critical(sys.exc_info()[1])\n QMessageBox.critical(None, programName, str(sys.exc_info()[1]), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)", "def export_aggregated_events(self):\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out, not_paired_obs_list = \"\", []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n return\n\n parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=False,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not parameters[\"selected subjects\"] or not parameters[\"selected behaviors\"]:\n return\n\n # check for grouping results\n flag_group = True\n if len(selectedObservations) > 1:\n flag_group = dialog.MessageDialog(programName, \"Group events from selected observations in one file?\",\n [YES, NO]) == YES\n\n extended_file_formats = [\"Tab Separated Values (*.tsv)\",\n \"Comma Separated Values (*.csv)\",\n \"Open Document Spreadsheet ODS (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\",\n \"SDIS (*.sds)\",\n \"SQL dump file (*.sql)\"]\n\n if flag_group:\n file_formats = [\"tsv\", \"csv\", \"ods\", \"xlsx\", \"xls\", \"html\", \"sds\",\n \"sql\"] # must be in same 
order than extended_file_formats\n\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self,\n \"Export aggregated events\",\n \"\", \";;\".join(extended_file_formats))\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Export aggregated events\", \"\",\n \";;\".join(extended_file_formats))\n\n if not fileName:\n return\n\n outputFormat = file_formats[extended_file_formats.index(filter_)]\n if pathlib.Path(fileName).suffix != \".\" + outputFormat:\n fileName = str(pathlib.Path(fileName)) + \".\" + outputFormat\n\n else: # not grouping\n\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma Separated values (*.csv)\",\n \"Open Document Spreadsheet (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\")\n item, ok = QInputDialog.getItem(self, \"Export events format\", \"Available formats\", items, 0, False)\n if not ok:\n return\n outputFormat = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n exportDir = QFileDialog(self).getExistingDirectory(self, \"Choose a directory to export events\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if outputFormat == \"sql\":\n _, _, conn = db_functions.load_aggregated_events_in_db(self.pj,\n parameters[\"selected subjects\"],\n selectedObservations,\n parameters[\"selected behaviors\"])\n try:\n with open(fileName, \"w\") as f:\n for line in conn.iterdump():\n f.write(\"{}\\n\".format(line))\n except:\n errorMsg = sys.exc_info()[1]\n logging.critical(errorMsg)\n QMessageBox.critical(None, programName, str(errorMsg), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n return\n\n data_header = tablib.Dataset()\n data_header.title = \"Aggregated events\"\n header = [\"Observation id\", \"Observation date\", \"Media file\", \"Total length\", \"FPS\"]\n if INDEPENDENT_VARIABLES in self.pj:\n for idx in sorted_keys(self.pj[INDEPENDENT_VARIABLES]):\n header.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n header.extend([\"Subject\", \"Behavior\"])\n header.extend([\"Modifiers\"])\n header.extend([\"Behavior type\", \"Start (s)\", \"Stop (s)\", \"Duration (s)\", \"Comment start\", \"Comment stop\"])\n data_header.append(header)\n\n data = copy.deepcopy(data_header)\n for obsId in selectedObservations:\n d = export_observation.export_aggregated_events(self.pj, parameters, obsId)\n data.extend(d)\n\n if not flag_group:\n fileName = str(\n pathlib.Path(pathlib.Path(exportDir) / safeFileName(obsId)).with_suffix(\".\" + outputFormat))\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n data = copy.deepcopy(data_header)\n\n if outputFormat == \"sds\": # SDIS format\n out = \"% SDIS file created by eMOC (www.eMOC.unito.it) at {}\\nTimed <seconds>;\\n\".format(\n datetime_iso8601())\n for obsId in selectedObservations:\n # observation id\n out += \"\\n<{}>\\n\".format(obsId)\n dataList = list(data[1:])\n for event in sorted(dataList, key=lambda x: x[-4]): # sort events by start time\n if event[0] == obsId:\n behavior = event[-7]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n behavior = behavior.replace(char, \"_\")\n subject = event[-8]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n subject = subject.replace(char, \"_\")\n event_start = \"{0:.3f}\".format(\n 
round(event[-4], 3)) # start event (from end for independent variables)\n if not event[-3]: # stop event (from end)\n event_stop = \"{0:.3f}\".format(round(event[-4] + 0.001, 3))\n else:\n event_stop = \"{0:.3f}\".format(round(event[-3], 3))\n out += \"{subject}_{behavior},{start}-{stop} \".format(subject=subject, behavior=behavior,\n start=event_start, stop=event_stop)\n out += \"/\\n\\n\"\n with open(fileName, \"wb\") as f:\n f.write(str.encode(out))\n return\n\n if flag_group:\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def output_inversions(folder,threshold):\n \n start_stop_matcher = re.compile(\"(.*):(.*)-(.*)\")\n common_inversions = []\n abs_alt = defaultdict(dict)\n abs_ref = defaultdict(dict)\n perc_alt = defaultdict(dict)\n\n abs_alt,abs_ref,perc_alt,perc_ref,common_inversions = parse_geno_file(folder,True) ## Call the parser, the returned objects are dictionary of dictionaries\n\n FILE_HANDLE = open('output_inversions_'+str(threshold)+\".tsv\",'w')\n output_write = \"\\t\".join(common_inversions)\n print >> FILE_HANDLE,\"Strain\"+\"\\t\"+output_write\n\n for strain in abs_alt.keys():\n for inversion in common_inversions:\n #if(perc_alt[strain][inversion] > threshold):\n match = re.match(start_stop_matcher,inversion)\n #print match.group(1)\n start = int(match.group(2).replace(',',''))\n stop = int(match.group(3).replace(',',''))\n length = stop-start\n print >> FILE_HANDLE,strain+\"\\t\"+str(length)+\"\\t\"+str(perc_alt[strain][inversion])+\"\\t\"+str(perc_ref[strain][inversion])+\"\\t\"+str(abs_alt[strain][inversion])+\"\\t\"+str(abs_ref[strain][inversion])\n\n FILE_HANDLE.close()", "def filter_events(self):\n events_by_b = []\n events_by_npart = []\n\n bfiles = [f for f in glob.glob(self._path+\"/*.b\") if os.path.isfile(f)]\n npfiles = [f for f in glob.glob(self._path+\"/*.npart\") if os.path.isfile(f)]\n\n if bfiles:\n print \"Found a .b file, doing impact parameter filtering.\"\n self.sort_by_bfile(bfiles, events_by_b)\n if npfiles:\n print \"Found a .npart file, doing participant number filtering.\"\n self.sort_by_npartfile(npfiles, events_by_npart)\n\n if not bfiles and not npfiles:\n self.sort_by_logfolder(events_by_b, events_by_npart)\n\n # Return the appropriate list of events\n if events_by_b:\n print len(events_by_b), \"data files remain after filtering.\"\n return events_by_b\n elif events_by_npart:\n print len(events_by_npart), \"data files remain after filtering.\"\n return events_by_npart\n else:\n print \"filter_events: None of the events fulfill the required criteria:\"\n print \"b range:\", self._bmin, self._bmax, \"Npart range:\", self._npmin, self._npmax", "def makeWeights(_files,treeName,category,_outputFile, BINS, PT, ETA):\n\tROOT.gROOT.SetBatch(1)\n\n\t#treeName = 'histoMuFromTk/fitter_tree'\n\t_trees = dict( [ ( name, _file.Get(treeName) ) for name,_file in _files.iteritems()] )\n\t#Check if in both files are the tree\n\tfor _tree in _trees.itervalues():\n\t\tif not _tree:\n\t\t\treturn None\n\t\n\thistos = {}\n\tweights = {}\n\n\t#-- The ':' token in A:B read as 'B conditioned to A' (look this unregular order)\n\t#-- The categories are datamembers which can be 1 or 0, a condition;\n\t#-- if we want to weight the pt-distribution of all probes for the L1Mu3 trigger\n\t#-- category, we must decided with respect which muonID category (Glb, TMLSAT, ...), then\n\t#-- reduce to a subset which the muonID 
category == 1 and calculate the weight of the\n\t#-- pt-distribution\n\t#-- The category variable can be A:B:C:..., the last one is the only one which we don't \n\t#-- want to reduce (see find category)\n\tcondCategory = ''\n\tstoreCategory = 'weight'\n\tif category.find(':') != -1:\n\t\t_catList = category.split(':')\n\t\t#-- This for is to include the quality cuts and other possible categories\n\t\tfor i in xrange(len(_catList)-1):\n\t\t\tcondCategory += ' && '+_catList[i]+' == 1 '# BUG------> && '+triggerCat+' == 1' \n\t\t\tstoreCategory += '_'+_catList[i]\n\n\tinstName = lambda k,pt : PT+'>>h_'+category+name+str(k)+'(50,'+str(pt[0])+','+str(pt[1])+')'\n\tcuts = lambda pt,eta: PT+' >= '+str(pt[0])+' && '+PT+' <'+str(pt[1])+\\\n\t\t\t' && '+ETA+' >= '+str(eta[0])+' && '+ETA+' < '+str(eta[1])+condCategory\n\t#print cuts #--------------------------> PROVISONAL: PARECE QUE SE RECUPERAN LOS ESPECTROS DE LOS PASSING\n\t #--------------------------> NO DE LOS ALL\n\tk = 0\n\tfor i in xrange(len(BINS.__getattribute__(PT))-1):\n\t\tpt = (BINS.__getattribute__(PT)[i],BINS.__getattribute__(PT)[i+1])\n\t\tfor j in xrange(len(BINS.__getattribute__(ETA))-1):\n\t\t\teta = (BINS.__getattribute__(ETA)[j],BINS.__getattribute__(ETA)[j+1])\n\t\t\tfor name,_t in _trees.iteritems(): \n\t\t\t\tN = _t.Draw( instName(k,pt),cuts(pt,eta) )\n\t\t\t\thistos[name] = ROOT.gDirectory.Get('h_'+category+name+str(k))\n\t\t\tprint ' \\033[1;34mDoing bin'+str(k)+' '+PT+'=('+str(pt[0])+','+str(pt[1])+') '+ETA+'=('+str(eta[0])+','+str(eta[1])+')\\033[1;m'\n\t\t\tswap = histos['numerator'].Clone(category+'_bin'+str(k))\n\t\t\tdummy = swap.Divide(histos['denominator'])\n\t\t\tweights[category+'_bin'+str(k)] =( (eta[0],eta[1]), (pt[0],pt[1]), ROOT.gDirectory.Get(category+'_bin'+str(k)) )\n\t\t\t#Acura els limits\n\t\t\tweights[category+'_bin'+str(k)][2].GetXaxis().SetLimits( pt[0], pt[1] ) \n\t\t\t#weights[category+'_bin'+str(k)][2].SetNormFactor(1) \n\t\t\tk += 1\n\t_out = ROOT.TFile(_outputFile,'RECREATE')\n\tfor name,(etaBins,ptBins,histo) in weights.iteritems():\n\t\thisto.Write()\n\t_out.Close()\t\n\treturn weights", "def preProcess(self,filename,fileoutput):\t\n\tdata=[]\n\tval =set()\n\tfo = open(fileoutput, \"wb\")\n\twith open(filename) as data_file:\n \tfor tags in data_file:\n\t\t\tif \"timestamp\" not in tags: \n \t \t continue\n\t\t\tts = re.search('timestamp: (.+?)\\)', tags).group(1)\n\t\t\tval =set()\n\t\t\tval.update({tag for tag in tags.split() if tag.startswith(\"#\")})\n\t\t\t#print val\n\t\t\tif len(val) >1:\n\t\t\t\tself.maintainWindow(data,ts)\n\t\t\t\tdata.append((ts,val))\n\t\t\t\tself.createAdjList(val,\"add\")\n\t\t\t\tprint(\"***\")\n\t\t\telse:\n\t\t\t\tself.maintainWindow(data,ts)\n\t\t\t\tprint(\"@@@@\")\n\t\t\tresult = self.calculateRollingAverages() \n\t\t\tfo.write(result+\"\\n\")\n fo.close()\n data_file.close()", "def write_filter_spec(filters, filename):\n data = export_filters(filters)\n with open(filename, 'w') as fp:\n json.dump(data, fp, indent = 4)" ]
[ "0.67087024", "0.5573654", "0.5543615", "0.5325832", "0.5257581", "0.523323", "0.5181749", "0.5180666", "0.51276666", "0.5114678", "0.51098496", "0.5098101", "0.5082852", "0.5076031", "0.50719666", "0.5061145", "0.50575876", "0.5052095", "0.5037945", "0.5035007", "0.5021967", "0.5007761", "0.49984068", "0.49963072", "0.49908468", "0.49901322", "0.49865457", "0.49742347", "0.49556065", "0.49537665" ]
0.62420124
1
All events in one time window (not just two); used if there is more than one event occurring within a short time period. Will generate an output file for every event that occurs within a given time window; not to be confused with many_events, which generates output given multiple time windows. Can create files for up to 3 events within the specified time window.
def multi_event(st,et,instrument_chosen,subevent):
    print('checking for multiple events within given time window')

    #creating file for time window with first events for all thresholds
    out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)

    #creating files for all second events for all thresholds
    new_files = two_in_one(out_name,et,subevent)

    #creating files for any third events for all thresholds that had a second event
    for file in new_files:
        two_in_one(file,et,subevent)

    return
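A minimal usage sketch for the multi_event routine above, for illustration only: the dates, the instrument_chosen and subevent values, and the assumption that cfg, database_extraction, and two_in_one are available from the surrounding codebase are all hypothetical, not taken from this dataset row.

from datetime import datetime

# hypothetical event window; real values would come from an event list
st = datetime(2017, 9, 4)
et = datetime(2017, 9, 12)

# placeholder values; the real instrument_chosen and subevent flags depend on the codebase
instrument_chosen = True
subevent = False

# writes one observation file for the first event per threshold, then extra
# files for any second and third events found inside the same window
multi_event(st, et, instrument_chosen, subevent)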
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def INPUT_Periods_file(input):\n \n global events\n \n tb = 3600\n ta = 3600\n \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n \n len_events = len(events)\n \n input_period = open(os.path.join(os.getcwd(), 'INPUT-Periods'), 'a+')\n\n for i in range(0, len_events):\n \n str_event = str(events[i]['datetime']-tb) + '_' + \\\n str(events[i]['datetime']+ta) + '_' + \\\n str(events[i]['magnitude'] - 0.01) + '_' + \\\n str(events[i]['magnitude'] + 0.01) + '\\n'\n input_period.writelines(str_event)\n \n input_period.close()\n \n print '************************************************************' \n print 'New INPUT-Periods file is generated in your folder.'\n print 'Now, you could run the program again based on your desired event :)' \n print '************************************************************'\n \n sys.exit()", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 
1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n \"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")", "def export_aggregated_events(self):\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out, not_paired_obs_list = \"\", []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n return\n\n parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=False,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not parameters[\"selected subjects\"] or not parameters[\"selected behaviors\"]:\n return\n\n # check for grouping results\n flag_group = True\n if len(selectedObservations) > 1:\n flag_group = dialog.MessageDialog(programName, \"Group events from selected observations in one file?\",\n [YES, NO]) == YES\n\n extended_file_formats = [\"Tab Separated Values (*.tsv)\",\n \"Comma Separated Values (*.csv)\",\n \"Open Document Spreadsheet ODS (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\",\n \"SDIS (*.sds)\",\n \"SQL dump file (*.sql)\"]\n\n if flag_group:\n file_formats = [\"tsv\", \"csv\", \"ods\", \"xlsx\", \"xls\", \"html\", \"sds\",\n \"sql\"] # must be in same order than extended_file_formats\n\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self,\n \"Export aggregated events\",\n \"\", \";;\".join(extended_file_formats))\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Export aggregated events\", \"\",\n \";;\".join(extended_file_formats))\n\n if not fileName:\n return\n\n outputFormat = file_formats[extended_file_formats.index(filter_)]\n if pathlib.Path(fileName).suffix != \".\" + outputFormat:\n fileName = str(pathlib.Path(fileName)) + \".\" + outputFormat\n\n else: # not grouping\n\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma Separated values (*.csv)\",\n \"Open Document Spreadsheet (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\")\n 
item, ok = QInputDialog.getItem(self, \"Export events format\", \"Available formats\", items, 0, False)\n if not ok:\n return\n outputFormat = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n exportDir = QFileDialog(self).getExistingDirectory(self, \"Choose a directory to export events\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if outputFormat == \"sql\":\n _, _, conn = db_functions.load_aggregated_events_in_db(self.pj,\n parameters[\"selected subjects\"],\n selectedObservations,\n parameters[\"selected behaviors\"])\n try:\n with open(fileName, \"w\") as f:\n for line in conn.iterdump():\n f.write(\"{}\\n\".format(line))\n except:\n errorMsg = sys.exc_info()[1]\n logging.critical(errorMsg)\n QMessageBox.critical(None, programName, str(errorMsg), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n return\n\n data_header = tablib.Dataset()\n data_header.title = \"Aggregated events\"\n header = [\"Observation id\", \"Observation date\", \"Media file\", \"Total length\", \"FPS\"]\n if INDEPENDENT_VARIABLES in self.pj:\n for idx in sorted_keys(self.pj[INDEPENDENT_VARIABLES]):\n header.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n header.extend([\"Subject\", \"Behavior\"])\n header.extend([\"Modifiers\"])\n header.extend([\"Behavior type\", \"Start (s)\", \"Stop (s)\", \"Duration (s)\", \"Comment start\", \"Comment stop\"])\n data_header.append(header)\n\n data = copy.deepcopy(data_header)\n for obsId in selectedObservations:\n d = export_observation.export_aggregated_events(self.pj, parameters, obsId)\n data.extend(d)\n\n if not flag_group:\n fileName = str(\n pathlib.Path(pathlib.Path(exportDir) / safeFileName(obsId)).with_suffix(\".\" + outputFormat))\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n data = copy.deepcopy(data_header)\n\n if outputFormat == \"sds\": # SDIS format\n out = \"% SDIS file created by eMOC (www.eMOC.unito.it) at {}\\nTimed <seconds>;\\n\".format(\n datetime_iso8601())\n for obsId in selectedObservations:\n # observation id\n out += \"\\n<{}>\\n\".format(obsId)\n dataList = list(data[1:])\n for event in sorted(dataList, key=lambda x: x[-4]): # sort events by start time\n if event[0] == obsId:\n behavior = event[-7]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n behavior = behavior.replace(char, \"_\")\n subject = event[-8]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n subject = subject.replace(char, \"_\")\n event_start = \"{0:.3f}\".format(\n round(event[-4], 3)) # start event (from end for independent variables)\n if not event[-3]: # stop event (from end)\n event_stop = \"{0:.3f}\".format(round(event[-4] + 0.001, 3))\n else:\n event_stop = \"{0:.3f}\".format(round(event[-3], 3))\n out += \"{subject}_{behavior},{start}-{stop} \".format(subject=subject, behavior=behavior,\n start=event_start, stop=event_stop)\n out += \"/\\n\\n\"\n with open(fileName, \"wb\") as f:\n f.write(str.encode(out))\n return\n\n if flag_group:\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the 
file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def runEventCreation():\r\n config = CONFIG['steps']['EventCreation']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n\r\n min_window_size = ci['min_window_size']\r\n change_speed_by = ci['change_speed_by']\r\n speed_ratio = ci['train_zero_speed_ratio']\r\n datetime_limit = ci['datetime_limit']\r\n csv_name_prefix = ci['csv_name_prefix']\r\n input_bucket = ci['bucket']\r\n window_event_bucket = ci['window_event_bucket']\r\n window_events_file = ci['window_events_file']\r\n\r\n output_bucket = co['bucket']\r\n event_dir = co['event_dir']\r\n filename_include = co['filename_include']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n csv_files = get_files(input_bucket, boto_client,\r\n file_type='csv', prefix='filtered')\r\n csv_files = ['filtered/7016_2020-09-09.csv']\r\n 
create_window_event(files=csv_files,\r\n input_bucket=input_bucket,\r\n output_bucket=output_bucket,\r\n minio_client=minioClient,\r\n min_window_size=min_window_size,\r\n ouput_dir=event_dir,\r\n window_event_bucket=window_event_bucket,\r\n window_events_file=window_events_file,\r\n csv_name_prefix=csv_name_prefix,\r\n change_speed_by=change_speed_by,\r\n train_zero_speed_ratio=speed_ratio,\r\n datetime_limit=datetime_limit,\r\n filename_include=filename_include)", "def export_events(self, output_file, params, format='json', timezone_offset=None, add_gzip_header=False,\n compress=False, request_per_day=False, raw_stream=False, buffer_size=1024):\n # Increase timeout to 20 minutes if it's still set to default, /export requests can take a long time\n timeout_backup = self.timeout\n if self.timeout == 120:\n self.timeout = 1200\n\n request_count = 0\n if request_per_day:\n date_format = '%Y-%m-%d'\n f = datetime.datetime.strptime(params['from_date'], date_format)\n t = datetime.datetime.strptime(params['to_date'], date_format)\n delta = t - f\n request_count = delta.days\n\n for x in range(request_count + 1):\n params_copy = deepcopy(params)\n current_file = output_file\n\n if request_per_day:\n d = time.strptime(params['from_date'], date_format)\n current_day = (datetime.date(d.tm_year, d.tm_mon, d.tm_mday) + datetime.timedelta(x)).strftime(\n date_format)\n file_components = output_file.split('.')\n current_file = file_components[0] + \"_\" + current_day\n if len(file_components) > 1:\n current_file = current_file + '.' + file_components[1]\n params_copy['from_date'] = current_day\n params_copy['to_date'] = current_day\n\n events = self.query_export(params_copy, add_gzip_header=add_gzip_header, raw_stream=raw_stream)\n\n if raw_stream:\n if add_gzip_header and current_file[-3:] != '.gz':\n current_file = current_file + '.gz'\n with open(current_file, 'wb') as fp:\n shutil.copyfileobj(events, fp, buffer_size)\n else:\n if timezone_offset is not None:\n # Convert timezone_offset from hours to seconds\n timezone_offset = timezone_offset * 3600\n for event in events:\n event['properties']['time'] = int(event['properties']['time'] - timezone_offset)\n\n Mixpanel.export_data(events, current_file, format=format, compress=compress)\n\n # If we modified the default timeout above, restore default setting\n if timeout_backup == 120:\n self.timeout = timeout_backup", "def write_log_events(self, log_events):\n # Create log file name.\n # Replace / with - so LogGroup names can be written to current directory.\n file_name = self.log_group.name.replace('/', '-') + \"-\" + self.name + '-0.log'\n\n # Append LogEvents to log file.\n with open(file_name, 'a') as log_file:\n for event in log_events:\n log_file.write(event.message + '\\n')\n print('Wrote ' + str(len(log_events)) + ' LogEvents to ' + file_name)\n\n # Rotate log file if it's bigger than limit\n log_file_size = os.path.getsize(file_name)\n\n if log_file_size > self.log_file_limit:\n rotated_file_name = file_name.split('.')[0] + '-' + str(int(time.time())) + \".log\"\n print('Rotating ' + file_name + ' to ' + rotated_file_name)\n os.rename(file_name, rotated_file_name)", "def many_events(start_time,end_time,subevent_bools):\r\n \r\n #running through for each event\r\n for j in range(len(start_time)):\r\n \r\n #start, end, and subevent bool for this event\r\n st = start_time[j]\r\n et = end_time[j]\r\n subevent = bool(subevent_bools[j])\r\n \r\n #checking if start time is actually available\r\n if str(st) != 'nan':\r\n try:\r\n st = parse(st)\r\n 
yes_st = True\r\n except ValueError:\r\n yes_st = False\r\n else:\r\n yes_st = False\r\n \r\n #checking if end time is actually available\r\n if str(et) != 'nan':\r\n try:\r\n et = parse(et)\r\n yes_et = True\r\n except ValueError:\r\n yes_et = False\r\n else:\r\n yes_et = False\r\n \r\n #if both start and end times are available, running the code\r\n if yes_st and yes_et:\r\n #event must be after Nov. 2010 because currently no capability for\r\n #instruments in use before then - change this if you have that\r\n #capability\r\n if st > datetime(2010,9,1):\r\n try:\r\n print('got start and end times! running database extraction') \r\n database_extraction(st,et,instrument_chosen,subevent)\r\n except:\r\n continue\r\n else:\r\n print('cannot run for events before November 2010 because do not have '\r\n 'access to instruments before then')", "def export_state_events_as_textgrid(self):\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n\n if not selectedObservations:\n return\n\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=False,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n exportDir = QFileDialog(self).getExistingDirectory(self, \"Export events as TextGrid\", os.path.expanduser('~'),\n options=QFileDialog(self).ShowDirsOnly)\n if not exportDir:\n return\n\n for obsId in selectedObservations:\n\n out = \"\"\"File type = \"ooTextFile\"\nObject class = \"TextGrid\"\n\nxmin = 0\nxmax = 98.38814058956916\ntiers? <exists>\nsize = {subjectNum}\nitem []:\n\"\"\"\n subjectheader = \"\"\" item [{subjectIdx}]:\n class = \"IntervalTier\"\n name = \"{subject}\"\n xmin = {intervalsMin}\n xmax = {intervalsMax}\n intervals: size = {intervalsSize}\n\"\"\"\n\n template = \"\"\" intervals [{count}]:\n xmin = {xmin}\n xmax = {xmax}\n text = \"{name}\"\n\"\"\"\n\n flagUnpairedEventFound = False\n '''TO BE REMOVED totalMediaDuration = round(self.observationTotalMediaLength(obsId), 3)'''\n totalMediaDuration = round(project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId]), 3)\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n cursor.execute((\"SELECT count(distinct subject) FROM events \"\n \"WHERE observation = '{}' AND subject in ('{}') AND type = 'STATE' \").format(obsId,\n \"','\".join(\n plot_parameters[\n \"selected subjects\"])))\n subjectsNum = int(list(cursor.fetchall())[0][0])\n\n subjectsMin, subjectsMax = 0, totalMediaDuration\n\n out = \"\"\"File type = \"ooTextFile\"\nObject class = \"TextGrid\"\n\nxmin = {subjectsMin}\nxmax = {subjectsMax}\ntiers? <exists>\nsize = {subjectsNum}\nitem []:\n\"\"\".format(subjectsNum=subjectsNum, subjectsMin=subjectsMin, subjectsMax=subjectsMax)\n\n subjectIdx = 0\n for subject in plot_parameters[\"selected subjects\"]:\n\n subjectIdx += 1\n\n cursor.execute(\"SELECT count(*) FROM events WHERE observation = ? AND subject = ? AND type = 'STATE' \",\n (obsId, subject))\n intervalsSize = int(list(cursor.fetchall())[0][0] / 2)\n\n intervalsMin, intervalsMax = 0, totalMediaDuration\n\n out += subjectheader\n\n cursor.execute(\n \"SELECT occurence, code FROM events WHERE observation = ? AND subject = ? 
AND type = 'STATE' order by occurence\",\n (obsId, subject))\n\n rows = [{\"occurence\": float2decimal(r[\"occurence\"]), \"code\": r[\"code\"]} for r in cursor.fetchall()]\n if not rows:\n continue\n\n count = 0\n\n # check if 1st behavior starts at the beginning\n\n if rows[0][\"occurence\"] > 0:\n count += 1\n out += template.format(count=count, name=\"null\", xmin=0.0, xmax=rows[0][\"occurence\"])\n\n for idx, row in enumerate(rows):\n if idx % 2 == 0:\n\n # check if events not interlacced\n if row[\"key\"] != rows[idx + 1][\"key\"]:\n QMessageBox.critical(None, programName,\n \"The events are interlaced. It is not possible to produce the Praat TextGrid file\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n return\n\n count += 1\n out += template.format(count=count, name=row[\"key\"], xmin=row[\"occurence\"],\n xmax=rows[idx + 1][\"occurence\"])\n\n # check if difference is > 0.001\n if len(rows) > idx + 2:\n if rows[idx + 2][\"occurence\"] - rows[idx + 1][\"occurence\"] > 0.001:\n\n logging.debug(\"difference: {}-{}={}\".format(rows[idx + 2][\"occurence\"],\n rows[idx + 1][\"occurence\"],\n rows[idx + 2][\"occurence\"] - rows[idx + 1][\n \"occurence\"]))\n\n out += template.format(count=count + 1, name=\"null\", xmin=rows[idx + 1][\"occurence\"],\n xmax=rows[idx + 2][\"occurence\"])\n count += 1\n else:\n logging.debug(\"difference <=0.001: {} - {} = {}\".format(rows[idx + 2][\"occurence\"],\n rows[idx + 1][\"occurence\"],\n rows[idx + 2][\"occurence\"] -\n rows[idx + 1][\"occurence\"]))\n rows[idx + 2][\"occurence\"] = rows[idx + 1][\"occurence\"]\n logging.debug(\"difference after: {} - {} = {}\".format(rows[idx + 2][\"occurence\"],\n rows[idx + 1][\"occurence\"],\n rows[idx + 2][\"occurence\"] -\n rows[idx + 1][\"occurence\"]))\n\n # check if last event ends at the end of media file\n if rows[-1][\"occurence\"] < project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId]):\n count += 1\n out += template.format(count=count, name=\"null\", xmin=rows[-1][\"occurence\"],\n xmax=totalMediaDuration)\n\n # add info\n out = out.format(subjectIdx=subjectIdx, subject=subject, intervalsSize=count, intervalsMin=intervalsMin,\n intervalsMax=intervalsMax)\n\n try:\n with open(\"{exportDir}{sep}{obsId}.textGrid\".format(exportDir=exportDir, sep=os.sep, obsId=obsId),\n \"w\") as f:\n f.write(out)\n\n if flagUnpairedEventFound:\n QMessageBox.warning(self, programName,\n \"Some state events are not paired. 
They were excluded from export\", \\\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n self.statusbar.showMessage(\"Events exported successfully\", 10000)\n\n except:\n errorMsg = sys.exc_info()[1]\n logging.critical(errorMsg)\n QMessageBox.critical(None, programName, str(errorMsg), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)", "def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def get_events(self):\n\n print \"\\ngetting new Events\"\n path = os.path.join(self.path, 'no_consent')\n for d_cnt, date in sorted(enumerate(os.listdir(path))):\n\n if os.path.isdir(os.path.join(self.events_path, date)):\n print \"%s already processed\" % date\n continue\n\n directory = os.path.join(path, date)\n for recording in os.listdir(directory):\n if os.path.isdir(os.path.join(directory, recording)):\n\n # Can we reduce this list of objects using ROI information?\n try:\n use_objects = {}\n for region, objects in self.soma_objects.items():\n for ob, position in objects.items():\n use_objects[ob] = position\n\n ce.get_event(recording, directory, use_objects, self.config['events'])\n except:\n print \"recording: %s in: %s is broken.\" %(recording, directory)\n else:\n print \"already processed: %s\" % recording\n print \"done.\"", "def _events_tsv(events, durations, raw, fname, trial_type, overwrite=False):\n # Start by filling all data that we know into an ordered dictionary\n first_samp = raw.first_samp\n sfreq = raw.info[\"sfreq\"]\n events = events.copy()\n events[:, 0] -= first_samp\n\n # Onset column needs to be specified in seconds\n data = OrderedDict(\n [\n (\"onset\", events[:, 0] / sfreq),\n (\"duration\", durations),\n (\"trial_type\", None),\n (\"value\", events[:, 2]),\n (\"sample\", events[:, 0]),\n ]\n )\n\n # Now check if trial_type is specified or should be removed\n if trial_type:\n trial_type_map = {v: k for k, v in trial_type.items()}\n data[\"trial_type\"] = [trial_type_map.get(i, \"n/a\") for i in events[:, 2]]\n else:\n del data[\"trial_type\"]\n\n _write_tsv(fname, data, overwrite)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 20 events')\n try:\n eventsResult = service.events().list(\n calendarId='[email protected]', timeMin=now, maxResults=20, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n if not events:\n print('No upcoming events found.')\n text_file = open(\"scheduledActions.txt\", \"wb\") #May want to use a check on the msg type to only overwrite calendar tasks\n # text_file.write(bytes('Updated '+now[:-8]+'\\n','UTF-8'))\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n start = start[:22] + start[-2:] #Trims the last colon\n start = datetime.datetime.strptime(start,'%Y-%m-%dT%H:%M:%S%z')\n start = int(time.mktime(start.timetuple()))\n 
end = event['end'].get('dateTime', event['end'].get('date'))\n end = end[:22] + end[-2:] #Trims the last colon\n end = datetime.datetime.strptime(end,'%Y-%m-%dT%H:%M:%S%z')\n end = int(time.mktime(end.timetuple()))\n description = event['description']\n if description.count(',')==5:\n desc1=description.split(\",\")[0] + \",\" + description.split(\",\")[1] + \",\" + description.split(\",\")[2]\n print(start,desc1)\n writeString=str(start)+','+desc1+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n desc2=description.split(\",\")[3] + \",\" + description.split(\",\")[4] + \",\" + description.split(\",\")[5]\n print(end,desc2)\n writeString=str(end)+','+desc2+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n else:\n print(start, description) #event['summary'] event['location']\n writeString=str(start)+','+description+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n text_file.close()\n print('Calendar read complete.')\n except httplib2.ServerNotFoundError:\n print(\"!---- Looks like there's no internet connection just now. Wait till tomorrow.\")", "def create_foders_files(events, eventpath):\n \n len_events = len(events)\n \n for i in range(0, len_events):\n if os.path.exists(os.path.join(eventpath, events[i]['event_id'])) == True:\n \n if raw_input('Folder for -- the requested Period (min/max) ' + \\\n 'and Magnitude (min/max) -- exists in your directory.' + '\\n\\n' + \\\n 'You could either close the program and try updating your ' + \\\n 'folder OR remove the tree, continue the program and download again.' + \\\n '\\n' + 'Do you want to continue? (Y/N)' + '\\n') == 'Y':\n print '-------------------------------------------------------------'\n shutil.rmtree(os.path.join(eventpath, events[i]['event_id']))\n \n else:\n print '------------------------------------------------'\n print 'So...you decided to update your folder...Ciao'\n print '------------------------------------------------'\n sys.exit()\n\n for i in range(0, len_events):\n try:\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'BH_RAW'))\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'Resp'))\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'info'))\n except Exception, e:\n pass\n \n for i in range(0, len_events):\n Report = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'report_st'), 'a+')\n Report.close()\n \n \n for i in range(0, len_events):\n Exception_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'exception'), 'a+')\n eventsID = events[i]['event_id']\n Exception_file.writelines('\\n' + eventsID + '\\n')\n \n Syn_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'station_event'), 'a+')\n Syn_file.close()\n \n if input['time_iris'] == 'Y':\n for i in range(0, len_events):\n time_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'iris_time'), 'a+')\n time_file.close()\n \n \n for i in range(0, len_events):\n quake_file = open(os.path.join(eventpath, events[i]['event_id'],\\\n 'info', 'quake'), 'a+')\n \n quake_file.writelines(repr(events[i]['datetime'].year).rjust(15)\\\n + repr(events[i]['datetime'].julday).rjust(15) \\\n + repr(events[i]['datetime'].month).rjust(15) \\\n + repr(events[i]['datetime'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['datetime'].hour).rjust(15)\\\n + repr(events[i]['datetime'].minute).rjust(15) + \\\n repr(events[i]['datetime'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')\n \n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % 
events[i]['latitude'])) + '%.5f' \\\n % events[i]['latitude'] + \\\n ' '*(15 - len('%.5f' % events[i]['longitude'])) + '%.5f' \\\n % events[i]['longitude'] + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % abs(events[i]['depth']))) + '%.5f' \\\n % abs(events[i]['depth']) + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % abs(events[i]['magnitude']))) + '%.5f' \\\n % abs(events[i]['magnitude']) + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len(events[i]['event_id'])) + \\\n events[i]['event_id'] + '-' + '\\n')\n \n quake_file.writelines(repr(events[i]['t1'].year).rjust(15)\\\n + repr(events[i]['t1'].julday).rjust(15) \\\n + repr(events[i]['t1'].month).rjust(15) \\\n + repr(events[i]['t1'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['t1'].hour).rjust(15)\\\n + repr(events[i]['t1'].minute).rjust(15) + \\\n repr(events[i]['t1'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')\n \n quake_file.writelines(repr(events[i]['t2'].year).rjust(15)\\\n + repr(events[i]['t2'].julday).rjust(15) \\\n + repr(events[i]['t2'].month).rjust(15) \\\n + repr(events[i]['t2'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['t2'].hour).rjust(15)\\\n + repr(events[i]['t2'].minute).rjust(15) + \\\n repr(events[i]['t2'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')", "def _save_events_summary(self):\n for name, events in self._events.items():\n dict_events = [event.to_dict() for event in events]\n dump_data(dict_events, self._make_event_filename(name))", "def write(self, file_, format=format_, header=header): #@ReservedAssignment\n str_list = []\n if header and '{' in header:\n str_list.append(header.format(len(self)))\n if format == self.format_LATEX or format == self.format_LATEX2:\n for event in self:\n event.latex_datetime = str(event.datetime)[:19]\n event.latex_datetime = (event.latex_datetime[:10] + ' ' +\n event.latex_datetime[11:])\n for event in self:\n try:\n str_list.append(format.format(** event))\n except KeyError:\n event['origin_id'] = 102\n event['author'] = 'unknown'\n event['flynn_region'] = 'unknown'\n str_list.append(format.format(** event))\n #str_list.append(template.safe_substitute(event))\n output = ''.join(str_list)\n if file_:\n with open(file_, 'w') as f:\n f.write(output)\n log.info('Write events to file_ ' + file_)\n else:\n return output", "def process_events_optimised(self, events_chuck_size, data_chunk_size):\n ev = self.events\n tz = self.args[\"timezone\"]\n indexer = ev.Time.str.contains(\"\\d\\d:\\d\\d\", regex=True, na=False)\n timed_events, several_days_events = ev[indexer], ev[~indexer]\n \n if not several_days_events.empty:\n several_days_events.to_csv(\"special_events.csv\", index=False)\n self.log(\"[+] Special events were saved into standalone CSV-file\")\n else:\n self.log(\"[!] Special events not found\")\n\n self.data = pd.read_csv(self.args[\"data\"],\n iterator=True, chunksize=data_chunk_size)\n\n self.log(\"[.] 
Events and data linking...\")\n\n start, end = 0, events_chuck_size\n relevant_dates = pd.DataFrame()\n count = 1\n while True:\n events_slice = timed_events.iloc[start:end]\n # TODO: remove in release version\n # events_slice.to_csv('slice_{}_{}.csv'.format(start, end),\n # index=False)\n\n if events_slice.empty:\n break\n\n first_date, first_time = events_slice[['Date', 'Time']].iloc[0]\n lower_bound = convert(first_date + \" \" + first_time, mode='date')\n lower_bound += timedelta(hours=tz, minutes=-1)\n\n last_date, last_time = events_slice[['Date', 'Time']].iloc[-1]\n upper_bound = convert(last_date + \" \" + last_time, mode='date')\n upper_bound += timedelta(hours=tz, minutes=5)\n \n self.log(\"[.] Events slice bounded by [%s; %s] is in processing...\",\n lower_bound, upper_bound)\n \n for chunk in self.data:\n bounds = (lower_bound, upper_bound)\n linked, rest = self._process_chuck(\n chunk, bounds, events_slice, relevant_dates)\n\n relevant_dates = rest\n\n if linked is None:\n if relevant_dates.empty:\n err = \"[!] Warning: events from %d to %d have no data\"\n self.log(err, start + 1, end)\n break\n else:\n continue\n\n if linked.empty:\n err = \"[!] Warning: linked dataframe is empty\"\n self.log(err, severe=True)\n continue\n\n self.log(\"[+] Events from %d to %d were linked. \"\n \"Dataframe size: %d\", start + 1, end, linked.shape[0])\n\n filename = 'linked_events_{}_to_{}.csv'.format(start + 1, end)\n filename = os.path.join(self.args[\"output_folder\"], filename)\n linked.to_csv(filename, index=False)\n linked = pd.DataFrame()\n break\n\n count += 1\n start = end\n end += events_chuck_size", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")", "def export_string_events(self):\n\n # ask user observations to analyze\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=True,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not 
plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n fn = QFileDialog(self).getSaveFileName(self, \"Export events as strings\", \"\",\n \"Events file (*.txt *.tsv);;All files (*)\")\n fileName = fn[0] if type(fn) is tuple else fn\n\n if fileName:\n\n response = dialog.MessageDialog(programName, \"Include observation(s) information?\", [YES, NO])\n\n try:\n with open(fileName, \"w\", encoding=\"utf-8\") as outFile:\n for obsId in selectedObservations:\n # observation id\n outFile.write(\"\\n# observation id: {}\\n\".format(obsId))\n # observation descrition\n outFile.write(\"# observation description: {}\\n\".format(\n self.pj[OBSERVATIONS][obsId][\"description\"].replace(os.linesep, \" \")))\n # media file name\n if self.pj[OBSERVATIONS][obsId][TYPE] in [MEDIA]:\n outFile.write(\"# Media file name: {0}{1}{1}\".format(\", \".join([os.path.basename(x)\n for x in\n self.pj[OBSERVATIONS]\n [obsId]\n [FILE][PLAYER1]]),\n os.linesep))\n if self.pj[OBSERVATIONS][obsId][TYPE] in [LIVE]:\n outFile.write(\"# Live observation{0}{0}\".format(os.linesep))\n\n # independent variables\n if \"independent_variables\" in self.pj[OBSERVATIONS][obsId]:\n outFile.write(\"# Independent variables\\n\")\n\n # rows.append([\"variable\", \"value\"])\n for variable in self.pj[OBSERVATIONS][obsId][\"independent_variables\"]:\n outFile.write(\"# {0}: {1}\\n\".format(variable,\n self.pj[OBSERVATIONS][obsId][\n \"independent_variables\"][variable]))\n outFile.write(\"\\n\")\n\n # selected subjects\n for subj in plot_parameters[\"selected subjects\"]:\n if subj:\n subj_str = \"\\n# {}:\\n\".format(subj)\n else:\n subj_str = \"\\n# No focal subject:\\n\"\n outFile.write(subj_str)\n\n out = self.create_behavioral_strings(obsId, subj, plot_parameters)\n if out:\n outFile.write(out + \"\\n\")\n\n except:\n logging.critical(sys.exc_info()[1])\n QMessageBox.critical(None, programName, str(sys.exc_info()[1]), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)", "def collect_events(namespace, output_dir, k8s_cli, mode=MODE_RESTRICTED):\n if mode != MODE_ALL:\n logger.warning('Cannot collect events in \"restricted\" mode - skipping events collection')\n return\n # events need -n parameter in kubectl\n if not namespace:\n logger.warning(\"Cannot collect events without namespace - \"\n \"skipping events collection\")\n return\n cmd = \"{} get events -n {} -o wide\".format(k8s_cli, namespace)\n collect_helper(output_dir, cmd=cmd,\n file_name=\"events\", resource_name=\"events\", namespace=namespace)\n\n # We get the events in YAML format as well since in YAML format they are a bit more informative.\n output = run_get_resource_yaml(namespace, \"Event\", k8s_cli)\n with open(os.path.join(output_dir, \"Event.yaml\"), \"w+\", encoding='UTF-8') as file_handle:\n file_handle.write(output)", "def generate(start_date, episodes, steps, output_file):\n header = ','.join(FIELDS) + '\\n'\n with open(output_file, 'w') as fd:\n fd.write(header)\n data_arrays = []\n first_dp = generate_data_point(start_date)\n data_arrays.append(first_dp)\n\n interval = int(1440/steps)\n cur_ts = increment_ts(start_date, interval)\n\n while step_diff(start_date, cur_ts, interval) < steps*episodes:\n dp_tmp = generate_data_point(cur_ts)\n data_arrays.append(dp_tmp)\n cur_ts = increment_ts(cur_ts, interval)\n\n for dp in data_arrays:\n row = ','.join(dp) + '\\n'\n fd.write(row)", "def output_files(self):\n o = []\n if 'unweighted' in self.event_types:\n o.append(self.name + \"_unweighted_events.lhe.gz\")\n if 
'weighted' in self.event_types:\n o.append(self.name + \"_events.lhe.gz\")\n return o", "def test_log_filenames_multiple_date_in_past(self):\n time_lower = datetime.datetime.now() - datetime.timedelta(seconds=7210)\n time_upper = time_lower + datetime.timedelta(seconds=20)\n (tracks, statuses) = self.app.log_filenames(\n [self.track_path('silence.mp3')]*5,\n timestamp='2 hours ago'\n )\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = self.get_track_by_id(track.pk)\n track_objs.append(track_obj)\n self.assertGreaterEqual(track_obj['timestamp'], time_lower)\n self.assertLess(track_obj['timestamp'], time_upper)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])", "def split_timeseries_and_save(self, window_length=45, zero_padding=True, tmp_dir=os.path.join(\"..\", \"..\", \"data\", \"interim\")):\n #TODO: split from task event file\n\n label_df = pd.DataFrame(columns=[\"label\", \"filename\"])\n out_file = os.path.join(tmp_dir, \"{}_{:03d}.npy\")\n\n # Split the timeseries\n for ii in range(len(self.valid_ts_filepaths)):\n ts = self.get_valid_timeseries([ii])[0]\n ts_duration = ts.shape[0]\n rem = ts_duration % window_length\n if rem == 0:\n n_splits = int(ts_duration / window_length)\n else:\n if zero_padding:\n n_splits = np.ceil(ts_duration / window_length)\n pad_size = int(n_splits*window_length - ts_duration)\n pad_widths = [(0, pad_size), (0, 0)]\n ts = np.pad(ts, pad_width=pad_widths)\n else:\n ts = ts[:(ts_duration-rem), :]\n n_splits = np.floor(ts_duration / window_length)\n split_ts = np.split(ts, n_splits)\n\n # tmp = [split_timeseries(t,n_timepoints=n_timepoints) for t in timeseries]\n # for ts in tmp:\n # split_ts = split_ts + ts\n\n # #keep track of the corresponding labels\n # n = int(timeseries[0].shape[0]/n_timepoints)\n # split_labels = []\n # for l in labels:\n # split_labels.append(np.repeat(l,n))\n\n # #add a label for each split\n # split_labels.append(list(range(n))*len(timeseries))\n # return split_ts, split_labels", "def main():\n\n f = open(eventsfile, 'r')\n lines = f.readlines()\n numcounter = 0\n counter = 0\n fullcounter = 0\n movielist = []\n movielists =[]\n timestamp_list = []\n filteredlist = [] \n startdate = \"2020-02-26\"\n \n for line in lines:\n TAPES = line.split('\\t')\n if int(TAPES[2]) == 1 or int(TAPES[2]) == 2:\n filteredlist.append(line)\n \n for newline in filteredlist:\n TAPES = newline.split('\\t')\n fullcounter +=1\n if int(TAPES[2]) == 2:\n timestamp_list.append(0)\n continue\n startdate2 = startdate.split(\"-\")[1] + \"/\" + startdate.split(\"-\")[2] + \"/\" + startdate.split(\"-\")[0]\n dateplustime = startdate2 + TAPES[0][0:len(TAPES[0])]\n thistime = faststrptime(dateplustime)\n unixtimestamp = datetime.datetime.timestamp(thistime)\n timestamp_list.append(int(unixtimestamp))\n\n i = 0 \n for element in timestamp_list:\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+(counter-i)]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n \n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue \n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+1]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n\n if 
len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue\n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n counter += 1\n numcounter += 1\n if element != 0:\n movielist.append(counter)\n i += 1\n \n if numcounter == 30:\n numcounter = 0\n movielists.append(movielist)\n movielist = []\n \n if i > (len(timestamp_list)-1):\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n \n numendlists = counter - fullcounter\n first = len(movielists)-numendlists\n last = len(movielists)\n del movielists[first:last]\n \n for x in movielists:\n for y in x:\n if int(filenumber) == y:\n movielist = x\n\n modename = str(movielist[0]) + \"to\" + str(movielist[len(movielist)-1])\n modefilename = \"mode_\" + modename + \".png\"\n try:\n imread(modefilename)\n except:\n imageMode(modename,movielist)\n\n e = loadmodeImage(modefilename)\n \n roimask = np.zeros((ydim,xdim))\n f = open(roisfile, 'r')\n lines = f.readlines()\n i = 1\n i2 = 0\n for line in lines:\n try:\n print(int(line.split(' ')[0]))\n except ValueError:\n i2 += 1\n continue\n minx = int(line.split(' ')[0])\n miny = int(line.split(' ')[1])\n maxx = int(line.split(' ')[2])\n maxy = int(line.split(' ')[3])\n roimask[int(miny):int(maxy),int(minx):int(maxx)] = i\n i += 1\n numberofwells = i-1\n numberofcols = int(i2/2)\n numberofrows = int(numberofwells/numberofcols)\n roimaskweights = convertMaskToWeights(roimask)\n\n cap = cv2.VideoCapture(videoStream)\n\n cap.set(3,roimask.shape[1])\n cap.set(4,roimask.shape[0])\n \n ret,frame = cap.read()\n storedImage = np.array(e * 255, dtype = np.uint8)\n storedMode = Blur(storedImage)\n storedFrame = grayBlur(frame)\n cenData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))*2 -2])\n pixData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))])\n i = 0;\n totalFrames = 0\n while(cap.isOpened()):\n ret,frame = cap.read()\n if ret == False:\n break\n currentFrame = grayBlur(frame)\n diffpix = diffImage(storedFrame,currentFrame,pixThreshold)\n diff = trackdiffImage(storedMode,currentFrame,pixThreshold)\n diff.dtype = np.uint8\n contours,hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n MIN_THRESH = 20.0\n MIN_THRESH_P = 20.0\n roi_dict = {}\n for r in range(0,numberofwells):\n roi_dict[r+1] = []\n for cs in range(0,len(contours)):\n if cv2.contourArea(contours[cs]) < 1.0:\n continue\n if cv2.arcLength(contours[cs],True) < 1.0:\n continue\n if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs],True) > MIN_THRESH_P:\n M = cv2.moments(contours[cs])\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n area = cv2.contourArea(contours[cs])\n perim = cv2.arcLength(contours[cs],True)\n if int(roimask[cY,cX]) == 0:\n continue\n if not roi_dict[int(roimask[cY,cX])]:\n roi_dict[int(roimask[cY,cX])].append((area*perim,cX,cY))\n else:\n if roi_dict[int(roimask[cY,cX])][0][0] < area*perim:\n roi_dict[int(roimask[cY,cX])][0] = (area*perim,cX,cY)\n\n pixcounts = []\n pixcounts = np.bincount(roimaskweights, weights=diffpix.ravel())\n pixData[i,:] = np.hstack((pixcounts))\n counts = []\n keys = roi_dict.keys()\n keys = sorted(keys)\n for k in keys:\n x = -10000\n y = -10000\n if roi_dict[k]:\n x = roi_dict[k][0][1]\n y = roi_dict[k][0][2]\n counts.append(x)\n counts.append(y)\n cv2.line(storedImage,(x,y),(x,y),(255,255,255),2)\n if i == 284:\n cv2.imwrite(videoStream + '_trackedimagewithlines_' + 
str(i) + \".png\", storedImage)\n cenData[i,:] = np.asarray(counts)\n totalFrames += 1\n storedFrame = currentFrame\n i += 1\n\n file = open(videoStream + \".centroid2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells*2):\n file.write(str(int(cenData[x,:][y])) + '\\n')\n pixData = pixData[:i,:]\n pixData = pixData[:,1:] \n file = open(videoStream + \".motion2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells):\n file.write(str(int(pixData[x,:][y])) + '\\n')\n\n cap.release()\n cv2.destroyAllWindows()\n \n try:\n image = Image.open('lastframe.png')\n except:\n makenumROIsimage()", "def sample_times():\n\tthe_times = []\n\tday = config.window_start_date\n\twhile day <= config.window_end_date:\n\t\t# times from start of window on day to end of window \n\t\ttime = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_start_time \n\t\t) )\n\t\tend_time = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_end_time \n\t\t) )\n\t\twhile time < end_time: # While still in the time window\n\t\t\tthe_times.append( time )\n\t\t\ttime += timedelta(minutes=1)\n\t\tday += timedelta(days=1)\n\treturn the_times", "def get_tagged_events():\n\n f = open('event_info.txt', 'w+')\n f.write('')\n f.close()\n\n for category in MEETUP_TAGS:\n events_added = 0\n days = 5\n while events_added < NUM_EVENTS:\n\n urls = set()\n\n today = datetime.date.today()\n tomorrow = today\n\n tomorrow = tomorrow + datetime.timedelta(days=days)\n\n # https://www.meetup.com/find/events/arts-culture/?allMeetups=false&radius=5&userFreeform=New+York%2C+NY&mcId=z10025&month=4&day=20&year=2018&eventFilter=all\n\n url = 'www.meetup.com/find/events/{}/?allMeetups=true&radius=20 \\\n &userFreeform=New+York%2C+NY&mcId=c10001&mcName=New+York%2C+NY \\\n &month={}&day={}&year={}'.format(category,\n tomorrow.month,\n tomorrow.day,\n tomorrow.year)\n\n r = requests.get('https://' + url)\n print('https://' + url)\n data = r.text\n soup = BeautifulSoup(data)\n\n for link in soup.find_all('a'):\n href = link.get('href')\n if '/events/' in href and '/find/' not in href:\n urls.add(href)\n\n if not urls:\n break\n\n for url in urls:\n os.system('python retrieval.py ' + url + ' ' + category)\n events_added += 1\n if events_added > NUM_EVENTS:\n break\n\n print('Finished ' + str(days))\n days += 1" ]
[ "0.6087541", "0.59404063", "0.58714825", "0.5870363", "0.58608985", "0.57862127", "0.57703376", "0.5700037", "0.5650298", "0.56280893", "0.5617199", "0.56035525", "0.5598488", "0.5553481", "0.55442035", "0.5541307", "0.5524684", "0.55119693", "0.5496372", "0.5496372", "0.54935104", "0.54563636", "0.5414372", "0.54072356", "0.53711987", "0.5353537", "0.5346704", "0.53401476", "0.53386515", "0.53324944" ]
0.6893672
0
given lists of peak fluxes for protons >10 MeV and >100 MeV, creates a boolean for each event indicating whether it is a subevent (i.e. it does not cross a threshold)
def gen_subevent_bools(p_10,p_100):
    #list of subevent booleans
    subevent_bools = []

    #extracting 10 MeV peak flux if it exists
    for j in range(len(p_10)):
        try:
            p10 = float(p_10[j])
        except ValueError:
            p10 = 'nan'

        #extracting 100 MeV peak flux if it exists
        try:
            p100 = float(p_100[j])
        except ValueError:
            p100 = 'nan'

        #checking if peak fluxes exist
        if str(p10) != 'nan' and str(p100) != 'nan':
            #if the peak fluxes both exist and >10 MeV is both below threshold,
            #subevent is true (only care about >10 bc of definition of subevent)
            if p10 < 10:
                subevent_bools.append(True)
            elif p10 > 10:
                subevent_bools.append(False)
        #if >10 MeV doesn't exist, subevent is true
        else:
            subevent_bools.append(True)

    return(subevent_bools)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spec_to_peaks(data, value, fp = iterate_structure(generate_binary_structure(rank = 2, connectivity=2), 10)):\n\n max_arr = maximum_filter(data, footprint = fp)\n return (data == max_arr) & (data > value)", "def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... no data?\")\n pass", "def contains_suitable_peak(self):\n if not self.contains_peak:\n return False\n\n idx = self.product_idx(product=self.final_species)\n if idx is not None and self[idx].energy < self[self.peak_idx].energy:\n logger.info('Products made and have a peak. Assuming suitable!')\n return True\n\n # Products aren't made by isomorphism but we may still have a suitable\n # peak..\n if any(self[-1].constraints[b.atom_indexes] == b.final_dist\n for b in self.bonds):\n logger.warning('Have a peak, products not made on isomorphism, but'\n ' at least one of the distances is final. Assuming '\n 'the peak is suitable ')\n return True\n\n return False", "def check_recon_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'recon_spec'):\n for i, spectrum in enumerate(self.recon_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.recon_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.recon_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.recon_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.recon_spec[spectrum]._add_to_overlapping_filters(filtername)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... 
no data?\")\n pass", "def test_peak_detection(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, _) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_sms = utilFunctions.peakDetection(mx, self.sm.t)\n for j, (p, p_s) in enumerate(itertools.zip_longest(ploc, ploc_sms)):\n with self.subTest(frame=i, peak_n=j):\n self.assertEqual(p, p_s)", "def check_star(peaks,data):\n star = 0\n for i in peaks:\n max = data[i]\n if i<3 or i+4>data.size:\n continue\n mean = data[i-3:i+4].mean()\n if (max-mean)<0.1*max:\n star += 1\n if star*2>peaks.size:\n return True\n else:\n return False", "def isPeakAssigned(peak, fully=True):\n\n n = 0\n for peakDim in peak.peakDims:\n if len(peakDim.peakDimContribs) > 0:\n n +=1\n \n if n == len(peak.peakDims):\n return True\n \n elif n > 0:\n if fully:\n return False\n else:\n return True\n \n else:\n return False", "def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... 
no data?\")\n pass", "def measure_peak(sig, use_inflection=True, return_allinfo=False):\n sig = np.array(sig)\n cr = locate_peak(sig)\n cr_crosszero = np.zeros_like(cr)\n cr_inflection = np.zeros_like(cr)\n\n # cross zero points\n cr_cr1 = -int_sign(sig[1:] * sig[:-1])\n cr_cr2 = -int_sign(sig[:-1] * sig[1:])\n cr_cr1[cr_cr1<0] = 0\n cr_cr2[cr_cr2<0] = 0\n cr_crosszero[1:] = cr_cr1\n cr_crosszero[:-1] += cr_cr2\n cr_crosszero = int_sign(cr_crosszero * sig) * 4\n\n # inflection points\n d2 = second_derivate(sig)\n d2p = locate_peak(d2)\n d2p[np.where( np.abs(d2p) != 1 )] = 0\n d2p[np.where( ((d2p==1) & (sig<0)) | ((d2p==-1) & (sig>0)) )] = 0\n cr_inflection[np.where(d2p==-1)] = 8\n cr_inflection[np.where(d2p==1)] = -8\n \n if use_inflection:\n cr_combine = cr + cr_inflection + cr_crosszero \n else:\n cr_combine = cr + cr_crosszero\n\n oned = False\n if len(np.shape(sig)) == 1:\n oned = True\n sig = sig[:, np.newaxis]\n \n peaks_list = []\n for i in range(np.shape(sig)[1]):\n pvs = np.where(np.abs(cr[:,i]) == 1)[0]\n lims = np.where(np.abs(cr_combine[:,i]) >= 2)[0]\n if len(pvs) == 0 :\n peaks_list.append([])\n continue\n if np.shape(lims)[0] == 0:\n lower_pos = pvs\n upper_pos = pvs\n else:\n lower_arr = (pvs > lims[:, np.newaxis])\n upper_arr = (pvs < lims[:, np.newaxis])\n lower_arr_r = np.flipud(lower_arr)\n upper_pos_i = np.argmax(upper_arr, axis=0)\n upper_pos = lims[(upper_pos_i, )]\n w_upper_none = np.where(upper_arr[-1,:] == False)\n upper_pos[w_upper_none] = pvs[w_upper_none]\n lower_pos_r_i = np.argmax(lower_arr_r, axis=0)\n lower_pos_i = len(lims) - 1 - lower_pos_r_i\n lower_pos = lims[(lower_pos_i, )]\n w_lower_none = np.where(lower_arr[0, :] == False)\n lower_pos[w_lower_none] = 0\n\n peaks = []\n for center, lower, upper in zip(pvs, lower_pos, upper_pos):\n depth = sig[center, i]\n sig_range = sig[lower:upper+1, i]\n sig_range[np.where(int_sign(sig_range) != int_sign(depth))] = 0.0\n volume = np.sum(sig_range)\n peaks.append(Peak(center=center, lower=lower, upper=upper, depth=depth, volume=volume))\n peaks_list.append(peaks)\n if oned:\n peaks_list = peaks_list[0]\n \n if return_allinfo:\n return peaks_list, cr, cr_crosszero, cr_inflection \n else:\n return peaks_list", "def event_overlap(labels, half, timestamp, window):\n\n for l, _ in labels:\n if l[0] == half:\n ceil = l[1] + window//2\n floor = l[1] - window//2\n if timestamp <= ceil and timestamp >= floor:\n return True\n return False", "def test_peak_refinement(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, px) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_i, pmag_i, pph_i = sample_dsp.peak_refine(ploc, mx, px) # pylint: disable=W0632\n ploc_i_sms, pmag_i_sms, pph_i_sms = utilFunctions.peakInterp(mx, px, ploc)\n with self.subTest(frame=i, value=\"location\"):\n self.assert_almost_equal_rmse(ploc_i, ploc_i_sms)\n with self.subTest(frame=i, value=\"magnitude\"):\n self.assert_almost_equal_rmse(pmag_i, pmag_i_sms)\n with self.subTest(frame=i, value=\"phase\"):\n self.assert_almost_equal_rmse(pph_i, pph_i_sms)", "def isPossibleSubsumer(self):\n if self.action_cnt > cons.theta_sub and self.error < cons.err_sub: #self.prediction < cons.err_sub: (why does it work?)\n return True\n return False", "def is_subset_of(self, uspec):\n \n if self.is_power_onoff() or uspec.is_power_onoff():\n return False\n \n if (uspec.is_bias() or not uspec.is_calib()) and self['speed'] != uspec['speed']:\n return False\n\n if int(self['x_bin']) % 
int(uspec['x_bin']) != 0 or int(self['y_bin']) % int(uspec['y_bin']) != 0:\n return False\n\n if self.number_windows() > 0:\n\n if not uspec.contains_window(self['x1_start'], self['y1_start'], self['x1_size'], self['y1_size'], self['x_bin'], self['y_bin']):\n return False\n\n if self.number_windows() > 1:\n\n if not uspec.contains_window(self['x2_start'], self['y2_start'], self['x2_size'], self['y2_size'], self['x_bin'], self['y_bin']):\n return False\n\n return True", "def whichPeaks(trace):\n peaks = []\n df = np.diff(trace)\n for t in range(len(df)-4):\n if df[t] > 0 and df[t+1] > 0:\n if df[t+2] < 0 and df[t+3] < 0: # Potential peak\n if trace[t+2] > np.mean(trace):\n peaks.append([t+2, trace[t+2]])\n return peaks", "def testPeakLikelihoodFlux(self):\n # make mp: a flux measurer\n measControl = measAlg.PeakLikelihoodFluxControl()\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(measControl).build(schema)\n \n # make and measure a series of exposures containing just one star, approximately centered\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(100, 101))\n kernelWidth = 35\n var = 100\n fwhm = 3.0\n sigma = fwhm/FwhmPerSigma\n convolutionControl = afwMath.ConvolutionControl()\n psf = measAlg.SingleGaussianPsf(kernelWidth, kernelWidth, sigma)\n psfKernel = psf.getLocalKernel()\n psfImage = psf.computeKernelImage()\n sumPsfSq = numpy.sum(psfImage.getArray()**2)\n psfSqArr = psfImage.getArray()**2\n for flux in (1000, 10000):\n ctrInd = afwGeom.Point2I(50, 51)\n ctrPos = afwGeom.Point2D(ctrInd)\n\n kernelBBox = psfImage.getBBox(afwImage.PARENT)\n kernelBBox.shift(afwGeom.Extent2I(ctrInd))\n\n # compute predicted flux error\n unshMImage = makeFakeImage(bbox, [ctrPos], [flux], fwhm, var)\n\n # filter image by PSF\n unshFiltMImage = afwImage.MaskedImageF(unshMImage.getBBox(afwImage.PARENT))\n afwMath.convolve(unshFiltMImage, unshMImage, psfKernel, convolutionControl)\n \n # compute predicted flux = value of image at peak / sum(PSF^2)\n # this is a sanity check of the algorithm, as much as anything\n predFlux = unshFiltMImage.getImage().get(ctrInd[0], ctrInd[1]) / sumPsfSq\n self.assertLess(abs(flux - predFlux), flux * 0.01)\n \n # compute predicted flux error based on filtered pixels\n # = sqrt(value of filtered variance at peak / sum(PSF^2)^2)\n predFluxErr = math.sqrt(unshFiltMImage.getVariance().get(ctrInd[0], ctrInd[1])) / sumPsfSq\n\n # compute predicted flux error based on unfiltered pixels\n # = sqrt(sum(unfiltered variance * PSF^2)) / sum(PSF^2)\n # and compare to that derived from filtered pixels;\n # again, this is a test of the algorithm\n varView = afwImage.ImageF(unshMImage.getVariance(), kernelBBox)\n varArr = varView.getArray()\n unfiltPredFluxErr = math.sqrt(numpy.sum(varArr*psfSqArr)) / sumPsfSq\n self.assertLess(abs(unfiltPredFluxErr - predFluxErr), predFluxErr * 0.01)\n \n for fracOffset in (afwGeom.Extent2D(0, 0), afwGeom.Extent2D(0.2, -0.3)):\n adjCenter = ctrPos + fracOffset\n if fracOffset == (0, 0):\n maskedImage = unshMImage\n filteredImage = unshFiltMImage\n else:\n maskedImage = makeFakeImage(bbox, [adjCenter], [flux], fwhm, var)\n # filter image by PSF\n filteredImage = afwImage.MaskedImageF(maskedImage.getBBox(afwImage.PARENT))\n afwMath.convolve(filteredImage, maskedImage, psfKernel, convolutionControl)\n\n exposure = afwImage.makeExposure(filteredImage)\n exposure.setPsf(psf)\n \n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, 
afwGeom.Point2D(*adjCenter))\n measFlux = source.get(measControl.name)\n measFluxErr = source.get(measControl.name + \".err\")\n self.assertFalse(source.get(measControl.name + \".flags\"))\n self.assertLess(abs(measFlux - flux), flux * 0.003)\n \n self.assertLess(abs(measFluxErr - predFluxErr), predFluxErr * 0.2)\n\n # try nearby points and verify that the flux is smaller;\n # this checks that the sub-pixel shift is performed in the correct direction\n for dx in (-0.2, 0, 0.2):\n for dy in (-0.2, 0, 0.2):\n if dx == dy == 0:\n continue\n offsetCtr = afwGeom.Point2D(adjCenter[0] + dx, adjCenter[1] + dy)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, offsetCtr)\n offsetFlux = source.get(measControl.name)\n self.assertLess(offsetFlux, measFlux)\n \n # source so near edge of image that PSF does not overlap exposure should result in failure\n \n for edgePos in (\n (1, 50),\n (50, 1),\n (50, bbox.getHeight() - 1),\n (bbox.getWidth() - 1, 50),\n ):\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*edgePos))\n self.assertTrue(source.get(measControl.name + \".flags\"))\n \n # no PSF should result in failure: flags set\n noPsfExposure = afwImage.ExposureF(filteredImage)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, noPsfExposure, afwGeom.Point2D(*adjCenter))\n self.assertTrue(source.get(measControl.name + \".flags\"))", "def _is_mlc_peak_in_window(\n self, window, height_threshold, edge_threshold, picket_peak_val\n ) -> bool:\n if self.orientation == Orientation.UP_DOWN:\n std = np.std(window, axis=1)\n else:\n std = np.std(window, axis=0)\n is_above_height_threshold = np.max(window) > height_threshold * picket_peak_val\n is_not_at_edge = max(std) < edge_threshold * np.median(std)\n return is_above_height_threshold and is_not_at_edge", "def check_peak_win(self):\n if self.peak_win[0] < 0.0:\n self.peak_win[0] = 0.0\n if self.logger is not None:\n self.logger.warning(('Start of peak window < 0 sec for cond: {}. ' +\n 'Setting to 0.').format(self.cond))\n if self.peak_win[1] > self.psc_dur:\n self.peak_win[1] = self.psc_dur\n if self.logger is not None:\n logger.warning(('End of peak window is longer than trial HRF ' +\n 'for cond: {}. Truncating.').format(self.cond))\n return", "def check_overlaps(self, filter_objects, verbose = False):\n if isinstance(FilterClass, type(filter_objects)):\n ## if only one filter is given\n filter_objects = [filter_objects, ]\n\n\n for i, filter_name in enumerate(filter_objects):\n if isinstance(FilterClass, type(filter_name)):\n filter_obj = filter_name\n elif isinstance(filter_objects, dict):\n filter_obj = filter_objects[filter_name]\n else:\n filter_obj = filter_objects[i]\n\n if verbose:print(i, filter_obj)\n\n if hasattr(filter_obj, \"_lower_edge\") and \\\n hasattr(filter_obj, \"_upper_edge\") and \\\n hasattr(self, \"data\"):\n blue_bool = filter_obj._lower_edge > self.min_wavelength\n red_bool = filter_obj._upper_edge < self.max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n if verbose: print(within)\n if within:\n self._add_to_overlapping_filters(filter_name, verbose=verbose)\n else:\n warnings.warn(\"SpectrumClass.check_overlaps - something went wrong... 
no overlaps or data?\")\n if self._n_overlapping_filters == 1:\n self._overlapping_filter_list = [self._overlapping_filter_list,] ## added to fix issue #27\n pass", "def spectral_maxpeaks(sign, FS):\n f, ff = plotfft(sign, FS)\n diff_sig = np.diff(ff)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd+1]<0 and diff_sig[nd]>0)])", "def __isScanContained(self, subms, scanlist, tbin):\n isContained = False \n \n mymsmd = msmdtool()\n mymsmd.open(subms)\n \n # Check if subms scans contain all selected scans\n hasScans = False\n s = mymsmd.scannumbers()\n subms_scans = map(str, s)\n if set(scanlist) <= set(subms_scans):\n hasScans = True\n \n if hasScans:\n t = mymsmd.timesforscans(s)\n mymsmd.close()\n t_range = t.max() - t.min()\n \n if t_range >= tbin: \n isContained = True\n \n return isContained", "def identify_flux(xyz: list) -> list:\n flagged_lines = [tup for tup in xyz if abs(tup[3]) > THRESHOLDS[0] and abs(tup[4]) > THRESHOLDS[1]]\n\n return flagged_lines", "def isSingleParticle(self):\r\n\r\n\t\tindex_of_maximum = np.argmax(self.scatData) #get the peak position\r\n\t\trun = 55. #define the run to use\r\n\t\t\r\n\t\tleft_rise = self.scatData[index_of_maximum]-self.scatData[index_of_maximum-int(run)] #get the rise from posn 10 to the peak\r\n\t\tleft_slope = left_rise/run\r\n\t\t\r\n\t\ttry:\r\n\t\t\tright_rise = self.scatData[index_of_maximum]-self.scatData[index_of_maximum+int(run)] #get the rise from a point the same distance away from teh peak as position 10, but on the other side\r\n\t\t\tright_slope = right_rise/run\r\n\t\texcept:\r\n\t\t\treturn\r\n\t\t\t\r\n\t\tpercent_diff = np.absolute((right_slope-left_slope)/(0.5*right_slope+0.5*left_slope))\r\n\t\tif percent_diff > 0.1:\r\n\t\t\tself.doublePeak = True", "def check_overlap(current, hit, overlap = 200):\n for prev in current:\n p_coords = prev[2:4]\n coords = hit[2:4]\n if get_overlap(coords, p_coords) >= overlap:\n return True\n return False", "def analyze_ev_wf_compact(self, event, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001):\n\n fig, ax = plt.subplots(nrows=3, ncols=3)\n peaks_temp = pd.DataFrame()\n\n for i in range(0, 9):\n if event < len(self.table_sipm_time):\n # Creo un np.array con gli indici della singola waveform..\n wf_idx = [event*self.points_per_wf, event *\n self.points_per_wf+self.points_per_wf]\n # ..i tempi di ciascun punto..\n wf_time = self.table_sipm_time['t'].iloc[event] + \\\n self.table_sipm_wf['TIME'][int(wf_idx[0]):int(wf_idx[1])]\n # ..e i valori del segnale di ciascun ppunto\n wf_ch = - \\\n self.table_sipm_wf['CH1'][int(wf_idx[0]):int(wf_idx[1])]\n\n # Per trovare la baseline, faccio un fit polinomiale di grado 0..\n # ..su un numero finito di punti iniziali, specificato dall'utente..\n # ..poi la salvo internamente alla classe\n self.baseline = np.polyfit(\n wf_time[0:n_bsl], wf_ch[0:n_bsl], 0)[0]\n # Voglio anche disegnarla sui plot, quindi mi creo una lista di x e di y..\n # ..nello spazio della waveform\n bsl_time = wf_time[0:n_bsl]\n bsl_ch = [self.baseline] * n_bsl\n\n # Per trovre i picchi, uso la funzione find_peaks di scipy.signal\n # I valori di height e prominence sono specificati dall'utente..\n # ..e scalti per selezionare tutti i picchi senza prendere rumore\n peaks, _ = sp.find_peaks(\n wf_ch, height=peak_height, prominence=peak_prominences)\n\n # Ora posso plottare tutto:\n plt.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))\n # la waveform..\n ax[int(i / 3)][i % 3].plot(wf_time,\n wf_ch, linestyle='-', linewidth=1)\n # ..la 
baseline..\n ax[int(i / 3)][i % 3].plot(bsl_time, bsl_ch, linestyle='-',\n linewidth=1, c='darkgreen')\n # ..e i picchi (se ci sono)\n if len(peaks) > 0:\n ax[int(i / 3)][i % 3].scatter(wf_time.iloc[peaks],\n wf_ch.iloc[peaks], c='darkred')\n\n # Set common labels\n fig.text(0.5, 0.01, 'Time (s)', ha='center', va='center')\n fig.text(0.02, 0.5, 'Amplitude (V)', ha='center', va='center', rotation='vertical')\n \n \n # plt.show()\n peaks_temp = pd.concat([peaks_temp, pd.DataFrame(\n {'t': wf_time.iloc[peaks], 'A': wf_ch.iloc[peaks]-self.baseline})], ignore_index=True)\n event += 1\n\n # ..e salvo il plot in una cartella a parte\n folder_name = 'plot'\n plot_name = '{0}/{1}_ev{2}.png'.format(\n folder_name, pic_name, event)\n fig.savefig(plot_name)\n plt.close(fig)\n\n # La funzione restituisce i valori di tempo e ampiezza (ottenuta come Ch1-baseline)..\n # ..agli indici dei massimi trovati da find_peaks\n return peaks_temp", "def is_within_phase_space(self, events: np.ndarray) -> Tuple[bool]:\n raise NotImplementedError", "def global_peak(apsp, sfield, peaks, n_size=5):\n\n peak_map = {p: None for p in peaks}\n corr_map = {p: None for p in peaks}\n\n for p in peaks:\n\n idx = (apsp[p, :]<=n_size)\n peak_map[p] = sfield[idx].mean()\n corr_map[p] = sfield[p]\n\n maxima = max(peak_map, key=peak_map.get)\n\n return [maxima, peak_map]", "def detect_min_max(arr):\n\n max_value = max(np.absolute(np.reshape(arr, -1)))\n peaks_max = []\n peaks_min = []\n x_max = []\n y_max = []\n z_max = []\n x_min = []\n y_min = []\n z_min = []\n\n for j1 in range(10, arr.shape[0]-10):\n for j2 in range(10, arr.shape[1]-10):\n for j3 in range(10, arr.shape[2]-10):\n if (np.absolute(arr[j1, j2, j3]) > 0.3*max_value):\n\n aaaa = [\n arr[j1, j2, j3 + 1], arr[j1, j2 + 1, j3],\n arr[j1 + 1, j2, j3], arr[j1, j2, j3 - 1],\n arr[j1, j2 - 1, j3], arr[j1 - 1, j2, j3],\n arr[j1 + 1, j2 + 1, j3 + 1],\n arr[j1 - 1, j2 - 1, j3 - 1],\n arr[j1 - 1, j2 + 1, j3 + 1], arr[j1, j2 + 1, j3 + 1],\n arr[j1, j2 - 1, j3 - 1], arr[j1, j2 - 1, j3 + 1],\n arr[j1, j2 + 1, j3 - 1], arr[j1 + 1, j2, j3 + 1],\n arr[j1 - 1, j2, j3 - 1], arr[j1 - 1, j2, j3 + 1],\n arr[j1 + 1, j2, j3 - 1], arr[j1 + 1, j2 + 1, j3],\n arr[j1 - 1, j2 - 1, j3], arr[j1 + 1, j2 - 1, j3],\n arr[j1 - 1, j2 + 1, j3], arr\n [j1 + 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 + 1, j3 - 1], arr\n [j1 - 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 - 1, j3 - 1], arr\n [j1 - 1, j2 + 1, j3 - 1]]\n bbbb = [\n arr[j1, j2, j3 + 9], arr[j1, j2 + 9, j3],\n arr[j1 + 9, j2, j3], arr[j1, j2, j3 - 9],\n arr[j1, j2 - 9, j3], arr[j1 - 9, j2, j3]]\n\n if ((arr[j1, j2, j3] > max(aaaa)) and (max(aaaa) > max(bbbb))):\n peaks_max = np.append(peaks_max, arr[j1, j2, j3])\n x_max = np.append(x_max, j1)\n y_max = np.append(y_max, j2)\n z_max = np.append(z_max, j3)\n\n if ((arr[j1, j2, j3] < min(aaaa)) and (min(aaaa) < min(bbbb))):\n peaks_min = np.append(peaks_min, arr[j1, j2, j3])\n x_min = np.append(x_min, j1)\n y_min = np.append(y_min, j2)\n z_min = np.append(z_min, j3)\n\n return peaks_min, np.vstack(\n (x_min, y_min, z_min)), peaks_max, np.vstack(\n (x_max, y_max, z_max))", "def is_coelution(spectrum_in, ms2_precursor, da_after_precursor = 1.3, delta_mz = 0.03, percentage_intensity_not_coelution = 10, percentage_accetable_coelution = False):\n\n upper_mz = ms2_precursor + da_after_precursor\n\n precursor_mz_upper = ms2_precursor + delta_mz\n precursor_mz_lower = ms2_precursor - delta_mz\n\n # Ion +1 to ignore in the spectrum\n ignore_peak_mz = ms2_precursor + 1\n ignore_upper_mz = ignore_peak_mz + delta_mz\n 
ignore_lower_mz = ignore_peak_mz - delta_mz\n\n peaks = spectrum_in.get_peaks()\n reverse_peaks = reversed(peaks)\n\n position = 0\n for peak in reverse_peaks:\n mz = peak.get_mz()\n\n if mz <= precursor_mz_upper and mz >= precursor_mz_lower:\n precursor_mz = mz\n precursor_intensity = peak.get_intensity()\n precursor_peak = peak\n # print(\"Found precursor in MS1: Mz:\", precursor_mz, \"Intensity:\", precursor_intensity)\n break\n position += 1\n\n # print(spectrum_in.get_size())\n position = spectrum_in.get_size() - position\n\n # Intensity of peak to consider as coelution calculation\n # Below this threshold, nothing is considered coelution\n not_coelution_threshold = precursor_intensity * percentage_intensity_not_coelution / 100\n # Below this threshold, coelution is considered acceptable\n if percentage_accetable_coelution != False:\n acceptable_coelution_threshold = precursor_intensity * percentage_accetable_coelution / 100\n\n acceptable_coelution = list()\n proper_coelution = list()\n coelution = [proper_coelution, acceptable_coelution, precursor_peak]\n\n for peak in peaks[position:]:\n mz = peak.get_mz()\n\n if mz < upper_mz:\n \n # We search for peaks different to the ion +1\n if mz > ignore_upper_mz or mz < ignore_lower_mz:\n intensity = peak.get_intensity()\n \n if intensity > not_coelution_threshold:\n \n if percentage_accetable_coelution == False:\n coelution[0].append(peak)\n\n else:\n \n if intensity > acceptable_coelution_threshold:\n coelution[0].append(peak)\n else:\n coelution[1].append(peak) \n\n else:\n break\n\n \"\"\"\n print(\"Coelution_list\")\n print(\"Proper_coelution:\", end=\"\")\n for peak in coelution[0]:\n print(\"MZ:\", peak.get_mz(), \"Intensity\", peak.get_intensity(), end=\",\")\n print(\"\\nAcceptable_coelution:\", end=\"\")\n for peak in coelution[1]:\n print(\"MZ:\", peak.get_mz(), \"Intensity\", peak.get_intensity(), end=\",\")\n print(\"\")\n \"\"\"\n\n return(coelution)", "def peak_to_peak_variability(magnitudes, errors):\n sums = magnitudes + errors\n differences = magnitudes - errors\n\n min_sum = np.min(sums)\n max_diff = np.max(differences)\n\n ptpv = (max_diff - min_sum) / (max_diff + min_sum)\n return ptpv", "def haveEncountered(self,mono1,mono2,eps): \n return self.distance(mono1,mono2) < eps" ]
[ "0.6195646", "0.607271", "0.5963362", "0.5910339", "0.5904927", "0.5832219", "0.57945627", "0.5773653", "0.563543", "0.5589451", "0.5549814", "0.553361", "0.55285126", "0.5508843", "0.5495379", "0.54413253", "0.54157656", "0.5363649", "0.532954", "0.5318515", "0.53176546", "0.5313904", "0.5270746", "0.5267198", "0.5257355", "0.524623", "0.5244575", "0.52402085", "0.5220869", "0.52094924" ]
0.82217896
0
takes in lists of start times and end times that define the time windows, plus a list of booleans marking whether each event is a subevent, and uses those lists to run the functions that extract data from the GOES database. All lists must have the same length and their indices must correspond (i.e. start_time[j] has the end time end_time[j] and the subevent boolean subevent_bools[j]). Not to be confused with multi_events, which generates output for multiple events within one time window.
def many_events(start_time,end_time,subevent_bools):
    #running through for each event
    for j in range(len(start_time)):
        #start, end, and subevent bool for this event
        st = start_time[j]
        et = end_time[j]
        subevent = bool(subevent_bools[j])

        #checking if start time is actually available
        if str(st) != 'nan':
            try:
                st = parse(st)
                yes_st = True
            except ValueError:
                yes_st = False
        else:
            yes_st = False

        #checking if end time is actually available
        if str(et) != 'nan':
            try:
                et = parse(et)
                yes_et = True
            except ValueError:
                yes_et = False
        else:
            yes_et = False

        #if both start and end times are available, running the code
        if yes_st and yes_et:
            #event must be after Nov. 2010 because currently no capability for
            #instruments in use before then - change this if you have that
            #capability
            if st > datetime(2010,9,1):
                try:
                    print('got start and end times! running database extraction')
                    database_extraction(st,et,instrument_chosen,subevent)
                except:
                    continue
            else:
                print('cannot run for events before November 2010 because do not have '
                      'access to instruments before then')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_event(st,et,instrument_chosen,subevent):\r\n print('checking for multiple events within given time window')\r\n \r\n #creating file for time window with first events for all thresholds\r\n out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)\r\n\r\n #creating files for all second events for all thresholds\r\n new_files = two_in_one(out_name,et,subevent)\r\n \r\n #creating files for any third events for all thresholds that had a second event\r\n for file in new_files:\r\n two_in_one(file,et,subevent) \r\n \r\n return", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def _split_events_per_trial(t_idx, codes: np.ndarray, times: np.ndarray, params: dict) -> dict:\n codes, times = _check_input(codes, times)\n trial_to_condition_func = eval(params['trial_to_condition_func'], {}, {})\n cnd_number = np.int16(trial_to_condition_func(codes, t_idx))\n assert np.isscalar(cnd_number)\n\n start_times, end_times = _get_times_subtrials(codes, times, params['subtrials'])\n trial_start_time, trial_end_time = _get_times_trial(codes, times, start_times, end_times, params)\n\n # 
this is due to ill design of trial window and subtrial window due to human error.\n assert np.all(trial_start_time <= start_times)\n assert np.all(trial_end_time >= end_times)\n\n event_code_idx = np.logical_and(times >= trial_start_time, times <= trial_end_time)\n\n return {\n 'start_times': start_times, # absolute\n 'end_times': end_times, # absolute\n 'trial_start_time': trial_start_time,\n 'trial_end_time': trial_end_time,\n 'event_times': times[event_code_idx],\n 'event_codes': codes[event_code_idx],\n 'condition_number': cnd_number\n }", "def compile_chrono_events(\n test_scenario: SimulationTestScenario, setup_events: List[SimulationEvent]\n) -> Tuple[List[SimulationEvent], Tuple[str, datetime]]:\n previous_subtest_last_event_ts = datetime.min.replace(tzinfo=timezone.utc)\n previous_subtest_last_assertion_ts = datetime.min.replace(tzinfo=timezone.utc)\n current_subtest_first_event_ts = datetime.max.replace(tzinfo=timezone.utc)\n current_subtest_first_assertion_ts = datetime.max.replace(tzinfo=timezone.utc)\n assertion_ts = []\n events = []\n derived_param_outputs = []\n\n for sub_test in test_scenario.sub_tests:\n if sub_test.events:\n current_subtest_first_event_ts = sub_test.events[0].time\n\n if current_subtest_first_event_ts < previous_subtest_last_event_ts:\n log.warning(\n f'Subtest \"{sub_test.description}\" contains '\n \"event timestamp before the previous one.\"\n )\n\n previous_subtest_last_event_ts = sub_test.events[-1].time\n events.extend(sub_test.events)\n\n if sub_test.expected_balances_at_ts:\n assertion_ts.extend(sub_test.expected_balances_at_ts.keys())\n\n if sub_test.expected_posting_rejections:\n assertion_ts.extend(\n expected_rejection.timestamp\n for expected_rejection in sub_test.expected_posting_rejections\n )\n if sub_test.expected_schedules:\n assertion_ts.extend(\n runtime\n for expected_schedule in sub_test.expected_schedules\n for runtime in expected_schedule.run_times\n )\n if sub_test.expected_workflows:\n assertion_ts.extend(\n runtime\n for expected_workflow in sub_test.expected_workflows\n for runtime in expected_workflow.run_times\n )\n\n if sub_test.expected_derived_parameters:\n for expected_derived_param in sub_test.expected_derived_parameters:\n assertion_ts.append(expected_derived_param.timestamp)\n derived_param_outputs.append(\n (\n expected_derived_param.account_id,\n expected_derived_param.timestamp,\n )\n )\n\n if assertion_ts:\n sorted_assertion_ts = sorted(assertion_ts)\n current_subtest_first_assertion_ts = sorted_assertion_ts[0]\n\n if current_subtest_first_assertion_ts < previous_subtest_last_assertion_ts:\n log.warning(\n f'Subtest \"{sub_test.description}\" contains '\n \"assertion timestamp before the previous one.\"\n )\n\n previous_subtest_last_assertion_ts = sorted_assertion_ts[-1]\n assertion_ts.clear()\n\n if (\n previous_subtest_last_event_ts > test_scenario.end\n or previous_subtest_last_assertion_ts > test_scenario.end\n ):\n log.warning(\"last assertion or event happens outside of simulation window\")\n\n if setup_events and events and setup_events[-1].time > events[0].time:\n raise ValueError(\n f\"First custom event at {events[0].time}, it needs to be after \"\n f\"{setup_events[-1].time}, when account and plan setup events are complete\"\n )\n\n return setup_events + events, derived_param_outputs", "def main():\n\n f = open(eventsfile, 'r')\n lines = f.readlines()\n numcounter = 0\n counter = 0\n fullcounter = 0\n movielist = []\n movielists =[]\n timestamp_list = []\n filteredlist = [] \n startdate = 
\"2020-02-26\"\n \n for line in lines:\n TAPES = line.split('\\t')\n if int(TAPES[2]) == 1 or int(TAPES[2]) == 2:\n filteredlist.append(line)\n \n for newline in filteredlist:\n TAPES = newline.split('\\t')\n fullcounter +=1\n if int(TAPES[2]) == 2:\n timestamp_list.append(0)\n continue\n startdate2 = startdate.split(\"-\")[1] + \"/\" + startdate.split(\"-\")[2] + \"/\" + startdate.split(\"-\")[0]\n dateplustime = startdate2 + TAPES[0][0:len(TAPES[0])]\n thistime = faststrptime(dateplustime)\n unixtimestamp = datetime.datetime.timestamp(thistime)\n timestamp_list.append(int(unixtimestamp))\n\n i = 0 \n for element in timestamp_list:\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+(counter-i)]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n \n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue \n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+1]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n\n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue\n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n counter += 1\n numcounter += 1\n if element != 0:\n movielist.append(counter)\n i += 1\n \n if numcounter == 30:\n numcounter = 0\n movielists.append(movielist)\n movielist = []\n \n if i > (len(timestamp_list)-1):\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n \n numendlists = counter - fullcounter\n first = len(movielists)-numendlists\n last = len(movielists)\n del movielists[first:last]\n \n for x in movielists:\n for y in x:\n if int(filenumber) == y:\n movielist = x\n\n modename = str(movielist[0]) + \"to\" + str(movielist[len(movielist)-1])\n modefilename = \"mode_\" + modename + \".png\"\n try:\n imread(modefilename)\n except:\n imageMode(modename,movielist)\n\n e = loadmodeImage(modefilename)\n \n roimask = np.zeros((ydim,xdim))\n f = open(roisfile, 'r')\n lines = f.readlines()\n i = 1\n i2 = 0\n for line in lines:\n try:\n print(int(line.split(' ')[0]))\n except ValueError:\n i2 += 1\n continue\n minx = int(line.split(' ')[0])\n miny = int(line.split(' ')[1])\n maxx = int(line.split(' ')[2])\n maxy = int(line.split(' ')[3])\n roimask[int(miny):int(maxy),int(minx):int(maxx)] = i\n i += 1\n numberofwells = i-1\n numberofcols = int(i2/2)\n numberofrows = int(numberofwells/numberofcols)\n roimaskweights = convertMaskToWeights(roimask)\n\n cap = cv2.VideoCapture(videoStream)\n\n cap.set(3,roimask.shape[1])\n cap.set(4,roimask.shape[0])\n \n ret,frame = cap.read()\n storedImage = np.array(e * 255, dtype = np.uint8)\n storedMode = Blur(storedImage)\n storedFrame = grayBlur(frame)\n cenData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))*2 -2])\n pixData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))])\n i = 0;\n totalFrames = 0\n while(cap.isOpened()):\n ret,frame = cap.read()\n if ret == False:\n break\n currentFrame = grayBlur(frame)\n diffpix = diffImage(storedFrame,currentFrame,pixThreshold)\n diff = trackdiffImage(storedMode,currentFrame,pixThreshold)\n diff.dtype = np.uint8\n contours,hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n MIN_THRESH = 20.0\n MIN_THRESH_P = 20.0\n roi_dict = {}\n for 
r in range(0,numberofwells):\n roi_dict[r+1] = []\n for cs in range(0,len(contours)):\n if cv2.contourArea(contours[cs]) < 1.0:\n continue\n if cv2.arcLength(contours[cs],True) < 1.0:\n continue\n if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs],True) > MIN_THRESH_P:\n M = cv2.moments(contours[cs])\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n area = cv2.contourArea(contours[cs])\n perim = cv2.arcLength(contours[cs],True)\n if int(roimask[cY,cX]) == 0:\n continue\n if not roi_dict[int(roimask[cY,cX])]:\n roi_dict[int(roimask[cY,cX])].append((area*perim,cX,cY))\n else:\n if roi_dict[int(roimask[cY,cX])][0][0] < area*perim:\n roi_dict[int(roimask[cY,cX])][0] = (area*perim,cX,cY)\n\n pixcounts = []\n pixcounts = np.bincount(roimaskweights, weights=diffpix.ravel())\n pixData[i,:] = np.hstack((pixcounts))\n counts = []\n keys = roi_dict.keys()\n keys = sorted(keys)\n for k in keys:\n x = -10000\n y = -10000\n if roi_dict[k]:\n x = roi_dict[k][0][1]\n y = roi_dict[k][0][2]\n counts.append(x)\n counts.append(y)\n cv2.line(storedImage,(x,y),(x,y),(255,255,255),2)\n if i == 284:\n cv2.imwrite(videoStream + '_trackedimagewithlines_' + str(i) + \".png\", storedImage)\n cenData[i,:] = np.asarray(counts)\n totalFrames += 1\n storedFrame = currentFrame\n i += 1\n\n file = open(videoStream + \".centroid2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells*2):\n file.write(str(int(cenData[x,:][y])) + '\\n')\n pixData = pixData[:i,:]\n pixData = pixData[:,1:] \n file = open(videoStream + \".motion2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells):\n file.write(str(int(pixData[x,:][y])) + '\\n')\n\n cap.release()\n cv2.destroyAllWindows()\n \n try:\n image = Image.open('lastframe.png')\n except:\n makenumROIsimage()", "def events(time):\n\n event_list = eventlist()\n idx = np.all(time == event_list[:, 0:len(time)], axis=1)\n return event_list[idx,:]", "def _locate_events(self, start_time, end_time):\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n # Adjust pre- and post-pad to take into account cosine taper\n t_length = self.pre_pad + 4*self.marginal_window + self.post_pad\n self.pre_pad += np.ceil(t_length * 0.06)\n self.post_pad += np.ceil(t_length * 0.06)\n\n trig_events = self.output.read_triggered_events(start_time, end_time)\n n_evts = len(trig_events)\n\n for i, trig_event in trig_events.iterrows():\n event_uid = trig_event[\"EventID\"]\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tEVENT - {} of {} - {}\\n\"\n msg += \"=\" * 120 + \"\\n\\n\"\n msg += \"\\tDetermining event location...\\n\"\n msg = msg.format(i + 1, n_evts, event_uid)\n self.output.log(msg, self.log)\n\n w_beg = trig_event[\"CoaTime\"] - 2*self.marginal_window \\\n - self.pre_pad\n w_end = trig_event[\"CoaTime\"] + 2*self.marginal_window \\\n + self.post_pad\n\n timer = util.Stopwatch()\n self.output.log(\"\\tReading waveform data...\", self.log)\n try:\n self._read_event_waveform_data(trig_event, w_beg, w_end)\n except util.ArchiveEmptyException:\n msg = \"\\tNo files found in archive for this time period\"\n self.output.log(msg, self.log)\n continue\n except util.DataGapException:\n msg = \"\\tAll available data for this time period contains gaps\"\n msg += \"\\n\\tOR data not available at start/end of time period\\n\"\n self.output.log(msg, self.log)\n continue\n 
self.output.log(timer(), self.log)\n\n timer = util.Stopwatch()\n self.output.log(\"\\tComputing 4D coalescence grid...\", self.log)\n\n daten, max_coa, max_coa_norm, loc, map_4d = self._compute(\n w_beg, w_end,\n self.data.signal,\n self.data.availability)\n coord = self.lut.xyz2coord(np.array(loc).astype(int))\n event_coa_data = pd.DataFrame(np.array((daten, max_coa,\n coord[:, 0],\n coord[:, 1],\n coord[:, 2])).transpose(),\n columns=[\"DT\", \"COA\", \"X\", \"Y\", \"Z\"])\n event_coa_data[\"DT\"] = event_coa_data[\"DT\"].apply(UTCDateTime)\n event_coa_data_dtmax = \\\n event_coa_data[\"DT\"].iloc[event_coa_data[\"COA\"].astype(\"float\").idxmax()]\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n\n if (event_coa_data_dtmax >= trig_event[\"CoaTime\"]\n - self.marginal_window) \\\n and (event_coa_data_dtmax <= trig_event[\"CoaTime\"]\n + self.marginal_window):\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n else:\n msg = \"\\n\\tEvent {} is outside marginal window.\\n\"\n msg += \"\\tDefine more realistic error - the marginal window\"\n msg += \" should be an estimate of the origin time uncertainty,\"\n msg += \"\\n\\tdetermined by the expected spatial uncertainty and\"\n msg += \"the seismic velocity in the region of the earthquake\\n\"\n msg += \"\\n\" + \"=\" * 120 + \"\\n\"\n msg = msg.format(event_uid)\n self.output.log(msg, self.log)\n continue\n\n event_mw_data = event_coa_data\n event_mw_data = event_mw_data[(event_mw_data[\"DT\"] >= w_beg_mw) &\n (event_mw_data[\"DT\"] <= w_end_mw)]\n map_4d = map_4d[:, :, :,\n event_mw_data.index[0]:event_mw_data.index[-1]]\n event_mw_data = event_mw_data.reset_index(drop=True)\n event_max_coa = event_mw_data.iloc[event_mw_data[\"COA\"].astype(\"float\").idxmax()]\n\n # Update event UID; make out_str\n event_uid = str(event_max_coa.values[0])\n for char_ in [\"-\", \":\", \".\", \" \", \"Z\", \"T\"]:\n event_uid = event_uid.replace(char_, \"\")\n out_str = \"{}_{}\".format(self.output.name, event_uid)\n self.output.log(timer(), self.log)\n\n # Make phase picks\n timer = util.Stopwatch()\n self.output.log(\"\\tMaking phase picks...\", self.log)\n phase_picks = self._phase_picker(event_max_coa)\n self.output.write_picks(phase_picks[\"Pick\"], event_uid)\n self.output.log(timer(), self.log)\n\n # Determining earthquake location error\n timer = util.Stopwatch()\n self.output.log(\"\\tDetermining earthquake location and uncertainty...\", self.log)\n loc_spline, loc_gau, loc_gau_err, loc_cov, \\\n loc_cov_err = self._calculate_location(map_4d)\n self.output.log(timer(), self.log)\n\n # Make event dictionary with all final event location data\n event = pd.DataFrame([[event_max_coa.values[0],\n event_max_coa.values[1],\n loc_spline[0], loc_spline[1], loc_spline[2],\n loc_gau[0], loc_gau[1], loc_gau[2],\n loc_gau_err[0], loc_gau_err[1],\n loc_gau_err[2],\n loc_cov[0], loc_cov[1], loc_cov[2],\n loc_cov_err[0], loc_cov_err[1],\n loc_cov_err[2]]],\n columns=self.EVENT_FILE_COLS)\n\n self.output.write_event(event, event_uid)\n\n self._optional_locate_outputs(event_mw_data, event, out_str,\n phase_picks, event_uid, map_4d)\n\n self.output.log(\"=\" * 120 + \"\\n\", self.log)\n\n del map_4d, event_coa_data, event_mw_data, event_max_coa, \\\n phase_picks\n self.coa_map = None", "def parseEvents(data, times, eventTimes):\n striped = []\n remaining = range(len(times))\n stripedEvents = []\n\n for t in eventTimes:\n tmpEvent = 
t.date()\n for j in range(len(times)):\n tmpTime = times[j].date()\n\n if tmpEvent == tmpTime:\n striped.append(tmpEvent)\n stripedEvents.append(data[j, :])\n remaining.remove(j)\n break\n\n stripedEvents = np.array(stripedEvents)\n remainingTimes = np.array(remaining)\n stripedTimes = np.array(striped)\n remainingEvents = data[remaining]\n\n return stripedTimes, remainingTimes, stripedEvents, remainingEvents", "def get_events(raw,event_id,offset=0):\r\n # extract time stamps from annotations\r\n timestamps = np.round(raw._annotations.onset*raw.info['sfreq']+offset).astype(int)\r\n assert np.all(timestamps < raw.n_times), \"offset overflow total data length\"\r\n\r\n # get labels\r\n labels = raw._annotations.description\r\n labels = np.vectorize(event_id.__getitem__)(labels) #convert labels into int\r\n \r\n # build event matrix\r\n events = np.concatenate((timestamps.reshape(-1,1),\r\n np.zeros(timestamps.shape).astype(int).reshape(-1,1),\r\n labels.reshape(-1,1)),axis=1)\r\n \r\n # the difference between two full stimuli windows should be 7 sec. \r\n events = events[events[:, 2] < 100, :] #keep only events and remove annotations\r\n\r\n assert np.unique(events[:, 2]).size ==1 #TODO: make it works for different events\r\n \r\n stimt = np.append(events[:, 0], raw.n_times) #stim interval\r\n epochs2keep = np.where(np.diff(stimt) == raw.info['sfreq']*7)[0] #TODO: keep only epoch of 7sec (make it an argument)\r\n epochs2drop = np.where(np.diff(stimt) != raw.info['sfreq']*7)[0] #drop the rest\r\n\r\n return events, epochs2keep, epochs2drop", "def event_overlap(labels, half, timestamp, window):\n\n for l, _ in labels:\n if l[0] == half:\n ceil = l[1] + window//2\n floor = l[1] - window//2\n if timestamp <= ceil and timestamp >= floor:\n return True\n return False", "def test_overlap():\n events = [['Event', '2017-11-21T10:00:00-08:00', '2017-11-21T11:00:00-08:00'],\n ['Event', '2017-11-21T10:30:00-08:00', '2017-11-21T11:20:00-08:00']]\n freetimes, _ = free(events, 9, 0, 17, 0, day_range, 30)\n fmt_freetime = output_format(freetimes)\n for i in fmt_freetime:\n print(i)\n assert fmt_freetime == ['Tue, Nov 21, 9:00 am to Tue, Nov 21, 10:00 am.',\n 'Tue, Nov 21, 11:20 am to Tue, Nov 21, 5:00 pm.',\n 'Wed, Nov 22, 9:00 am to Wed, Nov 22, 5:00 pm.',\n 'Thu, Nov 23, 9:00 am to Thu, Nov 23, 5:00 pm.',\n 'Fri, Nov 24, 9:00 am to Fri, Nov 24, 5:00 pm.',\n 'Sat, Nov 25, 9:00 am to Sat, Nov 25, 5:00 pm.',\n 'Sun, Nov 26, 9:00 am to Sun, Nov 26, 5:00 pm.',\n 'Mon, Nov 27, 9:00 am to Mon, Nov 27, 5:00 pm.']", "def UTC_times(times, \n trace, \n diff_thres = 30.0):\n # set times values to seconds\n \n #AUTOMATE THIS SECTION!\n #CHECK THAT THIS IS CORRECT\n times = times / trace.stats.sampling_rate\n #remove unwanted parts of times numpy array \n times = times[:,0]\n \n #remove the first instance of time because it is \n #somehow always of the wrong format!\n #times = np.delete(times, 0) \n \n event_times = []\n event = [times[0]]\n \n start_time = trace.stats.starttime\n \n #for item in times:\n # print start_time + item\n\n for i in range(1, len(times)):\n \n # check if two events in times array have a difference < diff_thres, \n #if not, run average of those times, if so append that events to a \n #new events_times list\n \n #time_diff = times[i + 1] - times[i]\n \n time_diff = times[i] - times[i-1]\n\n #save info until events are far enough apart! \n if time_diff < diff_thres:\n\n event.append(times[i])\n \n \n #raise conditional for if events are far enough apart! 
\n else:\n\n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n\n event_times.append([event_start, event_end])\n \n event = [] \n \n event.append(times[i])\n\n #if event still contains something for any reason, add it to event times\n if len(event) > 0: \n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n event_times.append([event_start, event_end])\n event = [] \n \n\n\n #if len(event_times) == 0 and len(event) > 0 or time_diff > diff_thres and len(event) > 0:\n \n #event_times.append(sum(event) / len(event))\n \n # event_start = event[0] - 2 #minus 5 seconds\n # event_end = event[-1] + 2 #add 5 seconds\n \n # event_times.append([event_start, event_end])\n \n # event = []\n \n #event_times.append(times[i])\n \n # else:\n # event.append(times[i])\n \n\n UTC_events = []\n\n #earthquake length threshold is 10 seconds and above!\n eq_len = 0#5.0\n\n for i in event_times:\n estart = start_time + i[0]\n eend = start_time + i[1]\n \n if eend - estart > eq_len:\n UTC_events.append([estart, eend])\n \n #UTC_events = np.unique(np.asarray(UTC_events))\n\n \n return UTC_events", "def overlap_events(event1, event2, place1, place2, log_places):\n place1.start_event(event1)\n log_conflicts(event1.start_time, log_places)\n place2.start_event(event2)\n log_conflicts(event2.start_time, log_places)\n place1.end_event(event1)\n log_conflicts(event1.end_time, log_places)\n place2.end_event(event2)\n log_conflicts(event2.end_time, log_places)", "def time(self,orid_time,window=5):\n #{{{ Function to get possible matches of events for some epoch time.\n\n results = {}\n\n #\n # If running in simple mode we don't have access to the tables we need\n #\n if config.simple:\n return results\n\n orid_time = _isNumber(orid_time)\n\n if not orid_time:\n print \"Not a valid number in function call: %s\" % orid_time\n return\n \n start = float(orid_time)-float(window)\n end = float(orid_time)+float(window)\n\n dbname = self.dbcentral(orid_time)\n\n if not db:\n print \"No match for orid_time in dbcentral object: (%s,%s)\" % (orid_time,self.dbcentral(orid_time))\n return\n\n try: \n db = datascope.dbopen( dbname , 'r' )\n db.lookup( table='origin')\n db.query(datascope.dbTABLE_PRESENT) \n except Exception,e:\n print \"Exception on Events() time(%s): Error on db pointer %s [%s]\" % (orid_time,db,e)\n return\n\n db.subset( 'time >= %f' % start )\n db.subset( 'time <= %f' % end )\n\n try:\n db = datascope.dbopen( dbname , 'r' )\n db.lookup( table='wfdisc' )\n records = db.query(datascope.dbRECORD_COUNT)\n\n except:\n records = 0\n\n if records:\n\n for i in range(records):\n\n db.record = i\n\n (orid,time) = db.getv('orid','time')\n\n orid = _isNumber(orid)\n time = _isNumber(time)\n results[orid] = time\n\n return results", "def build_timings(events):\n\n stack = []\n timings = []\n for e in events:\n if e.type == 'START':\n stack.append(e)\n elif e.type == 'FINISH':\n prev = stack.pop()\n if prev.step != e.step:\n raise Exception(\n \"\"\"I have a FINISH event for the START event of a\n different step\"\"\")\n yield Proc(e.step, prev.timestamp, e.timestamp, e.job)", "def update_events_start_stop(self):\n\n # stateEventsList = [self.pj[ETHOGRAM][x][BEHAVIOR_CODE] for x in self.pj[ETHOGRAM] if\n # STATE in self.pj[ETHOGRAM][x][TYPE].upper()]\n\n for row in range(0, self.twEvents.rowCount()):\n\n t = self.twEvents.item(row, tw_obs_fields[\"Tempo\"]).text()\n\n if \":\" in t:\n time = time2seconds(t)\n else:\n time = Decimal(t)\n\n subject = 
self.twEvents.item(row, tw_obs_fields[\"Sujeito\"]).text()\n key = self.twEvents.item(row, tw_obs_fields[\"Chave\"]).text()\n modifier = self.twEvents.item(row, tw_obs_fields[\"Modificador\"]).text()\n\n # check if code is state\n nbEvents = len(\n [event[EVENT_BEHAVIOR_FIELD_IDX] for event in self.pj[OBSERVATIONS][self.observationId][EVENTS]\n if event[EVENT_BEHAVIOR_FIELD_IDX] == key\n and event[EVENT_TIME_FIELD_IDX] < time\n and event[EVENT_SUBJECT_FIELD_IDX] == subject\n and event[EVENT_MODIFIER_FIELD_IDX] == modifier])\n\n # if nbEvents and (nbEvents % 2): # test >0 and odd\n # self.twEvents.item(row, tw_obs_fields[TYPE]).setText(STOP)\n # else:\n # self.twEvents.item(row, tw_obs_fields[TYPE]).setText(START)", "def run_event_outside(self):\n QMessageBox.warning(self, programName, \"Function not yet implemented\")\n return\n\n if not self.observationId:\n self.no_observation()\n return\n\n if self.twEvents.selectedItems():\n row_s = self.twEvents.selectedItems()[0].row()\n row_e = self.twEvents.selectedItems()[-1].row()\n eventtime_s = self.pj[OBSERVATIONS][self.observationId][EVENTS][row_s][0]\n eventtime_e = self.pj[OBSERVATIONS][self.observationId][EVENTS][row_e][0]\n\n durations = [] # in seconds\n\n # TODO: check for 2nd player\n for mediaFile in self.pj[OBSERVATIONS][self.observationId][FILE][PLAYER1]:\n durations.append(self.pj[OBSERVATIONS][self.observationId][\"media_info\"][\"length\"][mediaFile])\n\n mediaFileIdx_s = [idx1 for idx1, x in enumerate(durations) if eventtime_s >= sum(durations[0:idx1])][-1]\n media_path_s = self.pj[OBSERVATIONS][self.observationId][FILE][PLAYER1][mediaFileIdx_s]\n\n mediaFileIdx_e = [idx1 for idx1, x in enumerate(durations) if eventtime_e >= sum(durations[0:idx1])][-1]\n media_path_e = self.pj[OBSERVATIONS][self.observationId][FILE][PLAYER1][mediaFileIdx_e]\n\n # calculate time for current media file in case of many queued media files\n\n print(mediaFileIdx_s)\n print(type(eventtime_s))\n print(durations)\n\n eventtime_onmedia_s = round(eventtime_s - float2decimal(sum(durations[0:mediaFileIdx_s])), 3)\n eventtime_onmedia_e = round(eventtime_e - float2decimal(sum(durations[0:mediaFileIdx_e])), 3)\n\n print(row_s, media_path_s, eventtime_s, eventtime_onmedia_s)\n print(self.pj[OBSERVATIONS][self.observationId][EVENTS][row_s])\n\n print(row_e, media_path_e, eventtime_e, eventtime_onmedia_e)\n print(self.pj[OBSERVATIONS][self.observationId][EVENTS][row_e])\n\n if media_path_s != media_path_e:\n print(\"events are located on 2 different media files\")\n return\n\n media_path = media_path_s\n\n # example of external command defined in environment:\n # export eMOCEXTERNAL=\"myprog -i {MEDIA_PATH} -s {START_S} -e {END_S} {DURATION_MS} --other\"\n\n if \"eMOCEXTERNAL\" in os.environ:\n external_command_template = os.environ[\"eMOCEXTERNAL\"]\n else:\n print(\"eMOCEXTERNAL env var not defined\")\n return\n\n external_command = external_command_template.format(OBS_ID=self.observationId,\n MEDIA_PATH='\"{}\"'.format(media_path),\n MEDIA_BASENAME='\"{}\"'.format(\n os.path.basename(media_path)),\n START_S=eventtime_onmedia_s,\n END_S=eventtime_onmedia_e,\n START_MS=eventtime_onmedia_s * 1000,\n END_MS=eventtime_onmedia_e * 1000,\n DURATION_S=eventtime_onmedia_e - eventtime_onmedia_s,\n DURATION_MS=(\n eventtime_onmedia_e - eventtime_onmedia_s) * 1000)\n\n print(external_command)\n '''\n p = subprocess.Popen(external_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n '''\n\n '''\n if eventtimeS == eventtimeE:\n q = []\n else:\n durationsec 
= eventtimeE-eventtimeS\n q = [\"--durationmsec\",str(int(durationsec*1000))]\n args = [ex, \"-f\",os.path.abspath(fn),\"--seekmsec\",str(int(eventtimeS*1000)),*q,*(\"--size 1 --track 1 --redetect 100\").split(\" \")]\n if os.path.split(fn)[1].split(\"_\")[0] in set([\"A1\",\"A2\",\"A3\",\"A4\",\"A5\",\"A6\",\"A7\",\"A8\",\"A9\",\"A10\"]):\n args.append(\"--flip\")\n args.append(\"2\")\n print (os.path.split(fn)[1].split(\"_\")[0])\n print (\"running\",ex,\"with\",args,\"in\",os.path.split(ex)[0])\n #pid = subprocess.Popen(args,executable=ex,cwd=os.path.split(ex)[0])\n '''\n\n # Extract Information:\n # videoname of current observation\n # timeinterval\n # custom execution", "def triggers(rate, volume, uptime, start_mjd, end_mjd, episodes):\n episode_events = []\n for episode in range(episodes):\n events = episode_triggers(rate, volume, uptime, start_mjd, end_mjd)\n events['episode'] = episode\n episode_events.append(\n events[['episode', 'event_id', 'mjd', 'ra', 'decl']])\n\n events = pd.concat(episode_events, axis=0)\n\n return events", "def sample_times():\n\tthe_times = []\n\tday = config.window_start_date\n\twhile day <= config.window_end_date:\n\t\t# times from start of window on day to end of window \n\t\ttime = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_start_time \n\t\t) )\n\t\tend_time = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_end_time \n\t\t) )\n\t\twhile time < end_time: # While still in the time window\n\t\t\tthe_times.append( time )\n\t\t\ttime += timedelta(minutes=1)\n\t\tday += timedelta(days=1)\n\treturn the_times", "def database_extraction(mod_start_time,mod_end_time,instrument_chosen,subevent_bool,\r\n detect_previous_event = False,thresholds='100,1',\r\n one_thresh = False):\r\n obs_file_created = False\r\n\r\n #extending time window\r\n window_end_time = (mod_end_time + timedelta(days=2))\r\n window_start_time = (mod_start_time - timedelta(days=2))\r\n \r\n #making a list of all dates within window\r\n day_list=[]\r\n for d in range(10):\r\n day_list.append((window_start_time + timedelta(days=d)).date())\r\n print('day list = %s' %day_list)\r\n \r\n print('determining if an instrument has been chosen')\r\n\r\n if instrument_chosen:\r\n #if an instrument has been chosen, checking to make sure it still works for this date\r\n if inst_end < window_end_time:\r\n instrument_chosen = False\r\n else:\r\n #if insturment hasn't been chosen, figuring out what it should be for given date\r\n try:\r\n #if instrument is specified in cfg using that\r\n instrument = cfg.instrument\r\n inst_end = datetime.today()\r\n print('using %s as our instrument for observations' %instrument)\r\n instrument_chosen = True\r\n\r\n except:\r\n #choosing instrument using function if not given in cfg\r\n instrument_stuff = choose_prime_inst(window_start_time.date(),\r\n window_end_time.date())\r\n instrument = instrument_stuff[0]\r\n #figuring out how long we can use this instrument\r\n inst_end = instrument_stuff[1]\r\n instrument_chosen = True\r\n \r\n #running katie's code to extract data using chosen instrument and dates\r\n print('extracting data from GOES website')\r\n \r\n #running for only one threshold if one_thresh is true, otherwise running for default\r\n #thresholds as well as any additional threshold given\r\n if one_thresh:\r\n one_sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds) \r\n print('ran for threshold %s' %thresholds)\r\n else:\r\n if 
subevent_bool:\r\n thresholds = '10,1'\r\n #if event is a subevent, changing the threshold in katie's code to\r\n #10 MeV > 1pfu so that it will be recorded\r\n print('********************SUBEVENT**************************')\r\n sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n print('ran for subevent')\r\n else:\r\n #if an event, running with usual thresholds\r\n print('********************EVENT*****************************')\r\n sep.run_all(str(window_start_time), str(window_end_time),str(instrument), \r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n \r\n #reloading function so it doesn't keep old data \r\n reload(sep)\r\n \r\n #reformatting csv created from katie's code to json\r\n print('extracted - reformatting') \r\n for day in day_list: \r\n if not obs_file_created:\r\n #checking each day within the window to find the csv file if it hasn't\r\n #already been found\r\n print('thresholds: %s' %thresholds)\r\n \r\n if one_thresh:\r\n #name includes threshold if only ran for one threshold\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_gt' +\r\n str(thresholds).split(',')[0] + '_' + str(thresholds).split(',')[1] + 'pfu_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n else:\r\n #otherwise only includes date ran for\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n \r\n print('new_os_name %s' %new_obs_name) \r\n \r\n #checking if that file exists\r\n if os.path.exists(katies_path / new_obs_name):\r\n #if a file with this date exists, creating the corresponding json file\r\n \r\n #json name\r\n if one_thresh:\r\n obs_name = (str(instrument) + '_' + str(day) + 'only_' + str(thresholds).split(',')[0] + 'MeV_event.json')\r\n else:\r\n obs_name = (str(instrument) + '_' +\r\n str(day) + '.json')\r\n #creating json file\r\n obs_csv2json((katies_path / new_obs_name), obs_name,\r\n (ref_path/'example_sepscoreboard_json_file_v20190228.json'),\r\n instrument)\r\n \r\n print('obs file created')\r\n #file is created - will not run for anymore dates within window\r\n obs_file_created = True\r\n \r\n return(obs_name)\r\n else:\r\n print('no csv file found with this date, checking next one')", "def get_events(\n events_root,\n gcd_dir,\n start=None,\n stop=None,\n step=None,\n agg_start=None,\n agg_stop=None,\n agg_step=None,\n truth=None,\n photons=None,\n pulses=None,\n recos=None,\n triggers=None,\n angsens_model=None,\n hits=None,\n hit_charge_quant=None,\n min_hit_charge=None,\n):\n if isinstance(events_root, string_types):\n events_roots = [expand(events_root)]\n else:\n if not isinstance(events_root, Iterable):\n raise TypeError(\"`events_root` must be string or iterable thereof\")\n events_roots = []\n for events_root_ in events_root:\n if not isinstance(events_root_, string_types):\n raise TypeError(\n \"Each value in an iterable `events_root` must be a string\"\n )\n events_roots.append(expand(events_root_))\n\n slice_kw = dict(start=start, stop=stop, step=step)\n\n if agg_start is None:\n agg_start = 0\n else:\n agg_start_ = int(agg_start)\n assert agg_start_ == agg_start\n agg_start = agg_start_\n\n if agg_step is None:\n agg_step = 1\n else:\n agg_step_ = int(agg_step)\n assert agg_step_ == agg_step\n agg_step = agg_step_\n\n assert agg_start >= 0\n assert agg_step >= 1\n\n if agg_stop is not None:\n assert agg_stop > agg_start >= 0\n agg_stop_ = int(agg_stop)\n assert 
agg_stop_ == agg_stop\n agg_stop = agg_stop_\n\n if truth is not None and not isinstance(truth, bool):\n raise TypeError(\"`truth` is invalid type: {}\".format(type(truth)))\n\n if photons is not None and not isinstance(photons, (string_types, Iterable)):\n raise TypeError(\"`photons` is invalid type: {}\".format(type(photons)))\n\n if pulses is not None and not isinstance(pulses, (string_types, Iterable)):\n raise TypeError(\"`pulses` is invalid type: {}\".format(type(pulses)))\n\n if recos is not None and not isinstance(recos, (string_types, Iterable)):\n raise TypeError(\"`recos` is invalid type: {}\".format(type(recos)))\n\n if triggers is not None and not isinstance(triggers, (string_types, Iterable)):\n raise TypeError(\"`triggers` is invalid type: {}\".format(type(triggers)))\n\n if hits is not None and not isinstance(hits, string_types):\n raise TypeError(\"`hits` is invalid type: {}\".format(type(hits)))\n\n if hits is not None:\n if hit_charge_quant is None:\n raise ValueError(\n \"`hit_charge_quant` must be specified if `hits` is specified\"\n )\n if min_hit_charge is None:\n raise ValueError(\n \"`min_hit_charge` must be specified if `hits` is specified\"\n )\n\n agg_event_idx = -1\n for events_root in events_roots:\n for dirpath, dirs, files in walk(events_root, followlinks=True):\n dirs.sort(key=nsort_key_func)\n\n if \"events.npy\" not in files:\n continue\n\n file_iterator_tree = OrderedDict()\n\n num_events, event_indices, headers = iterate_file(\n join(dirpath, 'events.npy'), **slice_kw\n )\n\n meta = OrderedDict(\n [\n (\"events_root\", dirpath),\n (\"num_events\", num_events),\n (\"event_idx\", None),\n (\"agg_event_idx\", None),\n ]\n )\n\n event_indices_iter = iter(event_indices)\n file_iterator_tree['header'] = iter(headers)\n\n # -- Translate args with defaults / find dynamically-specified things -- #\n\n if truth is None:\n truth_ = isfile(join(dirpath, 'truth.npy'))\n else:\n truth_ = truth\n\n if photons is None:\n dpath = join(dirpath, 'photons')\n if isdir(dpath):\n photons_ = [splitext(d)[0] for d in listdir(dpath)]\n else:\n photons_ = False\n elif isinstance(photons, string_types):\n photons_ = [photons]\n else:\n photons_ = photons\n\n if pulses is None:\n dpath = join(dirpath, 'pulses')\n if isdir(dpath):\n pulses_ = [splitext(d)[0] for d in listdir(dpath) if 'TimeRange' not in d]\n else:\n pulses_ = False\n elif isinstance(pulses, string_types):\n pulses_ = [pulses]\n else:\n pulses_ = list(pulses)\n\n if recos is None:\n dpath = join(dirpath, 'recos')\n if isdir(dpath):\n # TODO: make check a regex including colons, etc. 
so we don't\n # accidentally exclude a valid reco that starts with \"slc\"\n recos_ = []\n for fname in listdir(dpath):\n if fname[:3] in (\"slc\", \"evt\"):\n continue\n fbase = splitext(fname)[0]\n if fbase.endswith(\".llhp\"):\n continue\n recos_.append(fbase)\n else:\n recos_ = False\n elif isinstance(recos, string_types):\n recos_ = [recos]\n else:\n recos_ = list(recos)\n\n if triggers is None:\n dpath = join(dirpath, 'triggers')\n if isdir(dpath):\n triggers_ = [splitext(d)[0] for d in listdir(dpath)]\n else:\n triggers_ = False\n elif isinstance(triggers, string_types):\n triggers_ = [triggers]\n else:\n triggers_ = list(triggers)\n\n # Note that `hits_` must be defined after `pulses_` and `photons_`\n # since `hits_` is one of these\n if hits is None:\n if pulses_ is not None and len(pulses_) == 1:\n hits_ = ['pulses', pulses_[0]]\n elif photons_ is not None and len(photons_) == 1:\n hits_ = ['photons', photons_[0]]\n elif isinstance(hits, string_types):\n hits_ = hits.split('/')\n else:\n raise TypeError(\"{}\".format(type(hits)))\n\n # -- Populate the file iterator tree -- #\n\n if truth_:\n num_truths, _, truths = iterate_file(\n fpath=join(dirpath, 'truth.npy'), **slice_kw\n )\n assert num_truths == num_events\n file_iterator_tree['truth'] = iter(truths)\n\n if photons_:\n photons_ = sorted(photons_)\n file_iterator_tree['photons'] = iterators = OrderedDict()\n for photon_series in photons_:\n num_phs, _, photon_serieses = iterate_file(\n fpath=join(dirpath, 'photons', photon_series + '.pkl'), **slice_kw\n )\n assert num_phs == num_events\n iterators[photon_series] = iter(photon_serieses)\n\n if pulses_:\n file_iterator_tree['pulses'] = iterators = OrderedDict()\n for pulse_series in sorted(pulses_):\n num_ps, _, pulse_serieses = iterate_file(\n fpath=join(dirpath, 'pulses', pulse_series + '.pkl'), **slice_kw\n )\n assert num_ps == num_events\n iterators[pulse_series] = iter(pulse_serieses)\n\n num_tr, _, time_ranges = iterate_file(\n fpath=join(\n dirpath,\n 'pulses',\n pulse_series + 'TimeRange' + '.npy'\n ),\n **slice_kw\n )\n assert num_tr == num_events\n iterators[pulse_series + 'TimeRange'] = iter(time_ranges)\n\n if recos_:\n file_iterator_tree['recos'] = iterators = OrderedDict()\n for reco in sorted(recos_):\n num_recoses, _, recoses = iterate_file(\n fpath=join(dirpath, 'recos', reco + '.npy'), **slice_kw\n )\n assert num_recoses == num_events\n iterators[reco] = iter(recoses)\n\n if triggers_:\n file_iterator_tree['triggers'] = iterators = OrderedDict()\n for trigger_hier in sorted(triggers_):\n num_th, _, trigger_hiers = iterate_file(\n fpath=join(dirpath, 'triggers', trigger_hier + '.pkl'), **slice_kw\n )\n assert num_th == num_events\n iterators[trigger_hier] = iter(trigger_hiers)\n\n if hits_ is not None and hits_[0] == 'photons':\n angsens_model, _ = load_angsens_model(angsens_model)\n else:\n angsens_model = None\n\n while True:\n try:\n event = extract_next_event(file_iterator_tree)\n except StopIteration:\n break\n\n if hits_ is not None:\n hits_array, hits_indexer, hits_summary = get_hits(\n event=event,\n path=hits_,\n hit_charge_quant=hit_charge_quant,\n min_hit_charge=min_hit_charge,\n angsens_model=angsens_model,\n )\n event['hits'] = hits_array\n event['hits_indexer'] = hits_indexer\n event['hits_summary'] = hits_summary\n\n agg_event_idx += 1\n\n event.meta = deepcopy(meta)\n event.meta[\"event_idx\"] = next(event_indices_iter)\n event.meta[\"agg_event_idx\"] = agg_event_idx\n\n if agg_stop is not None and agg_event_idx >= agg_stop:\n return\n\n if 
agg_event_idx < agg_start or (agg_event_idx - agg_start) % agg_step != 0:\n continue\n\n yield event\n\n for key in list(file_iterator_tree.keys()):\n del file_iterator_tree[key]\n del file_iterator_tree", "def window_index_time(t,windowsize,overlap):\r\n \r\n try:\r\n t=t.tolist()\r\n except:\r\n t=t\r\n \r\n t1=t[0]\r\n t2=t1 + timedelta(seconds=windowsize)\r\n pt1=[0]\r\n pt2=[othertime.findNearest(t2,t)]\r\n while t2 < t[-1]:\r\n t1 = t2 - timedelta(seconds=overlap)\r\n t2 = t1 + timedelta(seconds=windowsize)\r\n\r\n pt1.append(othertime.findNearest(t1,t))\r\n pt2.append(othertime.findNearest(t2,t))\r\n \r\n return pt1, pt2", "def process_events_optimised(self, events_chuck_size, data_chunk_size):\n ev = self.events\n tz = self.args[\"timezone\"]\n indexer = ev.Time.str.contains(\"\\d\\d:\\d\\d\", regex=True, na=False)\n timed_events, several_days_events = ev[indexer], ev[~indexer]\n \n if not several_days_events.empty:\n several_days_events.to_csv(\"special_events.csv\", index=False)\n self.log(\"[+] Special events were saved into standalone CSV-file\")\n else:\n self.log(\"[!] Special events not found\")\n\n self.data = pd.read_csv(self.args[\"data\"],\n iterator=True, chunksize=data_chunk_size)\n\n self.log(\"[.] Events and data linking...\")\n\n start, end = 0, events_chuck_size\n relevant_dates = pd.DataFrame()\n count = 1\n while True:\n events_slice = timed_events.iloc[start:end]\n # TODO: remove in release version\n # events_slice.to_csv('slice_{}_{}.csv'.format(start, end),\n # index=False)\n\n if events_slice.empty:\n break\n\n first_date, first_time = events_slice[['Date', 'Time']].iloc[0]\n lower_bound = convert(first_date + \" \" + first_time, mode='date')\n lower_bound += timedelta(hours=tz, minutes=-1)\n\n last_date, last_time = events_slice[['Date', 'Time']].iloc[-1]\n upper_bound = convert(last_date + \" \" + last_time, mode='date')\n upper_bound += timedelta(hours=tz, minutes=5)\n \n self.log(\"[.] Events slice bounded by [%s; %s] is in processing...\",\n lower_bound, upper_bound)\n \n for chunk in self.data:\n bounds = (lower_bound, upper_bound)\n linked, rest = self._process_chuck(\n chunk, bounds, events_slice, relevant_dates)\n\n relevant_dates = rest\n\n if linked is None:\n if relevant_dates.empty:\n err = \"[!] Warning: events from %d to %d have no data\"\n self.log(err, start + 1, end)\n break\n else:\n continue\n\n if linked.empty:\n err = \"[!] Warning: linked dataframe is empty\"\n self.log(err, severe=True)\n continue\n\n self.log(\"[+] Events from %d to %d were linked. 
\"\n \"Dataframe size: %d\", start + 1, end, linked.shape[0])\n\n filename = 'linked_events_{}_to_{}.csv'.format(start + 1, end)\n filename = os.path.join(self.args[\"output_folder\"], filename)\n linked.to_csv(filename, index=False)\n linked = pd.DataFrame()\n break\n\n count += 1\n start = end\n end += events_chuck_size", "def events_between(self, starting_measure, starting_offset, ending_measure, ending_offset):\n output_events = []\n for i in range(starting_measure - 1, ending_measure - 1 + 1):\n for event in self.event_groups[i].events:\n if i == starting_measure - 1:\n if i == 0 and event.offset >= starting_offset:\n output_events.append(event)\n elif i != 0 and event.offset > starting_offset:\n output_events.append(event)\n elif i == ending_measure - 1:\n if event.offset < ending_offset and ending_offset != 0:\n output_events.append(event)\n else:\n output_events.append(event)\n return output_events", "def construct_event(date_list, timeformat, dateformat, longdateformat,\n datetimeformat, longdatetimeformat, defaulttz,\n defaulttimelen=60, defaultdatelen=1, encoding='utf-8',\n _now=datetime.now):\n today = datetime.today()\n\n all_day = False\n\n # looking for start datetime\n try:\n # first two elements are a date and a time\n dtstart = datetimefstr(date_list, datetimeformat, longdatetimeformat)\n except ValueError:\n try:\n # first element is a time\n dtstart = timefstr(date_list, timeformat)\n except ValueError:\n try:\n # first element is a date (and since second isn't a time this\n # is an all-day-event\n dtstart = datetimefstr(date_list, dateformat, longdateformat)\n all_day = True\n except ValueError:\n raise\n\n # now looking for the end\n if all_day:\n try:\n # second element must be a date, too\n dtend = datetimefstr(date_list, dateformat, longdateformat)\n dtend = dtend + timedelta(days=1)\n except ValueError:\n # if it isn't we expect it to be the summary and use defaultdatelen\n # as event length\n dtend = dtstart + timedelta(days=defaultdatelen)\n # test if dtend's year is this year, but dtstart's year is not\n if dtend.year == today.year and dtstart.year != today.year:\n dtend = datetime(dtstart.year, *dtend.timetuple()[1:6])\n\n if dtend < dtstart:\n dtend = datetime(dtend.year + 1, *dtend.timetuple()[1:6])\n\n else:\n try:\n # next element datetime\n dtend = datetimefstr(date_list, datetimeformat, longdateformat)\n except ValueError:\n try:\n # next element time only\n dtend = timefstr(date_list, timeformat)\n dtend = datetime(*(dtstart.timetuple()[:3] + dtend.timetuple()[3:5]))\n except ValueError:\n dtend = dtstart + timedelta(minutes=defaulttimelen)\n\n if dtend < dtstart:\n dtend = datetime(*dtstart.timetuple()[0:3] +\n dtend.timetuple()[3:5])\n if dtend < dtstart:\n dtend = dtend + timedelta(days=1)\n if all_day:\n dtstart = dtstart.date()\n dtend = dtend.date()\n\n else:\n try:\n # next element is a valid Olson db timezone string\n dtstart = pytz.timezone(date_list[0]).localize(dtstart)\n dtend = pytz.timezone(date_list[0]).localize(dtend)\n date_list.pop(0)\n except (pytz.UnknownTimeZoneError, UnicodeDecodeError):\n dtstart = defaulttz.localize(dtstart)\n dtend = defaulttz.localize(dtend)\n\n event = icalendar.Event()\n text = ' '.join(date_list).decode(encoding)\n summary = text.split(' :: ',1)[0]\n\n try:\n description = text.split(' :: ',1)[1]\n event.add('description',description)\n except IndexError:\n pass\n\n event.add('dtstart', dtstart)\n event.add('dtend', dtend)\n event.add('dtstamp', _now())\n event.add('summary', summary)\n event.add('uid', 
generate_random_uid())\n return event", "def find_time_boundaries(indices, times, drop_single_idx=False):\n\n ## If times are not the same size as indices, assume these times are for all recordings\n ## and the recording time for IDX NUM is times[NUM] (ie. idx 5 was recorded at times[5])\n if len(times) != len(indices):\n times = np.array(times)[np.array(indices)]\n\n ## Since list slicing counts up to but not including ends, we need to add 1 to all detected end locations\n ends = np.where(np.diff(indices) > 1)[0] + 1\n\n ## Starts and ends will equal each other since list slicing includes start values, but start needs 0 appended\n starts = np.copy(ends)\n if len(starts) == 0 or starts[0] != 0:\n starts = np.insert(starts, 0, 0)\n\n ## np.diff returns an array one smaller than the indices list, so we need to add the last idx to the ends\n if len(ends) == 0 or ends[-1] != len(indices):\n ends = np.insert(ends, len(ends), len(indices))\n\n ## Loop through all continuous idx start & end to see if any are too small (length = 1)\n time_boundaries = []\n for start, end in zip(starts, ends):\n if end - start < 2:\n if not drop_single_idx:\n raise PipelineException(f\"Disconnected index found at index {start}\")\n else:\n bounds = [np.nanmin(times[start:end]), np.nanmax(times[start:end])]\n time_boundaries.append(bounds)\n\n return time_boundaries", "def draw_around_event(power,events,borders,eventName,maxY=1200):\n event_consider = events[events['eventName']==eventName].reset_index(drop=True)\n print(\"number of\", eventName ,\"in groudtruth=\",len(event_consider))\n i = 0\n while(i<len(event_consider)):\n date = time.mktime(datetime.strptime(event_consider['time'][i], \"%Y-%m-%d %H:%M:%S\").timetuple())\n start = str(datetime.fromtimestamp(date-borders[0]))\n end = str(datetime.fromtimestamp(date+borders[1]))\n print(date,start,end)\n i += 1\n serie = Series.from_array(power[(power['time']>=start)&(power['time']<=end)]['value'])\n if len(serie)>0:\n v = [serie.index[0], serie.index[len(serie)-1], 0, maxY]#xmin,xmax,ymin,ymax\n pyplot.figure(figsize=(20, 5))\n pyplot.plot(serie,'ro')\n pyplot.axis(v)\n pyplot.show()\n else:\n print(\"No data of power for this event\")", "def get_data_for_events(self,event_list,start_end = (-1000,2000),channels = None):\n assert len(event_list)>0, \"Event list is not a list\"\n assert len(event_list)<10000, \"Only up to 10000 timepoints support right now\"\n assert start_end[0]<start_end[1], \"Incorrect values for start and end\"\n \n start = start_end[0]\n end = start_end[1]\n event_list = [int(x) for x in event_list]\n if (channels == None):\n channels = range(self.num_channels)\n channels.sort()\n rv = n.zeros((end-start,len(channels),len(event_list)),\"d\")\n arb = n.zeros((self.num_channels),n.bool)\n for c in channels:\n arb[c]=True\n \n for i,t in enumerate(event_list):\n if t+start<0 or t+end>self.num_datapoints:\n raise IndexError(\"Cannot get data from %i to %i\" % (t+start,t+end) )\n rv[:,:,i] = self[t+start:t+end,arb]\n return rv", "def preprocess_events(events):\n events = events[events['type'] != 'system']\n events.is_copy = False\n \n # set the time interval between events\n t = np.array(events['seconds']) \n t_plus_1 = np.append(t[1:(len(t))],t[len(t)-1])\n t = t_plus_1 - t\n events['interval'] = t\n \n # Create an identifier for the minute of the event, usefull when creating\n # the sessions\n dt = [time.strptime(d, '%Y-%m-%d %H:%M:%S') for d in events['datetime']]\n events['minute'] = [get_minute(d) for d in dt]\n \n # label interruptions and 
sessions\n interruptions = []\n sessions = []\n intervals = t\n s_id = 0\n for i in range(0,len(events)):\n sessions.append(s_id)\n if intervals[i] >= SPACE_BETWEEN_INT:\n s_id += 1\n interruptions.append(False)\n continue\n \n if intervals[i] >= SIZE_INT:\n interruptions.append(True)\n else:\n interruptions.append(False)\n \n events['is_interruption'] = interruptions\n events['session_id'] = sessions\n \n return events", "def process_events(lines):\n event_regex = re.compile(r'^\\[(\\d{4})-(\\d{2})-(\\d{2}) (\\d{2}):(\\d{2})\\] (.+)', re.MULTILINE)\n guard_regex = re.compile(r'Guard #(\\d+)')\n\n # The loop needs to remember the last guard ID it found so it knows who wake/sleep events should belong to\n guard_id = None\n\n events = []\n guard_ids = set()\n\n # findall gets every match from the string and returns a list of lists containing only the matching groups\n # The MULTILINE flag is important\n for year, month, day, hour, minute, event_description in event_regex.findall(lines):\n # Use the single-match function to see if there's a guard ID (implying it's a SHIFT_BEGIN)\n id_match = guard_regex.match(event_description)\n\n if id_match:\n guard_id = int(id_match.group(1))\n guard_ids.add(guard_id)\n event_type = EventType.SHIFT_BEGIN\n elif event_description == 'wakes up':\n event_type = EventType.WAKE\n else:\n event_type = EventType.SLEEP\n\n events.append(ScheduleEvent(datetime(int(year), int(month), int(day), hour=int(hour), minute=int(minute)),\n guard_id, event_type))\n\n return guard_ids, events" ]
[ "0.65961754", "0.6502333", "0.6414507", "0.6089023", "0.5852518", "0.5768563", "0.57619506", "0.57517695", "0.5713987", "0.56655604", "0.56637067", "0.56566614", "0.56286615", "0.55703324", "0.5542513", "0.5491681", "0.5479628", "0.54761803", "0.5450979", "0.54409343", "0.5434103", "0.54248923", "0.5419411", "0.5416304", "0.5399855", "0.5391567", "0.53671277", "0.5356125", "0.53477895", "0.5322029" ]
0.8048312
0
Returning the sync mode
def get_sync_mode(): return sync_mode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def synchronize_system_mode(self):\n\n return self._synchronize_system_mode", "def sync(self):\n return self._sync", "def isSync(self):\n return False", "def getMode(self):\n with self.lock:\n mode = self.mode\n return mode", "def syncheck(self) :\n\t\ttry :\n\t\t\treturn self._syncheck\n\t\texcept Exception as e:\n\t\t\traise e", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def lock_mode(self) -> str:\n return pulumi.get(self, \"lock_mode\")", "def lock_mode(self) -> str:\n return pulumi.get(self, \"lock_mode\")", "def _get_ldp_in_sync(self):\n return self.__ldp_in_sync", "def get_mode(self):\r\n return self.mode", "def supported_modes(self):\n return [OFF, SYNC, CHARGE]", "def getmode(self):\n return self.mode", "def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)", "def CaptureMode(self):\n if self.force_auto_sync:\n self.get('CaptureMode')\n return self._CaptureMode", "def getSyncObj(self):\n \n return self.sync_obj", "def getSyncInfo (self, connection) :\r\n \r\n if self.state != 'valid' :\r\n return False\r\n \r\n if self.sync_target :\r\n return False\r\n \r\n self.state = 'recv_sync'\r\n self.sync_target = connection\r\n self.do_sync_get()\r\n \r\n return True", "def getMode(self):\n return self._mode", "def mode(self):\n return self._vdma.writechannel.mode", "def last_on_mode(self):\n return self._last_on_mode", "def fsync(var, wrapper, message):\n sync_modes(var)", "def get_mode(self):\r\n return self._api.get_mode()", "def mode(self):\r\n return self._mode", "def mode(self):\r\n return self._mode", "def mode(self):\r\n return self._mode", "def getAddressInSync(self):\n return self._addrInSyncMode", "def auto_mode(self):\n return self._auto_mode", "def mode(self):\n return self.__mode", "def DualMode(self) -> bool:", "def mode(self):\n return self._mode" ]
[ "0.72940135", "0.7088847", "0.7082289", "0.7017716", "0.6868106", "0.68618685", "0.68516916", "0.6798914", "0.6798914", "0.6657855", "0.64055157", "0.6349925", "0.63405436", "0.6327484", "0.63198066", "0.63171744", "0.631424", "0.63071305", "0.63044363", "0.62617934", "0.6251813", "0.6222751", "0.619303", "0.619303", "0.619303", "0.6186808", "0.6173106", "0.61379534", "0.61368483", "0.6127583" ]
0.91350996
0
Checking the sync_mode based on the given configuration
def check_sync_mode(): global sync_mode _description = '' _modes = { SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)', SyncMode.SENDER: '(LOCAL ➔ REMOTE)', SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)', SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)', SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)', SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)', SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)', SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)', SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)' } for _mode, _desc in _modes.items(): if getattr(SyncMode, 'is_' + _mode.lower())(): sync_mode = _mode _description = _desc if is_import(): output.message( output.Subject.INFO, f'Import file {output.CliFormat.BLACK}{system.config["import"]}{output.CliFormat.ENDC}', True ) system.config['is_same_client'] = SyncMode.is_same_host() output.message( output.Subject.INFO, f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}', True )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sync_mode():\n return sync_mode", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def check_config_mode(self):\n return False", "def __check_mode_change(self):\n if self.mode[\"auto_mode\"] != self.mode[\"last_mode\"]:\n self.mode[\"last_mode\"] = self.mode[\"auto_mode\"]\n return True\n return False", "def get_config_sync_status(self):\n \n try:\n device_group = self.connection.Management.DeviceGroup.get_list()\n print self.connection.Management.DeviceGroup.get_sync_status([device_group])\n \n except:\n raise Exception(\"Target system has pending configuration, please sync beforehand.\")", "def is_config_mode(self):\n\n return self._connection.get_prompt().strip().startswith('(')", "def _change_conf_check(mds_config):\n loop = asyncio.get_event_loop()\n crt = model.async_set_application_config('ceph-fs', mds_config)\n loop.run_until_complete(crt)\n results = _get_conf()\n self.assertEquals(\n results['mds_cache_memory_limit'],\n mds_config['mds-cache-memory-limit'])\n self.assertAlmostEqual(\n float(results['mds_cache_reservation']),\n float(mds_config['mds-cache-reservation']))\n self.assertAlmostEqual(\n float(results['mds_health_cache_threshold']),\n float(mds_config['mds-health-cache-threshold']))", "def read_configuration_mode(self):\n configuration_mode = self.scpi_comm('CONFIG?').strip()\n mode = 'Unknown'\n if configuration_mode == '0':\n mode = 'Voltage tracking'\n if configuration_mode == '2':\n mode = 'Dual output'\n if configuration_mode in ('3', '4'):\n mode = 'Track Voltage and Current'\n return mode", "def DualMode(self) -> bool:", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))", "def isSync(self):\n return False", "def check_config_mode(self, check_string=\">config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)", "def check_config_mode(self, check_string=\"(config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)", "def check_config_mode(self, check_string=\")#\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string)", "def synchronize_system_mode(self):\n\n return self._synchronize_system_mode", "def getSyncInfo (self, connection) :\r\n \r\n if self.state != 'valid' :\r\n return False\r\n \r\n if self.sync_target :\r\n return False\r\n \r\n self.state = 'recv_sync'\r\n self.sync_target = connection\r\n self.do_sync_get()\r\n \r\n return True", "def is_delta_sync_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"delta_sync_enabled\"]\n except KeyError:\n return False", "def supported_modes(self):\n return [OFF, SYNC, CHARGE]", "def check_config_mode(\n self, check_string: str = \")#\", pattern: str = \"\", force_regex: bool = False\n ) -> bool:\n return super().check_config_mode(check_string=check_string, pattern=pattern)", "def config_sync(self) -> Optional['outputs.FeatureMembershipConfigmanagementConfigSync']:\n return pulumi.get(self, \"config_sync\")", "def __getMode( self ):\n\n res = self.rssConfig.getConfigState()\n\n if res == 'Active':\n\n if self.rssClient is None:\n self.rssClient = ResourceStatusClient()\n return True\n\n self.rssClient = None\n return False", "def check_enable_mode(self, *args, **kwargs):\n pass", "def check_manual_mode_change(self, event):\n if 
self.vehicle.get_manual_mode_change(reset=True):\n data = lambda: None\n data.mode_to_set = \"Inactive\"\n self.set_companion_mode(data)", "def _check_for_sync(self, fl_name):\n fl_sync = True\n # Get the list of flavors names to sync.\n fl_wlist = self.get_flavors_white_list()\n fl_blist = self.get_flavors_black_list()\n\n if (len(fl_wlist) != 0):\n fl_sync = self._regex_comp(fl_name, fl_wlist)\n if (fl_sync and (len(fl_blist) != 0)):\n fl_sync = not(self._regex_comp(fl_name, fl_blist))\n return fl_sync", "def lookup_sync(self, flag=0):\n if flag == 1 or self.ser.read() == self.sync[3]:\n if self.ser.read() == self.sync[2]:\n if self.ser.read() == self.sync[1]:\n if self.ser.read() == self.sync[0]:\n return True\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n else:\n return False", "def __get_verify_mode(self):\n ...", "def check_enable_mode(self, check_string='#'):\n return True", "def is_dump():\n return sync_mode in (SyncMode.DUMP_LOCAL, SyncMode.DUMP_REMOTE)", "async def _check_multiple_mode(self):\n logger.info(\"Host {}:Checking multiple mode\".format(self._host))\n out = await self.send_command('show mode')\n if 'multiple' in out:\n self._multiple_mode = True\n\n logger.debug(\"Host {}: Multiple mode: {}\".format(self._host, self._multiple_mode))" ]
[ "0.72925556", "0.692381", "0.6704957", "0.63555396", "0.63372236", "0.61973035", "0.61619157", "0.6108568", "0.6087788", "0.6085571", "0.6059644", "0.60411906", "0.60293037", "0.60175145", "0.594344", "0.59315693", "0.588756", "0.58717567", "0.58026725", "0.580076", "0.5800057", "0.579664", "0.5757282", "0.5745641", "0.5716965", "0.57119477", "0.5677909", "0.5669484", "0.56475365", "0.5643412" ]
0.78860736
0
Check if given client is remote client
def is_remote(client): if client == Client.ORIGIN: return is_origin_remote() elif client == Client.TARGET: return is_target_remote() elif client == Client.LOCAL: return False else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_remote(self): # -> Any | bool:\n ...", "def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True", "def is_local_client(self):\n return self.msg.is_local_client", "def is_remote(self):\n return False", "def is_remote(self):\n raise NotImplementedError()", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))", "def is_client(self):\n if not hasattr(self, '_is_client'):\n self._is_client = hasattr(self, 'client')\n return self._is_client", "def is_client_alive(self, client):\n client_conn = self.all_clients[client]\n try:\n\n ping_message = Message(\"server\", client, \"utility\", \"ping\")\n client_conn.send(str.encode(ping_message.pack_to_json_string()))\n\n except Exception as e:\n print(\"Client communication error \" + str(e))\n return False\n return True", "def test_connection(remote=False):\n import socket\n remote_server = 'www.google.com' if not remote else remote # TODO: maybe improve for China\n try:\n # does the host name resolve?\n host = socket.gethostbyname(remote_server)\n # can we establish a connection to the host name?\n con = socket.create_connection((host, 80), 2)\n return True\n except:\n print(\"Can't connect to a server...\")\n pass\n return False", "def has_client(self, ip):\n for cli in self.clients:\n if cli.ip == ip:\n return cli\n return None", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def client_exists(self, client=None):\n if type(client) is Client:\n return client.client_id in [c.client_id for c in self.client_list]\n else:\n return False", "def check(client: Client):\n pass", "def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)", "def has_client(self):\n \n return len(self._clients) > 0", "def has_client(self):\n \n return len(self._clients) > 0", "def remote(self):\r\n return self._url.scheme in ('http', 'https')", "def is_cups_server(rm):\n try:\n s = socket.socket()\n s.settimeout(0.3)\n s.connect((rm, 631))\n s.close()\n\n return True\n except (socket.error, socket.timeout):\n return False", "def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "async def check_client(self, client_id: Identity) -> AuthResult:\n raise NotImplementedError", "def is_remote(state: 'JobState') -> bool:\n return state in [\n JobState.WAITING, JobState.WAITING_CR, JobState.RUNNING,\n JobState.RUNNING_CR\n ]", "def user_has_perms_on_client(user, client):\n if client and client not in user.clients:\n return False\n\n return True", "def _client_allowed(self):\r\n client_ip = self._client_address[0]\r\n if not client_ip in self._settings.allowed_clients and \\\r\n not 'ALL' in self._settings.allowed_clients:\r\n self._send_content('Access from host %s forbidden.' 
% client_ip, 'text/html')\r\n return False\r\n return True", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def is_allowed_to_see_clients(session):\n val = session.get(\"allowed_to_see_clients\")\n # Check to see if their permissions are still valid\n if val and TimeUtils.get_local_timestamp() < val[1]:\n return val[0]\n\n user = None\n\n for server in settings.INSTALLED_GITSERVERS:\n gitserver = models.GitServer.objects.get(host_type=server[\"type\"], name=server[\"hostname\"])\n auth = gitserver.auth()\n user = auth.signed_in_user(gitserver, session)\n if not user:\n continue\n\n api = user.api()\n for authed_user in server.get(\"authorized_users\", []):\n if user.name == authed_user or is_team_member(session, api, authed_user, user):\n logger.info(\"'%s' is a member of '%s' and is allowed to see clients\" % (user, authed_user))\n session[\"allowed_to_see_clients\"] = (True,\n TimeUtils.get_local_timestamp() + settings.PERMISSION_CACHE_TIMEOUT)\n return True\n logger.info(\"%s is NOT allowed to see clients on %s\" % (user, gitserver))\n session[\"allowed_to_see_clients\"] = (False, TimeUtils.get_local_timestamp() + settings.PERMISSION_CACHE_TIMEOUT)\n return False", "def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def check_credentials(client):\n pid, uid, gid = get_peercred(client)\n\n euid = os.geteuid()\n client_name = \"PID:%s UID:%s GID:%s\" % (pid, uid, gid)\n if uid not in (0, euid):\n raise SuspiciousClient(\"Can't accept client with %s. It doesn't match the current EUID:%s or ROOT.\" % (\n client_name, euid\n ))\n\n _LOG(\"Accepted connection on fd:%s from %s\" % (client.fileno(), client_name))\n return pid, uid, gid", "def _is_self(self, ip, port):\n import socket as sk\n self_ip = sk.gethostbyname(sk.gethostname())\n self_port = self.config['API_PORT']\n return str(self_ip) == ip and self_port == port", "def _check_connection() -> bool:\n return bool(subprocess.check_output([\"hostname\", \"-I\"]))" ]
[ "0.7279024", "0.7175781", "0.70540047", "0.6942053", "0.6814598", "0.6683476", "0.66315174", "0.66083586", "0.6517704", "0.6482558", "0.6460855", "0.6398728", "0.63722503", "0.6251613", "0.62041306", "0.62041306", "0.61882174", "0.615462", "0.61380357", "0.6073914", "0.6026141", "0.60112673", "0.60034007", "0.59746414", "0.5943694", "0.59414196", "0.5917503", "0.5916974", "0.5906758", "0.58996147" ]
0.83206755
0
Check if target is remote client
def is_target_remote(): return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE, SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False", "def is_remote(self): # -> Any | bool:\n ...", "def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True", "def is_remote(self):\n return False", "def is_remote(self):\n raise NotImplementedError()", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))", "def is_local_client(self):\n return self.msg.is_local_client", "def remote(self):\r\n return self._url.scheme in ('http', 'https')", "def test_is_remote_source(self):\n self.assertEqual(self.project.is_remote(), False)", "def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def test_connection(remote=False):\n import socket\n remote_server = 'www.google.com' if not remote else remote # TODO: maybe improve for China\n try:\n # does the host name resolve?\n host = socket.gethostbyname(remote_server)\n # can we establish a connection to the host name?\n con = socket.create_connection((host, 80), 2)\n return True\n except:\n print(\"Can't connect to a server...\")\n pass\n return False", "def is_host(self):\n return self.host", "def is_client(self):\n if not hasattr(self, '_is_client'):\n self._is_client = hasattr(self, 'client')\n return self._is_client", "def has_client(self, ip):\n for cli in self.clients:\n if cli.ip == ip:\n return cli\n return None", "def is_virtual_network_host():\n return False", "def check(self, target, port):\n pass", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def supported_target(self, target, message_handler):\n\n # iOS can never be a host.\n return False", "def in_host():\n return not in_docker()", "def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True", "def is_remote(state: 'JobState') -> bool:\n return state in [\n JobState.WAITING, JobState.WAITING_CR, JobState.RUNNING,\n JobState.RUNNING_CR\n ]", "def _is_remote_branch(self, branch_reference):\n return branch_reference.startswith(\"refs/remotes/\")", "def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)", "def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)", "def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) 
when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False", "def use_remote_gateways(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")", "def use_remote_gateways(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")", "def supported_target(self, target, message_handler):\n\n # Android can never be a host.\n return False" ]
[ "0.7902426", "0.7443142", "0.7258655", "0.7257651", "0.7235301", "0.6850371", "0.6576049", "0.645614", "0.6447849", "0.640186", "0.6281278", "0.61393785", "0.60623175", "0.60600775", "0.600218", "0.59922236", "0.59744495", "0.59468746", "0.593498", "0.5931949", "0.59191805", "0.59143037", "0.59089434", "0.58839273", "0.5878677", "0.58627546", "0.58582056", "0.5856519", "0.5856519", "0.58564866" ]
0.7590158
1
Check if origin is remote client
def is_origin_remote(): return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE, SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False", "def is_remote(self):\n return False", "def is_remote(self): # -> Any | bool:\n ...", "def is_remote(self):\n raise NotImplementedError()", "def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True", "def remote(self):\r\n return self._url.scheme in ('http', 'https')", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))", "def is_local_client(self):\n return self.msg.is_local_client", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n # import re\n # bool(re.match(r'^.*?\\.mydomain\\.com', origin))\n # allowed = super.check_origin(origin)\n if self.allow_origin == '*':\n return True\n\n host = self.request.headers.get(\"Host\")\n if origin is None:\n origin = self.request.headers.get(\"Origin\")\n\n # If no header is provided, assume we can't verify origin\n if origin is None:\n LOG.warning(\"user {0} Missing Origin header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n if host is None:\n LOG.warning(\"user {0} Missing Host header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n\n origin = origin.lower()\n origin_host = urlparse(origin).netloc\n\n # OK if origin matches host\n if origin_host == host:\n return True\n\n # Check CORS headers\n if self.allow_origin:\n allow = self.allow_origin == origin\n # elif self.allow_origin_pat:\n # allow = bool(self.allow_origin_pat.match(origin))\n else:\n # No CORS headers deny the request\n allow = False\n if not allow:\n LOG.warning(\"user {0} Blocking Cross Origin WebSocket Attempt. Origin: %s, Host: %s\",\n self.client_id, origin, host)\n return allow", "def test_is_remote_source(self):\n self.assertEqual(self.project.is_remote(), False)", "def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) 
when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def test_connection(remote=False):\n import socket\n remote_server = 'www.google.com' if not remote else remote # TODO: maybe improve for China\n try:\n # does the host name resolve?\n host = socket.gethostbyname(remote_server)\n # can we establish a connection to the host name?\n con = socket.create_connection((host, 80), 2)\n return True\n except:\n print(\"Can't connect to a server...\")\n pass\n return False", "def _is_remote_branch(self, branch_reference):\n return branch_reference.startswith(\"refs/remotes/\")", "def fingertip_no_remote(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_remote\", False)", "def remote(self):\n return self.getItunesAttribute('Track Type') == 'Remote'", "def is_host(self):\n return self.host", "def _is_remote_reusable(inputs, calculation):\n can_use_remote = False\n #If no charge density file is available to restart from the calculation will except\n #with a not nice error message. So we can only reuse the charge density if these files are available\n retrieved_filenames = calculation.base.links.get_outgoing().get_node_by_label('retrieved').list_object_names()\n if any(file in retrieved_filenames for file in (\n 'cdn_last.hdf',\n 'cdn1',\n )):\n can_use_remote = True\n\n if 'fleurinp' in inputs:\n modes = inputs.fleurinp.get_fleur_modes()\n if modes['force_theorem'] or modes['dos'] or modes['band']:\n # in modes listed above it makes no sense copying cdn.hdf\n can_use_remote = False\n # without fleurinp it is harder to extract modes in this case\n # - simply try to reuse cdn.hdf and hope it works\n\n return can_use_remote", "def is_remote(state: 'JobState') -> bool:\n return state in [\n JobState.WAITING, JobState.WAITING_CR, JobState.RUNNING,\n JobState.RUNNING_CR\n ]", "def is_remote_access_allowed(self, path: str):\n return self.public_path_marker.test(path) or self.is_public(path) and not self.is_private(path)", "def is_client(self):\n if not hasattr(self, '_is_client'):\n self._is_client = hasattr(self, 'client')\n return self._is_client", "def IsRemoteRerun(self):\n return self.IsRerun() and not self.IsLocalRerun()", "def is_central_server() -> bool:\n return hasattr(Config().algorithm,\n 'cross_silo') and Config().args.port is None", "def has_upstream_server(self) -> bool:\n return True if self.host is not None else False" ]
[ "0.79242504", "0.7800007", "0.7686724", "0.75528294", "0.747566", "0.7285064", "0.70139563", "0.6977031", "0.6942172", "0.6772315", "0.6772315", "0.6772315", "0.6772315", "0.67488825", "0.6476083", "0.6462293", "0.64356977", "0.63294023", "0.6260433", "0.6211482", "0.62091947", "0.6205737", "0.61427885", "0.61377716", "0.610265", "0.6073228", "0.60689443", "0.606794", "0.60434717", "0.6035247" ]
0.8189138
0
Check if sync mode is import
def is_import(): return sync_mode in (SyncMode.IMPORT_LOCAL, SyncMode.IMPORT_REMOTE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def is_import(self):\n return self.sh_info is None and (self.binding == 'STB_GLOBAL' or \\\n self.binding == 'STB_WEAK' or \\\n self.binding == 'STT_FUNC')", "def import_only(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"import_only\")", "def import_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"import_only\")", "def get_import_mode(self):\n\t\treturn self.buttonImport.get_active()", "def is_dump():\n return sync_mode in (SyncMode.DUMP_LOCAL, SyncMode.DUMP_REMOTE)", "def is_import_completion(self):\n current_line = self.get_current_line()\n\n # Seperate cases! More difficult than I thought\n match = re.match(r\"(import)|(from)\", current_line)\n if match:\n word_before = self.get_word_before()\n if word_before == \"from\" or word_before == \"import\":\n # Need to check for multiple imports! 
(TODO)\n return True\n\n return False", "def is_import_from_completion(self):\n\n current_line = self.get_current_line()\n\n match = re.match(r\"from .* import\", current_line)\n if match and self.get_word() != \"import\":\n return True\n\n return False", "def isSync(self):\n return False", "def detect_import(self):\n if self.contains_match(CONTAINS_IMPORT): self.es6import = True\n elif self.contains_match(CONTAINS_REQUIRE): self.es6import = False\n else: self.es6import = self.get_project_pref('detect_prefer_imports')", "def checkImport(self):\r\n for imp in self.cap_file.Import.packages:\r\n if a2s(imp.aid) not in export_refs:\r\n return False\r\n return True", "def checkIfImport():\n instance_ipath, product_ipath = getImportedPathes()\n product_ilist = [i for i in os.listdir(product_ipath) \\\n if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]\n if product_ilist:\n return 1\n return 0", "def imports(self):\n line = self.line.strip()\n if line.startswith('im'):\n if line.startswith('import') is False:\n return True\n elif line == '':\n return True", "def auto_import(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_import\")", "def auto_import(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_import\")", "def auto_import(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_import\")", "def set_import_mode(self, flag):\n\t\tif self.buttonImport.get_active() and not flag:\n\t\t\tself.buttonImport.set_active(False)\n\t\t\treturn True\n\t\telif not self.buttonImport.get_active() and flag:\n\t\t\tself.buttonImport.set_active(True)\n\t\t\treturn True\n\t\treturn False", "def is_migrated_before():\n\n global migration_sign\n if os.path.exists(migration_sign):\n return True\n else:\n return False", "def has_import_permission(self, request):\n opts = self.opts\n codename = get_permission_codename('import', opts)\n return request.user.has_perm(\"%s.%s\" % (opts.app_label, codename))", "def get_sync_mode():\n return sync_mode", "def get_auto_start_import(self):\n\t\treturn self.checkAutoStartImport.get_active()", "def is_already_import_function(self, fn):\n if isinstance(fn, basestring):\n for descriptor in self.import_entries:\n for import_element in descriptor.imports:\n if import_element.name == fn:\n return True\n\n elif isinstance(fn, int):\n # TODO : add ordinary import\n pass\n return False", "def is_first_synced(self):\n return True", "def is_imported():\n return len(inspect.stack()) > 3", "def isLocal(self, connectionInfo):\n return False", "def is_func_imported(self, ea):\n # If address is located in IAT\n if ea in self.rt_import_table:\n return True\n\n return False", "def imported(module):\n try:\n if module not in sys.modules:\n __import__(module)\n return 'enabled'\n except:\n return '-'", "async def test_import_exist(hass):\n mocked_device = _create_mocked_device()\n _create_mock_config_entry(hass)\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()", "def is_local(self) -> bool:\n if not self.source:\n return False\n\n if self.source.master_name.startswith(MODULE_NAME):\n return True\n\n if self.is_type_defs():\n return True\n\n return False", "def 
_local_install(self):\n config = self._config\n ext = config.plugins[self.full_name].get('pkg_extension', '')\n if not ext:\n return False\n\n # ensure extension begins with a dot\n ext = '.{0}'.format(ext.lstrip('.'))\n\n return config.context.package.arg.endswith(ext)" ]
[ "0.75585586", "0.69149435", "0.665945", "0.64593935", "0.6442651", "0.63151574", "0.63039666", "0.6274873", "0.6236776", "0.6194281", "0.6192882", "0.6032196", "0.59927666", "0.59285295", "0.59285295", "0.59168273", "0.5914921", "0.58185005", "0.57003963", "0.5684882", "0.56790406", "0.56752753", "0.5652213", "0.55742455", "0.5552539", "0.5551295", "0.5519939", "0.5516505", "0.5488701", "0.5475263" ]
0.9084145
0
Assert valid court order.
def test_court_orders(session, test_status, expected_code, expected_msg): business = factory_business('BC1234567') filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE) del filing['filing']['courtOrder']['fileKey'] if test_status == 'FAIL': del filing['filing']['courtOrder']['orderDetails'] filing['filing']['courtOrder']['effectOfOrder'] = 'invalid' err = validate(business, filing) if expected_code: assert err.code == expected_code assert lists_are_equal(err.msg, expected_msg) else: assert err is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_case_customer_complete_courseevent_order(self):", "def test_order_constraint(self):\n orders_placed = [25, 25, 25]\n with self.assertRaises(Exception):\n analyse_uncertain_demand.UncertainDemand(\n orders=orders_placed,\n sku='Rx493-90',\n lead_time=Decimal(4),\n unit_cost=Decimal(40),\n reorder_cost=Decimal(400),\n retail_price=Decimal(600),\n currency='USD'\n )", "def test_validate_good_order(self):\n for proj in testorders.good_test_projections:\n valid_order = copy.deepcopy(self.base_order)\n valid_order['projection'] = {proj: testorders.good_test_projections[proj]}\n\n try:\n good = api.validation(valid_order, self.staffuser.username)\n except ValidationException as e:\n self.fail('Raised ValidationException: {}'.format(e.message))", "def test_validate_valid_crisis(self):\r\n assert self.crisis_tree != 0", "def verify_courses(self, courses):\n assert len(courses) == 1\n self.verify_course(courses[0])", "def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def test_validate_bad_orders(self):\n exc_type = ValidationException\n invalid_order = copy.deepcopy(self.base_order)\n c = 0 # For initial debugging\n\n for proj in testorders.good_test_projections:\n invalid_order['projection'] = {proj: testorders.good_test_projections[proj]}\n\n invalid_list = testorders.InvalidOrders(invalid_order, self.base_schema, abbreviated=True)\n\n for order, test, exc in invalid_list:\n # issues getting assertRaisesRegExp to work correctly\n with self.assertRaises(exc_type):\n try:\n c += 1\n api.validation(order, self.staffuser.username)\n except exc_type as e:\n if str(exc) in str(e):\n raise\n else:\n self.fail('\\n\\nExpected in exception message:\\n{}'\n '\\n\\nException message raised:\\n{}'\n '\\n\\nUsing test {}'.format(str(exc), str(e), test))\n else:\n self.fail('\\n{} Exception was not raised\\n'\n '\\nExpected exception message:\\n{}\\n'\n '\\nUsing test: {}'.format(exc_type, str(exc), test))\n #print c # For initial debugging", "def test_02_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.test_01_lunch_order()\r\n #We have a confirmed order with its associate cashmove\r\n #We execute the cancel function\r\n self.order_one.cancel()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #We check that the state is cancelled and that the cashmove has been deleted\r\n self.assertEqual(self.order_one.state,'cancelled')\r\n self.assertFalse(self.order_one.cashmove)", "def test_required_properties_order() -> None:\n soup = generate_case(\"required_properties_order\")\n\n tests.html_schema_doc_asserts.assert_undocumented_required(soup, [\"a\", \"b\", \"b\", \"a\"])", "def test_headlines_order(self) -> None:\n last: Tuple[int, str] = (0, \"\")\n\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if (not rule) or (rule.order is None):\n continue\n\n last_order, last_headline = last # type: int, str\n if last_order > 
rule.order:\n self.add_error(\n (\n f\"Rubriken {headline.name} ska komma före \"\n f\"rubriken {last_headline}.\"\n ),\n headline=headline,\n )\n\n last = (rule.order, headline.name)", "def test_create_course(self):\r\n self.assert_created_course()", "def test_entities__EntityOrder__1():\n zope.interface.verify.verifyObject(IEntityOrder, EntityOrder())", "def final_check(self, test_collection):\n assert True", "def testSortOrder(self):\n timestamp = time.time()\n comment_id1 = Comment.ConstructCommentId(timestamp, 0, 0)\n comment_id2 = Comment.ConstructCommentId(timestamp + 1, 0, 0)\n self.assertGreater(comment_id2, comment_id1)", "def _check_course(self, source_course_loc, dest_course_loc, expected_blocks, unexpected_blocks):\r\n history_info = modulestore().get_course_history_info(dest_course_loc)\r\n self.assertEqual(history_info['edited_by'], self.user)\r\n for expected in expected_blocks:\r\n # since block_type has no impact on identity, we can just provide an empty string\r\n source = modulestore().get_item(source_course_loc.make_usage_key(\"\", expected))\r\n pub_copy = modulestore().get_item(dest_course_loc.make_usage_key(\"\", expected))\r\n # everything except previous_version & children should be the same\r\n self.assertEqual(source.category, pub_copy.category)\r\n self.assertEqual(source.update_version, pub_copy.update_version)\r\n self.assertEqual(\r\n self.user, pub_copy.edited_by,\r\n \"{} edited_by {} not {}\".format(pub_copy.location, pub_copy.edited_by, self.user)\r\n )\r\n for field in source.fields.values():\r\n if field.name == 'children':\r\n self._compare_children(field.read_from(source), field.read_from(pub_copy), unexpected_blocks)\r\n else:\r\n self.assertEqual(field.read_from(source), field.read_from(pub_copy))\r\n for unexp in unexpected_blocks:\r\n with self.assertRaises(ItemNotFoundError):\r\n modulestore().get_item(dest_course_loc.make_usage_key(\"\", unexp))", "def assertKeys(self, data, expected):\r\n self.assertEqual(sorted(data.keys()), sorted(expected))", "def assert_response_orders(self, *args, **kwargs):\n self.assert_response_order(*args, **kwargs)\n kwargs['order_by'] = '-' + kwargs['order_by']\n self.assert_response_order(*args, **kwargs)", "def verify_course(self, course, course_id='edX/toy/2012_Fall'):\n assert course_id == str(course.id)", "def test_lpdaac_good(self):\n self.assertIsNone(api.inventory.check(self.lpdaac_order_good))", "def test_01_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.test_00_lunch_order()\r\n #We receive the order so we confirm the order line so it's state will be 'confirmed'\r\n #A cashmove will be created and we will test that the cashmove amount equals the order line price\r\n self.order_one.confirm()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #we check that our order_line is a 'confirmed' one and that there are a cashmove linked to that order_line with an amount equals to the order line price:\r\n self.assertEqual(self.order_one.state,'confirmed')\r\n self.assertTrue(self.order_one.cashmove)\r\n self.assertTrue(self.order_one.cashmove[0].amount==-self.order_one.price)", "def test_c(self):\n v1 = versions.Version(version='1.2', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertFalse(v1 >= v2)\n self.assertTrue(v2 >= v1)", "def course_tester(courses):\n\n return False", "def _verify_published_course(courses_published):\r\n self.assertEqual(len(courses_published), 1, len(courses_published))\r\n course = 
self.findByIdInResult(courses_published, \"head23456\")\r\n self.assertIsNotNone(course, \"published courses\")\r\n self.assertEqual(course.location.course_key.org, \"testx\")\r\n self.assertEqual(course.location.course_key.offering, \"wonderful\")\r\n self.assertEqual(course.category, 'course', 'wrong category')\r\n self.assertEqual(len(course.tabs), 4, \"wrong number of tabs\")\r\n self.assertEqual(course.display_name, \"The most wonderful course\",\r\n course.display_name)\r\n self.assertIsNone(course.advertised_start)\r\n self.assertEqual(len(course.children), 0,\r\n \"children\")", "def test_payment_accepted_order(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.first_name,\r\n 'billTo_lastName': u\"\\u2603\",\r\n 'orderNumber': str(order1.id),\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n 'ccAuthReply_amount': '0.00'\r\n }\r\n\r\n # tests for an order number that doesn't match up\r\n params_bad_ordernum = params.copy()\r\n params_bad_ordernum['orderNumber'] = str(order1.id + 10)\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params_bad_ordernum)\r\n\r\n # tests for a reply amount of the wrong type\r\n params_wrong_type_amt = params.copy()\r\n params_wrong_type_amt['ccAuthReply_amount'] = 'ab'\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params_wrong_type_amt)\r\n\r\n # tests for a reply amount of the wrong type\r\n params_wrong_amt = params.copy()\r\n params_wrong_amt['ccAuthReply_amount'] = '1.00'\r\n with self.assertRaises(CCProcessorWrongAmountException):\r\n payment_accepted(params_wrong_amt)\r\n\r\n # tests for a not accepted order\r\n params_not_accepted = params.copy()\r\n params_not_accepted['decision'] = \"REJECT\"\r\n self.assertFalse(payment_accepted(params_not_accepted)['accepted'])\r\n\r\n # finally, tests an accepted order\r\n self.assertTrue(payment_accepted(params)['accepted'])", "def test_00_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #we check that our order_line is a 'new' one and that there are no cashmove linked to that order_line:\r\n self.assertEqual(self.order_one.state,'new')\r\n self.assertEqual(list(self.order_one.cashmove), [])\r\n #we order that orderline so it's state will be 'ordered'\r\n self.order_one.order()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #we check that our order_line is a 'ordered' one and that there are no cashmove linked to that order_line:\r\n self.assertEqual(self.order_one.state,'ordered')\r\n self.assertEqual(list(self.order_one.cashmove), [])", "def testCourses(self):\n self.person.invokeFactory(type_name=\"FSDCourse\", id=\"test-course\")\n self.failUnless('test-course' in self.person.contentIds())\n self.failUnless('test-course' in [c.id for c in self.person.getCourses()])", "def test_certificate_validations():\n course_runs = CourseRunFactory.create_batch(2)\n programs = ProgramFactory.create_batch(2)\n\n course_runs[0].course.page.certificate_page.save_revision()\n course_runs[1].course.page.certificate_page.save_revision()\n\n programs[0].page.certificate_page.save_revision()\n programs[1].page.certificate_page.save_revision()\n\n course_certificate = CourseRunCertificateFactory(\n course_run=course_runs[0],\n 
certificate_page_revision=course_runs[\n 1\n ].course.page.certificate_page.get_latest_revision(),\n )\n program_certificate = ProgramCertificateFactory(\n program=programs[0],\n certificate_page_revision=programs[\n 1\n ].page.certificate_page.get_latest_revision(),\n )\n\n # When the revision doesn't match the courseware\n with pytest.raises(\n ValidationError,\n match=f\"The selected certificate page {course_certificate} is not for this course {course_runs[0].course}.\",\n ):\n course_certificate.clean()\n\n with pytest.raises(\n ValidationError,\n match=f\"The selected certificate page {program_certificate} is not for this program {programs[0]}.\",\n ):\n program_certificate.clean()", "def validate_testdata(self):\r\n self._get_tcorder()\r\n for line in self.data:\r\n if not line.startswith(\" \"):\r\n tcname = line.strip(\"\\n\")\r\n continue\r\n if \"[Setup]\" in line:\r\n if \"depends\" in line:\r\n line = line.strip(\"\\n\").split(\"depends\")[1][1:]\r\n depends = line.split()[0].split(',')\r\n self._check_dependency(tcname, depends)\r\n\r\n if self.dependency:\r\n msg = \"Test cases are not in proper dependency order.\\n\"\r\n for i in self.dependency:\r\n msg = msg + i\r\n logger.warn(msg, console=False)\r\n notify.message(msg)\r\n raise DependencyException(msg)\r\n else:\r\n msg = \"Testcases are in correct dependency order.\"\r\n logger.warn(msg)\r\n notify.message(msg)", "def test_init(self):\n test_order = Order(\"1\", \"Large\", \"Thin\", \"Cheese\")\n self.assertEqual(test_order.quantity, \"1\")\n self.assertEqual(test_order.size, \"Large\")\n self.assertEqual(test_order.crust, \"Thin\")\n self.assertEqual(test_order.toppings, \"Cheese\")", "def testProtractedNSESanityChecks(self):\n self.assertGreater(self.c3.get_species_richness(1), self.c2.get_species_richness(1))\n self.assertLess(self.c4.get_species_richness(1), self.c3.get_species_richness(1))" ]
[ "0.6418377", "0.6131915", "0.5973272", "0.59159654", "0.5835521", "0.5678925", "0.56525654", "0.5622847", "0.55975693", "0.55640495", "0.55434185", "0.5528798", "0.55012167", "0.54883766", "0.54165864", "0.539526", "0.53826475", "0.5382484", "0.5381499", "0.5373344", "0.5369267", "0.53634447", "0.53475976", "0.53415257", "0.5331142", "0.53071415", "0.5304982", "0.52964115", "0.52892596", "0.528757" ]
0.67318577
0
Find a text label, describing a provided CSV column, to use for an axis.
def get_label(column): for key, label in column_to_label.items(): if key in column: return label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _label(self, column):\n # XXX\n return column", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return 'Column %d' % (self.index + 1)", "def GetColLabelValue(self, col):\n label = self.column_label(self.colsel[col])\n \n labels = []\n for mark in sorted(self.marks.keys()):\n if self.colsel[col] in self.marks[mark]:\n labels.append(mark)\n\n if labels:\n return label + \"\\n(\" + ','.join(labels) + ')'\n else:\n return label", "def column_label(self, rawcol):\n label = self.colLabels[rawcol]\n\n try:\n idx = self.dynamic_cols.index(rawcol)\n except ValueError:\n pass\n else:\n if self.dynamic_expressions[idx]:\n label = self.dynamic_expressions[idx]\n\n #custom labels overrides automic column labels\n custom_label = self.column_labels_custom.get(rawcol)\n if custom_label:\n label = custom_label\n\n return label", "def getLabelColumn(self):\n return self.getOrDefault(self.labelColumn)", "def getAxisLabel(self, dim=0):\n return self.__axis_labels__[dim]", "def label(self, row: Dict[str, str]) -> str:\n\n return row['Annotation']", "def get_target(csv, text = False):\n y_mapping = {'BL1':0, 'PA1':1, 'PA2':2, 'PA3':3,'PA4':4}\n \n idx = max( csv.find('PA'), csv.find('BL'))\n label = csv[idx:idx+3]\n if text:\n return label\n return y_mapping[label]", "def pandas_find_post_label_str(index, dataframe):\n return dataframe.at[index, 'label']", "def _get_data_labels(sheet, row, col):\n final_column = col\n header_row = _FIELDS['cell_value']['header']['row']\n # Abstract this sort of thing\n header = sheet.cell(row + header_row, final_column).value\n while any(header.startswith(label) for label\n in _FIELDS['isotherm tabular']['labels']):\n final_column += 1\n header = sheet.cell(row + header_row, final_column).value\n return [sheet.cell(row + header_row, i).value for i in\n range(col, final_column)]", "def column_index(self, column_label):\n return self.column_labels.index(column_label)", "def test_get_dim_label_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['label'] == 'Unemployment rate')", "def findLabel(row):\n if row[\"Max score\"] == row[\"TSG\"]:\n label = \"TSG\"\n elif row[\"Max score\"] == row[\"OG\"]:\n label = \"OG\"\n return label", "def _curve_labels(self, x_axis, sample, ylabel):\n return str(sample), x_axis.capitalize(), sample", "def label_extraction(self) -> None:\n self.df[\"label\"] = self.df[\"y\"]", "def _get_column(self, column_or_label):\n c = column_or_label\n if isinstance(c, collections.Hashable) and c in self.column_labels:\n return self[c]\n else:\n assert len(c) == self.num_rows, 'column length mismatch'\n return c", "def get_column_key(label: Tuple[str, ...], metrics: List[str]) -> Tuple[Any, ...]:\n parts: List[Any] = list(label)\n metric = parts[-1]\n parts[-1] = metrics.index(metric)\n return tuple(parts)", "def get_label_name(label):\n\tindex = np.argmax(label)\n\tlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\treturn labels[int(index)]", "def _get_axis_label(\n self,\n label: float | str | Mobject,\n axis: Mobject,\n edge: Sequence[float],\n direction: Sequence[float],\n buff: float = SMALL_BUFF,\n ) -> Mobject:\n\n label = self.x_axis._create_label_tex(label)\n label.next_to(axis.get_edge_center(edge), direction=direction, buff=buff)\n label.shift_onto_screen(buff=MED_SMALL_BUFF)\n return 
label", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def _get_labels(data, axis=0, always=True):\n # NOTE: Previously inferred 'axis 1' metadata of 1D variable using the\n # data values metadata but that is incorrect. The paradigm for 1D plots\n # is we have row coordinates representing x, data values representing y,\n # and column coordinates representing individual series.\n if axis not in (0, 1, 2):\n raise ValueError(f'Invalid axis {axis}.')\n labels = None\n _load_objects()\n if isinstance(data, ndarray):\n if not always:\n pass\n elif axis < data.ndim:\n labels = np.arange(data.shape[axis])\n else: # requesting 'axis 1' on a 1D array\n labels = np.array([0])\n # Xarray object\n # NOTE: Even if coords not present .coords[dim] auto-generates indices\n elif isinstance(data, DataArray):\n if axis < data.ndim:\n labels = data.coords[data.dims[axis]]\n elif not always:\n pass\n else:\n labels = np.array([0])\n # Pandas object\n elif isinstance(data, (DataFrame, Series, Index)):\n if axis == 0 and isinstance(data, (DataFrame, Series)):\n labels = data.index\n elif axis == 1 and isinstance(data, (DataFrame,)):\n labels = data.columns\n elif not always:\n pass\n else: # beyond dimensionality\n labels = np.array([0])\n # Everything else\n # NOTE: We ensure data is at least 1D in _to_arraylike so this covers everything\n else:\n raise ValueError(f'Unrecognized array type {type(data)}.')\n return labels", "def find_label(self, *args):\n return _ida_hexrays.cfunc_t_find_label(self, *args)", "def GetColumnText(self, column):\r\n\r\n return self._header_win.GetColumnText(column)", "def _get_col(self, idx):\n return self.text[self._fwf.column_slices[idx]]", "def get_label(path): # get ED ES label\n label_csv = pd.read_csv(path)\n label_list = []\n trans_list = list(np.array(label_csv).astype(np.int32))\n for i in trans_list:\n temp = []\n for j in i:\n if j >= 0:\n temp.append(j)\n label_list.append(temp)\n return label_list", "def annotate(row, ax, x='x', y='y', text='name', xytext=(7, -5), textcoords='offset points', **kwargs):\n # idx = row.name\n text = row[text] if text in row else str(text)\n x = row[x] if x in row else float(x)\n y = row[y] if y in row else float(y)\n ax.annotate(text, (row[x], row[y]), xytext=xytext, textcoords=textcoords, **kwargs)\n return row[text]", "def first_label(self):\r\n return self.labels.split(',')[0]", "def _get_label(obj):\n # NOTE: BarContainer and StemContainer are instances of tuple\n while not hasattr(obj, 'get_label') and isinstance(obj, tuple) and len(obj) > 1:\n obj = obj[-1]\n label = getattr(obj, 'get_label', lambda: None)()\n return label if label and label[:1] != '_' else None", "def get_label_coords(csv_file, name):\n labels = [] # np.zeros((50, 8), dtype=float)\n for row in csv_file:\n if row[0] == name:\n labels.append(row)\n else:\n pass\n\n return labels", "def label_axis(self, name, label):\n\n axis = self._find_axis(name)\n axis.axis_label = label" ]
[ "0.70492536", "0.65161985", "0.6515418", "0.63260293", "0.60894614", "0.6023913", "0.59223855", "0.5880934", "0.5865998", "0.57208353", "0.5644971", "0.56144625", "0.55985284", "0.55886084", "0.5540931", "0.5496037", "0.5494928", "0.5494829", "0.5493561", "0.5486655", "0.54548746", "0.5435441", "0.5435145", "0.53888637", "0.53557515", "0.53111815", "0.5297283", "0.52613366", "0.52561975", "0.5248511" ]
0.7068317
0
Find all possible values of a column in the pandas.DataFrame list
def dfs_all_values(dfs, column): values = [] # loop over all (pandas.DataFrame, str) pairs for df in dfs: values.extend(df[column].tolist()) # set() removes duplicates # sorted() converts Set to List and sort the elements return sorted(set(values))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getValuesForColumn(self, columnname):\n return list(self.abundance_df[columnname].unique())", "def get_values(df):\n return df.columns.values.tolist()", "def get_binary_values(data_frame):\n all_columns = pandas.DataFrame( index = data_frame.index)\n for col in data_frame.columns:\n data = pandas.get_dummies(data_frame[col], prefix=col.encode('ascii', 'replace'))\n all_columns = pandas.concat([all_columns, data], axis=1)\n return all_columns", "def get_binary_values(data_frame):\n all_columns = pandas.DataFrame( index = data_frame.index)\n for col in data_frame.columns:\n data = pandas.get_dummies(data_frame[col], prefix=col.encode('ascii', 'replace'))\n all_columns = pandas.concat([all_columns, data], axis=1)\n return all_columns", "def get_values(self, col) :\n\n if col not in self.cols :\n raise Exception('Column %s not in data' % col)\n\n select_sql = 'SELECT \"%s\" FROM \"%s\" ORDER BY __ROWID ASC' % (col, self.name)\n cur = self.con.cursor()\n cur.execute(select_sql)\n vs = cur.fetchall()\n return [v[0] for v in vs]", "def create_value_set(self, col):\n\n value_set = set()\n\n for df in self:\n value_set.update(df[col])\n return value_set", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n values: list[str] = []\n for row in table:\n values.append(row[column])\n return values", "def normalize(column):\n value_set = set(column)\n unique_count = len(value_set)\n if unique_count == 1:\n # skip everything in this column. \n return []\n elif unique_count == 2:\n zero = list(value_set)[0]\n one = list(value_set)[1]\n normalized_column = []\n for value in column:\n normalized_column.append(1 if value == one else 0)\n return [normalized_column]\n else: \n all_values = list(value_set)\n normalized_column = []\n\n # expand into multiple columns \n for index in range(len(all_values)):\n normalized_column.append([])\n\n for value in column:\n for index in range(len(all_values)):\n normalized_column[index].append(1 if value == all_values[index] else 0)\n \n return normalized_column", "def flat_set_from_df(df, col, condition=None):\n if condition is not None:\n df = df[condition]\n lists = df[col].tolist()\n return set([item for sublist in lists for item in sublist])", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n result: list[str] = []\n for row in table:\n item: str = row[column]\n result.append(item)\n\n return result", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n result: list[str] = []\n for row in table:\n item: str = row[column]\n result.append(item)\n\n return result", "def values(self, cols=None) :\n\n if not cols or cols == self.cols :\n return self.data\n\n def extractor(col) :\n if col in self.cols :\n return self.data[self.cols.index(col)]\n else :\n return None\n \n return [extractor(col) for col in cols]", "def column_select(df,returnList = [\"x\",\"y\"]):\n df = df.sort_values(by = 'frame_id')\n return [ list(df[k]) for k in returnList]", "def create(df,column,list_):\n return df[df[column].isin(list_)]", "def column_values(table: list[dict[str, str]], column_name: str) -> list[str]:\n column_values: list[str] = []\n for row in table:\n item: str = row[column_name]\n column_values.append(item)\n return column_values", "def test_multiple(self):\n df = self.df.copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)", "def unique_values(df):\n cols = list(df.columns)\n\n for col in cols:\n uniques = (df[col]).unique()\n print(f\"{len(uniques)} unique items in {col}: 
{df[col].loc[0]},{df[col].loc[1]}, {df[col].loc[2]}...\")", "def list_unique(df):\n\n # print unique values of each column\n for col in df.columns:\n print(f\"{col}:\")\n print(f\"{list(df[col].unique())}\\n\")", "def unique (a_data,a_column) :\n return list(__np.unique(a_data[a_column]))", "def getColVals(self, col=None, include_nones=None):\n if col is None or col < 1 or col > self.nCol:\n raise SelectError(f\"bad col number {col}\")\n \n vals = []\n for ri in range(self.nRow):\n row = ri + 1\n val = self.getCellVal(row=row, col=col)\n if include_nones or not self.isEmpty(val):\n vals.append(val)\n return vals", "def powerset(iterable):\n try:\n s = list(iterable)\n column_names = list(chain.from_iterable(combinations(s, r) \n for r in range(len(s)+1)))[1:]\n col_list = []\n for i in range(6,0,-1):\n col_list += [item for item in column_names if len(item)==i]\n return col_list\n except:\n my_message = \"\"\"\n ERROR - STEP 2 (MASTER): FAILED MAKING THE POWERSET OF TECHNOLOGIES\n \"\"\"\n my_message = ' '.join(my_message.split()) + '\\n' + traceback.format_exc()\n print(my_message)\n return None", "def column_values_in_list(col, test_list):\n test = np.array([c_i in test_list for c_i in col])\n return test", "def unique_column_values(rows, column_name):\n # declare a set that guarantees no duplicates in the answer\n value_set = set()\n # for all rows, add the value of indicated column to the set\n for row in rows:\n \tvalue_set.add(row[column_name])\n return value_set", "def unique_column_values(rows, column_name):\r\n\r\n values = [] #Create an empty list\r\n for row in rows: #Iterate through each row\r\n values.append(row[column_name]) \r\n values = set(values)\r\n return values", "def getLegalVals(self, row=None, col=None): # Returns: array of legal values\n if (row is None or row < 1 or row > self.nRow\n or col is None or col < 1\n or col > self.nCol): # Safety check\n return [] \n \n usedH = {} # Add to list as found\n # Allow EMPTY\n row_vals = self.getRowVals(row)\n for row_val in row_vals:\n usedH[row_val] = 1\n\n col_vals = self.getColVals(col)\n for col_val in col_vals:\n usedH[col_val] = 1\n\n sq3_vals = self.getSq3Vals(row, col)\n for sq3_val in sq3_vals:\n usedH[sq3_val] = 1\n\n \n legal_vals = []\n for n in range(1, self.nRow+1):\n if n not in usedH:\n legal_vals.append(n) \n \n if SlTrace.trace(\"any\"):\n lvstrs = list(map(str, sorted(legal_vals)))\n SlTrace.lg(f\"getLegals(row={row}, col={col} = \"\n + \", \".join(lvstrs))\n \n return sorted(legal_vals)", "def values(self):\n return [entry.value for entry in self.table if entry.value is not None]", "def get_matching_columns(self, columns):\n result = []\n for column in columns:\n if self.match(column):\n result.append(column)\n return result", "def get_possible_values(self):\n possible_values = {}\n for f in self.__features:\n possible_values[f] = list(self.__data[f].unique())\n return possible_values", "def get_needed_columns(df, list_of_columns):\n return df[list_of_columns]", "def as_list(df: pandas.DataFrame, row=-1) -> list:\n if df is None:\n return []\n if row >= 0:\n rec = []\n for col in range(0, 13):\n rec.append(df.iat[row, col])\n return rec\n recs = []\n for row in range(df.shape[0]):\n recs.append(as_list(df, row=row))" ]
[ "0.6171805", "0.6113757", "0.5891988", "0.5879397", "0.58606875", "0.5812586", "0.57938266", "0.5787178", "0.57815653", "0.5762439", "0.5762439", "0.56766194", "0.56144273", "0.5601676", "0.5587507", "0.5523211", "0.55008084", "0.5450795", "0.5401311", "0.5399768", "0.5390941", "0.53854376", "0.53706497", "0.53641474", "0.5336809", "0.53178674", "0.5315018", "0.5261834", "0.5232556", "0.5215963" ]
0.6156369
1
Draw multiple lines y(x) using data from the dfs list on the ax subplot.
def draw_plot(ax, dfs, legend, x, y, xscale, yaxis_max): xticks = dfs_all_values(dfs, x) # loop over all pandas.DataFrame objects for df in dfs: # setting the x-column as an index is required to draw the y-column # as a function of x argument df = df.set_index(x) # plot line on the subplot df[y].plot.line(ax=ax, rot=45, marker='.') if xscale == "linear": ax.set_xscale(xscale) else: ax.set_xscale(xscale, base=2) ax.xaxis.set_major_formatter(ScalarFormatter()) ax.set_xticks(xticks) ax.set_xlabel(get_label(x)) ax.set_ylabel(get_label(y)) ax.set_ylim(bottom=0) if yaxis_max is not None: ax.set_ylim(top=float(yaxis_max)) ax.legend(legend, fontsize=6) ax.grid(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot(x, y, *dfs):\n ax = None\n for df in dfs:\n ax = df[[x, y]].set_index(x).plot(kind='line', ylim=(0, None), xlim=(0, None), ax=ax)", "def update_plot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n for line in self.lines: \n ax.draw_artist(line)", "def replot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n line.set_xdata(self.data[i].x)\n for line in self.lines: \n ax.draw_artist(line)", "def plot_datasets(datasets):\n\n\t# plt.grid(True)\n\n\tfor ds in datasets:\n\t\t(f, ax) = plt.subplots()\n\n\t\tax.grid(True)\n\n\t\tif 'xl' in ds:\n\t\t\tax.set_xlabel(ds['xl'])\n\t\tif 'yl' in ds:\n\t\t\tax.set_ylabel(ds['yl'])\n\n\t\tif 'xl' in ds and 'yl' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl'])\n\t\t\tf.canvas.set_window_title(title)\n\n\t\tif 'x' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl']) if 'title' not in ds else ds['title']\n\t\t\tf.canvas.set_window_title(title)\n\t\t\tmarker = 'y1m' in ds and ds['y1m'] or None\n\t\t\tax.plot(ds['x'], ds['y'], label=ds['yl'], marker=marker)\n\t\tif 'x2' in ds:\n\t\t\t# label = \"y2\" if 'y2l' not in ds else ds['y2l']\n\t\t\tlabel = 'y2l' in ds and ds['y2l'] or 'y2'\n\t\t\tmarker = 'y2m' in ds and ds['y2m'] or None\n\t\t\tax.plot(ds['x2'], ds['y2'], label=label, marker=marker)\n\t\t\tax.legend()\n\t\tif 'x3' in ds:\n\t\t\t# label = \"y3\" if 'y3l' not in ds else ds['y3l']\n\t\t\tlabel = 'y3l' in ds and ds['y3l'] or 'y3'\n\t\t\tmarker = 'y3m' in ds and ds['y3m'] or None\n\t\t\tax.plot(ds['x3'], ds['y3'], label=label, marker=marker)\n\t\t\tax.legend()\n\n\t\tif 'sub' in ds:\n\t\t\tfor sub in ds['sub']:\n\t\t\t\t# ax.set_ylabel(sub['yl'])\n\t\t\t\t# ax.set_xlabel(sub['xl'])\n\t\t\t\t# title = \"%s from %s\" % (sub['yl'], sub['xl']) if 'title' not in sub else sub['title']\n\t\t\t\t# f.canvas.set_window_title(title)\n\n\t\t\t\tlabel = 'yl' in sub and sub['yl']\n\t\t\t\tmarker = 'ym' in sub and sub['ym'] or None\n\t\t\t\tax.plot(sub['x'], sub['y'], label=label, marker=marker)\n\t\t\t\tax.legend()\n\n\t\tax.spines['left'].set_position('zero')\n\t\tax.spines['bottom'].set_position('zero')\n\t\tax.spines['left'].set_smart_bounds(True)\n\t\tax.spines['bottom'].set_smart_bounds(True)\n\n\tplt.show()", "def plot_dat_file(dat_paths: [str]):\n import pandas as pd\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(1, 3, sharey=\"all\", sharex=\"col\", figsize=(8, 6))\n for i, dat_path in enumerate(dat_paths):\n if i == i:\n skipfoot = 11 + 9\n else:\n skipfoot = 11\n dat_file = pd.read_csv(\n dat_path,\n skiprows=3,\n skipfooter=skipfoot,\n header=None,\n delim_whitespace=True,\n engine=\"python\",\n )\n depth = dat_file.values[:, 0]\n vp = dat_file.values[:, 1]\n vs = dat_file.values[:, 3]\n dens = dat_file.values[:, 5]\n\n ax[0].plot(vp, depth, label=f\"nr {i}\")\n\n ax[1].plot(vs, depth)\n ax[2].plot(dens, depth)\n ax[0].set_ylim(ax[0].get_ylim()[::-1])\n ax[0].legend()\n plt.show()", "def plot_lines(self):\n self.plot(3)", "def multi_line_plot(x_data, y_data, title, x_label, y_label):\n plt.figure(1, (18, 8)) # something, plot size\n plt.subplot(111)\n legend = []\n for i in range(len(x_data)):\n plt.plot(x_data[i], y_data[i])\n legend.append((i+1))\n plt.title(title)\n plt.xlabel(x_label, fontsize=12)\n plt.ylabel(y_label, fontsize=12)\n plt.legend(legend, loc='upper left')\n plt.show()", "def plot(self, *args, **kwargs):\r\n lines = super(RadarAxes, self).plot(*args, **kwargs)\r\n for line in lines:\r\n self._close_line(line)", "def plot(self, 
*args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)", "def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)", "def plotLines( self ):\n \n ## plot tree in dfs manner\n def plotLines( node_id ):\n\n node = self.mTree.node( node_id )\n\n left = self.mNodeWidthsStart[node_id]\n right = self.mNodeWidthsEnd[node_id]\n height = self.mNodeHeights[node_id] \n\n if right != left and node_id != self.mTree.root:\n self.addElements( self.mDecoratorHorizontalBranches.getElements(\n node_id,\n self.getHeaderWidth() + left,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height ))\n \n\n for s in node.succ:\n\n new_height = self.mNodeHeights[s]\n self.addElements( self.mDecoratorVerticalBranches.getElements(\n node_id,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height,\n self.getHeaderHeight() + new_height ))\n \n TreeTools.TreeDFS( self.mTree, self.mTree.root,\n pre_function = plotLines )", "def multi_plot(data, fname=None):\n for entry in data['data']:\n plt.plot(entry['x'], entry['y'], label=entry['label'])\n\n plt.title(data['title'])\n plt.xlabel(data['x_label'])\n plt.ylabel(data['y_label'])\n\n #plt.legend(loc='best')\n\n Plotter.show(data['title'], fname=fname)", "def replot(self,ax):\n self.XP_Plotter.replot(ax)\n # theoretical lines\n self.lines_theory[0].set_xdata(self.xx)\n self.lines_theory[1].set_xdata(self.xx)\n self.lines_theory[2].set_xdata(self.xx_itpl)\n for line in self.lines_theory: \n ax.draw_artist(line)", "def update_plot(frame):\n global plotdata\n while True:\n try:\n data = q.get_nowait()\n except queue.Empty:\n break\n shift = len(data)\n plotdata = np.roll(plotdata, -shift, axis=0)\n plotdata[-shift:, :] = data\n for column, line in enumerate(lines):\n line.set_ydata(plotdata[:, column])\n return lines", "def plot2D(*dfs, columns=None, figsize=(5, 5), plot_titles=False):\n fig, ax = plt.subplots(figsize=figsize)\n\n for df, color in zip(dfs, cycle(COLORS)):\n X, Y = (df[col] for col in columns)\n plt.scatter(X, Y, c=color, marker=MARKER)\n\n for axis, col in zip(['x', 'y'], columns):\n getattr(ax, f'set_{axis}label')(col)\n\n if plot_titles:\n for df in dfs:\n for i, j, text in zip(df.iloc[:, 0], df.iloc[:, 1], df.index):\n corr = 2\n ax.annotate(text, xy=(i + corr, j + corr))\n\n plt.show()", "def plot_epochs(epochs, y, line):\n ep = np.arange(0, epochs)\n if hasattr(y[0], '__len__'):\n for i in range(len(y[0])):\n plt.plot(ep, [val[i] for val in y], line[i])\n else:\n plt.plot(ep, y, line)\n plt.show()", "def plot_mult_timetrends(data, geoids, cols, area, colors, markers, sharex,\n ylim_bottom = -150, ylim_top = 150, ylabel = 'Pct change in mobility', xlabels=None):\n ax = plt.axes(area, sharex = None)\n \n cols = cols\n plt.hlines(0,data.num_date.min(),data.num_date.max())\n i = 0\n for y in cols:\n pts = y[:12]\n \n# lim = ylim\n# plt.xlabel('date', fontsize=18)\n plt.ylabel(ylabel, fontsize=22)\n\n plt.yticks(fontsize=30) \n\n x_locator = FixedLocator(data.num_date[np.arange(0,data.shape[0],7)].tolist())\n ax.xaxis.set_minor_locator(x_locator)\n plt.grid(axis='x', which = 'both') \n \n plt.plot(data['num_date'], data[y], color = colors[i], linewidth=5)\n i = i+ 1\n plt.xticks(ticks = data.num_date[np.arange(0,data.shape[0],28)].tolist(),\n labels = xlabels, rotation=30, ha='right',\n fontsize=30)\n plt.ylim(ylim_bottom,ylim_top)\n\n return ax", "def 
func_double_yaxis_data_subplot_show(data_plot_cfg_dic_nlst, axes_cfg_dic_lst,\n nrow, ncol, x_label, y1_label, y2_label,\n sub_titles=None, anno_text_lst=None, fig_title_lst=None,\n fig_size=None, subplot_fit_rect=None):\n\n # Create a figure\n fig, axs = plt.subplots(nrow, ncol, figsize=fig_size)\n\n # Plot the double-axis data for each subplot\n for index, ax in enumerate(axs.flat):\n ax1, ax2 = func_double_yaxis_data_plot(ax, data_plot_cfg_dic_nlst[index], axes_cfg_dic_lst,\n x_label, y1_label, y2_label)\n\n # Config the figure\n if index == 0:\n fontsize_label = axes_cfg_dic_lst[0].get('fontsize_label', 14)\n ax1.set_ylabel(y1_label, color='k', fontsize=fontsize_label)\n ax2.label_outer() # It seems label_outer() doesn't work for ax2, so I remove ytick labels manually\n ax2.set_yticklabels([])\n elif index == (ncol - 1):\n fontsize_label = axes_cfg_dic_lst[1].get('fontsize_label', 14)\n ax2.set_ylabel(y2_label, color='k', fontsize=fontsize_label)\n ax1.label_outer()\n\n ax1.get_legend().remove() # Remove individual legend for each subplot\n ax2.get_legend().remove() # Remove individual legend for each subplot\n # ax1.label_outer()\n # ax2.label_outer()\n\n # Define appearance\n func_matlab_style(ax)\n\n if fig_title_lst is not None:\n ax.set_title(fig_title_lst[index], fontweight='bold')\n if sub_titles is not None:\n ax.text(-25, -43, sub_titles[index], fontsize=11, fontweight='bold')\n if anno_text_lst is not None:\n ax.text(axes_cfg_dic_lst[0]['xlim'][0]+10, -5, anno_text_lst[index], fontsize=8)\n # ax.set_aspect('equal')\n\n ax1_handles, ax1_labels = ax1.get_legend_handles_labels()\n ax2_handles, ax2_labels = ax2.get_legend_handles_labels()\n handles = ax1_handles + ax2_handles\n labels = ax1_labels + ax2_labels\n fontsize_legend = axes_cfg_dic_lst[0].get('fontsize_legend', 12)\n fig.legend(handles, labels, ncol=4, loc='lower center', prop={'size': fontsize_legend})\n\n fig.tight_layout(rect=subplot_fit_rect) # otherwise the right y-label is slightly clipped\n plt.show()", "def plot_data_matplotlib(df_data):\n # creating the figure and subplots as two rows and one column\n fig, ax = plt.subplots(2, 1)\n # defining the colours used for the plots and y axes\n red = \"#DA2525\"\n blue = \"#003A78\"\n # setting up the subplots to share the x axis\n # ax02 is the second y axis of the first subplot\n ax02 = ax[0].twinx()\n # ax12 is the second y axis of the second subplot\n ax12 = ax[1].twinx()\n # the global co2 line plot\n line1 = ax[0].plot(\n df_data.index,\n df_data[\"global_co2\"],\n label=\"Global $CO_2$ Emissions\",\n color=blue\n )\n # the global temperature line plot\n line2 = ax02.plot(\n df_data.index,\n df_data[\"global_temp\"],\n label=\"Global Temperature Anomaly\",\n color=red\n )\n # the uk co2 line plot\n line3 = ax[1].plot(\n df_data.index,\n df_data[\"uk_co2\"],\n label=\"UK $CO_2$ Emissions\",\n color=blue\n )\n # the uk temperature line plot\n line4 = ax12.plot(\n df_data.index,\n df_data[\"uk_temp\"],\n label=\"UK Surface Temperature\",\n color=red\n )\n # the next three dataframes are used to indicate where there are gaps in\n # the data, which I will use to produce a shaded region to highlight this\n # fact\n # for the global temperature data\n global_temp_nan = df_data[pd.isna(df_data[\"global_temp\"])]\n # for the UK temperature data\n uk_temp_nan = df_data[pd.isna(df_data[\"uk_temp\"])][:-1]\n # for the UK co2 emissions data\n uk_co2_nan = df_data[pd.isna(df_data[\"uk_co2\"])][:-2]\n # creating a shaded region to show the missing global temperature data\n 
ax[0].axvspan(\n global_temp_nan.index[0],\n global_temp_nan.index[-1],\n alpha=0.1,\n color=\"black\"\n )\n # creating a shaded region to show the missing UK co2 data\n ax[1].axvspan(\n uk_temp_nan.index[0],\n uk_co2_nan.index[-1],\n alpha=0.1,\n color=\"black\"\n )\n # creating a shaded region to show the missing UK temperature data\n ax[1].axvspan(\n uk_co2_nan.index[-1],\n uk_temp_nan.index[-1],\n alpha=0.05,\n color=\"black\"\n )\n # setting titles for the figure and subplots\n ax[0].set_title(\"{}{}{}\".format(\n \"Global and UK \",\n \"$CO_2$ Emissions and Surface Temperature over Time\",\n \"\\n\\nGlobal\"))\n ax[1].set_title(\"UK\")\n # setting axes labels\n ax[1].set_xlabel(\"Time (years)\")\n ax[0].set_ylabel(\"$CO_2$ Emissions (Tg)\", color=blue)\n ax02.set_ylabel(\"Temperature Anomaly (°C)\", color=red)\n ax[1].set_ylabel(\"$CO_2$ Emissions (Tg)\", color=blue)\n ax12.set_ylabel(\"Temperature (°C)\", color=red)\n # setting x axes limits so both subplots are over the same range\n ax[0].set_xlim((df_data.index[0], df_data.index[-1]))\n ax[1].set_xlim((df_data.index[0], df_data.index[-1]))\n # setting the x axes tick values\n ax[0].set_xticks([d for d in df_data.index if d.year % 20 == 0])\n ax[1].set_xticks([d for d in df_data.index if d.year % 20 == 0])\n # setting y axes colours to match the line plots\n ax[0].tick_params(\"y\", colors=blue)\n ax02.tick_params(\"y\", colors=red)\n ax[1].tick_params(\"y\", colors=blue)\n ax12.tick_params(\"y\", colors=red)\n # annotating the shaded regions\n ax[0].annotate(\n \"No temperature data available\",\n (\"1760-01-01\", 4000)\n )\n ax[1].annotate(\n \"No data available\",\n (\"1760-01-01\", 300)\n )\n ax[1].annotate(\n \"No temperature data available\",\n (\"1850-01-01\", 500)\n )\n # setting the legends \n ax[0].legend(\n line1 + line2,\n [\n line1[0].get_label(),\n line2[0].get_label(),\n ],\n loc=2\n )\n ax[1].legend(\n line3 + line4,\n [\n line3[0].get_label(),\n line4[0].get_label()\n ],\n loc=2\n )\n plt.show()\n return", "def plot_subplots(x_list, y_list, z_list):\n # create a line chart with the average rating of the top movies per year\n # min rating = 0 and max = 10\n plot1 = plt.subplot(211)\n plt.plot(x_list, y_list, color = 'lightseagreen')\n plt.axis([START_YEAR, END_YEAR - 1, 0, 10])\n plt.title('Average IMDB Movie Rating per Year', fontsize=12)\n plt.ylabel('Average Rating')\n plt.grid(True)\n\n # make x ticklabels of plot1 invisible\n plt.setp(plot1.get_xticklabels(), visible=False)\n\n # adjust space between subplots\n plt.subplots_adjust(hspace=0.3)\n\n # create a line chart with the average runtime with shared x-axis\n plot2 = plt.subplot(212, sharex=plot1)\n plt.plot(x_list, z_list, color = 'lightseagreen')\n plt.title('Average IMDB Movie Runtime per Year', fontsize=12)\n plt.ylabel('Average Runtime (min)')\n plt.grid(True)\n\n # define axes, with all years (2008 till 2017) on the x-axis\n # min runtime = 0, max runtime = 180\n plt.axis([START_YEAR, END_YEAR - 1, 0, 180])\n plt.xticks(x_list)\n plt.xlabel('Year')\n\n # plot both the subplots\n plt.show()", "def dline_tdepl_array(lines,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, axs = plt.subplots(len(lines), sharex='col',\\\n figsize=(6,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n for i,ax in enumerate(axs):\n\n dline_tdepl(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[1],nGal=p.nGals[1],add=True,cb=True)\n 
dline_tdepl(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add=True,cb=False)\n\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/dlines_tdepl_array_%s%s%s_%s%s_%s.png' % (p.ext,p.grid_ext,p.table_ext,p.sim_name,p.sim_run,p.select), format='png', dpi=300)", "def etio_subplot(df, ax, title, graph_color='skyblue'):\n\n post_dx_histo = histo_dx_includes(df)\n hist_df = pd.DataFrame({\"Dx\": post_dx_histo.index, \"Count\": post_dx_histo.data})\n #hist_df = hist_df.drop(1)\n print(hist_df)\n\n graph_range = range(1,len(hist_df.index)+1)\n ax.hlines(y=graph_range, xmin=0, xmax=hist_df['Count'], color=graph_color)\n ax.plot(hist_df['Count'], graph_range, \"D\", color=graph_color)\n ax.set_yticks(range(1, len(hist_df['Dx'])+1))\n ax.set_yticklabels(hist_df['Dx'], fontsize='10')\n\n ax.set_title(title, fontsize='10')\n return ax", "def seas_line_subplot(rows, cols, df, fwd=None, **kwargs):\n fig = make_subplots(\n cols=cols,\n rows=rows,\n specs=[[{\"type\": \"scatter\"} for x in range(0, cols)] for y in range(0, rows)],\n subplot_titles=kwargs.get(\"subplot_titles\", None),\n )\n\n chartcount = 0\n for row in range(1, rows + 1):\n for col in range(1, cols + 1):\n # print(row, col)\n if chartcount > len(df):\n chartcount += 1\n continue\n\n dfx = df[df.columns[chartcount]]\n fwdx = None\n if fwd is not None and len(fwd) > chartcount:\n fwdx = fwd[fwd.columns[chartcount]]\n\n showlegend = True if chartcount == 0 else False\n\n traces = cptr.seas_plot_traces(\n dfx, fwd=fwdx, showlegend=showlegend, **kwargs\n )\n\n for trace_set in [\"shaded_range\", \"hist\", \"fwd\"]:\n if trace_set in traces:\n for trace in traces[trace_set]:\n fig.add_trace(trace, row=row, col=col)\n\n chartcount += 1\n\n legend = go.layout.Legend(font=dict(size=10))\n fig.update_xaxes(\n tickvals=pd.date_range(start=str(dates.curyear), periods=12, freq=\"MS\"),\n tickformat=\"%b\",\n )\n title = kwargs.get(\"title\", \"\")\n fig.update_layout(\n title=title,\n title_x=0.01,\n xaxis_tickformat=\"%b\",\n legend=legend,\n margin=preset_margins,\n )\n return fig", "def _plot(self, df, head, title, lines, verbose: bool = False):\n fig = go.Figure(layout=set_layout())\n\n if isinstance(lines, str):\n lines = [lines]\n elif not isinstance(lines, list):\n raise ValueError(\"Only string or list is valid type for lines.\")\n\n for n in lines:\n fig.add_trace(self._plot_line(df, head=head, y=n, line_name=n.upper()))\n\n if verbose:\n fig.add_trace(self._plot_stock_data(self._df, head))\n\n fig.update_layout(\n title_text=f\"{title} Chart ({self.stock_code})\",\n xaxis_rangeslider_visible=False,\n )\n fig.show()", "def draw_lists_pyplot(y_array, line_weight=3, learnig_rate=1):\n y = y_array\n plt.plot(y, lw=line_weight, label='cost(a={:})'.format(learnig_rate))\n plt.legend()\n\n plt.title(\"Gradient Descent Optimizing Method\\nminimize cost function\")\n plt.xlabel('time-itoration')\n plt.ylabel('cost-function')\n\n plt.xlim(0,)\n plt.ylim(0,)\n\n plt.grid(b=None, which='major', axis='both')\n plt.show()", "def draw_lists_pyplot(y_array, line_weight=3, learnig_rate=1):\n y = y_array\n plt.plot(y, lw=line_weight, label='cost(a={:})'.format(learnig_rate))\n plt.legend()\n\n plt.title(\"Gradient Descent Optimizing Method\\nminimize cost function\")\n plt.xlabel('time-itoration')\n plt.ylabel('cost-function')\n plt.xlim(0,)\n plt.ylim(0,)\n\n plt.grid(b=None, which='major', axis='both')\n plt.show()", "def 
dyplot(self, x, y, name, dir):\n fig, ax1 = plt.subplots(figsize=(6, 4), dpi=500, facecolor='white')\n ax1.plot(x, '-b*', ms=2, linewidth=1)\n ax1.set_xlabel('Epoch', fontsize=9)\n ax1.set_ylabel('Discriminator Loss per Epoch', fontsize=9, color='b')\n ax1.tick_params('y', colors='b')\n\n ax2 = ax1.twinx()\n ax2.plot( y, '-r*', ms=2, linewidth=1)\n ax2.set_ylabel('Generator Loss per Epoch', fontsize=9, color='r')\n ax2.tick_params('y', colors='r')\n fig.tight_layout()\n plt.savefig('{}/{}.png'.format(dir, 'Loss-Adversarial-' + name))\n plt.close()", "def dline_dSFR_array(lines,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, axs = plt.subplots(len(lines), sharex='col',\\\n figsize=(6,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n for i,ax in enumerate(axs):\n\n dline_dSFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[1],nGal=p.nGals[1],add_obs=p.add_obs,MS=p.MS,add=True,cb=True)\n dline_dSFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=False,add=True,cb=False)\n\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/dlines_dSFR_array_%s%s%s_%s%s_%s.png' % (p.ext,p.grid_ext,p.table_ext,p.sim_name,p.sim_run,p.select), format='png', dpi=300)", "def multiplot(self, x, y, **kwargs):\n\n # --------------------------------------------------------------------------------------------- #\n # Attributes\n self._evalKwargs(kwargs)\n # Remove the previous and create the new framework\n plt.delaxes(self.ax)\n count = 0\n colcount = 0\n # Get the min and max values of the X-axis\n xmin = []\n xmax = []\n for i in range( len(x) - 1):\n if hasattr(x[i][0], \"__len__\"):\n for j in range( len(x[i]) - 1):\n xmin.append( min(x[i][j]) )\n xmax.append( max(x[i][j]) )\n else:\n xmin.append( min(x[i]) )\n xmax.append( max(x[i]) )\n if self.xmin is not None:\n xmin = [self.xmin]\n if self.xmax is not None:\n xmax = [self.xmax]\n deltaX = max(xmax) - min(xmin)\n xmin = min(xmin) - 0.05*deltaX\n xmax = max(xmax) + 0.05*deltaX\n\n # --------------------------------------------------------------------------------------------- #\n # Iterate over the number of subplots \n for nSP in range( len(self.prop) ):\n # --------------------------------------------------------------------------------------------- #\n # Initialize the subplot properties\n self.ax = plt.subplot2grid( (sum(self.prop), 1), (count, 0), rowspan=self.prop[nSP])\n count += self.prop[nSP] # Keep track of the size of the plot\n # Extract the errors if any are given\n if self.yerr is not None:\n yerrSP = self.yerr[nSP]\n if self.xerr is not None:\n xerrSP = self.xerr[nSP] \n # Set the y-axis and x-axis scales\n try:\n ymode = self.ymode[colcount]\n except:\n ymode = self.ymode\n self.ax.set_yscale(ymode)\n self.ax.set_xscale(self.xmode)\n\n # --------------------------------------------------------------------------------------------- #\n # Iterate over the different curves to plot in the same subplot\n if hasattr(y[nSP][0], \"__len__\"):\n for nCurv in range( len(y[nSP]) ):\n # Read the plot properties\n try: color = self.color[colcount]\n except: color = self.color\n try: mksize = self.mksize[colcount]\n except: mksize = self.mksize\n try: alpha = self.alpha[colcount]\n except: alpha = self.alpha\n try: ncol = self.ncol[colcount]\n except: ncol = self.ncol\n try: loc = self.loc[colcount]\n except: loc = self.loc\n try: legend = 
self.label[colcount]\n except: legend = self.label \n try: lstyle = self.lstyle[colcount]\n except: lstyle = self.lstyle\n try: mktype = self.mktype[colcount]\n except : mktype= self.mktype\n\n # Extract the errors if any are given\n if (self.yerr is not None) and (hasattr(self.yerr[nSP][nCurv], \"__len__\")):\n yerrnCurv = self.yerr[nSP][nCurv]\n else:\n yerrnCurv = None\n if (self.xerr is not None) and (hasattr(self.xerr[nSP][nCurv], \"__len__\")):\n xerrnCurv = self.xerr[nSP][nCurv] \n else:\n xerrnCurv = None\n\n # Plot limits as down-arraows\n if (self.limit is not None) and (self.limit[nSP][nCurv]):\n self.ax.errorbar(x[nSP][nCurv], y[nSP][nCurv], xerr=xerrnCurv, \n yerr=[yerrnCurv, np.zeros( len(yerrnCurv) )], fmt='none', \n ecolor=color, elinewidth=0.5, alpha=alpha, capsize=0, \n barsabove=False, lolims=False, uplims=False, xlolims=False, \n xuplims=False, errorevery=1, capthick=None, zorder=nCurv, legend=None)\n self.ax.plot(x[nSP][nCurv], y[nSP][nCurv]-yerrnCurv, marker='v',\n color=color, alpha=alpha, markersize=mksize, linestyle='',\n markeredgecolor=color, zorder=nCurv)\n # Fill an area between y[nSP][0][0] and y[nSP][0][1]\n #elif hasattr(y[nSP][nCurv], \"__len__\"):\n # self.ax.fill_between(x[nSP][nCurv], y[nSP][nCurv][0], y[nSP][nCurv][1], facecolor=self.color, edgecolor='none', alpha=0.5,\n # rasterized=self.raster, zorder=-10)\n # Plot a 'normal' curve\n else:\n if (legend is not None) and (legend != 'None') :\n graph = self.ax.errorbar(x[nSP][nCurv], y[nSP][nCurv], yerr=yerrnCurv, \n xerr=xerrnCurv, fmt=mktype, ecolor=color, elinewidth=0.5, capsize=0,\n linestyle=lstyle, markerfacecolor=color, markeredgecolor=color, \n color=color, markersize=mksize, label=legend, linewidth=self.lwdth, \n barsabove=False, errorevery=1, capthick=None, alpha=alpha, zorder=nCurv)\n # Handling of the labels of the curves\n handles, labels = self.ax.get_legend_handles_labels()\n handle_list, label_list = [], []\n for k in xrange( len(labels) ):\n if labels[k] in self.label:\n handle_list.append(handles[k])\n label_list.append(labels[k])\n self.ax.legend(handle_list, label_list, loc=\"best\", prop={'size':self.ftsize2},\n frameon=True, numpoints=1, ncol=ncol, handletextpad=0.1)\n else:\n graph = self.ax.errorbar(x[nSP][nCurv], y[nSP][nCurv], yerr=yerrnCurv,\n xerr=xerrnCurv, fmt=mktype, ecolor=color, elinewidth=0.5, capsize=0,\n linestyle=lstyle, markerfacecolor=color, markeredgecolor=color, \n color=color, markersize=mksize, alpha=alpha, linewidth=self.lwdth,\n barsabove=False, errorevery=1, capthick=None, zorder=nCurv)\n colcount += 1\n # --------------------------------------------------------------------------------------------- #\n # There is only one curve per subplot\n else:\n # Read the plot properties\n try: color = self.color[colcount]\n except: color = self.color\n try: mksize = self.mksize[colcount]\n except: mksize = self.mksize\n try: alpha = self.alpha[colcount]\n except: alpha = self.alpha\n try: ncol = self.ncol[colcount]\n except: ncol = self.ncol\n try: loc = self.loc[colcount]\n except: loc = self.loc\n try: legend = self.label[colcount]\n except: legend = self.label \n try: lstyle = self.lstyle[colcount]\n except: lstyle = self.lstyle\n try: mktype = self.mktype[colcount]\n except : mktype= self.mktype\n\n # Extract the errors if any are given\n if (self.yerr is not None) and (hasattr(self.yerr[nSP], \"__len__\")):\n yerrSP = self.yerr[nSP]\n else:\n yerrSP = None\n if (self.xerr is not None) and (hasattr(self.xerr[nSP], \"__len__\")):\n xerrSP = self.xerr[nSP] \n else:\n 
xerrSP = None\n # Plot\n if (self.limit is not None) and (self.limit[nSP]):\n self.ax.errorbar(x[nSP], y[nSP], xerr=xerrSP, \n yerr=[yerrSP, np.zeros( len(yerrSP) )], fmt='none', \n ecolor=color, elinewidth=0.5, alpha=alpha, capsize=0, \n barsabove=False, lolims=False, uplims=False, xlolims=False, \n xuplims=False, errorevery=1, capthick=None, legend=None)\n self.ax.plot(x[nSP], y[nSP]-yerrSP, marker='v',\n color=color, alpha=alpha, markersize=mksize, linestyle='',\n markeredgecolor=color)\n else:\n self.ax.errorbar(x[nSP], y[nSP], yerr=yerrSP, xerr=xerrSP, fmt=mktype, ecolor=color,\n elinewidth=0.5, capsize=0, linestyle=lstyle, markerfacecolor=color, \n markeredgecolor=color, markersize=mksize, label=legend, alpha=alpha, color=color,\n barsabove=False, errorevery=1, capthick=None)\n colcount += 1\n if legend is not None:\n # Handling of the labels of the curves\n self.ax.legend(loc=\"best\", prop={'size':self.ftsize2}, frameon=True, numpoints=1,\n ncol=ncol, handletextpad=0.1)\n handles, labels = self.ax.get_legend_handles_labels()\n handle_list, label_list = [], []\n for k in xrange(len(labels)):\n if labels[k] in self.label:\n handle_list.append(handles[k])\n label_list.append(labels[k])\n self.ax.legend(handle_list, label_list, loc=\"best\", prop={'size':self.ftsize2}, \n frameon=True, numpoints=1, ncol=ncol, handletextpad=0.1)\n\n # --------------------------------------------------------------------------------------------- #\n # Make pretty each subplot\n\n # Shift the x-label\n self.ax.yaxis.set_label_coords(self.labelx, 0.5)\n # Set the y-label for each subplot\n self.ax.set_ylabel(self.ylabel[nSP], fontsize=self.ftsize1, multialignment='center')\n self._plotDisplay()\n\n # Dimensions\n self.ax.set_xlim(xmin, xmax) # Every subplot has the same x-axis \n ymin, ymax = self.ax.get_ylim()\n try: ymin = self.ymin[nSP]\n except: pass\n try: ymax = self.ymax[nSP]\n except: pass\n self.ax.set_ylim(ymin, ymax) \n\n # Draw a horizontal line\n if (self.hline is not None) and (self.hline[nSP] is not None):\n # Multiple h-line to draw\n self.ax.axhline(y=self.hline[nSP], color='black', linestyle=':')\n # Fill an area\n if self.fill is not None:\n #self.ax.fill_between(x[nSP][nCurv], y[nSP][nCurv][0], y[nSP][nCurv][1], facecolor=self.color, edgecolor='none', alpha=0.5,\n # rasterized=self.raster, zorder=-10)\n for k in range(len(self.fill)/2):\n self.ax.axvspan(self.fill[k*2], self.fill[k*2+1], facecolor=self.shadecol, \n edgecolor=\"none\", linewidth=0., zorder=-10, alpha=0.5)\n # For all upper subplot, remove the last ticks\n if nSP != len(self.prop)-1:\n plt.setp(self.ax.get_xticklabels(), visible=False)\n self.ax.set_xlabel('')\n ymincheck, ymaxcheck=self.ax.get_ylim()\n if ymaxcheck > ymincheck:\n self.ax.get_yticklabels()[0].set_visible(False)\n else: # in case of a revert y axis...\n self.ax.get_yticklabels()[-1].set_visible(False)\n\n self.f.subplots_adjust(hspace=0)", "def plot_many_y(x, y, yer=None, xlabel = None, ylabel = None, ynames = None, label = None, domain=None,\n yrange = None, undertext =None, savedir = None, marker=None, markerstyles=None, plotspecs = None, groupings=None,\n groupings_labels_within = None, vlines = None, legend_title=None, n_legend_columns=None, text=None, linestyles=None,\n colors=None, save=None):\n if save is None:\n save = True\n if savedir is None:\n save_dir = os.getcwd()\n else:\n save_dir = savedir\n if marker is None:\n marker = False\n if vlines is None:\n vlines = []\n if isinstance(vlines, float):\n vlines = [vlines]\n if n_legend_columns is 
None:\n n_legend_columns = 1\n\n if markerstyles is None:\n my_marker_styles = [st for st in marker_styles]\n else:\n my_marker_styles = [st for st in markerstyles]\n if groupings_labels_within is None:\n groupings_labels_within = False\n\n if linestyles is None:\n my_line_styles = [ls for ls in line_styles]\n else:\n my_line_styles = [ls for ls in linestyles]\n\n\n #in case linestyle -- comes up\n dashes = (10, 25)\n dashes = [20,55]\n dashes = [40, 40]\n dashes = [5, 5]\n dash_width_factor = 2\n dash_width_factor = 1.5\n\n number_y = len(y)\n\n if groupings is None:\n grouped = False\n #print([\"hi\" for _ in range(number_y_num)])\n groupings = [{ii} for ii in range(number_y)]\n else:\n grouped = True\n\n # Make sure all the elements are in a colour grouping\n if grouped:\n extra_group = set()\n for i in range(number_y):\n in_a_group = False\n for seti in groupings:\n for el in seti:\n if i == el:\n if not in_a_group:\n in_a_group = True\n #else:\n #print el, ' in two colour groups'\n if not in_a_group:\n extra_group.add(i)\n\n if len(groupings) == 1:\n if ynames is not None:\n if len(ynames) == number_y:\n grouped = False\n\n\n default_plot_specs = copy.deepcopy(default_plot_specs_all)\n default_plot_specs['legend_font'] = {'size': 8}\n default_plot_specs['legend_anchor'] = 'upper right'\n default_plot_specs['legend_loc'] = (0.98, -0.1)\n\n if marker:\n default_plot_specs['x_scale'] = 0.05\n else:\n default_plot_specs['x_scale'] = 0\n\n text_heights = [-0.023, -0.069, -0.115,-0.161]\n\n if plotspecs is not None:\n for stat in list(default_plot_specs.keys()):\n if stat in plotspecs:\n default_plot_specs[stat] = plotspecs[stat]\n\n the_label = ''\n\n if domain is not None:\n xlow = domain[0]\n xhigh = domain[1]\n for ii in range(number_y):\n klow = x[ii].index(find_nearest(x[ii],xlow))\n khigh = x[ii].index(find_nearest(x[ii], xhigh))\n #khigh = x[ii].index(find_nearest_above(x[ii], xhigh))\n x[ii] = x[ii][klow:khigh]\n y[ii] = y[ii][klow:khigh]\n if yer:\n yer[ii] = yer[ii][klow:khigh]\n if yrange is not None:\n ylow = yrange[0]\n yhigh = yrange[1]\n if xlabel is None:\n x_label = ''\n else:\n x_label = xlabel\n if ylabel is None:\n y_label = ''\n the_label = 'y_' +str(number_y) +'_'\n else:\n y_label = ylabel\n the_label += y_label[:4] +'_'\n if ynames is None:\n y_names = []\n else:\n y_names = ynames\n if label is None:\n the_label = the_label + 'vs_' +x_label\n else:\n the_label = label\n\n under_text = []\n if undertext is not None:\n under_text = undertext[:]\n\n if marker:\n rcParams['legend.numpoints'] = 1\n\n plt.clf()\n\n fig = plt.figure(figsize=default_plot_specs['fsize'], dpi=default_plot_specs['dpi'])\n ax_1 = fig.add_subplot(111)\n\n if default_plot_specs['xlog']:\n ax_1.set_xscale('log')\n if default_plot_specs['ylog']:\n ax_1.set_yscale('log')\n\n if grouped:\n mycolors = cm.rainbow(np.linspace(0, 1, len(groupings)))\n else:\n mycolors = cm.rainbow(np.linspace(0, 1, number_y))\n color_dict = dict()\n line_style_dict = dict()\n marker_style_dict = dict()\n\n\n ynames_dict = dict()\n custom_legend_entries_dict = dict()\n display_leg_numbers = []\n\n add_dummy_ynames = False\n if ynames is not None:\n if len(ynames) == len(groupings):\n if len(groupings) != len(y):\n # if only the first element of each group is named\n add_dummy_ynames = True\n if not groupings_labels_within:\n display_leg_numbers = [kk for kk in range(len(ynames))]\n elif not groupings_labels_within:\n display_leg_numbers = [kk for kk in range(len(ynames))]\n elif not groupings_labels_within:\n 
display_leg_numbers = [kk for kk in range(len(ynames))]\n\n\n for seti, jj in zip(groupings, range(len(groupings))):\n for k,ii in zip(sorted(list(seti)), range(len(seti))):\n #jj is the group number\n #ii is the number within the set\n #k is the number in the ylist\n if colors is None:\n if grouped:\n color_dict[k] = mycolors[jj]\n else:\n color_dict[k] = mycolors[k]\n\n else:\n if grouped:\n color_dict[k] = colors[jj]\n else:\n color_dict[k] = colors[k]\n if grouped:\n marker_style_dict[k] = my_marker_styles[ii]\n line_style_dict[k] = my_line_styles[ii]\n else:\n # print(k)\n # print(markerstyles)\n if markerstyles is None:\n marker_style_dict[k] = default_plot_specs['marker_style']\n else:\n marker_style_dict[k] = markerstyles[k]\n if linestyles is None:\n line_style_dict[k] = default_plot_specs['linestyle']\n else:\n line_style_dict[k] = linestyles[k]\n if add_dummy_ynames:\n if ii == 0: # if the first in the set\n ynames_dict[k] = ynames[jj]\n else:\n ynames_dict[k] = 'dummy'\n\n\n\n if groupings_labels_within:\n\n if ii == 0:\n display_leg_numbers.append(k)\n\n # Create custom artists\n if marker:\n markstyli = marker_style_dict[k]\n style = line_style_dict[k]\n if markstyli and not style:\n capsizi = default_plot_specs['cap_size']\n else:\n capsizi = None\n if line_style_dict[k] == '--':\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k', marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n dashes=dashes)\n else:\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k', marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n linestyle=style,\n linewidth=default_plot_specs['linewidth'])\n else:\n if line_style_dict[k] == '--':\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k', dashes=dashes,\n linewidth=dash_width_factor*default_plot_specs['linewidth'])\n else:\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k',\n linestyle=style,\n linewidth=default_plot_specs['linewidth'])\n\n if add_dummy_ynames:\n ynames = [ynames_dict[k] for k in range(number_y)]\n # Create custom artists\n\n simArtist = plt.Line2D((0, 1), (0, 0), color='k', marker='o', linestyle='')\n anyArtist = plt.Line2D((0, 1), (0, 0), color='k')\n\n #print color_dict\n\n # print 'printing ynames in funct'\n # print ynames\n #print 'yname dict', ynames_dict\n\n hl = False\n for jj in range(number_y):\n coli = color_dict[jj]\n style = line_style_dict[jj] # '--' #'None'\n thickness = default_plot_specs['linewidth']\n if style == '--':\n thickness = thickness*dash_width_factor\n hl = True\n hl_num = 3.6\n dashi = True\n else:\n dashi = False\n if marker:\n if yer is None:\n markstyli = marker_style_dict[jj]\n if ynames is None or jj>len(ynames)-1 or not ynames[jj]:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, marker=markstyli\n , markersize=default_plot_specs['marker_size'],\n dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, marker=markstyli, linestyle=style\n , markersize=default_plot_specs['marker_size'],\n linewidth=thickness)\n else:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, label=ynames[jj], marker=markstyli\n , markersize=default_plot_specs['marker_size'],\n dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, label=ynames[jj], marker=markstyli,\n linestyle=style, markersize=default_plot_specs['marker_size'],\n linewidth=thickness)\n # else:\n # ax_1.plot(x[jj], y[jj], color=coli,linestyle=style)\n else:\n if ynames is None or jj > len(ynames) - 1:\n if dashi:\n 
ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness,dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness, linestyle=style)\n else:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness,label=ynames[jj],dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness, linestyle=style,\n label=ynames[jj])\n\n\n\n if yer is not None:\n\n # ax_1.plot(x[jj], yer_datas_high, color=coli,\n # label=y_names[jj] + ' + SE', linestyle='--')\n # ax_1.plot(x[jj], yer_datas_low, color=coli,\n # label=y_names[jj] + ' - SE', linestyle='--')\n if marker:\n markstyli = marker_style_dict[jj]\n if markstyli and not style:\n capsizi = default_plot_specs['cap_size']\n else:\n capsizi = None\n if ynames is None or jj > len(ynames) - 1:\n if dashi:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n linewidth=default_plot_specs['linewidth'],dashes=dashes)\n else:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n linewidth=default_plot_specs['linewidth'],linestyle=style)\n else:\n if dashi:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n label=y_names[jj],\n linewidth=default_plot_specs['linewidth'],dashes=dashes)\n else:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n label=y_names[jj],\n linewidth=default_plot_specs['linewidth'],linestyle=style)\n else:\n yer_datas_high = [y_i + y_er_i for y_i, y_er_i in zip(y[jj], yer[jj])]\n yer_datas_low = [y_i - y_er_i for y_i, y_er_i in zip(y[jj], yer[jj])]\n ax_1.plot(x[jj], yer_datas_high, color=coli, linestyle='--',dashes=dashes)\n ax_1.plot(x[jj], yer_datas_low, color=coli, linestyle='--',dashes=dashes)\n\n if default_plot_specs['yrotation'] is 'vertical':\n if default_plot_specs['ylabelspace'] ==0:\n ax_1.set_ylabel(y_label, **default_plot_specs['axis_font'])\n else:\n labpad = int(default_plot_specs['axis_font']['size'])*default_plot_specs['ylabelspace']\n ax_1.set_ylabel(y_label,labelpad=labpad, **default_plot_specs['axis_font'])\n else:\n labpad =int(default_plot_specs['axis_font']['size'])*3\n #ax_1.set_ylabel(y_label,rotation=plotspecs['yrotation'],labelpad=int(labpad), **default_plot_specs['axis_font'])\n ax_1.set_ylabel(y_label, rotation=default_plot_specs['yrotation'],labelpad=labpad, horizontalalignment = 'center',verticalalignment ='center',\n **default_plot_specs['axis_font'])\n\n\n # Set the tick labels font\n for labeli in (ax_1.get_xticklabels() + ax_1.get_yticklabels()):\n # labeli.set_fontname('Arial')\n labeli.set_fontsize(default_plot_specs['ticksize'])\n\n ax_1.set_xlabel(x_label, **default_plot_specs['axis_font'])\n\n\n xlow, xhigh = min(x[0]), max(x[0])\n for xx in x[1:]:\n mycopy_low = [g for g in copy.deepcopy(xx)]\n mycopy_high = [g for g in copy.deepcopy(xx)]\n mycopy_low.append(xlow)\n mycopy_high.append(xhigh)\n xlow, xhigh = min(mycopy_low), max(mycopy_high)\n # set axes limits\n if domain is None:\n extra = (xhigh-xlow)*default_plot_specs['x_scale']\n xlow -= extra\n xhigh +=extra\n\n\n #Make vertical lines\n for xfloat in vlines:\n if xlow < xfloat < xhigh:\n ax_1.axvline(x=xfloat,color = default_plot_specs['vlinecolor'],linestyle= default_plot_specs['vlinestyle'],linewidth=default_plot_specs['vlineswidth'])\n\n # if not marker:\n # xhigh 
-= 15\n\n if yrange is None:\n if y:\n if y[0]:\n if yer is not None:\n ylow, yhigh = min([yi-yi_er for yi, yi_er in zip(y[0],yer[0])]), max([yi+yi_er for yi, yi_er in zip(y[0],yer[0])])\n else:\n ylow, yhigh = min(y[0]), max(y[0])\n else:\n ylow, yhigh = 0, 0\n else:\n ylow, yhigh = 0, 0\n if yer is not None:\n for yy, yy_er in zip(y[1:],yer[1:]):\n ylow, yhigh = min([ylow] + [yi-yi_er for yi, yi_er in zip(yy,yy_er)]), max([yhigh]+ [yi+yi_er for yi, yi_er in zip(yy,yy_er)])\n else:\n for yy in y[1:]:\n ylow, yhigh = min([ylow] + yy), max([yhigh] + yy)\n extra = (yhigh-ylow)*default_plot_specs['y_scale']\n ylow -= extra\n yhigh +=extra\n\n\n ax_1.set_xlim(xlow, xhigh)\n ax_1.set_ylim(ylow, yhigh)\n\n while under_text:\n texti = under_text.pop(0)\n plt.figtext(0.08, text_heights.pop(0), texti, default_plot_specs['undertext_font'])\n\n if text:\n ax_1.text(default_plot_specs['text_loc'][0], default_plot_specs['text_loc'][1], text,\n verticalalignment='bottom', horizontalalignment='right',\n transform=ax_1.transAxes,\n color=default_plot_specs['text_color'], fontsize=default_plot_specs['text_size'])\n\n #print 'display_leg_numbers', display_leg_numbers\n\n\n if default_plot_specs['xshade']:\n ax_1.axvspan(default_plot_specs['xshade'][0], default_plot_specs['xshade'][1], alpha=0.3, color=default_plot_specs['xshade_color'])\n\n if ynames:\n # print 'the display leg numbers '\n # print display_leg_numbers\n\n handles, labels = ax_1.get_legend_handles_labels()\n handles = [handle for i,handle in enumerate(handles) if i in display_leg_numbers]\n labels = [label for i,label in enumerate(labels) if i in display_leg_numbers]\n if groupings_labels_within:\n mini = min(len(list(custom_legend_entries_dict.keys())),len(groupings_labels_within))\n handles += [custom_legend_entries_dict[k] for k in range(mini)]\n labels += groupings_labels_within[:mini]\n if hl:\n lgd = ax_1.legend(handles, labels, loc=default_plot_specs['legend_anchor'],\n bbox_to_anchor=default_plot_specs['legend_loc'],\n prop=default_plot_specs['legend_font'], ncol=n_legend_columns,handlelength=hl_num)\n else:\n lgd = ax_1.legend(handles, labels, loc=default_plot_specs['legend_anchor'],\n bbox_to_anchor=default_plot_specs['legend_loc'],\n prop=default_plot_specs['legend_font'], ncol=n_legend_columns)\n\n if legend_title:\n lgd.set_title(legend_title,prop=default_plot_specs['legend_font'])\n\n plt.setp(lgd.get_title(), multialignment='center')\n\n # if hl:\n # print 'doing hl 2'\n # ax_1.legend(handlelength=2)\n\n\n if default_plot_specs['nxticks'] > 0:\n #visible_labelsx = [lab for lab in ax_1.get_xticklabels() if lab.get_visible() is True and lab.get_text() != '']\n for lab in ax_1.get_xticklabels():\n lab.set_visible(True)\n visible_labelsx = [lab for lab in ax_1.get_xticklabels() if lab.get_visible() is True]\n visible_labelsx=visible_labelsx[1::default_plot_specs['nxticks']]\n plt.setp(visible_labelsx, visible = False)\n #\n #ax_1.set_xticks(visible_labelsx[1::2])\n #plt.setp(visible_labels[1::2], visible=False)\n #ax_1.locator_params(axis='x', nticks=default_plot_specs['nxticks'])\n #\n if default_plot_specs['nyticks'] > 0:\n # #ax_1.locator_params(axis='y', nticks=default_plot_specs['nyticks'])\n visible_labelsy = [lab for lab in ax_1.get_yticklabels() if lab.get_visible() is True]\n if len(visible_labelsy) > 4:\n visible_labelsy = visible_labelsy[2:-2]\n plt.setp(visible_labelsy, visible=False)\n\n #plt.grid('off')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n save_dir = os.path.join(save_dir,'%s.png' % 
the_label)\n\n if save:\n save_fig(fig, save_dir)\n else:\n return fig, save_dir" ]
[ "0.75955755", "0.6639895", "0.63147664", "0.6244438", "0.6018296", "0.5990527", "0.59632486", "0.5877029", "0.58101994", "0.58101994", "0.5800447", "0.5794655", "0.5775401", "0.57584023", "0.5737174", "0.5724625", "0.5724219", "0.57178116", "0.56640327", "0.56584495", "0.56467277", "0.5619865", "0.56068057", "0.5601182", "0.55957174", "0.5587734", "0.5585871", "0.5565191", "0.552624", "0.550852" ]
0.7199761
1
Draw a table of all data used to chart y(x)
def draw_table(ax, dfs, legend, x, y):
    col_labels = dfs_all_values(dfs, x)
    column_legend = []
    cell_text = []
    # loop over all pandas.DataFrame objects
    for df in dfs:
        # to allow query y(x) easily
        df = df.set_index(x)
        df_row = df[y]
        # build a row with filled blanks '-'
        row = ["{:.2f}".format(df_row[column]) if column in df_row.index else '-' \
            for column in col_labels]
        cell_text.append(row)
    ax.axis('tight')
    ax.axis('off')
    ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \
        loc='top')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n device.draw()\n\n df=device.export_all()\n\n if self.labels_bottom is not None:\n\n index=self.labels_bottom[i]\n\n else:\n\n index=str(i)\n\n print(\"Generating table, item {} of {}\\r\".format(print_index,len(param)),end=\"\")\n\n data_tot=data_tot.append(Series(df,name=index))\n\n device._set_params(base_params)\n\n return data_tot", "def plot_data(self):", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def table(\n data=None,\n rows=None,\n columns=None,\n hrows=None,\n brows=None,\n lcols=None,\n rcols=None,\n label=None,\n width=None,\n height=None,\n ):\n canvas = Canvas(width=width, height=height)\n axes = canvas.table(\n data=data,\n rows=rows,\n columns=columns,\n hrows=hrows,\n brows=brows,\n lcols=lcols,\n rcols=rcols,\n label=label)\n return canvas, axes", "def __draw(self, state:dict):\n _, ax = plt.subplots()\n ax.set_axis_off()\n tb = Table(ax, bbox=[0,0,1,1])\n\n width = height = 1.0 /9 \n\n\n for key in self.state.keys():\n # Add cells\n i,j = self.__display_table_map[key]\n tb.add_cell(i, j, width, height, text='{}'.format(state[key]), \n loc='center',facecolor= self.__color_map[key])\n\n ax.add_table(tb)\n plt.show()", "def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )", "def plotly_table():\n model_data = your_choice()\n model_data[\"test_prediction\"] = list(model_data[\"test_prediction\"])\n \n df = pd.DataFrame(model_data[\"test_prediction\"], columns=[\"test_prediction\"])\n for k,v in model_data.items():\n if k != \"test_prediction\":\n df[k] = str(v)\n\n fig = a_libraries.plotly_table(df)\n\n return fig", "def plot_table(timestamps: dict, threadList: list, mList: list) -> None:\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()", "def create_tables(times, accuracies, batch_sizes):\r\n #Get time data\r\n p_cpu_times = list(times[0].values())\r\n p_gpu_times = list(times[1].values())\r\n c_cpu_times = list(times[2].values())\r\n c_gpu_times = list(times[3].values())\r\n\r\n #Get differences in times\r\n p_diff_times = [a - b for a, b in zip(p_cpu_times, p_gpu_times)]\r\n c_diff_times = [a - b for a, b in zip(c_cpu_times, c_gpu_times)]\r\n cpu_diff_times = [a - b for a, b in zip(p_cpu_times, c_cpu_times)]\r\n gpu_diff_times = [a - b for a, b in zip(p_gpu_times, c_gpu_times)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_times,\r\n p_gpu_times,\r\n p_diff_times,\r\n c_cpu_times,\r\n c_gpu_times,\r\n c_diff_times,\r\n cpu_diff_times,\r\n gpu_diff_times]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Time (s)', 'P GPU Time (s)', 'P 
Diff (s)', 'C CPU Time (s)', 'C GPU Time (s)', 'C Diff (s)', 'CPU Diff (s)', 'GPU Diff (s)')\r\n row_colors = plt.cm.BuPu(np.linspace(0, 0.5, n_rows))\r\n col_colors = np.array([192/255,192/255,192/255, 1])\r\n col_colors = np.repeat(col_colors.reshape((1, col_colors.shape[0])), len(columns), axis=0)\r\n\r\n #Create table\r\n plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_time.png')\r\n\r\n\r\n #Get accuracy table\r\n #Get accuracy data\r\n p_cpu_accuracy = list(accuracies[0].values())\r\n p_gpu_accuracy = list(accuracies[1].values())\r\n c_cpu_accuracy = list(accuracies[2].values())\r\n c_gpu_accuracy = list(accuracies[3].values())\r\n\r\n #Get max of each batch\r\n p_cpu_max = [max(x) for x in p_cpu_accuracy]\r\n p_gpu_max = [max(x) for x in p_gpu_accuracy]\r\n c_cpu_max = [max(x) for x in c_cpu_accuracy]\r\n c_gpu_max = [max(x) for x in c_gpu_accuracy]\r\n\r\n #Get differences in accuracies\r\n p_diff_acc = [a - b for a, b in zip(p_cpu_max, p_gpu_max)]\r\n c_diff_acc = [a - b for a, b in zip(c_cpu_max, c_gpu_max)]\r\n cpu_diff_acc = [a - b for a, b in zip(p_cpu_max, c_cpu_max)]\r\n gpu_diff_acc = [a - b for a, b in zip(p_gpu_max, c_gpu_max)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_max,\r\n p_gpu_max,\r\n p_diff_acc,\r\n c_cpu_max,\r\n c_gpu_max,\r\n c_diff_acc,\r\n cpu_diff_acc,\r\n gpu_diff_acc]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Acc (%)', 'P GPU Acc (%)', 'P Diff (%)', 'C CPU Acc (%)', 'C GPU Acc (%)', 'C Diff (%)', 'CPU Diff (%)', 'GPU Diff (%)')\r\n\r\n #Create table\r\n plt.clf()\r\n plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_acc.png')", "def draw(x, y):\n\t\n\t##The length of the 'dott' sequence can be adjusted and the rest of the\n\t\t#drawing will adjust itself after reloading\"\"\"\n\tdott = \" ----- \"\n\tpipe = \"|\"\n\t\n\tprint \"\\n\"\n\tif y: print dott * x + \"\\n\"\n\tfor i in xrange(y):\n\t\t#Though not very readable, the line below is responsible for determining how long\n\t\t\t#one y(vertical) cell should be and printinng as many pipes along the y axis\n\t\t\t #after considering the width of a cell(x-axis unit) \n\t\t#The initial part before the final times sign prints 1 + the number of \n\t\t\t#cells along the x axis (rows) inorder to close last cell \n\t\t\t#the calculation of the spacing of the pipes was determined after testing\n\t\t\t\t#for the best fit\n\t\t\n\t\tprint ((\" \"*(len(dott)-1)).join(iter(pipe*(x+1))) + \"\\n\") * (len(dott) / 2)\n\t\t\n\t\tprint dott*x + \"\\n\"", "def make_plot(x,y):", "def data_table(self, X, y, models_predictions):\n models_predictions = assess_models_names(models_predictions)\n base_color = self.plot_design.base_color_tints[0]\n\n # formatter for y and prediction columns to color and style them separately\n cols = [TableColumn(\n field=y.name,\n 
title=y.name,\n formatter=HTMLTemplateFormatter(template=self._model_column_template.format(color=base_color))\n )]\n\n # predictions\n _ = []\n i = 0\n for model, predictions in models_predictions:\n if i == 0:\n color = self.plot_design.models_color_tuple[0]\n i += 1\n else:\n color = self.plot_design.models_color_tuple[1]\n\n predictions = pd.Series(predictions, name=model).round(6)\n _.append(predictions)\n cols.append(\n TableColumn(\n field=model,\n title=model,\n formatter=HTMLTemplateFormatter(template=self._model_column_template.format(color=color)))\n )\n\n for col in X.columns:\n cols.append(TableColumn(field=col, title=col))\n scores = pd.DataFrame(_).T # by default, wide table is created instead of a long one\n\n # final DataFrame and DataTable\n df = pd.concat([y, scores, X], axis=1)\n source = ColumnDataSource(df)\n dt = DataTable(source=source, columns=cols, editable=False, sizing_mode=\"stretch_width\")\n\n return dt", "def on_scatter_toolbar_table_click(self):\n #print('*** on table click ***')\n row = self.myTableWidget.currentRow()\n if row == -1 or row is None:\n return\n yStat = self.myTableWidget.item(row,0).text()\n self.myParent.replot()", "def visualize_data(y_test, x_test, window_out, num_plots, num_win_ser, cols_y, col_idx):\n \n \n ser_idx = [i for i in range(0, len(y_test), num_win_ser)]\n if num_plots > len(ser_idx):\n print(\"Too many plots, reduce the mumber\")\n else:\n indx = ser_idx[0:num_plots]\n days = range(num_win_ser)\n for idx in indx:\n CR = x_test[idx][0][3]\n #pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]\n true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]\n \n plt.title(\"Y_True, CR: \"+ str(CR))\n plt.xlabel('Days')\n plt.ylabel(cols_y[col_idx])\n \n #plt.plot(days, pred, label = 'Pred')\n plt.plot(days, true, label = 'True')\n \n plt.legend()\n plt.show()", "def DrawBarGraph(connection, table_name, y_axis_field, x_axis_values,\n x_axis_field, arrangement):\n def GetListsFromDB(x_axis_values, x_axis_field, connection,\n table_name, y_axis_field, category):\n \"\"\"This function returns lists of values of a field from the DB.\n\n The function returns lists of `y_axis_field` for the values corresponding to\n the `x_axis_values` in `x_axis_field`.\n Args:\n x_axis_values: a list of values for which the `y_axis_field` will be\n fetched for.\n x_axis_field: name of the field for x_axis\n connection: the connection to the database\n table_name: name of the table in the database which has the data\n y_axis_field: the name of the column in the table, whose data will be put\n into the list\n category: Direct or Envoy or which category the data belong to\n Returns:\n Returns a list of lists with all the values of `y_axis_field`\n corresponding to `x_axis_values`.\n \"\"\"\n lists = list()\n for x in x_axis_values:\n condition = (\"where {}=\\\"{}\\\" and\"\n \" category=\\\"{}\\\"\").format(x_axis_field, x, category)\n single_list = db_utils.SingleColumnToList(db_utils.GetFieldFromTable(\n connection, table_name, field=y_axis_field, cond=condition))\n if not single_list:\n print(\"{} {} is not found in table for {} results.\".format(\n x_axis_field, x, category))\n single_list = [0]\n\n lists.append(single_list)\n return lists\n\n direct_lists = GetListsFromDB(x_axis_values, x_axis_field, connection,\n table_name, y_axis_field,\n \"direct-{}\".format(arrangement))\n envoy_lists = GetListsFromDB(x_axis_values, x_axis_field, connection,\n table_name, y_axis_field,\n \"envoy-{}\".format(arrangement))\n\n def 
GetMeansAndStdsFromList(lists):\n \"\"\"This function returns the means and standard deviation of lists.\n\n Args:\n lists: A list of lists. Each list inside the top-level list consists\n of a sample for a given variable that summary stats will be computed on.\n Returns:\n A pair of list containing means and standard deviations.\n \"\"\"\n means = [np.mean(single_list) for single_list in lists]\n stds = [np.std(single_list) for single_list in lists]\n return means, stds\n\n direct_means, direct_std = GetMeansAndStdsFromList(direct_lists)\n envoy_means, envoy_std = GetMeansAndStdsFromList(envoy_lists)\n\n ind = np.arange(len(x_axis_values))\n width = 0.35\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind, direct_means, width, color=\"r\", yerr=direct_std)\n rects2 = ax.bar(ind + width, envoy_means, width, color=\"y\", yerr=envoy_std)\n\n ax.set_ylabel(y_axis_field)\n ax.set_xlabel(x_axis_field)\n ax.set_xticks(ind + width)\n ax.set_xticklabels(x_axis_values, rotation=\"vertical\", fontsize=8)\n # legend will be placed out of the main graph\n ax.legend((rects1[0], rects2[0]), (\"Direct\", \"Envoy\"),\n loc=\"center left\", bbox_to_anchor=(1, 0.5))\n AutoLabel(rects1, ax)\n AutoLabel(rects2, ax)\n fig.savefig(\"{} {}.png\".format(\n x_axis_field, \",\".join(str(i) for i in x_axis_values)),\n bbox_inches=\"tight\")", "def create_display_data_table():\n\n for ccd in range(0, 10):\n for node in range(0, 4):\n file = 'ccd' + str(ccd) + '_' + str(node)\n infile = data_dir + file\n outfile = web_dir + 'Data/' + file\n\n f = open(infile, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n fo = open(outfile, 'w')\n#\n#--- adding heading\n#\n line = \"#\\n#Date Mn K alpha Al K alpha Ti K alpha Slope Sigma Int Sigma\\n#\\n\"\n fo.write(line)\n for ent in data:\n atemp = re.split('\\s+', ent)\n stime = int(atemp[0])\n#\n#--- converting the date into <mon> <year> form (e.g. 
May 2013)\n#\n ltime = tcnv.axTimeMTA(stime)\n btemp = re.split(':', ltime)\n year = btemp[0]\n [mon, mdate] = tcnv.changeYdateToMonDate(int(year), int(btemp[1]))\n lmon = tcnv.changeMonthFormat(mon)\n line = lmon + ' ' + year \n for j in range(1, len(atemp)):\n line = line + '\\t' + atemp[j]\n\n line = line + '\\n'\n fo.write(line)\n fo.close()", "def paint_cells(self, data):\r\n if len(data) == 0: return\r\n col, row = zip(*data.keys())\r\n colors = tuple(data.values())\r\n if not isinstance(colors[0], Number):\r\n colors = [self.cdict[color] for color in colors] \r\n self.A[row, col] = colors\r\n self.plot()", "def fill_table(self, data):\r\n if len(data) > 0:\r\n if isinstance(data, np.ndarray):\r\n data = data.tolist()\r\n data_rows = len(data)\r\n data_columns = len(data[0])\r\n if data_columns > 0:\r\n self.setRowCount(data_rows)\r\n # We hide the imag part of the complex impedance\r\n self.setColumnCount(data_columns - 1)\r\n for r in range(0, data_rows):\r\n # Update real columns\r\n for c, realc in [(0, 0), (1, 1), (3, 4)]:\r\n item = QTableWidgetItem() \r\n item.setText(str(data[r][realc])) \r\n self.setItem(r, c, item)\r\n # Earth resistance has a hidden column which can have an imaginary number\r\n if data[r][3] != 0.0:\r\n # show complex impedance\r\n item = QTableWidgetItem() \r\n item.setText(str(np.complex(data[r][2], data[r][3])))\r\n self.setItem(r, 2, item)\r\n else:\r\n # show real impedance\r\n item = QTableWidgetItem()\r\n item.setText(str(data[r][2])) \r\n self.setItem(r, 2, item)\r\n # Last Column is a QComboBox to select phasing\r\n phasing = QComboBox()\r\n phasing.addItems([\"Normal\",\"120 degree shift\", \"240 degree shift\"])\r\n phasing.setCurrentIndex(np.real(data[r][5]))\r\n phasing.currentIndexChanged.connect(self.phasing_signal(phasing, r, 5))\r\n self.setCellWidget(r, 4, phasing)", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def outputTable(xpoints, ypoints, ypointse, outfile='completeness.txt', magType='Instrumental'):\r\n fout = open(outfile, 'w')\r\n fout.write('# '+magType+'F814W fc fce\\n')\r\n\r\n for i,_ in enumerate(xpoints):\r\n fout.write(str(xpoints[i])+' '+\"%1.3f\" %ypoints[i]+' '+\"%1.3f\" %ypointse[i]+'\\n')\r\n fout.close()", "def __str__(self):\n table_string = ''\n values = [x * y for x in range(1, self.x + 1)\n for y in range(1, self.y + 1)\n ]\n for value in range(1, len(values) + 1):\n if value % self.x == 0:\n table_string += f'{values[value - 1]}\\n'\n else:\n table_string += f'{values[value - 1]} | '\n return table_string", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n 
print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def __update_table(self):\n\n headlines = [\"\", ]\n headlines += range(1, + 1)\n headlines = [\" \"] + [str(x) for x in range(1, self.find_table_length() + 1)]\n self.__main_display_table.config(columns=headlines)\n\n for headline in headlines:\n self.__main_display_table.heading(headline, text=headline)\n self.__main_display_table.column(headline, anchor=\"center\", width=35)\n\n data = self.__display_buses_location()\n\n for i in self.__main_display_table.get_children():\n # deletes all the data in the chart\n self.__main_display_table.delete(i)\n for line in data:\n # inserts new data into the chart, goes line by line\n self.__main_display_table.insert(\"\", END, values=line)", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n df.drop(df.columns[6], axis=1, inplace=True)\n\n if len(context) > 3:\n context = context.lower().capitalize()\n df = df.loc[df[\"Country,Other\"] == context]\n if 4 > len(context) > 1:\n context = context.upper()\n df = df.loc[df[\"Country,Other\"] == context]\n if len(context) <= 1:\n df = df[1:]\n\n C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n T_Recovered = df[\"TotalRecovered\"].head(n=10).values.tolist()\n T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n x = np.arange(len(C_Names))\n width = 0.20\n\n fig, ax = plt.subplots()\n\n ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color=\"Blue\")\n ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n if len(context) > 1:\n ax.set_title(\"{}'s Situation\".format(context))\n else:\n ax.set_title(\"World's Top10 Situation\")\n\n ax.set_xticks(x)\n ax.set_xticklabels(C_Names)\n ax.legend()\n plt.ticklabel_format(style='plain', axis=\"y\")\n fig.set_size_inches(18.5, 10.5)\n fig.tight_layout()\n plt.grid()\n\n if len(context) > 1:\n font1 = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'bold',\n 'size': 20}\n font2 = {'family': 'serif',\n 'color': 'red',\n 'weight': 'normal',\n 'size': 20}\n font3 = {'family': 'serif',\n 'color': 'green',\n 'weight': 'normal',\n 'size': 20}\n font4 = {'family': 'serif',\n 'color': 'orange',\n 'weight': 'normal',\n 'size': 20}\n\n # bbox=dict(facecolor='black', alpha=0.5)\n plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n # plt.savefig('corona.png') # Uncomment it to save the figure\n plt.show()", "def test_2d_plot(self):\n db = 
pd.HDFStore('test.h5')\n df_iv = db['iv']\n dates = df_iv[df_iv['dte'] == 30]['date']\n impl_vols = df_iv[df_iv['dte'] == 30]['impl_vol']\n db.close()\n\n print df_iv.sort_values('impl_vol').head()\n\n plt.plot(dates, impl_vols)\n plt.xlabel('date')\n plt.ylabel('impl_vols')\n plt.show()", "def plottable(scores):\n\n y = []\n x = []\n for key in sorted(scores.keys()):\n if sum(scores[key]) != 0:\n y.append(math.log10(sum(scores[key]) / len(scores[key])))\n\n return y", "def DrawTimeSeriesGraph(connection, table_name, y_axis_field, time,\n arrangement):\n def GetListFromDB(time, category, y_axis_field, connection, table_name):\n condition = (\"where time_of_entry >= \\\"{}\\\" and\"\n \" category=\\\"{}\\\" Group By RunID \"\n \"Order By time_of_entry\").format(\n time, category)\n single_list = db_utils.GetFieldFromTable(\n connection, table_name,\n field=\"AVG({}), STDDEV({}), time_of_entry, RunID\".format(\n y_axis_field, y_axis_field),\n cond=condition)\n if not single_list:\n print(\"Values are not found in table for category {}.\".format(\n category))\n return None\n\n return single_list\n\n direct_list = GetListFromDB(time, \"direct-{}\".format(arrangement),\n y_axis_field, connection, table_name)\n envoy_list = GetListFromDB(time, \"envoy-{}\".format(arrangement),\n y_axis_field, connection, table_name)\n\n if direct_list:\n direct_means, direct_std = zip(*direct_list)[:2]\n direct_times = [v[2].time().strftime(\"%H:%M\") if not i % 2 else \"\"\n for i, v in enumerate(direct_list)]\n else:\n raise ShowGraphError(\"Direct's data not found for time-series graph.\")\n\n if envoy_list:\n envoy_means, envoy_std = zip(*envoy_list)[:2]\n # time is not needed again but if needed, it can be taken from here\n # envoy_times = [v[2] for v in envoy_list]\n else:\n raise ShowGraphError(\"Envoy's data not found for time-series graph.\")\n\n ind = np.arange(len(direct_times))\n fig, ax = plt.subplots()\n rects1 = ax.errorbar(ind, direct_means, color=\"r\", yerr=direct_std)\n rects2 = ax.errorbar(ind, envoy_means, color=\"y\", yerr=envoy_std)\n\n ax.set_ylabel(y_axis_field)\n ax.set_xlabel(\"time\")\n ax.set_xticks(ind)\n ax.set_xticklabels(direct_times, rotation=\"vertical\", fontsize=8)\n ax.legend((rects1[0], rects2[0]), (\"Direct\", \"Envoy\"),\n loc=\"center left\", bbox_to_anchor=(1, 0.5))\n\n # Helper function to put standard deviation as labels inside the graph\n # data points\n def PutStdDevOnGraph(ax, rects, stddev):\n for i, num in enumerate(rects[0].get_xydata()):\n ax.text(num[0], 1.05*num[1],\n \"%d%%\" % int(100.0*stddev[i]/(1.0*num[1])),\n ha=\"center\", va=\"bottom\", fontsize=8)\n\n PutStdDevOnGraph(ax, rects1, direct_std)\n PutStdDevOnGraph(ax, rects2, envoy_std)\n\n fig.savefig(\"Time-{}-{}.png\".format(time, arrangement),\n bbox_inches=\"tight\")", "def table_example():\n\n print(\"\\nExample making a new table from scratch:\\n\")\n # Make a new (empty) table object\n tbl = table(\"A table with random data\")\n # Add three columns called \"x\", \"x^2\" and \"1/x\"\n tbl.addcolumn(\"x\")\n tbl.addcolumn(\"x^2\")\n tbl.addcolumn(\"1/x\")\n # Add some rows of data\n for i in range(0, 10):\n row = dict()\n row[\"x\"] = i\n row[\"x^2\"] = i * i\n if i != 0:\n row[\"1/x\"] = 1.0 / float(i)\n else:\n row[\"1/x\"] = \"?\"\n tbl.add_data(row)\n # Define some graphs\n tbl.definegraph(\"Y = X(squared)\", (\"x\", \"x^2\"))\n tbl.definegraph(\"Y = 1/X\", (\"x\", \"1/x\"))\n tbl.definegraph(\"All data\", (\"x\", \"x^2\", \"1/x\"))\n # Print out the data as a simple \"table\" and in loggraph 
markup\n print(tbl.show())\n print(tbl.loggraph())", "def create_curve(data_tab, state):\n global width, prev_index, min_temp, max_temp, max_humid, min_humid\n\n def min_max(arr, arr_size):\n \"\"\"\n Helper to get the min and max of the tab\n \"\"\"\n max_t = arr[0]\n min_t = arr[0]\n for i in range(arr_size):\n if arr[i] > max_t:\n max_t = arr[i]\n if arr[i] < min_t:\n min_t = arr[i]\n return min_t, max_t\n\n # The max difference between two temp; if greater than 8, then we need to move vertically\n min_data, max_data = min_max(data_tab, len(data_tab))\n min_max_diff = max(8, max_data - min_data)\n\n # Update min/max values of each curve\n if state == \"temp\":\n min_temp = min(min_data, min_temp)\n max_temp = max(max_data, max_temp)\n elif state == \"humid\":\n min_humid = min(min_data, min_humid)\n max_humid = max(max_data, max_humid)\n\n width = len(data_tab)\n\n normalized_data = data_tab.copy()\n\n for i in range(len(data_tab)):\n normalized_data[i] = ((data_tab[i] - min_data)*7) / min_max_diff\n\n full_data_tab = [[0 for x in range(8)] for y in range(width)]\n\n # The first data that we collected is gonna be centered on the y-axis\n base_data = normalized_data[0]\n\n # Change the base_index depending on max variation of temp\n base_index = 7 - round(base_data)\n\n # Records value for when we change displayed_data\n prev_index = -1\n for i in range(width):\n diff = round(normalized_data[i] - base_data)\n curr_index = base_index - diff\n full_data_tab[i][curr_index] = 1\n\n # COMMENT NEXT FULL BLOCK TO REMOVE VERTICAL PIXELS\n if i > 0:\n delta_index = curr_index - prev_index\n if delta_index > 1:\n for j in range(prev_index + 1, curr_index):\n full_data_tab[i][j] = 1\n if delta_index < -1:\n for j in range(curr_index + 1, prev_index):\n full_data_tab[i][j] = 1\n prev_index = curr_index\n # END OF BLOCK TO COMMENT\n\n\n return full_data_tab" ]
[ "0.64481795", "0.6369142", "0.61868083", "0.615285", "0.60729265", "0.60604787", "0.59911245", "0.5980409", "0.59740573", "0.5895259", "0.5869834", "0.5847987", "0.5836506", "0.5818547", "0.58129287", "0.5809058", "0.57461077", "0.57257223", "0.5720079", "0.5665956", "0.5658745", "0.5657095", "0.56568444", "0.56556606", "0.5653524", "0.56482875", "0.56281406", "0.5612309", "0.56049144", "0.5597462" ]
0.67767966
0
Going to a nonchunkadmin URL should be ok, and should also put the `_data_changed` parameter onto the URL.
def test_to_other_url(self):
    user = User(username='test', is_staff=True, is_superuser=True,
                is_active=True)
    user.set_password('test')
    user.full_clean()
    user.save()
    request = RequestFactory().get('/')
    response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')
    admin_instance = get_modeladmin(Iframe)
    new_response = admin_instance.maybe_fix_redirection(
        request=request, response=response_302, obj=user)
    self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa
    self.assertEqual(302, new_response.status_code)
    self.assertEqual('/admin_mountpoint/?_data_changed=1',
                     new_response['Location'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_returned_data_changed(self):\n request = RequestFactory().get('/')\n admin_instance = get_modeladmin(Iframe)\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302)\n # returned early because it was a redirect, but we updated the\n # querystring anyway\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'early')\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def test_user_page_change_page(self):\n url = reverse('admin:core_user_change', args=[self.user.id])\n # houw args workd\n # admin/core/usre/\n res = self.client.get(url)\n #checking response for 200 ok page works\n self.assertEqual(res.status_code, 200)", "def test_data_admin_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_admin_page()\n self.check_page_title(self.data_admin_config.get('PAGE_TITLE'))\n self.check_page_contains_ids(self.data_admin_config.get('ADMIN_LINKS'))", "def test_page_change_view(self):\n user = self._create_user({\"username\":\"user3\",\"email\":\"[email protected]\"})\n anotheruser = self._create_random_user(startname=\"another_user_\")\n testproject = self._create_comicsite_in_admin(user,\"user3project\") \n testpage1 = create_page_in_admin(testproject,\"testpage1\")\n testpage2 = create_page_in_admin(testproject,\"testpage2\") \n url = reverse(\"admin:comicmodels_page_change\",\n args=[testpage1.pk])\n\n self._test_url_can_be_viewed(user,url) \n self._test_url_can_be_viewed(self.root,url)\n #TODO: The permissions are not correct, https://github.com/comic/comic-django/issues/306\n #self._test_url_can_not_be_viewed(anotheruser,url)", "def post(self) :\n self.redirect('/admin')", "def test_user_change_page(self):\n # example url: /admin/cor/user/<userID>\n url = reverse('admin:core_user_change', args=[self.user.id])\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_user_change_page(self):\n\n # Get the admin url with the user id and send a GET request\n url = reverse('admin:core_user_change', args=[self.user.id])\n res = self.client.get(url)\n\n # Assertion\n self.assertEqual(res.status_code, 200)", "def test_user_change_page(self):\n url = reverse('admin:core_user_change', args=[self.user.id])\n # Works like: /admin/core/user/{id}\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def web_admin_required(handler):\n\n def check_admin(self, *args, **kwargs):\n \"\"\"\n If handler has no login_url specified invoke a 403 error\n \"\"\"\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)\n\n return check_admin", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def get(self) :\n setSessionMessageByRequest(self, \"Invalid Request\", True)\n self.redirect('/admin')", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def response_post_save_change(self, 
request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def http_method_not_allowed(self, request, *args, **kwargs):\n # Instead of just returning the standard \"method not allowed\" HTTP\n # status code, we can forward to the moderation admin\n return redirect(reverse('mod_admin'))", "def test_not_logged_cannot_update_tab(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def test_user_page_change(self):\n url = reverse('admin:core_user_change', args=[self.user.id])\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def checkForURL(self, data):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.URL_EVENT, data, self.hash)", "def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('public.home', next=request.url))", "def changelist_view(self, request, extra_context=None):\n return HttpResponseRedirect(reverse('admin:index'))", "def changelist_view(self, request, extra_context=None):\n return HttpResponseRedirect(reverse('admin:index'))", "def test_partial_update_should_not_be_allowed(self):\n response = self.client.patch(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def goToAdmin(request):\n\n\ttemplate = '../admin'\n\treturn HttpResponseRedirect(template)", "def test_url_data_present_in_url(self):\n url_data = {'anything': 'my username'}\n req = self.httpbin_3.test_requests_patch_method(url_data=url_data, dry_run=True)\n path = self.httpbin_3.client['homepage']['test_requests_patch_method']['path']\n self.assertEqual(urlparse(req.prepared_request.url).path, quote(path.format(**url_data)))", "def locking_admin_changelist_js_url(self):\n return reverse('admin:' + self.locking_admin_changelist_js_url_name)", "def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)", "def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('index'))", "def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('index'))", "def test_logentry_get_admin_url(self):\n logentry = LogEntry.objects.get(content_type__model__iexact=\"article\")\n expected_url = reverse(\n \"admin:admin_utils_article_change\", args=(quote(self.a1.pk),)\n )\n 
self.assertEqual(logentry.get_admin_url(), expected_url)\n self.assertIn(\"article/%d/change/\" % self.a1.pk, logentry.get_admin_url())\n\n logentry.content_type.model = \"nonexistent\"\n self.assertIsNone(logentry.get_admin_url())" ]
[ "0.6304963", "0.60231835", "0.5975458", "0.59199995", "0.57574075", "0.5661228", "0.56567496", "0.5646688", "0.56410176", "0.5619408", "0.5619408", "0.5565735", "0.5562113", "0.5547753", "0.5510401", "0.55066466", "0.54986465", "0.54867476", "0.54704493", "0.54627734", "0.5448876", "0.5448876", "0.5429229", "0.5409688", "0.5406041", "0.53909856", "0.5355159", "0.5354408", "0.5354408", "0.53482854" ]
0.6291411
1
If `_autoclose` is in the URL, that + `_data_changed` should propagate to the next redirect URL for the purposes of our adminlinks JS.
def test_autoclose_chunkadmin(self):
    user = User(username='test', is_staff=True, is_superuser=True,
                is_active=True)
    user.set_password('test')
    user.full_clean()
    user.save()
    admin_instance = get_modeladmin(Iframe)
    self.assertIsInstance(admin_instance, RealishAdmin)
    request = RequestFactory().get('/', {
        '_autoclose': 1,
    })
    request.user = user
    iframe_admin = reverse('admin:embeds_iframe_add')
    response_301 = HttpResponsePermanentRedirect(redirect_to=iframe_admin)
    ct = get_content_type(User)
    iframe = Iframe(position=2, region='test', content_type=ct,
                    content_id=user.pk, url='https://news.bbc.co.uk/')
    iframe.full_clean()
    iframe.save()
    new_response = admin_instance.maybe_fix_redirection(
        request=request, response=response_301, obj=iframe)
    self.assertEqual(new_response['X-Chunkadmin-Response'], 'autoclose')
    self.assertEqual(301, new_response.status_code)
    location, querystring = new_response['Location'].split('?')
    self.assertEqual('/admin_mountpoint/embeds/iframe/add/', location)
    self.assertIn('region=test', querystring)
    self.assertIn('_data_changed=1', querystring)
    self.assertIn('_autoclose=1', querystring)
    self.assertIn('content_type={0}'.format(ct.pk), querystring)
    self.assertIn('content_id={0}'.format(iframe.pk), querystring)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def response_post_save_change(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)", "def test_returned_data_changed(self):\n request = RequestFactory().get('/')\n admin_instance = get_modeladmin(Iframe)\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302)\n # returned early because it was a redirect, but we updated the\n # querystring anyway\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'early')\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def response_post_save_add(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def redirect_on_exit_url(self, redirect_on_exit_url):\n\n self._redirect_on_exit_url = redirect_on_exit_url", "def response_add(self, request, obj):\r\n\r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n\r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n\r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_add(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(xt_id_mb__gt=obj.xt_id_mb).order_by('xt_id_mb')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mbAdmin, self).response_change(request, obj)", 
"def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_lab__gt=obj.id_xt_lab).order_by('id_xt_lab')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(xtlabAdmin, self).response_change(request, obj)", "def response_post_save_change(self, request, obj):\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n return HttpResponseRedirect(url)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_mcce__gt=obj.id_xt_mcce).order_by('id_xt_mcce')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mcceAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_pc__gt=obj.id_xt_pc).order_by('id_xt_pc')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(pcAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_sust__gt=obj.id_xt_sust).order_by('id_xt_sust')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(xt_sustanciasAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_pcce__gt=obj.id_xt_pcce).order_by('id_xt_pcce')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(pcceAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_mc__gt=obj.id_xt_mc).order_by('id_xt_mc')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mcAdmin, self).response_change(request, obj)", "def redirect(url):", "def on_before_close(self):\n pass", "def redirect_to_original_url(query_short_url):\n db_url = Url.query.filter_by(short_url=query_short_url).first_or_404()\n db_url.views += 1\n db.session.commit()\n return redirect(db_url.original_url)", "def url_event_listener():\n track_template = \"<a href=\\\"{0}\\\" target=\\\"_blank\\\" onclick=\\\"trackOutboundLink('{0}'); return false;\\\"\"\n if request.method == 'POST':\n urls 
= request.form['url_textbox']\n track_urls = [track_template.format(url.strip()) for url in urls.split('\\n')]\n return render_template('link_tracking.html', links=track_urls)\n return render_template('link_tracking.html', links=[])", "def process_IN_CLOSE_WRITE(s, event):\n s.doReload(event)", "def link_new_callback(self):\n pass", "def after_link_issue(self, external_issue, **kwargs):\n pass", "def test_to_other_url(self):\n user = User(username='test', is_staff=True, is_superuser=True,\n is_active=True)\n user.set_password('test')\n user.full_clean()\n user.save()\n request = RequestFactory().get('/')\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n admin_instance = get_modeladmin(Iframe)\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302, obj=user)\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def response_post_save_change(self, request, obj):\n\n # Default response\n resp = super(StoryAdmin, self).response_post_save_change(request, obj)\n\n # Check that you clicked the button `_save_and_copy`\n if '_accept_story' in request.POST:\n # Accept the Story, and get the copy new CuratedStory\n created, problems = self.accept_story(obj)\n if created != 1:\n msg = \"Could not accept Story, one already exists for %s\"\n message = msg % obj.person.name\n self.message_user(request, message, level=messages.ERROR)\n return resp\n\n new_obj = CuratedStory.objects.get(story=obj)\n\n # Get its admin url\n opts = CuratedStory._meta\n info = opts.app_label, opts.model_name\n route = 'admin:{}_{}_change'.format(*info)\n post_url = reverse(route, args=(new_obj.pk,))\n\n # Inform the user they are now editting the CuratedStory\n self.message_user(request, \"Now editting the Curated Story\")\n\n # And redirect to it\n return HttpResponseRedirect(post_url)\n elif '_reject_story' in request.POST:\n # Reject the stories, and return the default response\n queryset = Story.objects.filter(pk=obj.pk)\n self.reject_stories(queryset)\n return resp\n else:\n # Otherwise, just use default behavior\n return resp", "def cog_unload(self):\n self.resend_post.cancel()", "def redirect_view(request, short_url):\n try:\n if request.method == 'GET':\n shortener = ShortenedURL.objects.get(short_url=short_url)\n shortener.times_visited += 1\n shortener.save()\n return HttpResponseRedirect(shortener.long_url)\n except ShortenedURL.DoesNotExist:\n return HttpResponse(status=404)", "def on_connection_closed(self):", "def after_request(self, response):\n # only track data for specified blueprints\n if self.blueprints:\n if request.blueprint not in self.blueprints:\n return response\n\n t_0 = getattr(g, 'start_time', dt.datetime.now())\n\n visit = dict(\n session_id=session.get('UUID', 0),\n timestamp=timestamp(),\n url=request.url,\n view_args=request.view_args,\n status_code=response.status_code,\n path=request.path,\n latency=(dt.datetime.now() - t_0).microseconds / 100000,\n content_length=response.content_length,\n referer=request.referrer,\n values=request.values\n )\n self.store_visit(visit)\n self.update_top_list(request.path)\n return response", "def change_abandoned(self, event):\n pass", "def html_redirect(self):\n soup = BeautifulSoup(self.contents, \"lxml\")\n meta = soup.find('meta', **{'http-equiv': 'refresh'})\n assert meta is not None, 'No <meta http-equiv=\"refresh\" /> tag 
found.'\n url = meta.get('content').partition(';url=')[2]\n self.open(url)", "def _after_serve_actions(self):\n pass" ]
[ "0.5717953", "0.56154025", "0.5552884", "0.5355236", "0.5205197", "0.5178103", "0.49210623", "0.48757377", "0.48716828", "0.48654342", "0.48438132", "0.48274982", "0.48240012", "0.48213187", "0.47654843", "0.47389874", "0.46854186", "0.46743634", "0.46646327", "0.463678", "0.46236932", "0.46202537", "0.4605772", "0.45826113", "0.45722023", "0.45464882", "0.45457536", "0.4532105", "0.4526008", "0.45164093" ]
0.5659059
1
if continue editing is hit, it should go back to the parent URL, I think?
def test_continue_editing_parent_object(self):
    user = User(username='test', is_staff=True, is_superuser=True,
                is_active=True)
    user.set_password('test')
    user.full_clean()
    user.save()
    admin_instance = get_modeladmin(Iframe)
    self.assertIsInstance(admin_instance, RealishAdmin)
    request = RequestFactory().get('/', {
        '_continue': 1,
    })
    request.user = user
    iframe_admin = reverse('admin:embeds_iframe_add')
    response_301 = HttpResponsePermanentRedirect(redirect_to=iframe_admin)
    ct = get_content_type(User)
    iframe = Iframe(position=2, region='test', content_type=ct,
                    content_id=user.pk, url='https://news.bbc.co.uk/')
    iframe.full_clean()
    iframe.save()
    new_response = admin_instance.maybe_fix_redirection(
        request=request, response=response_301, obj=iframe)
    self.assertEqual(new_response['X-Chunkadmin-Response'],
                     'redirect-to-parent')
    self.assertEqual(301, new_response.status_code)
    self.assertEqual('/admin_mountpoint/auth/user/1/?_data_changed=1',
                     new_response['Location'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def submit_and_back(self):\n self.submit(skip_confirm=True)\n self.parent().do_detail()", "def submit_and_back(self):\n self.submit(skip_confirm=True)\n self.parent().do_detail()", "def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)", "def test_edit_view(self):\n target_url = url_for('content.edit_content')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def __get_redirect_url(self):\n if self.get_submit_save_and_continue_edititing_button_name() not in self.request.POST:\n return self.request.cradmin_app.reverse_appindexurl()\n return self.request.cradmin_app.reverse_appurl(\n 'groupcomment-edit',\n args=self.args,\n kwargs=self.kwargs)", "def quit_form(self):\n self.parse_request()\n\n try:\n # Back to record list.\n # Parse list's url from the request path.\n pos = self.request.path.rfind(\"/\")\n if pos > 0:\n url = self.request.path[:pos] + \"/list.html\"\n if self.page:\n url += \"?_page=\" + str(self.page)\n return HttpResponseRedirect(url)\n except Exception, e:\n logger.log_tracemsg(\"Quit form error: %s\" % e)\n\n raise http.Http404", "def start_editing(self):\r\n if self._mode is None:\r\n self._mode = 'edit'\r\n params = {\r\n 'f' : 'json',\r\n 'sessionID' : self._guid\r\n }\r\n url = \"%s/startEditing\" % self._url\r\n res = self._con.post(url, params)\r\n return res['success']\r\n return False", "def editPage(request, title):\n entry = util.get_entry(title)\n if request.method == \"POST\":\n # check if the data is valid then save/replace old data\n form = editPageForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data[\"editTitle\"]\n content = form.cleaned_data[\"editBody\"]\n\n util.save_entry(title, content)\n\n # take user to their editted page\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": title\n }))\n # give user a editting form with existing data filled in by defult. 
\n else:\n editForm = editPageForm(initial={\n \"editTitle\": title,\n \"editBody\": entry\n })\n editFormTitle = editForm[\"editTitle\"]\n editFormBody = editForm[\"editBody\"]\n return render(request, \"encyclopedia/editPage.html\", {\n \"formTitle\": editFormTitle,\n \"formBody\": editFormBody\n })", "def edit(self):\n\n pass", "def response_post_save_change(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def after_successful_edit(self):\n pass", "def response_change(self, request, obj):\n opts = obj._meta\n\n msg = 'The menu item \"%s\" was changed successfully.' % force_unicode(obj)\n\n if \"_continue\" in request.POST:\n self.message_user(request, msg + ' ' + \"You may edit it again below.\")\n return HttpResponseRedirect(request.path)\n\n elif \"_addanother\" in request.POST:\n self.message_user(request, msg + ' ' + (\"You may add another %s below.\" % force_unicode(opts.verbose_name)))\n return HttpResponseRedirect(obj.menu_item.menu.get_add_page_url())\n\n else:\n self.message_user(request, msg)\n return HttpResponseRedirect(obj.menu_item.menu.get_edit_url())", "def edit_redirect_url(self):\n return url_for(self.edit_redirect_to_view)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_pcce__gt=obj.id_xt_pcce).order_by('id_xt_pcce')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(pcceAdmin, self).response_change(request, obj)", "def response_add(self, request, obj):\r\n\r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n\r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n\r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_add(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_lab__gt=obj.id_xt_lab).order_by('id_xt_lab')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(xtlabAdmin, self).response_change(request, obj)", "def change_view(self, request, object_id, extra_context=None):\n\n latest_draft = self.get_latest_draft(object_id)\n has_publish_perm = request.user.has_perm(\"easypublisher.can_approve_for_publication\")\n context = extra_context or {}\n\n if latest_draft:\n context['has_draft'] = latest_draft.pk\n \n if not 
context.get('current', False):\n \n if not has_publish_perm: \n return HttpResponseRedirect('drafts/%s/' % latest_draft.pk)\n \n return super(EasyPublisher, self).change_view(request, object_id, context)", "def response_change_formset(self, request, obj, post_url_continue='../../%s/%s/'):\n opts = obj._meta\n pk_value = obj._get_pk_val()\n verbose_name = opts.verbose_name\n # msg = _('The %(name)s \"%(obj)s\" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}\n msg = _(u'Изменения раздела \"%(title)s\" для %(name)s \"%(obj)s\" успешно сохранены.') % \\\n {\"title\" : force_unicode(self.formset_pages[self.page][\"title\"]), 'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}\n \n if \"_continue\" in request.POST:\n self.message_user(request, msg + ' ' + _(u\"Ниже Вы можете продолжить редактирование.\"))\n return HttpResponseRedirect(post_url_continue % (pk_value, self.page,))\n else:\n self.message_user(request, msg)\n if self.has_change_permission(request, None):\n return HttpResponseRedirect('../../')\n else:\n return HttpResponseRedirect('../../../../')", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_pc__gt=obj.id_xt_pc).order_by('id_xt_pc')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(pcAdmin, self).response_change(request, obj)", "def edit():", "def edit(self, **kwargs):\n ...", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_mcce__gt=obj.id_xt_mcce).order_by('id_xt_mcce')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mcceAdmin, self).response_change(request, obj)", "def force_edit(request, page_id):\n context = RequestContext(request)\n try:\n page = Page.objects.get(pk=page_id)\n except Exception, e:\n raise e\n page.clear_editor()\n page.clear_cache(context)\n return HttpResponseRedirect(request.META['HTTP_REFERER'])", "def post(self) :\n self.redirect('/admin')", "def activate_external_editing(self, new_doc):\n new_doc.setup_external_edit_redirect(self.request, action=\"oneoffixx\")", "def editDetail(id):\n form = EditDetailForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/edit.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editDetailSection\", id=id ,section=section))", "def edit(request, address):\n if address.startswith(\"/\"):\n address = address[1:]\n if address.endswith(\"/\"):\n address = address[:-1]\n\n # we try to find the parent. 
Creating a page without parent isn't possible.\n parent = None\n if \"/\" in address:\n parent = address.rsplit(\"/\", 1)[0]\n else:\n parent = \"\"\n\n try:\n parent = Page.objects.get(address=parent)\n except Page.DoesNotExist:\n parent = None\n\n # try to get the page itself, which might exist\n try:\n page = Page.objects.get(address=address)\n except Page.DoesNotExist:\n page = None\n\n initial = {}\n if page:\n initial[\"title\"] = page.title\n initial[\"content\"] = page.content\n\n if request.method == 'POST':\n # the form has been sent, use the different access rights\n form = PageForm(request.POST, initial=initial)\n if form.is_valid():\n title = form.cleaned_data[\"title\"]\n content = form.cleaned_data[\"content\"]\n user = request.user\n user = user if user.is_authenticated else None\n can = False\n if user and user.is_superuser:\n # the superuser can do it all\n can = True\n elif parent and page is None and parent.access(user, \"write\"):\n # the page doesn't exist, but the parent does, and the user can edit it\n can = True\n elif page and page.access(user, \"write\"):\n # the page already exist and the user can edit it\n can = True\n\n if can:\n new_page = Page.objects.create_or_update_content(address, user, content)\n new_page.title = title\n if parent is not None and page is None:\n new_page.can_write = parent.can_write\n new_page.can_read = parent.can_read\n new_page.save()\n\n return HttpResponseRedirect('/wiki/' + address)\n else:\n form = PageForm(initial=initial)\n\n return render(request, \"wiki/edit.html\", {'form': form, 'address': address, \"page\": page, \"parent\": parent})", "def on_cancel(self, keypress=None):\n self.parentApp.switchFormPrevious()", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_sust__gt=obj.id_xt_sust).order_by('id_xt_sust')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(xt_sustanciasAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(xt_id_mb__gt=obj.xt_id_mb).order_by('xt_id_mb')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mbAdmin, self).response_change(request, obj)" ]
[ "0.6385778", "0.6385778", "0.6379418", "0.62234807", "0.6166165", "0.61428994", "0.6119325", "0.6115395", "0.61025864", "0.60791975", "0.6070473", "0.6042546", "0.60110354", "0.60064095", "0.600623", "0.5988462", "0.59622556", "0.59316474", "0.590217", "0.58978534", "0.5896916", "0.5869238", "0.58592373", "0.5857044", "0.58454955", "0.5832334", "0.58213085", "0.5808154", "0.5802074", "0.57872236" ]
0.65077806
0
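A hedged sketch of the behaviour the test above asserts: when "Save and continue editing" (`_continue`) is present on the request for a child object such as the Iframe, the redirect is rewritten to point at the parent object's admin change page with `_data_changed=1` appended. The helper below is hypothetical — it is not the RealishAdmin/chunkadmin `maybe_fix_redirection` implementation — and only illustrates the rewrite with stock Django APIs.

from django.http import HttpResponsePermanentRedirect
from django.urls import reverse


def redirect_to_parent(request, response, parent_obj):
    # Hypothetical helper: leave the response untouched unless the user
    # pressed "Save and continue editing".
    if '_continue' not in request.GET and '_continue' not in request.POST:
        return response
    opts = parent_obj._meta
    parent_url = reverse(
        'admin:{}_{}_change'.format(opts.app_label, opts.model_name),
        args=[parent_obj.pk],
    )
    new_response = HttpResponsePermanentRedirect(parent_url + '?_data_changed=1')
    new_response['X-Chunkadmin-Response'] = 'redirect-to-parent'
    return new_response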
Generate immediate (different by one mismatch) neighbours of the given genome pattern
def _generate_immediate_neighbours(pattern: str) -> list:
    generated = []
    for i in range(len(pattern)):
        if pattern[i] == 'A':
            generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_A])
        elif pattern[i] == 'C':
            generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_C])
        elif pattern[i] == 'T':
            generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_T])
        elif pattern[i] == 'G':
            generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_G])
    return generated
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_neighbours(pattern: str, mismatches: int) -> set:\n neighbourhood = set()\n neighbourhood.add(pattern)\n\n curr_patterns = [pattern]\n next_patterns = []\n\n for curr_mismatches in range(mismatches):\n for curr_pattern in curr_patterns:\n for neighbour in _generate_immediate_neighbours(curr_pattern):\n if neighbour not in neighbourhood:\n neighbourhood.add(neighbour)\n next_patterns.append(neighbour)\n\n curr_patterns = next_patterns\n next_patterns = []\n\n return neighbourhood", "def neighbors(pattern, d):\n\n if d == 0:\n return [pattern]\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n neighborhood = []\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n for text in suffix_neighbors:\n hdist = compute_hamming_distance(suffix_pattern, text)\n if hdist < d:\n for n in ['A', 'C', 'G', 'T']:\n neighbor = n + text\n neighborhood.append(neighbor)\n else:\n neighbor = pattern[0] + text\n neighborhood.append(neighbor)\n return neighborhood", "def getNeighbours(seg,meta,inversedIndex):\n return np.unique(np.fromiter( (inversedIndex[x] for x in np.concatenate([meta.loc[seg]['ins'],meta.loc[seg]['outs']])),dtype=np.int))", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]", "def neighbors_generator(state: str, nurses_number=10) -> str:\n\n genes = 21 * nurses_number\n\n # Random index to change and generated the neighbor\n index = randrange(0, genes)\n\n # Here we're taking the first part of the state before the bit that will be modified\n new_state = state[0:index]\n\n # Here is modified the bit\n if state[index] == '0':\n new_state += '1'\n else:\n new_state += '0'\n\n # Here we're taking the last part of the state passed\n new_state += state[index+1:]\n\n # Here is returned the new state and the next bit to be modified\n return new_state", "def _get_neighbours(kmer):\n assert (is_dna(kmer))\n bases = 'ACTG'\n result = set()\n for i in range(len(kmer)):\n for base in bases:\n result.add(kmer[:i] + base + kmer[(i + 1):])\n return result", "def find_neighbours(engine, field, features):\n code = CodeSegment(engine)\n N = len(engine.q)\n Nf = 3 ** engine.pm.ndim\n code.assign(x=Literal(numpy.zeros((N, Nf))), y='features')\n grid = engine.pm.generate_uniform_particle_grid(shift=0)\n for i in range(Nf):\n ii = i\n a = []\n for d in range(engine.pm.ndim):\n a.append(ii % 3 - 1)\n ii //= 3\n\n grid1 = grid + numpy.array(a[::-1]) * (engine.pm.BoxSize / engine.pm.Nmesh)\n layout = engine.pm.decompose(grid1)\n code.readout(x=Literal(grid1), mesh='field', value='feature1', layout=Literal(layout), resampler='nearest')\n code.assign_component(attribute='features', value='feature1', dim=i)\n return code", "def find_pattern(pattern, genome):\n\n tens_table = [pow(10, m) for m in xrange(len(pattern))]\n hash_pattern = get_hash(pattern, tens_table)\n index = []\n for current_index in xrange(len(genome) - len(pattern) + 1):\n\t\tif current_index == 0:\n\t\t\tcurrent_hash = get_hash(genome[0:len(pattern)], tens_table)\n\t\telse:\n\t\t\tcurrent_hash = ((current_hash - (nucleotide_value_map[genome[current_index-1]] * tens_table[len(pattern)-1])) * 10 + 
nucleotide_value_map[genome[current_index-1+len(pattern)]])\n if current_hash == hash_pattern:\n index.append(current_index)\n return index", "def neighbors(pattern, d):\n if d == 0:\n return pattern\n\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n\n neighborhood = []\n\n # ##########\n # We can use recursion to successively compute neighbors(suffix(pattern), d),\n # where suffix(pattern) = pattern[1:]\n #\n # The reason being: if we have neighbors(suffix(pattern, d)), then we know\n # that the Hamming Distance between `pattern` and `suffix(pattern)` is either equal\n # to d or less than d.\n #\n # In the first case, we can add `pattern[0]` to the beginning of\n # `suffix(pattern)` in order to obtain a k-mer belonging to\n # Neighbors(Pattern, d). In the second case, we can add any symbol\n # to the beginning of `suffix(pattern)` and obtain a k-mer belonging\n # to Neighbors(Pattern, d).\n # ##########\n\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n\n for i in range(len(suffix_neighbors)):\n\n neighboring_pattern_text = suffix_neighbors[i]\n\n if hamming_distance(suffix_pattern, neighboring_pattern_text) < d:\n for n in _NUCLEOTIDES:\n neighborhood.append(n + neighboring_pattern_text)\n\n else:\n neighborhood.append(pattern[0] + neighboring_pattern_text)\n\n return neighborhood", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def _get_neighbours(self, pos, input_data):\r\n neighbours = []\r\n\r\n start = AlignmentOutputData.table_values[pos.y][pos.x]\r\n diagonal = float(strings.NAN)\r\n up = float(strings.NAN)\r\n left = float(strings.NAN)\r\n\r\n cur_char_seq_1 = strings.EMPTY\r\n cur_char_seq_2 = strings.EMPTY\r\n\r\n if pos.y - 1 >= 0 and pos.x - 1 >= 0:\r\n diagonal = AlignmentOutputData.table_values[pos.y - 1][pos.x - 1]\r\n\r\n if pos.y - 1 >= 0:\r\n up = AlignmentOutputData.table_values[pos.y - 1][pos.x]\r\n\r\n if pos.x - 1 >= 0:\r\n left = AlignmentOutputData.table_values[pos.y][pos.x - 1]\r\n\r\n if pos.y - 1 >= 0:\r\n cur_char_seq_1 = input_data.sequence_a[pos.y - 1]\r\n if pos.x - 1 >= 0:\r\n cur_char_seq_2 = input_data.sequence_b[pos.x - 1]\r\n\r\n matching = start == diagonal + input_data.cost_function.get_value(cur_char_seq_1, cur_char_seq_2)\r\n deletion = start == up + input_data.gap_cost\r\n insertion = start == left + input_data.gap_cost\r\n\r\n if matching:\r\n neighbours.append(Vector(pos.x - 1, pos.y - 1))\r\n\r\n if insertion:\r\n neighbours.append(Vector(pos.x - 1, pos.y))\r\n\r\n if deletion:\r\n neighbours.append(Vector(pos.x, pos.y - 1))\r\n\r\n return neighbours", "def neighbours(num):\n num = str(num)\n num = '0'*(4-len(num))+num # Prepend 0 until length is 4\n\n return [\n int(add_wo_carry(num, '0001')),\n int(add_wo_carry(num, '0010')),\n int(add_wo_carry(num, '0100')),\n int(add_wo_carry(num, '1000')),\n int(sub_wo_carry(num, '0001')),\n int(sub_wo_carry(num, '0010')),\n int(sub_wo_carry(num, '0100')),\n int(sub_wo_carry(num, '1000'))]", "def get_neighbour(self, y, x):\n if [y, x] in self.mine_locations:\n return Minesweeper.BOMB\n count = 0\n # (x-1, y-1), (x, y-1), (x+1, y-1),\n # (x-1, y), (x, y), (x+1, y),\n # (x-1, y+1), (x, y+1), (x+1, y+1)\n for xe in range(x - 1, x + 2):\n for ye in range(y - 1, y + 2):\n if [ye, 
xe] in self.mine_locations:\n count += 1\n return str(count)", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n", "def neighbours(indexing, random_stream=None):\n\n # pre-compute some necessary values\n counts = compute_index_counts(indexing)\n binary_sm = compute_binary_set_mappings(indexing, counts)\n unary_sm = compute_unary_set_mappings(indexing, counts)\n empty = find_empty(counts)\n image = [idx for idx,count in enumerate(counts) if count != 0]\n \n def candidates(vertex, index, image, binary_sm, unary_sm, counts, empty):\n \"\"\"generates the set of possible target indices for a given vertex\n\n :param vertex: the vertex\n :type vertex: int\n :param index: the current index of the vertex\n :type index: int\n :param image: the image of the current indexing\n :type image: list\n :param binary_sm: result of `compute_binary_set_mappings`\n :type binary_sm: np.array[n,dtype=int]\n :param unary_sm: result of `compute_unary_set_mappings`\n :type unary_sm: np.array[n,dtype=int]\n :param counts: number of vertices/index\n :type counts: np.array[n,dtype=int]\n :param empty: an index that is assigned no vertex, None is also allowed\n :type empty: int/None\n :yield: iterator over target indices\n :rtype: Iterator[int]\n \"\"\"\n for k in image:\n if k == index:\n continue\n if counts[index] > 1 or counts[k] > 1:\n yield k\n elif vertex < unary_sm[k]: # implicitly: counts[index]==1 and counts[k]==1\n yield k\n if counts[index] > 2 or (counts[index] == 2 and vertex==binary_sm[index]):\n yield empty\n \n if random_stream is not None:\n # Random Move-Enumeration\n pweights = compute_probability_weights(indexing, counts, image, binary_sm)\n vertices = np.random.choice(indexing.shape[0], random_stream, p=pweights)\n for vertex in vertices:\n index = indexing[vertex]\n ks = list(candidates(vertex, index, image, binary_sm, unary_sm, counts, empty))\n k = random.choice(ks)\n yield vertex, k\n else:\n # Move-Enumeration\n for vertex, index in enumerate(indexing):\n for k in candidates(vertex, index, image, binary_sm, unary_sm, counts, empty):\n yield vertex, k", "def neighbour(seq):\n it = iter(seq)\n it_next = itertools.islice(itertools.chain(iter(seq), [None]), 1, None)\n\n prev = None\n for curr, next in zip(it, it_next):\n yield(prev, curr, next)\n prev = curr", "def _neuron_location(self, m, n):\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def neighbors(pattern, d):\n tides = set([\"A\", \"C\", \"G\", \"T\"])\n if d == 0:\n return set([pattern])\n if len(pattern) == 1:\n return tides\n neighborhood = set([])\n suffix_neighbors = neighbors(pattern[1:], d)\n for text in suffix_neighbors:\n if ham_dist(pattern[1:], text) < d:\n for tide in tides:\n neighborhood.add(tide + text)\n else:\n neighborhood.add(pattern[0] + text)\n return neighborhood", "def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n 
check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()", "def compute_neighbours(index, matrix):\n row, col = decode_to_matrix_cell(index, matrix)\n n1 = index + 1\n if n1 >= matrix.size or col == matrix.cols - 1:\n n1 = None\n\n n2 = index + matrix.cols\n if n2 >= matrix.size or row == matrix.rows - 1:\n n2 = None\n return n1, n2,", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def _get_neighbors(cls, pattern: str, max_distance: int) -> List[str]:\n return get_neighborhood(pattern, ''.join(cls.nucleobases.keys()), max_distance)", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def get_neighbors(pattern, d):\n # if no difference\n if d == 0:\n return [pattern]\n # if no pattern\n if len(pattern) == 1:\n return ['A', 'C', 'T', 'G']\n # initialize the container\n neighborhood = set()\n # checking for the suffix patterns\n neighbors = get_neighbors(pattern[1:], d)\n # iterates through the neighbors\n for kmer in neighbors:\n # check for the allowed distance\n if hamming_distance(pattern[1:], kmer) < d:\n # iterates through the charcater/bases\n for char in ['A', 'C', 'T', 'G']:\n # add the character to the suffix payyern\n neighborhood.add(char + kmer)\n else:\n # otherwise add the first character again\n neighborhood.add(pattern[0] + kmer)\n return sorted(list(neighborhood))", "def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1", "def neighbours(assignment): \n for index_1, index_2 in itertools.combinations(range(len(assignment)), 2):\n new_assign = list(assignment)\n new_assign[index_1], new_assign[index_2] = new_assign[index_2], new_assign[index_1]\n yield tuple(new_assign)", "def test_multigrid_calculates_neighbours_correctly():\n\n # create a grid which will result in 9 cells\n h = 64\n img_dim = (3 * h + 1, 3 * h + 1)\n amg = mg.MultiGrid(img_dim, h, WS=127)\n\n # check that each cell has the expected neighbours\n print(amg.n_cells)\n\n # expected neieghbours left to right, bottom to top\n cells = [{\"north\": amg.cells[3], \"east\": amg.cells[1], \"south\": None, \"west\": None}, # bl\n {\"north\": amg.cells[4], \"east\": amg.cells[2],\n \"south\": None, \"west\": amg.cells[0]}, # bm\n {\"north\": amg.cells[5], \"east\": None,\n \"south\": None, \"west\": amg.cells[1]}, # br\n {\"north\": amg.cells[6], \"east\": amg.cells[4],\n \"south\": amg.cells[0], \"west\": None}, # ml\n {\"north\": amg.cells[7], \"east\": amg.cells[5],\n \"south\": amg.cells[1], \"west\": amg.cells[3]}, # mm\n {\"north\": amg.cells[8], \"east\": None,\n \"south\": amg.cells[2], \"west\": amg.cells[4]}, # mr\n # tl\n {\"north\": None, \"east\": amg.cells[7],\n \"south\": amg.cells[3], \"west\": None},\n # tm\n {\"north\": None,\n \"east\": amg.cells[8], \"south\": amg.cells[4], \"west\": amg.cells[6]},\n {\"north\": None, \"east\": None,\n \"south\": amg.cells[5], \"west\": amg.cells[7]}, # tr\n ]\n\n for ii, (gc, cell) in enumerate(zip(amg.cells, cells)):\n print(ii)\n assert gc.north == cell['north']\n assert 
gc.east == cell['east']\n assert gc.south == cell['south']\n assert gc.west == cell['west']", "def neighbours(ar, cur_index, cnt_of_neiboors=3, exclude_from_neibors_index=[]):\n rmax = np.max([0, cur_index + cnt_of_neiboors - len(ar)])\n lmin = np.max([cur_index - (cnt_of_neiboors + rmax), 0])\n\n excl = set(exclude_from_neibors_index) | {cur_index}\n nbs = [i for i in range(lmin, len(ar)) if i not in excl]\n return ar[nbs[:cnt_of_neiboors * 2]]", "def neighbours(number: int, number_sectors: int) -> [int, int, int, int]:\n col = number % number_sectors\n row = number // number_sectors\n\n nieg = [number - number_sectors, number + number_sectors, number - 1, number + 1]\n\n if row == 0:\n nieg[0] = -1\n if row == number_sectors - 1:\n nieg[1] = -1\n if col == 0:\n nieg[2] = -1\n if col == number_sectors - 1:\n nieg[3] = -1\n return nieg" ]
[ "0.6630292", "0.6147133", "0.61121386", "0.6109716", "0.6099266", "0.60585225", "0.60353184", "0.5980777", "0.59600437", "0.5955379", "0.58933324", "0.5820584", "0.5806383", "0.5797996", "0.5792484", "0.5790195", "0.57679236", "0.5754327", "0.5728205", "0.5717146", "0.5706085", "0.570483", "0.56959444", "0.56939876", "0.5687913", "0.5687054", "0.5681787", "0.5659362", "0.5639083", "0.5631543" ]
0.7541577
0
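A brief usage sketch for the `_generate_immediate_neighbours` routine above. The `LIST_A`/`LIST_C`/`LIST_T`/`LIST_G` constants are not part of the snippet, so the definitions here are an assumption: each list holds the three bases that may replace the keyed nucleotide.

# Assumed constants: the three possible substitutions for each base.
LIST_A = ['C', 'G', 'T']
LIST_C = ['A', 'G', 'T']
LIST_T = ['A', 'C', 'G']
LIST_G = ['A', 'C', 'T']

# With those in scope, the function above returns every pattern differing
# from the input in exactly one position:
neighbours = _generate_immediate_neighbours('ACG')
print(sorted(neighbours))
# ['AAG', 'ACA', 'ACC', 'ACT', 'AGG', 'ATG', 'CCG', 'GCG', 'TCG']
# 3 positions x 3 substitutions per position = 9 immediate neighbours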
Whether the given card matches this card
def is_match(self, card):
    return self.suit == card.suit or self.value == card.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_card_match(self, chosen_card, active_card, active_suit):\n\t\treturn chosen_card.is_match(active_card) or chosen_card.suit == active_suit", "def __eq__(self, other_card):\n if self.rank == other_card.rank or self.suit == other_card.suit:\n return True\n else:\n return False", "def check_cards(self, cards):\n if len(cards) != 3:\n return False\n\n match = 0\n card1 = cards[0][1]\n card2 = cards[1][1]\n card3 = cards[2][1]\n\n match += self.compare_element(card1, card2, card3, 'shape')\n match += self.compare_element(card1, card2, card3, 'colour')\n match += self.compare_element(card1, card2, card3, 'count')\n match += self.compare_element(card1, card2, card3, 'fill')\n\n return match == 4", "def check_card(card1, card2):\r\n\r\n num1 = card1.split(' ')[0]\r\n num2 = card2.split(' ')[0]\r\n\r\n if num1 == num2:\r\n return True\r\n else:\r\n return False", "def __eq__(self, card2):\n return self.suit == card2.suit and self.rank == card2.rank", "def can_play(self, card):\n played_cards = map(lambda x: str(x).lower(), self.played_cards)\n if str(card).lower() in played_cards:\n return False\n if card.prebuild in played_cards:\n return True\n\n for res in card.cost", "def does_player_have_card(self, player, card):\n return card in self.hands[player]", "def __eq__(self, other: Card) -> bool:\n return compare_map[self.number] == compare_map[other.number]", "def cardPlayable(self, card):\n return self.field[Suit.toInt(card.getSuit()) - 1] == card.getValue() - 1", "def is_valid(self, card):\n # type: (str, Card) -> bool\n if card.version == \"3.0\":\n return False\n fingerprint = self.crypto.calculate_fingerprint(\n Utils.strtobytes(card.snapshot)\n )\n fingerprint_hex = fingerprint.to_hex\n if fingerprint_hex != card.id:\n return False\n verifiers = self.verifiers.copy()\n card_public_key = self.crypto.import_public_key(card.public_key)\n verifiers[fingerprint_hex] = card_public_key\n for key in verifiers:\n if key not in card.signatures:\n return False\n is_valid = self.crypto.verify(\n fingerprint.value,\n Utils.b64tobytes(card.signatures[key]),\n verifiers[key]\n )\n if not is_valid:\n return False\n return True", "def _check_suit_or_value_match(cls, card1, card2):\n\t\tsuit_match, value_match = False, False\n\t\tif (card1.suit == card2.suit) or (card2.suit == constants.CARD_BLACK) or (card1.suit == constants.CARD_BLACK):\n\t\t\tsuit_match = True\n\t\tif card1.value == card2.value:\n\t\t\tvalue_match = True\n\t\treturn suit_match or value_match", "def is_card_playable(self, card):\n color_index = COLOR.index(card[0])\n return len(self.firework[color_index]) == int(card[1]) - 1", "def __eq__(self, other):\n if isinstance(other, Card):\n return self.color == other.color and self.value == other.value\n return False", "def validate_cards(self, cards_list):\n return set(self.hand).issubset(set(cards_list))", "def cardExists(self, id):\n return id in self.cards", "def is_blackjack(self) -> bool:\n if self.score == 21 and len(self.cards) == 2:\n return True\n else:\n return False", "def check_valid(self, cards):\n\n if len(cards) == 1: # one card\n return True\n if len(cards) == 2: # two cards\n if ((self.num_to_card(int(cards[0])) == self.num_to_card(int(cards[1]))) or # two same cards\n (int(cards[0]) > 51) or # any card and a joker\n (int(cards[1])) > 51): # any card and a joker\n return True\n return False\n\n # 3 or more: all same number/ascending order\n # check how many jokers\n jokers = 0\n for card in cards:\n #print(int(card))\n #print(self.num_to_card(card))\n if int(card) > 
51:\n jokers += 1\n #print(\"YESSSSSSSSSSIR\")\n #print(f'[THERE ARE {jokers} JOKERS]')\n\n # check if all same number\n sort = sorted(cards)\n #print(f'[THE SORTED CARDS: {sort}]')\n index = 0\n for card in sort:\n if self.num_to_card(int(card)) == self.num_to_card(int(sort[0])) or int(card) > 51:\n index += 1\n if index == len(cards):\n return True\n\n # check ascend order\n if not self.is_same_sign(cards):\n print('Here')\n return False\n\n #print(\"accend left\")\n return self.ascend(cards, jokers)", "def valid(self, a_card: card.Card) -> bool:\n if self._pile:\n return self._pile[-1].foundation_valid(a_card)\n if a_card.value == 0:\n return True\n return False", "def check_card_number(self, card_number):\n database_cursor.execute(f\"SELECT number FROM card WHERE number = {card_number};\")\n result = database_cursor.fetchall()\n return result[0][0] == card_number if result else False", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def is_same_sign(self, cards):\n\n jokers = 0\n w_o_jokers = []\n for card in cards:\n if self.num_to_card(int(card)) == 0:\n jokers += 1\n else:\n w_o_jokers.append(int(card))\n\n w_o_jokers = sorted(w_o_jokers)\n print(\"whitout jokers: \", w_o_jokers)\n if w_o_jokers[0] <= 12: # if the cards are CLUBS\n if w_o_jokers[-1] > 12:\n return False\n if w_o_jokers[0] <= 25: # if the cards are DIAMONDS\n if w_o_jokers[-1] > 25:\n return False\n if w_o_jokers[0] <= 38: # HEARTS\n if w_o_jokers[-1] > 38:\n return False\n if w_o_jokers[0] <= 51:\n if w_o_jokers[-1] > 51:\n return False\n return True", "def valid(self, a_card: card.Card) -> bool:\n if self._pile:\n return self._pile[-1].tableau_valid(a_card)\n if a_card.value == 12:\n return True\n return False", "def hash_comparison(self):\n for result in self.cards:\n if result.hash_status:\n return True\n return False", "def hasBlackjack(self):\n return len(self.cards) == 2 and self.getPoints() == 21", "def match(self) -> bool:", "def deck_has_cards(deck, cards):\n deck_dict = collections.defaultdict(int)\n for card in itertools.chain(deck.draw_pile, deck.discard_pile, deck.hand):\n deck_dict[card] += 1\n return deck_dict == cards", "def has_cards(self):\n return self.hand.len() > 0", "def compare_element(self, card1, card2, card3, element):\n e1 = card1[element]\n e2 = card2[element]\n e3 = card3[element]\n if (e1 == e2 and e2 == e3) or (e1 != e2 and e1 != e3 and e2 != e3):\n # All the same or all different.\n return 1\n return 0", "def is_valid(current_card: Card, destination: Card) -> bool:\n # TODO: check for a card to a space is only Kings; maybe in the board?\n match = current_card.color == destination.color\n difference = destination.value - current_card.value\n if not match and difference == 1:\n return True\n else:\n return False", "def is_match(self, other_cpe):\n if not isinstance(other_cpe, CPE):\n return False\n\n if self.part == other_cpe.part and self.vendor == other_cpe.vendor:\n\n if other_cpe.product not in ['*', self.product]:\n return False\n if other_cpe.version not in ['*', self.version]:\n return False\n if other_cpe.update not in ['*', self.update]:\n return False\n if other_cpe.edition not in ['*', self.edition]:\n return False\n if other_cpe.language not in ['*', self.language]:\n return False\n if other_cpe.sw_edition not in ['*', self.sw_edition]:\n return False\n if other_cpe.target_sw not in ['*', self.target_sw]:\n return False\n if other_cpe.target_hw not in ['*', self.target_hw]:\n return False\n if other_cpe.other not in 
['*', self.other]:\n return False\n\n return True\n else:\n return False" ]
[ "0.78050846", "0.7451856", "0.727455", "0.7232319", "0.718153", "0.7173871", "0.713753", "0.7100822", "0.67850065", "0.6778596", "0.6773867", "0.674615", "0.6730441", "0.66842115", "0.6663965", "0.6543529", "0.6535424", "0.6506496", "0.648398", "0.64583373", "0.6433088", "0.641239", "0.6393447", "0.6389675", "0.63526195", "0.6300056", "0.6273384", "0.62630904", "0.61931187", "0.61919403" ]
0.8583878
0
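A small, self-contained illustration of the matching rule above. The snippet does not show the Card class, so the namedtuple stand-in and the module-level restatement of the method are assumptions; the point is only that two cards match when they share a suit or a value.

from collections import namedtuple

# Hypothetical stand-in for the Card class used by is_match above.
Card = namedtuple('Card', ['suit', 'value'])


def is_match(card_a, card_b):
    # Module-level restatement of the method above, for the demo only.
    return card_a.suit == card_b.suit or card_a.value == card_b.value


print(is_match(Card('hearts', 7), Card('hearts', 2)))  # True  - same suit
print(is_match(Card('hearts', 7), Card('spades', 7)))  # True  - same value
print(is_match(Card('hearts', 7), Card('spades', 2)))  # False - neither matches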
Ensures that chosen_card is an acceptable match, given the active_card and active_suit
def _validate_card_match(self, chosen_card, active_card, active_suit):
    return chosen_card.is_match(active_card) or chosen_card.suit == active_suit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_chosen_card(self, allowed_cards, chosen_card):\n if self.action is not None:\n if self.action in allowed_cards:\n logger.info(f\"Successfully chose the card: {self.action}\")\n chosen_card = self.action\n else:\n logger.error(f\"{self.action} is not a valid card! Choosing the first allowed card now.\")\n else:\n logger.debug(\"chosen card is None\")\n return chosen_card", "def is_match(self, card):\n\t\treturn self.suit == card.suit or self.value == card.value", "def check_selected_card(_player1, _player2):\n if _player1.selected_card and _player2.selected_card:\n color = _player1.selected_card.suit\n if _player2.selected_card.suit != color and check_color_card(_player2, color):\n _player2.selected_card = None", "def choose_validator(payload, chosen):\n _has_theme = has_theme(payload[\"cards\"], payload[\"theme\"])\n special_tuple = (\n SkullEnum.WHITE,\n SkullEnum.MERMAID,\n SkullEnum.PIRATE,\n SkullEnum.GREENPIRATE,\n SkullEnum.SKULLKING,\n )\n\n if not chosen.isdecimal():\n print(f\"Choose a number between 1 and {len(payload['cards'])}\")\n return False\n if not (1 <= int(chosen) <= len(payload[\"cards\"])):\n print(f\"Choose a number between 1 and {len(payload['cards'])}\")\n return False\n if (\n _has_theme\n and payload[\"cards\"][int(chosen) - 1].CARDTYPE not in special_tuple\n and payload[\"cards\"][int(chosen) - 1].CARDTYPE != payload[\"theme\"]\n ):\n print(\n f\"You have a card of the theme {payload['theme']}. You must choose that card\"\n )\n return False\n\n return True", "def test_suit(self):\n card = self._card\n self.assertEqual(card.suit, self._suit)", "def dealer_matching(self):\n if len([card for card in self.dealer_hand if card[1] == '8']) > 0:\n self.discard_pile = [card for card in self.dealer_hand if card[1] == '8'][0]\n self.dealer_hand.remove(self.discard_pile)\n dealer_suits = [card[0] for card in self.dealer_hand]\n self.new_suit = max(set(dealer_suits), key=dealer_suits.count)\n print(\"\\nNew suit is :\", self.new_suit)\n return 1\n if self.new_suit != '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.new_suit:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n self.new_suit = ''\n return 1\n else:\n return 0\n if self.new_suit == '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.discard_pile[0] or card[1] == self.discard_pile[1]:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n return 1\n else:\n return 0", "def compare_cards(self, guess):\n \n \"\"\"\n Compares cards to determine higher_lower, \n compares result with guess\n Args: \n self: : An instance of Dealer.\n self.card_1: int\n self.card_2: int\n guess: bool\n \"\"\"\n card_str_1 = self.get_card_str(self.card_1)\n card_str_2 = self.get_card_str(self.card_2)\n if guess: \n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score += 100\n if not guess:\n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to 
{card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score += 100\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score -= 75", "def test_play_card(self):\n self.plr.piles[Piles.DECK].set(\"Silver\", \"Province\", \"Moat\", \"Gold\")\n self.vic.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"discard\", \"discard\", \"putback\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertIn(\"Duchy\", self.vic.piles[Piles.DISCARD])\n self.assertIn(\"Gold\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Province\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Moat\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Silver\", self.plr.piles[Piles.DECK])", "def check_cards_eligibility(self):\n for c in self.hand:\n c.check_actions(self)\n for c in self.phand:\n c.check_actions(self)\n for c in self.discard:\n c.check_actions(self)\n for c in self.active_player.phand:\n c.check_actions(self)\n for c in self.active_player.hand:\n c.check_actions(self)\n for c in self.active_player.discard:\n c.check_actions(self)\n for c in self.played_user_cards:\n c.check_actions(self)\n if ACTION_KEEP in self.actions:\n for p in self.players:\n for c in p.phand:\n c.check_actions(self)\n for c in p.hand:\n c.check_actions(self)\n for c in p.discard:\n c.check_actions(self)", "def choose_card(playable_cards):\r\n\r\n playing = playable_cards[0]\r\n print('\\n choosing \\n', playing)\r\n\r\n return playing # for now\r", "def followUpAttack(self, validCards):\n print(\"Select card from... \")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n while card not in validCards: # error checking\n print(card)\n print(\"Please select a valid card from...\")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "def guess(card1: dict, card2: dict) -> bool:\n print(f\"The current card is {card1['rank']} of {card1['suit']}\")\n selection = str(input('Will the next card be higher h or lower l?: '))\n if selection == 'h':\n return compare(card1, card2) < 0\n elif selection == 'l':\n return compare(card1, card2) > 0\n else:\n print(\"Type h or l\")\n return False", "def _check_suit_or_value_match(cls, card1, card2):\n\t\tsuit_match, value_match = False, False\n\t\tif (card1.suit == card2.suit) or (card2.suit == constants.CARD_BLACK) or (card1.suit == constants.CARD_BLACK):\n\t\t\tsuit_match = True\n\t\tif card1.value == card2.value:\n\t\t\tvalue_match = True\n\t\treturn suit_match or value_match", "def test_card_suit(mock_card):\n assert mock_card.suit == Suit.SPADE", "def choose_card(self, state=None):\n # if self.at_last_stich():\n # allowed = yield self.cards[0]\n # else:\n self.observation_received.acquire()\n self.observation = self.build_observation(state, self.cards)\n logger.debug(f\"choose_card received observation: {self.observation}\")\n self.observation_received.notify_all() # notify all threads to be sure\n self.observation_received.release()\n\n self.action_received.acquire()\n received = self.action_received.wait()\n if not received:\n logger.debug(\"Timeout occurred. 
action_received condition has not been notified.\")\n logger.debug(f\"choose_card received action: {self.action}\")\n allowed_cards = self.allowed_cards(state=state)\n chosen_card = allowed_cards[0] # set chosen_card to the first allowed card in case anything goes south\n chosen_card = self.set_chosen_card(allowed_cards, chosen_card)\n self.action_received.release()\n\n allowed = yield chosen_card\n\n if allowed:\n yield None", "def dealer_card_choice(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_USER = USERS[user.username]\n CURRENT_CONTEXT = process_card_value(query.data, CURRENT_USER)\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\nChoose Dealers Card: '\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=card_markup\n )\n\n # Tell ConversationHandler that we're in state `STRATEGY` now\n return STRATEGY", "def take_comp_turn(self, deck, pile):\n matches = [card for card in self.hand if card.is_match(pile.top_card() != 0)]\n if len(matches) > 0: # can play\n choice = random.randrange(len(matches))\n self.play_card(matches[choice-1], pile)\n if matches[choice - 1].kind == 'wild' or matches[choice - 1].kind == 'wild4':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n matches[choice - 1].color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n print(str(self.name) + \" played \" + str(matches[choice-1]))\n\n else: # comp can't play\n # check if deck is empty -- if so, reset it\n if deck.is_empty():\n deck.reset_deck(pile)\n # draw a new card from the deck\n newcard = self.draw_card(deck)\n print(\"The computer drew: \" + str(newcard))\n if newcard.is_match(pile.top_card()): # can be played\n self.play_card(newcard, pile)\n if newcard.kind == 'wild':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n newcard.color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n else: # still can't play\n print(\"Sorry, you still can't play.\")\n print(str(self.name) + \" played \" + str(newcard))\n return", "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def can_play(self, card):\n played_cards = map(lambda x: str(x).lower(), self.played_cards)\n if str(card).lower() in played_cards:\n return False\n if card.prebuild in played_cards:\n return True\n\n for res in card.cost", "def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! 
You win!\")", "def test_play(self):\n self.plr.piles[Piles.DECK].set(\"Province\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.test_input = [\"keep\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.coins.get(), 2)\n self.assertIn(\"Province\", self.plr.piles[Piles.DECK])\n self.assertNotIn(\"Province\", self.plr.piles[Piles.DISCARD])", "def compare_cards(board, eng_card, scot_card, eng_type, scot_type, eng_parameter, scot_parameter):\n\n\n \n year_ends_early = False\n\n \n if get_card_val(eng_card) > get_card_val(scot_card):\n who_goes_first = 'ENGLAND'\n \n elif get_card_val(eng_card) < get_card_val(scot_card):\n who_goes_first = 'SCOTLAND'\n \n elif get_card_val(eng_card) == get_card_val(scot_card):\n \n who_goes_first = 'ENGLAND'\n \n if get_card_val(eng_card) == 4 and get_card_val(scot_card) == 4:\n year_ends_early = True\n \n board.who_goes_first = who_goes_first\n\n eng_played_truce = False\n if eng_card == 'TRU':\n eng_played_truce = True\n\n scot_played_truce = False\n if scot_card == 'TRU':\n scot_played_truce = True\n\n if who_goes_first == 'ENGLAND':\n\n resolve_card(board, eng_type, scot_type, eng_card, 'ENGLAND', eng_parameter, scot_played_truce)\n resolve_card(board, eng_type, scot_type, scot_card, 'SCOTLAND', scot_parameter, eng_played_truce)\n \n elif who_goes_first == 'SCOTLAND':\n \n resolve_card(board, eng_type, scot_type, scot_card, 'SCOTLAND', scot_parameter, eng_played_truce)\n resolve_card(board, eng_type, scot_type, eng_card, 'ENGLAND', eng_parameter, scot_played_truce)\n \n return who_goes_first, year_ends_early", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def take_turn(self):\n \n self.card_1 = self.get_card()\n self.display_card_1()\n guess = self.player.higher_lower()\n self.card_2 = self.get_card()\n self.display_card_2()\n self.compare_cards(guess)\n self.player.print_score()\n if self.player.score > 0:\n self.can_deal = self.player.keep_playing()\n print(\"\\n\")\n else:\n self.can_deal = False\n print(\"Game overThanks for playing!\")", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def test_gain(self):\n self.plr.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"Get Estate\"]\n self.plr.gain_card(\"Cursed Village\")\n self.assertNotIn(\"Curse\", self.plr.piles[Piles.DISCARD])\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Estate\"])\n self.assertIn(\"Duchy\", self.g.trashpile)", "def test_play(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"Gain\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.favors.get(), 1)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1 + 1)", "def choose_hand(hand, deck):\n possible = list()\n for c in combinations(hand, 4):\n possible.append([Cribbage.expected_score(list(c), deck), c])\n best = max(possible, key = lambda i : i[0])\n discard = list(set(hand) - set(best[1]))\n return best[1], discard", "def test_seven_cards_poker(self):\n self.assertEqual(best_hand(\"6C 7C 8C 9C TC 5C JS\".split()),\n ('6C', '7C', '8C', '9C', 'TC'))\n self.assertEqual(best_hand(\"TD TC 
TH 7C 7D 8C 8S\".split()),\n ('TD', 'TC', 'TH', '8C', '8S'))\n self.assertEqual(best_hand(\"JD TC TH 7C 7D 7S 7H\".split()),\n ('JD', '7C', '7D', '7S', '7H'))", "def indicate_discard_card(whose_turn,players):\n cards_to_choose_from = players[whose_turn].hand.cards\n players[whose_turn].hand.print_cards()\n chosen_to_discard = int(input('Select a card to discard. Type a number. '))\n return chosen_to_discard" ]
[ "0.70289963", "0.70051605", "0.6518905", "0.6450109", "0.63630843", "0.630566", "0.62759304", "0.62171423", "0.61978257", "0.61198056", "0.60976046", "0.60950667", "0.6085195", "0.6076674", "0.60443", "0.60375905", "0.60289156", "0.59881556", "0.59762114", "0.59721756", "0.5937578", "0.5936938", "0.59308237", "0.59230936", "0.5919256", "0.5891327", "0.5885813", "0.5869765", "0.5838972", "0.5826585" ]
0.82993805
0
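A sketch of how the validator above might be exercised during play. The Card class and the Crazy-Eights-style scenario (an eight lets the player declare a new active suit) are assumptions used only for illustration; the validator is restated standalone so the example runs on its own.

# Hypothetical Card carrying the is_match rule from the previous row.
class Card:
    def __init__(self, suit, value):
        self.suit = suit
        self.value = value

    def is_match(self, card):
        return self.suit == card.suit or self.value == card.value


def _validate_card_match(chosen_card, active_card, active_suit):
    # Standalone restatement of the method above, for the demo only.
    return chosen_card.is_match(active_card) or chosen_card.suit == active_suit


active_card = Card('hearts', 8)   # an eight was just played...
active_suit = 'spades'            # ...and its player declared spades
print(_validate_card_match(Card('spades', 3), active_card, active_suit))  # True: matches declared suit
print(_validate_card_match(Card('clubs', 8), active_card, active_suit))   # True: matches the card value
print(_validate_card_match(Card('clubs', 3), active_card, active_suit))   # False: no match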
If test_mode is True, an image of `screen` is saved
def save_screen(screen):
    if not video_mode:  # Don't record video
        return False
    # Make global variables writeable
    global current_frame
    global path_checked
    frames_directory = os.path.dirname(
        os.path.dirname(
            os.path.realpath(__file__))) + "\\frames\\"
    if not path_checked:
        check_folder(frames_directory)
    pygame.image.save(
        screen,
        frames_directory + "ants-frame{}.jpeg".format(
            str(current_frame).zfill(4)))
    current_frame += 1  # Move count to next frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveScreenPNG(self, filename):\n return nes_lib.saveScreenPNG(self.obj, filename)", "def screen_shot(self):\n screen_size = '{}x{}@{}x{}/0'.format(self.screen[0], self.screen[1], self.screen[0], self.screen[1])\n subprocess.check_call([\n ADB_EXECUTOR, '-s', self.device_id, 'shell',\n 'LD_LIBRARY_PATH=/data/local/tmp', '/data/local/tmp/minicap', '-s', '-P', screen_size,\n '>', TEMP_PIC_ANDROID_PATH\n ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n logger.info('screen shot saved in {}'.format(TEMP_PIC_ANDROID_PATH))", "def screen_shot():\n screen_shot_string_io = StringIO.StringIO()\n ImageGrab.grab().save(screen_shot_string_io, \"PNG\")\n screen_shot_string_io.seek(0)\n return screen_shot_string_io.read()", "def save_screenshot(self, file_name, width=3840, height=2160, first=True, last=True):\n if first and self.assigned_opengl_context is not None:\n self.assigned_opengl_context.makeCurrent()\n gr3.export(file_name, width, height)\n if last and self.assigned_opengl_context is not None:\n self.assigned_opengl_context.doneCurrent()", "def screens_maker(task):\n rend_type = int( task['render_type'] )\n rend_project = task['project_name']\n rend_result_dir = task['result_dir']\n file_name = p_rend_type[rend_type]['file_screen'].split( '/' )[1]\n logging.info( 'IN SCREEN Maker {}'.format( task ) )\n try:\n bpy.ops.wm.open_mainfile( filepath=rend_project )\n scn = bpy.context.scene\n scn.frame_start = 100\n scn.frame_end = 101\n bpy.data.scenes[scn.name].render.image_settings.file_format = 'JPEG'\n scn.render.filepath = '{}'.format( str( rend_result_dir ) + '/' + str( file_name ) )\n bpy.ops.render.render( write_still=True )\n try:\n os.chown( scn.render.filepath, int( u_ugid ), int( u_gguid ) )\n os.chmod( scn.render.filepath, 0o777 )\n except Exception as e:\n logging.info( 'err SCREEN MAKER rights{}'.format( str( e ) ) )\n except Exception as e:\n logging.info( 'ERR IN SCREEN Maker {}'.format( str( e ) ) )\n\n return 1", "def snapshot(self, file_path=None):\n \"\"\"default not write into file.\"\"\"\n screen = self.minicap.get_frame()\n\n if file_path:\n file_name = str(time.time()*1000) + '.jpg'\n file_path = os.path.join(file_path, file_name)\n ImgUtils.imwrite(file_path, screen)\n\n # '''t_img 需转换为cv2可解码的文件,不然会抛错 src is not a numpy array, neither a scalar'''\n # try:\n # screen = ImgUtils.str2img(screen)\n # except Exception:\n # # may be black/locked screen or other reason print exc for debugging\n # import traceback\n # traceback.print_exc()\n # return None\n\n return screen", "def screenGrab():\n box = (x_pad+1, y_pad+1, 796, 825)\n save_directory = os.getcwd()\n time_stamp = int(time.time())\n image_file_name = '{}\\\\full_snap__{}.png'.format(save_directory, time_stamp)\n im = ImageGrab.grab(box)\n im.save(image_file_name, 'PNG')", "def screen_shot(self, pic_path):\n self.run_command(f'shell screencap -p /sdcard/screen.png')\n if not path.exists(pic_path):\n self.run_command(f'pull /sdcard/screen.png {pic_path}')\n else:\n raise ADBError(f'{pic_path} already exist')\n self.run_command(f'shell rm /sdcard/screen.png')\n yield pic_path\n remove(pic_path)", "def get_screen_image(dir=\"screenshots\"):\n screenshot_name = dir + \"/screenshot_\" + str(random.randint(0, 1e10)) + \".png\"\n\n screenshot = autopy.bitmap.capture_screen()\n screenshot.save(screenshot_name)\n return screenshot_name", "def capture(self):\n current_time=time.strftime('%Y%m%d-%H%M%S')\n self.filepath=f\"files/{current_time}.png\"\n self.ids.camera.export_to_png(self.filepath)\n 
self.manager.current='image_screen'\n self.manager.current_screen.ids.img.source=self.filepath", "def test_save_screenshot():\n\n surface_flow_file = Path(TEST_RESULTS_FILES_PATH, \"surface_flow.vtu\")\n screenshot_file = save_screenshot(surface_flow_file, \"Mach\")\n assert screenshot_file.exists()\n\n if screenshot_file.exists():\n screenshot_file.unlink()", "def saveimage(self):\n if self.saveimageButton.isChecked():\n self.save = True\n self.channelsOpen()\n self.movetoStart()\n self.saveimageButton.setText('Abort')\n self.guarda = np.zeros((self.numberofPixels, self.numberofPixels))\n self.liveviewStart()\n\n else:\n self.save = False\n print(\"Abort\")\n self.saveimageButton.setText('reintentar Scan and Stop')\n self.liveviewStop()", "def export_screen(self, target_path):\n subprocess.check_call([\n ADB_EXECUTOR, '-s', self.device_id,\n 'pull', TEMP_PIC_ANDROID_PATH, target_path\n ], stdout=subprocess.DEVNULL)\n logger.info('export screen shot to {}'.format(target_path))", "def saveWindowState(self):\n print(\"Save button has been pressed!\")\n screenshot = self.widgetHolder.grab()\n self.screenshotNum += 1\n if(self.addressBox.text() != \"\"):\n screenshot.save(os.path.join(self.addressBox.text(), (\"screenshot\" + str(self.screenshotNum) + \".jpg\")))\n else:\n screenshot.save(\"screenshot\" + str(self.screenshotNum) + \".jpg\", \"jpg\")", "def screenShot(self, cam=None, path=os.path.expanduser('~'), basenm='view'):\n if cam is None:\n # This allows use to dynamicly select cameras\n cam = GetActiveCamera()\n os.chdir(path)\n self.view(cam=cam)\n WriteImage(\"%s.png\" % (basenm))", "def export_screenshot(self):\n\n if self.vis_type is None or len(self.vis_type) < 1:\n vis_type_suffix = ''\n else:\n vis_type_suffix = self.vis_type\n\n print(\"exporting screenshot for {}\".format(self.current_unit_id))\n ss_out_file = self.screenshot_dir / \"{}_{}_{}.{}\".format(\n self.current_unit_id, vis_type_suffix,\n cfg.screenshot_suffix, cfg.screenshot_format_ext)\n self.fig.savefig(ss_out_file, bbox_inches='tight', dpi=cfg.dpi_export_fig)", "def save_detection(self, image):\n\t\timg = self.visualize_detection(image)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\t\tcv2.imwrite(f'{SAVE_PATH}{self.clip}{self.num_save}.jpg', img)", "def _save_buffer(self):\n img_data = renderer.fbuffer.read(mode='color', alpha=False)\n img = Image.fromarray(img_data)\n img.save(self._save_fname)\n self._save_flag = False", "def save_screenshot(self, img, file_name: str):\n img.save(str(self.info.screenshots_path / file_name))", "def setScreenMode(mode='normal'):\n screendict = {'normal':'REVERS', 'black':'NOREV'}\n dislin.scrmod(screendict[mode])", "def capture(self):\n current_time = time.strftime('%Y%m%d-%H%M%S')\n filepath = f'files/{current_time}.png'\n self.ids.camera.export_to_png(filepath)\n self.manager.current = 'image_screen' # switch to the next screen\n self.manager.current_screen.ids.img.source = filepath # inherit img to the next screen\n return filepath", "def write_to_screen(self, text):\n\t\tself.blank_image = np.full((1280,1920, 3), 255, np.uint8)\n\t\tcv2.putText(self.blank_image, text,(40,300), font, 8,(0,0,0),3,cv2.LINE_AA)\n\t\tcv2.imshow(\"Background\", self.blank_image)\n\t\tcv2.waitKey(1)", "def draw(canvas_result,automatic_save,manual_save):\r\n if canvas_result is not None and canvas_result.image_data is not None and (automatic_save or manual_save):\r\n # Receive the user's drawing with the dimensions: 512X512X4\r\n img_data = canvas_result.image_data\r\n # the user's drawing is in 
RGBA mode with floats instead of integers - convert to uint8 type and to RGB format\r\n im = Image.fromarray(img_data.astype(np.uint8)[:,:,:3]).convert('RGB') # convert to dimensions 512X512X3\r\n # initialize a copy of the user's drawing.\r\n add_bg = np.array(im, dtype='uint8') # initalize a copy\r\n # allow the user to know that the saving is in progress.\r\n with st.spinner(\"Saving image...\"):\r\n # the drawing is lack of the GauGAN background because streamlit_drawable_canvas library doesn't allow it yet.\r\n # Because of that the background will be added manually - o(n^3) at the moment.\r\n for i in range(add_bg.shape[0]):\r\n for j in range(add_bg.shape[1]):\r\n if list(add_bg[i,j]) != [0,0,0]: # if the current RGB value is not (0,0,0) (black) -\r\n for k in range(add_bg.shape[2]): # then make sure we don't have white values (255)\r\n if add_bg[i,j][k] == 255: # we will fill them with the relevant background color position\r\n add_bg[i,j][k] = colors['Sky'][k] if i<300 else colors['Sea'][k]\r\n else: # else, we do indeed have RGB value of (0,0,0), then replace it by its entirety to the relevant\r\n # background color.\r\n add_bg[i,j] = colors['Sky'] if i<300 else colors['Sea']\r\n\r\n # Create PIL object of the manually added background with drawing on the canvas\r\n add_bg = Image.fromarray(add_bg)\r\n # Assign the path where the file will be saved\r\n if not os.path.exists(\"tmp/\"):\r\n os.makedirs(\"tmp/\")\r\n file_path = f\"tmp/pic%s.png\"%(len(counter))\r\n # Increase the counter by adding dummy element into the counter list\r\n counter.append(0)\r\n # Save the drawing in PNG format\r\n\r\n add_bg.save(file_path, \"PNG\")\r\n st.success(\"Image saved successfully. Keep drawing!!\")", "def capture_image(self):\n ext = self.image_save_type.lower()\n\n if self.calibrating:\n print('calibrating')\n\n if ext == 'fits':\n self.save_fits()\n self._image_counter += 1\n else:\n img = self.original_image\n path = os.path.join(self.home, 'data')\n name = \"camtrak_frame_{}.png\".format(self._image_counter) \n fn = os.path.join(path, name)\n cv2.imwrite(fn, img)\n\n QtWidgets.QApplication.beep()\n self.statusBar().showMessage(f'Saved image to {fn}')\n self._image_counter += 1", "def test(self,windowsize = False):\n\n # set up a specific window to test the text in\n if windowsize:\n self.screen = pygame.display.set_mode(windowsize)\n self.screen.fill((200,200,200))\n self.screen.blit(*self.blitinfo)\n\n # if no specific window is specified create a small one around the\n # outside of the text\n else:\n self.screen = pygame.display.set_mode((self.imagewidth + 20,self.imageheight + 20))\n self.screen.fill((200,200,200))\n self.screen.blit(self.image, (10,10))\n\n pygame.display.flip()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()", "def test(self,windowsize = False):\n\n # set up a specific window to test the text in\n if windowsize:\n self.screen = pygame.display.set_mode(windowsize)\n self.screen.fill((200,200,200))\n self.screen.blit(*self.blitinfo)\n\n # if no specific window is specified create a small one around the\n # outside of the text\n else:\n self.screen = pygame.display.set_mode((self.imagewidth + 20,self.imageheight + 20))\n self.screen.fill((200,200,200))\n self.screen.blit(self.image, (10,10))\n\n pygame.display.flip()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()", "def draw_screen(self, master_screen):\n master_screen.blit(self.screen_image, 
(0, 0))", "def save_full_canvas_as_png(self, output_fname):\n\n # put a sleep in here in case there is a dialog covering the screen\n # before this method is called.\n time.sleep(0.1)\n # TODO: are we missing a PIL.Image conversion here?\n im = self.save_currently_displayed_canvas_to_numpy_array()\n im.save(output_fname)", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()" ]
[ "0.65687513", "0.64645797", "0.62103873", "0.6199405", "0.6150051", "0.6116136", "0.6103849", "0.6095868", "0.60331243", "0.6008139", "0.5992217", "0.5990289", "0.59354466", "0.5926195", "0.58971107", "0.5877742", "0.58629024", "0.5829966", "0.58243024", "0.57998353", "0.57982475", "0.57810163", "0.57530606", "0.5729971", "0.5725155", "0.5725155", "0.5699817", "0.5695005", "0.5672938", "0.5672938" ]
0.7221238
0
>>> find_good_recipes(9, 10) '5158916779' >>> find_good_recipes(5, 10) '0124515891' >>> find_good_recipes(18, 10) '9251071085' >>> find_good_recipes(2018, 10) '5941429882'
def find_good_recipes(improvement_num, count): recipes = [3, 7] elf1 = 0 elf2 = 1 while len(recipes) <= improvement_num + count: elf1_value = recipes[elf1] elf2_value = recipes[elf2] recipe_sum = elf1_value + elf2_value if recipe_sum > 9: recipe_string = f"{recipe_sum:02d}" recipes.append(int(recipe_string[:1])) recipes.append(int(recipe_string[1:])) else: recipes.append(recipe_sum) elf1 = loop_around(1 + elf1 + elf1_value, len(recipes)) elf2 = loop_around(1 + elf2 + elf2_value, len(recipes)) answer_string = "" for i in range(improvement_num, improvement_num + count): answer_string += str(recipes[i]) return answer_string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_similar_recipes(self):\n pass", "def test_get_random_recipes(self):\n pass", "def measure_the_work(pattern_to_find):\n recipes = [3, 7]\n keys = [int(key) for key in pattern_to_find]\n elf1 = 0\n elf2 = 1\n not_found = True\n\n while not_found:\n elf1_value = recipes[elf1]\n elf2_value = recipes[elf2]\n\n recipe_sum = elf1_value + elf2_value\n\n if recipe_sum > 9:\n recipe_string = f\"{recipe_sum:02d}\"\n recipes.append(int(recipe_string[:1]))\n recipes.append(int(recipe_string[1:]))\n else:\n recipes.append(recipe_sum)\n\n elf1 = loop_around(1 + elf1 + elf1_value, len(recipes))\n elf2 = loop_around(1 + elf2 + elf2_value, len(recipes))\n if recipes[-1] == keys[-1] or recipes[-2] == keys[-1]:\n\n if pattern_to_find in ''.join(map(str, recipes[-(len(keys) + 2):])):\n not_found = False\n if recipes[-1] == keys[-1]:\n return len(recipes) - len(keys)\n else:\n return len(recipes) - len(keys) - 1", "def cakes(recipe, available):\n return min({k: available[k]//recipe[k] if k in available else 0 for k in recipe}.values())", "def search_recipe(ingredients):\n\n params = '+'.join(ingredients.split())\n url_search = SEARCH_URL.format(params)\n response = req.get(url_search)\n\n return response.content", "def test_search_recipes_by_nutrients(self):\n pass", "def test_search_recipes_by_ingredients(self):\n pass", "def find_string(n, c_length, start=None):\n \n c = range(c_length)\n if start is None:\n i = get_minimum(n)\n else:\n i = start\n\n strings = [e for e in generate_strings(n, c)]\n while True:\n for x, s in enumerate(generate_strings(i, c)):\n if check_string(s, strings):\n return s\n\n if x % 1000000 == 0:\n print x\n i += 1\n print \"processing %s\" % i", "def test_search_by_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['butter', 'sugar', 'eggs'])\n self.assertGreater(recipe_id, 0)", "def test_search_recipes(self):\n pass", "def check_recipes(self):\n\n self.recipe = None\n\n for recipe in all_recipes:\n if recipe.matches(self.crafting, self.crafting_stride):\n self.recipe = recipe", "async def get_recipes_from_components(\n fridge_components: dict, db_path: Path = DB_PATH\n) -> list:\n available_components = set(fridge_components.keys())\n logger.debug(\"Available components: {}\".format(available_components))\n\n # Updated counters of users' components\n for component in available_components:\n await execute_query(\n \"UPDATE components SET total_encountered = 1 + \"\n \"(SELECT total_encountered FROM components WHERE component = ?) 
\"\n \"WHERE component = ?\",\n (component, component),\n db_path=db_path,\n )\n logger.debug(\"Updated component counters of: {}\".format(available_components))\n\n recipes = await get_query_results(\n \"SELECT recipe_name, components FROM recipes\", db_path=db_path\n )\n\n # Select recipes that are possible to prepare with users' components\n selected_recipes = []\n for recipe in recipes:\n recipe_components = json.loads(recipe[1])\n recipe_components_names = set([x[\"item\"] for x in recipe_components])\n logger.debug(\n \"Recipe '{}' contains '{}'\".format(recipe[0], recipe_components_names)\n )\n\n # If user has all components of the recipe, find minimum amount that can be prepared\n minimum_quantity = 0\n if recipe_components_names.issubset(available_components):\n logger.debug(\n \"Recipe '{}' can be cooked with available components.\".format(recipe[0])\n )\n\n for components in recipe_components:\n available_quantity = fridge_components[components[\"item\"]]\n needed_quantity = components[\"q\"]\n\n if minimum_quantity:\n minimum_quantity = min(\n minimum_quantity, available_quantity / needed_quantity\n )\n else:\n # First cycle\n minimum_quantity = available_quantity / needed_quantity\n\n selected_recipes.append({\"name\": recipe[0], \"quantity\": minimum_quantity})\n\n selected_recipes_names = [x[\"name\"] for x in selected_recipes]\n\n # Update last recommended time for recipes\n for recipe_name in selected_recipes_names:\n current_time = int(time())\n\n await execute_query(\n \"UPDATE recipes SET last_recommended = ? WHERE recipe_name = ?\",\n (current_time, recipe_name),\n db_path=db_path,\n )\n logger.debug(\"Updated last recommended times of: {}\".format(selected_recipes_names))\n\n return selected_recipes", "def get_number_of_search_recipes(cuisine):\n cuisine_search_link = SEARCH_URL.format(0, cuisine)\n cuisine_recipes = get_content_from_dynamic_url(cuisine_search_link)\n if not cuisine_recipes:\n print \"no content for:\", cuisine_search_link\n return None\n soup_cuisine = BeautifulSoup(cuisine_recipes)\n # get recipe-count and convert it into integer\n return int(soup_cuisine.find(\"h1\", {\"class\": \"search-title\"}).find(\"em\").get_text())", "def get_recipe_chef(soup_recipe):\n chef_name = soup_recipe.find(\"div\",\n {\"class\": \"recipe-header__chef recipe-header__chef--first\"}).find(\"a\")\n if not chef_name:\n chef_name = soup_recipe.find(\"div\",\n {\"class\": \"recipe-header__chef recipe-header__chef--first\"}).find(\"span\")\n if not chef_name:\n return None\n return chef_name.get_text()", "def get_recipe_chef(soup_recipe):\n chef_name = soup_recipe.find(\"span\", {\"itemprop\": \"author\"})\n if not chef_name:\n return None\n return chef_name.get_text().strip()", "def get_recipe_difficulty(soup_recipe):\n difficulty = soup_recipe.find(\"span\", {\"class\": \"frr_difficulty fr_sep\"})\n if not difficulty:\n return None\n return difficulty.get_text().strip()", "def find_odds(numbers):\n\n pass # remove this line when starting your function", "def get_cuisine_recipes(search_cuisisnes, cuisines):\n cuisine_df = pd.DataFrame()\n for cuisine in search_cuisisnes:\n cuisine_dict = {}\n cuisine_dict['cuisine'] = cuisine\n cuisine_dict['source'] = 'BBC Good Food'\n cuisine_no_space = cuisine.lower().replace(' & ', '-').replace(' ', '-')\n recipes_cuisine_search = get_number_of_search_recipes(cuisine_no_space)\n cuisine_dict['pages'] = int(ceil(recipes_cuisine_search /\n NUMBER_OF_RECIPES_PER_SEARCH_PAGE))\n collection = False\n if cuisine in cuisines:\n 
cuisine_dict['pages'] += 1\n collection = True\n cuisine_dict['recipes_details'] = get_recipe_links(cuisine_no_space,\n cuisine_dict['pages']-1, collection)\n cuisine_dict['num_recipes'] = len(cuisine_dict['recipes_links'])\n print '#####'\n print \"Cuisine: %s \\t Number of recipes: %d \\t\\t Number of pages: %d\" \\\n % (cuisine, cuisine_dict['num_recipes'], cuisine_dict['pages'])\n coll.insert_one(cuisine_dict)\n cuisine_df = cuisine_df.append(pd.DataFrame.from_dict(cuisine_dict, orient='columns'),\n ignore_index=True)\n return cuisine_df", "def get_servings(soup_recipe):\n servings = soup_recipe.find(\"span\", {\"itemprop\": \"recipeYield\"})\n if not servings:\n return None\n return servings.get_text().strip()", "def search_recipes():\r\n cuisine, course, allergens = Helpers.dropdowns(coll_cuisines, coll_courses, coll_allergens)\r\n args = request.args.get\r\n args_list = request.args.getlist\r\n\r\n # Get Search and Pagination arguments from URL\r\n keyword_args = (\r\n args(\"search_keys\") if args(\"search_keys\") is not None else \"\")\r\n cuisineFilter_args = (\r\n args(\"cuisine_filter\") if args(\"cuisine_filter\") is not None else \"\")\r\n courseFilter_args = (\r\n args(\"course_filter\") if args(\"course_filter\") is not None else \"\")\r\n allergenFilter_args = (\r\n args_list(\"allergen_filter\") if args_list(\r\n \"allergen_filter\") is not None else [])\r\n page_args = int(args(\"page\")) if args(\"page\") is not None else 1\r\n\r\n # Set search variables\r\n search_keywords = (\r\n keyword_args.split() if keyword_args is not None else \"\")\r\n search_cuisine = (\r\n cuisineFilter_args if cuisineFilter_args is not None else \"\")\r\n search_course = (\r\n courseFilter_args if courseFilter_args is not None else \"\")\r\n search_allergens = (\r\n allergenFilter_args if allergenFilter_args != [] else \"\")\r\n\r\n # Join search variables and perform search\r\n search = (\r\n '\"' + '\" \"'.join(search_keywords) +\r\n '\" \"' + ''.join(search_cuisine) +\r\n '\" \"' + ''.join(search_course) +\r\n '\"' + ' -' + ' -'.join(search_allergens))\r\n search_results = coll_recipes.find(\r\n {\"$text\": {\"$search\": search}}).skip((page_args * 8) - 8)\\\r\n .limit(8).sort([(\"views\", -1)])\r\n\r\n # Pagination\r\n (\r\n pages, previous_page, next_page, count,\r\n total_recipes, results_count) = Helpers.pagination(\r\n search_results, page_args, coll_recipes)\r\n\r\n return render_template(\r\n \"searchrecipes.html\",\r\n recipes=search_results,\r\n cuisine=sorted(cuisine),\r\n course=course,\r\n allergens=allergens,\r\n keywords=keyword_args,\r\n f_cuisine=cuisineFilter_args,\r\n f_course=courseFilter_args,\r\n f_allergen=allergenFilter_args,\r\n pages=pages,\r\n results_count=results_count,\r\n total_recipes=total_recipes,\r\n count=count,\r\n page=page_args,\r\n next_page=next_page,\r\n previous_page=previous_page)", "def test_get_recipe_information(self):\n pass", "def test_summarize_recipe(self):\n pass", "def get_servings(soup_recipe):\n servings = soup_recipe.find(\"span\", {\"itemprop\": \"recipeYield\"})\n if not servings:\n return None\n return servings.get_text()", "def test_search_by_bad_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['asdfadsfa'])\n self.assertEqual(recipe_id, None)", "def solution(input_string):\n __check_validation(input_string)\n substrings = __get_all_possible_substrings(base_string=input_string)\n best_by_leftovers = __get_candidates_best_by_leftovers_count(substrings=substrings, base_string=input_string)\n best_by_quantity 
= __get_candidates_best_by_elements_count(substrings=best_by_leftovers)\n return best_by_quantity[0][1]", "def test_known_common_stable_isotopes_cases():\n assert \"H-1\" in known_isotopes(\"H\")\n assert \"D\" in known_isotopes(\"H\")\n assert \"T\" in known_isotopes(\"H\")\n assert \"Be-8\" in known_isotopes(\"Be\")\n assert \"Og-294\" in known_isotopes(118)\n assert \"H-1\" in common_isotopes(\"H\")\n assert \"H-4\" not in common_isotopes(1)\n assert \"H-1\" in stable_isotopes(\"H\")\n assert \"D\" in stable_isotopes(\"H\")\n assert \"T\" not in stable_isotopes(\"H\")\n assert \"Fe-56\" in common_isotopes(\"Fe\", most_common_only=True)\n assert \"He-4\" in common_isotopes(\"He\", most_common_only=True)", "def get_recommendations(soup_recipe):\n ratings = soup_recipe.find(\"meta\", {\"itemprop\": \"ratingValue\"})[\"content\"]\n ratings_count = soup_recipe.find(\"meta\", {\"itemprop\": \"ratingCount\"})[\"content\"]\n if ratings == 0:\n return None, None\n return ratings, ratings_count", "def get_recipe_details(recipe_links):\n cuisine_recipes = {}\n for r in recipe_links:\n soup_recipe = BeautifulSoup(r)\n if \"www.chowhound.com\" in r.a[\"href\"]:\n recipe = {}\n recipe['r_link'] = r.a[\"href\"]\n print \"recipe link: \", recipe['r_link']\n soup_recipe = get_recipe(recipe['r_link'])\n recipe['recipe title'] = get_recipe_title(soup_recipe)\n recipe['chef'] = get_recipe_chef(soup_recipe)\n recipe['description'] = get_description(soup_recipe)\n recipe['ingredient list'] = get_recipe_ingredients(soup_recipe)\n recipe['preperation steps'] = get_recipe_preperation(soup_recipe)\n recipe['total_time'], recipe['active_time'] = get_recipe_time(soup_recipe)\n recipe['servings'] = get_servings(soup_recipe)\n recipe['skill_level'] = get_recipe_difficulty(soup_recipe)\n recipe['rating'], recipe['rating count'] = get_ratings(soup_recipe)\n recipe['nutritional_info'] = get_nutrition_per_serving(soup_recipe)\n recipe['image_source'] = get_image_source(soup_recipe)\n cuisine_recipes[recipe['recipe title']] = recipe\n return cuisine_recipes", "def find_recipe(self, recipe_id):\n return self.find_doc('recipe', 'name', self.get_unique_recipe_name(recipe_id))", "def randomly_pick_recipe(cls):\n return random.choice(cls._recipes)" ]
[ "0.62732804", "0.61937374", "0.6106467", "0.59431934", "0.59081733", "0.5582103", "0.5502457", "0.5500289", "0.5376491", "0.53712493", "0.53555405", "0.53383607", "0.52812743", "0.52651393", "0.5197361", "0.5169107", "0.516706", "0.51159555", "0.5107954", "0.51040334", "0.50993323", "0.50930053", "0.50803745", "0.50738853", "0.50727516", "0.5061389", "0.50247467", "0.5018143", "0.5017217", "0.50040656" ]
0.7260232
0
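A runnable sketch of the recipe-scoreboard technique used by the find_good_recipes record above, assuming its undefined loop_around helper simply wraps an index modulo the scoreboard length (that helper is not part of the record); the asserts reuse expected values quoted in the record's doctests.

def _wrap(index, length):
    # Assumed behaviour of the record's loop_around helper: wrap back to the start.
    return index % length

def scoreboard_after(improvement_num, count):
    # Same idea as the positive document: grow the scoreboard, splitting a
    # two-digit sum into two recipes, and move each elf 1 + its current score.
    recipes = [3, 7]
    elf1, elf2 = 0, 1
    while len(recipes) < improvement_num + count:
        total = recipes[elf1] + recipes[elf2]
        if total >= 10:
            recipes.append(total // 10)
        recipes.append(total % 10)
        elf1 = _wrap(1 + elf1 + recipes[elf1], len(recipes))
        elf2 = _wrap(1 + elf2 + recipes[elf2], len(recipes))
    return "".join(str(d) for d in recipes[improvement_num:improvement_num + count])

assert scoreboard_after(9, 10) == "5158916779"
assert scoreboard_after(2018, 10) == "5941429882"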
Set next handler of the chain
def set_next(self, handler): self.next = handler return handler
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_handler(self, handler):\n self.next_handler = handler", "def _handler_changed(self, handler):\n if self.next is not None:\n self.next.handler = handler", "def setNext(self, next):\n\t\t\tself.next = next", "def next(self, next):\n\n self._next = next", "def next(self, next):\n\n self._next = next", "def chain(self, chain):\n\n self._chain = chain", "def _wrapped_handler_ref_changed(self, wrapped_handler_ref):\n if self.next is not None:\n self.next.wrapped_handler_ref = wrapped_handler_ref", "def send_next(self):\n event = next(self)\n self.send(event)\n return event", "def set_next(self, frame):\n self._set_stopinfo(frame, None)", "def set_next(self, next_logger: Logger):\n self.next = next_logger\n return self.next", "def set_next(self, next: Callable[[UserMessage], None], is_output):\n\n self.next = next\n self.is_output = is_output", "def add_handler(self, path, handler):\n if path: # guard against Null path, we assume handler could be Null\n path_list = self.split_path(path)\n self.trie.insert(step_list=path_list, handler=handler)", "def next(self, event):\n self.result = 1", "def set_next(self, new_next):\n self.next = new_next", "def next(self) -> Optional[Chainable]:\n return None", "def _set_link(self, value, handler):\n self._mapping[value] = handler", "def __next__(self):\n\n pass", "def add_handler(self, handler):\n pass", "def next( self ):\n next(self)", "def set_handler(self, handler):\n self._handler = handler", "def setNext(self, nextNode):\n self.__next = nextNode", "def set_next(self, node):\r\n self.__next = node", "def set_next(self, next_layer):\n self.next_layer = next_layer", "def __next__(self):\n pass", "def __next__(self):\n pass", "def __next__(self):\n pass", "def __next__(self):\n pass", "def set_next(self, node):\n self.__next = node", "def next(action, value, error_handle, skip_invoked=True):\n error_handle['action'] = 'NEXT'\n if skip_invoked:\n print_info(\"failure action= next\")\n return error_handle", "def set_next(node, value):\n node['next'] = value" ]
[ "0.7661297", "0.6707803", "0.6030326", "0.59693336", "0.59693336", "0.5968152", "0.5833051", "0.5818501", "0.57607543", "0.57461786", "0.5692767", "0.566452", "0.5590942", "0.5582867", "0.557292", "0.55651385", "0.55508965", "0.5547468", "0.55370283", "0.55218494", "0.5515645", "0.5487466", "0.5444706", "0.5423415", "0.5423415", "0.5423415", "0.5423415", "0.54202133", "0.5407615", "0.53480464" ]
0.80296415
0
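A hypothetical usage sketch for the set_next record above (the Handler class and the three handler names are illustrative, not part of the record); returning the handler from set_next is what makes the fluent chaining on the last line possible.

class Handler:
    # Minimal chain-of-responsibility link built around the record's set_next body.
    def __init__(self, name):
        self.name = name
        self.next = None

    def set_next(self, handler):
        # Same body as the positive document: store the next link and return it.
        self.next = handler
        return handler

    def handle(self, request):
        # Forward the request down the chain until a link consumes it (none do here).
        if self.next is not None:
            return self.next.handle(request)
        return None

auth, audit, billing = Handler("auth"), Handler("audit"), Handler("billing")
auth.set_next(audit).set_next(billing)  # fluent chaining works because set_next returns its argument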
Look for and return any unexplored point including the given seed. Calling map.find_above(MSS) after map.block_down(MSS) will thus find strict supersets of the MSS, as the MSS itself has been blocked.
def find_above(self, seed): superset_exists = self.solver.solve((i + 1) for i in seed) if superset_exists: return self.get_seed() else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_seed(self, seed):\n out = self.complement(seed)\n return self.solver.solve([(i + 1) for i in seed] + [-(i + 1) for i in out])", "def choose_pos(self):\n s = self\n\n availablepos = []\n for dblock in s.pjs.dblocks:\n is_available = True\n\n for powerup in s.pjs.powerups:\n if powerup.rects[0].overlap(dblock.rects[0]):\n is_available = False\n break\n\n if is_available:\n availablepos.append(dblock.rpos)\n\n pos = random.randint(0, len(availablepos) - 1)\n s.rpos = availablepos[pos]", "def get_mask_with_stent_likely_positions(data, th, verbose=False):\n \n # NOTE: this pure-Python implementation is little over twice as slow\n # as the Cython implementation, which is a neglectable cost since\n # the other steps in stent segmentation take much longer. By using\n # pure-Python, installation and modification are much easier!\n # It has been tested that this algorithm produces the same results\n # as the Cython version.\n \n # Init mask\n mask = np.zeros_like(data, np.uint8)\n \n # Criterium 1A: voxel must be above th\n # Note that we omit the edges\n #mask[25:-25,25:-25,25:-25] = (data[25:-25,25:-25,25:-25] > th[0]) * 3\n mask[1:-1,1:-1,1:-1] = (data[1:-1,1:-1,1:-1] > th[0]) * 3\n \n cnt = 0\n seed = None\n seeds = []\n values = []\n for z, y, x in zip(*np.where(mask==3)):\n \n # Only proceed if this voxel is \"free\"\n if mask[z,y,x] == 3:\n \n # Set to 0 initially\n mask[z,y,x] = 0 \n \n # Get value\n val = data[z,y,x]\n \n # Get maximum of neighbours\n patch = data[z-1:z+2, y-1:y+2, x-1:x+2].copy()\n patch[1,1,1] = 0\n themax = patch.max()\n \n # # Criterium 2: must be local max\n # if themax > val:\n # continue\n # # Also ensure at least one neighbour to be *smaller*\n # if (val > patch).sum() == 0:\n # continue\n \n # Criterium 3: one neighbour must be above th\n if themax <= th[0]:\n continue\n \n # Criterium 1B: voxel must be below upper seed th, if given\n if len(th) ==2:\n if val > th[1]:\n if verbose:\n print('Seed removed by higher th: ',(z,y,x),'ctvalue=', val)\n continue\n \n # # Criterium 4: seed must be at least 5 voxels away from other seeds\n # if not seed is None:\n # newseed = np.asarray([z,y,x])\n # v = seeds - newseed\n # d = (v[:,0]**2 + v[:,1]**2 + v[:,2]**2)**0.5 # np.linalg.norm(v) # magnitude\n # if d.min() < 5:\n # cnt+=1\n # continue\n seed = np.asarray([z,y,x])\n seeds.append(seed)\n \n # Set, and suppress stent points at direct neighbours\n #mask[z-1:z+2, y-1:y+2, x-1:x+2] = 1 # do not suppress neighbours to have more points for centerline\n mask[z,y,x] = 2\n values.append(data[z,y,x])\n \n print()\n # print('Seed ctvalues: {}'.format(sorted(values)))\n print('-------')\n # print('Seeds removed by criterium 4: {}'.format(cnt))\n \n return mask", "def Step1(self):\n import random\n print('get mask for seedpoints NELLIX is used')\n # Check if we can go\n if self._vol is None or self._params is None:\n raise ValueError('Data or params not yet given.')\n \n t0 = time.time()\n \n # Detect points\n th = self._params.seed_threshold\n pp = get_stent_likely_positions(self._vol, th) # call below\n \n # Create nodes object from found points\n nodes = stentgraph.StentGraph()\n for p in pp:\n p_as_tuple = tuple(p.flat) # todo: perhaps seed detector should just yield list of tuples.\n nodes.add_node(p_as_tuple)\n \n t1 = time.time()\n if self._verbose:\n print()\n print('Found %i seed points, which took %1.2f s.' 
% (len(nodes), t1-t0))\n \n # Store the nodes\n self._nodes1 = nodes\n \n # Draw?\n if self._draw:\n self.Draw(1)\n \n return nodes", "def get_furthest_offgrid_pin(self, pin, insufficient_list):\n \n # Find the coordinate with the most overlap\n best_coord = None\n best_dist = math.inf\n for coord in insufficient_list:\n min_dist = grid_utils.distance_set(coord, self.blocked_grids)\n if min_dist<best_dist:\n best_dist=min_dist\n best_coord=coord\n \n return set([best_coord])", "def get_stent_likely_positions(data, th):\n \n # Get mask\n mask = get_mask_with_stent_likely_positions(data, th)\n \n # Convert mask to points\n indices = np.where(mask==2) # Tuple of 1D arrays\n pp = PointSet( np.column_stack(reversed(indices)), dtype=np.float32)\n \n # Correct for anisotropy and offset\n if hasattr(data, 'sampling'):\n pp *= PointSet( list(reversed(data.sampling)) ) \n if hasattr(data, 'origin'):\n pp += PointSet( list(reversed(data.origin)) ) \n \n return pp", "def find_basin(self, s):\n \n assert s.size==self.n\n atMin = False\n thisState = s.astype(np.int8)\n\n while not atMin: \n dE = self.neighbor_dE(thisState)\n if np.any( dE<0 ):\n ix = dE.argmin()\n thisState[ix] *= -1\n else:\n atMin = True\n return thisState", "def _getPosLock( self, bSeed ):\n\n\t\treturn ( bSeed & 0xFF )", "def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn", "def get_seed_points(img,seed_values):\n\n m,n = img.shape\n coordinates = [(i,j) for i,j in it.product(range(m),range(n)) if img[i,j] in seed_values]\n\n return coordinates", "def test_find_best_W_mers_2(self):\n self.options.min_num_sites = self.options.max_num_sites = num_to_find = 2\n \n # load data and create STEME object\n fasta_file = os.path.normpath(get_fasta_file('T00759-small.fa'))\n \n #\n # Load sequences and build index\n #\n algorithm = stempy.Algorithm(self.options)\n algorithm._initialise(fasta_file)\n data = algorithm.input_sequences.data\n\n for seed in (\n 'ATGCAGAAAAATTAAG',\n 'TTTAAAATACTTTAAA',\n ):\n # create and seed a model\n W = len(seed)\n model = algorithm.create_model_of_input(W)\n model.bs.seed(seed, True)\n model.set_lambda_for_sites(data.num_sequences)\n \n # look for best W-mers under model\n best_w_mer_finder = stempy.create_best_w_mer_finder(data, model, num_to_find)\n best_w_mer_finder()\n avg_Z = 0.\n for _eval in best_w_mer_finder.best_w_mers:\n logging.info(\n 'Seed: %s; Site: %s; p(binding): %.2e; p(not binding): %.2e',\n seed, data.get_W_mer(W, _eval.global_pos), _eval.Z, 1.-_eval.Z\n )\n avg_Z += _eval.Z\n logging.info('Seed: %s; Average Z: %.6f', seed, avg_Z / len(best_w_mer_finder.best_w_mers))\n \n #\n # Check we found the seed\n #\n for _eval in best_w_mer_finder.best_w_mers:\n if data.get_W_mer(W, _eval.global_pos) == seed:\n break\n else:\n raise RuntimeError('Could not find seed in best W-mers')\n \n #\n # Log the product of p-values\n #\n best_w_mer_finder.update_model(num_to_find, use_pseudo_counts=False)\n logging.info('Seed: %s; log PoP: %.6f', seed, algorithm.significance.log_product_p_values(model))", "def runmaxmin(self):\n import random\n random.seed(self.seed)\n mindist_ptolandmarkset = np.full(self.pointcloud.size, np.inf)\n self.subsetindices = []\n for i in xrange(self.subsetsize):\n if i == 0:\n selected_index = random.randint(0, self.pointcloud.size - 1)\n # update min for all the rest indices\n # update min for this index to 0.\n for z in xrange(self.pointcloud.size):\n # if z == selected_index:\n # 
mindist_ptolandmarkset[z] = 0.0\n # else:\n mindist_ptolandmarkset[z] = self.pointcloud.distmat[selected_index][z]\n else:\n selected_index = np.argmax(mindist_ptolandmarkset)\n # update minimum distance for all points\n for z in xrange(self.pointcloud.size):\n mindist_ptolandmarkset[z] = min(mindist_ptolandmarkset[z],\n self.pointcloud.distmat[selected_index][z])\n\n self.subsetindices.append(selected_index)\n\n self.subsetpointcloud = pc.PointCloud(self.pointcloud.points[self.subsetindices])", "def get_nearest_offgrid_pin(self, pin, insufficient_list):\n # Find the coordinate with the most overlap\n best_coord = None\n best_dist = math.inf\n for coord in insufficient_list:\n track_pin = self.convert_track_to_pin(coord)\n min_dist = pin.distance(track_pin)\n if min_dist<best_dist:\n best_dist=min_dist\n best_coord=coord\n \n return set([best_coord])", "def FindPredeccesor(self, id):\r\n node = self.nodeInfo\r\n while True:\r\n succNode = self.RemoteGetSuccessor(node.Address)\r\n if self.IsInRange(id, node.HashValue, False,succNode.HashValue, True) == False:\r\n node = self.RemoteClosestPrecedingFinger(node.Address, id)\r\n else:\r\n break\r\n return node", "def find_sandwich_top_below(blk):\n if blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return blk\n # Always follow the main branch of a flow: the last connection.\n _blk = blk.connections[len(blk.connections) - 1]\n while _blk is not None:\n if _blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return _blk\n _blk = _blk.connections[len(_blk.connections) - 1]\n return None", "def ClosestPrecedingFinger(self, id):\r\n for i in range(M_BITS, 0, -1):\r\n if self.IsInRange(self.fingerTable[i].Node.HashValue, self.nodeInfo.HashValue, False, id, False):\r\n return self.fingerTable[i].Node\r\n return self.nodeInfo", "def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos", "def find_sandwich_top(blk):\n # Always follow the main branch of a flow: the first connection.\n _blk = blk.connections[0]\n while _blk is not None:\n if _blk.name in COLLAPSIBLE:\n return None\n if _blk.name in ['repeat', 'if', 'ifelse', 'forever', 'while']:\n if blk != _blk.connections[len(_blk.connections) - 1]:\n return None\n if _blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return _blk\n blk = _blk\n _blk = _blk.connections[0]\n return None", "def maximize_seed(self, seed, direction):\n while 
True:\n comp = self.complement(seed)\n x = self.solver.new_var() + 1\n if direction:\n # search for a solution w/ all of the current seed plus at\n # least one from the current complement.\n self.solver.add_clause([-x] + [i + 1 for i in comp]) # temporary clause\n # activate the temporary clause and all seed clauses\n havenew = self.solver.solve([x] + [i + 1 for i in seed])\n else:\n # search for a solution w/ none of current complement and at\n # least one from the current seed removed.\n self.solver.add_clause([-x] + [-(i + 1) for i in seed]) # temporary clause\n # activate the temporary clause and deactivate complement clauses\n havenew = self.solver.solve([x] + [-(i + 1) for i in comp])\n self.solver.add_clause([-x]) # remove the temporary clause\n\n if havenew:\n seed = self.get_seed()\n else:\n return seed", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def closestScaredGhost(pos, scaredGhosts, walls):\n fringe = [(pos[0], pos[1], 0)]\n expanded = set()\n while fringe:\n pos_x, pos_y, dist = fringe.pop(0)\n if (pos_x, pos_y) in expanded:\n continue\n expanded.add((pos_x, pos_y))\n # if we find a scared ghost at this location then exit\n for ghostPosition in scaredGhosts: # Check if collision\n if manhattanDistance( ghostPosition, (pos_x, pos_y) ) <= COLLISION_TOLERANCE:\n return dist\n # otherwise spread out from the location to its neighbours\n nbrs = Actions.getLegalNeighbors((pos_x, pos_y), walls)\n for nbr_x, nbr_y in nbrs:\n fringe.append((nbr_x, nbr_y, dist+1))\n # no scared ghost found\n return None", "def getStartingDistrict(self):\n district = None\n\n if len(self.activeDistrictMap.keys()) == 0:\n self.notify.info('no shards')\n return None\n\n if base.fillShardsToIdealPop:\n # Choose highest-population shard that is not yet\n # a 'high-population' shard\n lowPop, midPop, highPop = base.getShardPopLimits()\n self.notify.debug('low: %s mid: %s high: %s' %\n (lowPop, midPop, highPop))\n for s in self.activeDistrictMap.values():\n if s.available and s.avatarCount < lowPop:\n self.notify.debug('%s: pop %s' %\n (s.name, s.avatarCount))\n if district is None:\n district = s\n else:\n # if multiple shards have the same population,\n # sort them by name so that all clients will\n # choose the same one\n if s.avatarCount > district.avatarCount or (\n (s.avatarCount == district.avatarCount and\n s.name > district.name)\n ):\n district = s\n\n # if all of the shards are over the cutoff population, pick\n # the lowest-population shard\n if district is None:\n self.notify.debug(\n 'all shards over cutoff, picking lowest-population shard')\n for s in self.activeDistrictMap.values():\n if s.available:\n self.notify.debug('%s: pop %s' %\n (s.name, s.avatarCount))\n if (district is None or\n (s.avatarCount < district.avatarCount)):\n district = s\n\n if district is not None:\n self.notify.debug('chose %s: pop %s' % (district.name, district.avatarCount))\n return district", "def FindClosestInsertedPoint(self, ):\n ...", "def cluster(M, point, eps): # zwraca punkty dla ktorych dystans z punktu point jest mniejszy od eps\n seeds = []\n for i in range(0, M.shape[0]):\n if eps_neighborhood(M, point, i, eps):\n seeds.append(i)\n return seeds", "def get_best_offgrid_pin(self, pin, insufficient_list):\n # Find the coordinate with the most 
overlap\n best_coord = None\n best_overlap = -math.inf\n for coord in insufficient_list:\n full_pin = self.convert_track_to_pin(coord)\n # Compute the overlap with that rectangle\n overlap_rect=pin.compute_overlap(full_pin)\n # Determine the min x or y overlap\n min_overlap = min(overlap_rect)\n if min_overlap>best_overlap:\n best_overlap=min_overlap\n best_coord=coord\n \n return set([best_coord])", "def naive_consensus_search(Ts, m):\n k = len(Ts)\n\n bsf_radius = np.inf\n bsf_Ts_idx = 0\n bsf_subseq_idx = 0\n\n for j in range(k):\n radii = np.zeros(len(Ts[j]) - m + 1)\n for i in range(k):\n if i != j:\n mp = naive.stump(Ts[j], m, Ts[i])\n radii = np.maximum(radii, mp[:, 0])\n min_radius_idx = np.argmin(radii)\n min_radius = radii[min_radius_idx]\n if min_radius < bsf_radius:\n bsf_radius = min_radius\n bsf_Ts_idx = j\n bsf_subseq_idx = min_radius_idx\n\n return bsf_radius, bsf_Ts_idx, bsf_subseq_idx", "def look_ahead(self, point):\n directions = [N(Point.make(point)), S(Point.make(point)), E(Point.make(point)), W(Point.make(point))]\n for point in directions:\n if not point in self.nodes:\n return True\n return False", "def targetpoint(self, initpoint):\n while True:\n col = int(random.uniform(0, COLS))\n row = int(random.uniform(0, ROWS))\n if (row, col) != initpoint:\n break\n return (row, col)", "def __findFarestPoint__( self, outPoint ):\n end = outPoint;\n endInside = self.inside( end );\n if endInside: return outPoint;\n start = self.center;\n startInside = self.inside( start );\n \n while( True ):\n if ( utility.euclideanDistSqr( start, end ) <= 4 ):\n return start;\n mid = utility.devide( utility.add( start, end ), 2);\n if self.inside( mid ):\n start = mid;\n else:\n end = mid;", "def _get_nearest_neighbor(self, sample):\n d_min=float('inf') #minimum distance\n node_neighbor=self.start\n\n for iter in self.start:\n d=0 #distance between sample and each node in the node tree\n for j in range(sample.size):\n d+=(iter.state[j]-sample[j])**2\n if(d<d_min):\n d_min=d\n node_neighbor=iter\n\n return node_neighbor" ]
[ "0.59967655", "0.55694646", "0.5480614", "0.5397099", "0.5393073", "0.5385499", "0.5333922", "0.52977884", "0.52768016", "0.5189014", "0.51889", "0.518867", "0.5116351", "0.5088275", "0.5082775", "0.5077057", "0.50351775", "0.50136656", "0.5006063", "0.49916822", "0.49692985", "0.49560353", "0.49290282", "0.490117", "0.4896136", "0.48890227", "0.48712504", "0.485398", "0.48457998", "0.4845772" ]
0.7450136
0
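A hedged sketch of the conventions shared by the find_above record above and the maximize_seed record that follows: a seed is a set of 0-based constraint indices, and constraint i is encoded as SAT variable i + 1 because solver literals are non-zero and 1-based. The two helpers below (complement and get_seed) are assumptions about the surrounding map-solver class and about how its solver exposes a model; they are not part of the records.

def complement(self, seed):
    # Assumed helper: all constraint indices not in the seed
    # (self.n is assumed to be the number of constraints).
    return set(range(self.n)) - set(seed)

def get_seed(self):
    # Assumed helper: read the satisfying assignment back into 0-based indices.
    # model_true(v) is a stand-in for however the underlying solver reports
    # that variable v is true in its current model.
    return {i for i in range(self.n) if self.solver.model_true(i + 1)}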
Maximize a given seed within the current set of constraints. The Boolean direction parameter specifies up (True) or down (False)
def maximize_seed(self, seed, direction): while True: comp = self.complement(seed) x = self.solver.new_var() + 1 if direction: # search for a solution w/ all of the current seed plus at # least one from the current complement. self.solver.add_clause([-x] + [i + 1 for i in comp]) # temporary clause # activate the temporary clause and all seed clauses havenew = self.solver.solve([x] + [i + 1 for i in seed]) else: # search for a solution w/ none of current complement and at # least one from the current seed removed. self.solver.add_clause([-x] + [-(i + 1) for i in seed]) # temporary clause # activate the temporary clause and deactivate complement clauses havenew = self.solver.solve([x] + [-(i + 1) for i in comp]) self.solver.add_clause([-x]) # remove the temporary clause if havenew: seed = self.get_seed() else: return seed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maximize(self):\n raise NotImplementedError", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def _maximize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass", "def maximize(self, budget, optimizer):\n\n\t\tpass", "def move(self):\n self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)", "def _move_satisfy_random_constraint(self):\n secure_random = random.SystemRandom()\n done = False\n while not done:\n c = secure_random.choice(self.constraints)\n if self._is_constraint_violated(c):\n done = True\n # swap 2 wizards to move closer\n self._swap_wizards(c[random.randint(0, 1)], c[2])\n # with probability 0.5, swap the two border wizards\n if random.randint(0, 1) == 1:\n self._swap_wizards(c[0], c[1])\n if not done: print(\"Nothing to do...\")", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def update_heuristic(self):\n self.heuristic = self.manhattan_distance()", "def progressive_deepening(state, 
heuristic_fn=always_zero, depth_limit=INF,\n maximize=True) :\n anytime_value = AnytimeValue() # TA Note: Use this to store values.\n depth = 0\n while depth<=depth_limit-1:\n depth+=1\n best_option=minimax_search_alphabeta(state,-INF,INF, heuristic_fn=heuristic_fn,depth_limit=depth, maximize=True)\n anytime_value.set_value(best_option)\n return anytime_value", "def optimize_library_descent(self, target, direction='max', steps=100,\n multiprocessing=False, ret_info=False,\n args=None):\n # get the target function to call\n target_function = getattr(self, target)\n if args is not None:\n target_function = functools.partial(target_function, **args)\n\n # initialize the optimizer\n value = target_function()\n value_best, state_best = value, self.sens_mat.copy()\n \n if ret_info:\n # store extra information\n start_time = time.time()\n info = {'values': {}}\n values_count = self.parameters['optimizer_values_count']\n values_step = max(1, steps // values_count)\n \n if multiprocessing:\n # run the calculations in multiple processes\n pool_size = self.get_number_of_cores()\n pool = mp.Pool(processes=pool_size)\n if ret_info:\n values_step = max(1, values_step // pool_size)\n \n # iterate for given number of steps\n for step in range(int(steps) // pool_size):\n joblist = []\n init_arguments = self.init_arguments\n for _ in range(pool_size):\n # modify the current state and add it to the job list\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n params = init_arguments['parameters'] \n params['sensitivity_matrix'] = self.sens_mat\n params['initialize_state']['sensitivity'] = 'exact'\n \n joblist.append((copy.deepcopy(init_arguments), target))\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # run all the jobs\n results = pool.map(_run_job, joblist)\n \n # find the best result \n if direction == 'max':\n res_best = np.argmax(results)\n if results[res_best] > value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n elif direction == 'min':\n res_best = np.argmin(results)\n if results[res_best] < value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n else:\n raise ValueError('Unsupported direction `%s`' % direction)\n \n if ret_info and step % values_step == 0:\n info['values'][step * pool_size] = results[res_best]\n \n else:\n # run the calculations in this process\n for step in range(int(steps)):\n # modify the current state\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # get the value of the new state\n value = target_function()\n \n improved = ((direction == 'max' and value > value_best) or\n (direction == 'min' and value < value_best))\n if improved:\n # save the state as the new best value\n value_best, state_best = value, self.sens_mat.copy()\n else:\n # undo last change\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n if ret_info and step % values_step == 0:\n info['values'][step] = value_best\n\n # sort the best state and store it in the current object\n state_best = self.sort_sensitivity_matrix(state_best)\n self.sens_mat = state_best.copy()\n\n if ret_info:\n info['total_time'] = time.time() - start_time \n info['states_considered'] = steps\n info['performance'] = steps / 
info['total_time']\n return value_best, state_best, info\n else:\n return value_best, state_best", "def optimize_library_anneal(self, target, direction='max', steps=100,\n ret_info=False, args=None):\n # lazy import\n from .optimizer import ReceptorOptimizerAnnealer # @UnresolvedImport\n \n # prepare the class that manages the simulated annealing\n annealer = ReceptorOptimizerAnnealer(self, target, direction, args,\n ret_info=ret_info)\n annealer.steps = int(steps)\n annealer.Tmax = self.parameters['anneal_Tmax']\n annealer.Tmin = self.parameters['anneal_Tmin']\n if self.parameters['verbosity'] == 0:\n annealer.updates = 0\n\n # do the optimization\n MI, state = annealer.optimize()\n\n # sort the best state and store it in the current object\n state = self.sort_sensitivity_matrix(state)\n self.sens_mat = state.copy()\n \n if ret_info:\n return MI, state, annealer.info\n else:\n return MI, state", "def ai_move():\n\tinitial_state = map(get_filled_edges, rects)\n\tpossible_moves = []\n\tfor index, filled_edges in enumerate(initial_state):\n\t\tif filled_edges == 0:\n\t\t\tpossible_moves.extend([(index, i) for i in 'ltrb'])\n\t\telif filled_edges == 1:\n\t\t\tpossible_moves.extend(one_filled_edge(index))\n\t\telif filled_edges == 2:\n\t\t\tpossible_moves.extend(two_filled_edge(index))\n\t\telif filled_edges == 3:\n\t\t\tpossible_moves.extend(three_filled_edge(index))\n\tprint possible_moves\n\tpossible_decisions = []\n\tfor move in possible_moves:\n\t\tfinal_state = apply_move(move)\n\t\tpossible_decisions.append(is_feasible(initial_state, final_state))\n\tprint possible_decisions\n\t# randomizing when some decisions have the same weight\n\tmax_weight = max(possible_decisions)\n\t# list of indices which have the same weight\n\tmax_indices = []\n\tfor index, weight in enumerate(possible_decisions):\n\t\tif weight == max_weight:\n\t\t\tmax_indices.append(index)\n\tx = choice(max_indices)\n\tprint x\n\treturn possible_moves[x]\n\t# return possible_moves[possible_decisions.index(max(possible_decisions))]", "def expand(self, right=0, down=0, left=0, up=0):\n self.min_col -= left\n self.min_row -= up\n self.max_col += right\n self.max_row += down", "def mod_space_opt(\n *,\n space,\n dryness_method,\n fuel_build_up_method,\n include_temperature,\n discrete_params,\n defaults=None,\n basinhopping_options=None,\n minimizer_options=None,\n mode=\"basinhopping\",\n x0=None,\n):\n to_optimise = gen_to_optimise(\n fail_func=fail_func,\n success_func=success_func,\n # Init (data) params.\n dryness_method=dryness_method,\n fuel_build_up_method=fuel_build_up_method,\n include_temperature=include_temperature,\n _uncached_data=False,\n **discrete_params,\n )\n\n defaults_dict = defaults if defaults is not None else {}\n\n def to_optimise_with_discrete(x):\n return to_optimise(\n **space.inv_map_float_to_0_1(dict(zip(space.continuous_param_names, x))),\n **defaults_dict,\n )\n\n def basinhopping_callback(x, f, accept):\n # NOTE: Parameters recorded here are authoritative, since hyperopt will not\n # properly report values modified as in e.g. 
`mod_quniform`.\n values = {\n **space.inv_map_float_to_0_1(dict(zip(space.continuous_param_names, x))),\n **discrete_params,\n **defaults_dict,\n }\n values[\"dryness_method\"] = dryness_method\n values[\"fuel_build_up_method\"] = fuel_build_up_method\n values[\"include_temperature\"] = include_temperature\n\n minimizer_options_dict = minimizer_options if minimizer_options is not None else {}\n basinhopping_options_dict = (\n basinhopping_options if basinhopping_options is not None else {}\n )\n\n if x0 is None:\n x0 = space.continuous_x0_mid\n\n if mode == \"basinhopping\":\n res = basinhopping(\n to_optimise_with_discrete,\n x0=x0,\n seed=0,\n callback=basinhopping_callback,\n take_step=BoundedSteps(\n stepsize=0.3, rng=np.random.default_rng(0), verbose=True\n ),\n **{\n \"disp\": True,\n \"minimizer_kwargs\": dict(\n method=\"L-BFGS-B\",\n jac=None,\n bounds=[(0, 1)] * len(space.continuous_param_names),\n options={\n \"maxiter\": 60,\n \"ftol\": 1e-5,\n \"eps\": 1e-3,\n **minimizer_options_dict,\n },\n ),\n \"T\": 0.05,\n \"niter\": 100,\n \"niter_success\": 15,\n **basinhopping_options_dict,\n },\n )\n elif mode == \"minimize\":\n res = minimize(\n to_optimise_with_discrete,\n x0=x0,\n method=\"L-BFGS-B\",\n jac=None,\n bounds=[(0, 1)] * len(space.continuous_param_names),\n options={\n \"maxiter\": 60,\n \"ftol\": 1e-5,\n \"eps\": 1e-3,\n **minimizer_options_dict,\n },\n )\n else:\n raise ValueError\n\n return res", "def default_fitness(maximise):\n if maximise:\n return -100000.0\n else:\n return 100000.0", "def greedy_MAP_assignment(theta,random_runs = 10,heur = 'first'):\r\n N = theta.shape[0]\r\n scipy.random.seed()\r\n max_p = -scipy.inf\r\n for k in range(random_runs):\r\n A = scipy.random.randint(2,size = N)\r\n improved = True\r\n p = A.dot( theta.dot(A) )\r\n while improved:\r\n improved = False\r\n if heur == 'first':\r\n p2 = -scipy.inf\r\n perm = scipy.random.permutation(N)\r\n for s in perm:\r\n #dp: change in p if A[i] bit is reversed\r\n dp = (1-2*A[s])*( A.dot(theta[s,:]+ theta[:,s]) ) + theta[s,s]\r\n if dp>0:\r\n p2 = dp\r\n break\r\n\r\n if heur == 'best':\r\n dp = (1-2*A)*( A.dot(theta + theta.T) ) + scipy.diag(theta)\r\n p2,s = dp.max(), dp.argmax()\r\n if p2 > 0:\r\n A[s] = 1-A[s]\r\n improved = True\r\n p += p2\r\n if p>max_p:\r\n greedy_A,max_p = A.copy(),p\r\n return greedy_A.astype(int),max_p", "def greedyOptimize(self, cpoints):\n # the currently best known energy is the current energy\n best_energy = self.totalEnergy(cpoints.values())\n best_before = best_energy\n cpoints_ = cpoints.copy()\n # iterate over each control point in order to find the movement\n # that improves it i.e. 
the snakes overall energy best\n cv = cpoints_.values()\n for i in range(len(cpoints_)):\n best_step = None \n # test all possible steps\n for step in self.step_directions:\n c1 = cpoints_[i]\n # only check a step if it ends within the image bounds\n if self.inImageBound(cpoints_[i] + step):\n # apply the step to the control point\n cpoints_[i] = cpoints_[i] + step\n # compute the new energy\n new = self.totalEnergy(cpoints_.values())\n # check wether it is a true improvement\n if new < best_energy:\n assert new < best_energy\n # update the currently best known energy\n best_energy = new\n best_step = step\n cv = cpoints_.values()\n cpoints_[i] = cpoints_[i] - step\n assert (c1[0], c1[1]) == (cpoints_[i][0], cpoints_[i][1])\n \n # apply the best step to the control point\n if best_step != None:\n cpoints_[i] = cpoints_[i] + best_step\n \n # ensure saneness\n assert np.array_equal(cv, cpoints_.values())\n self.bestenergy_debug = best_energy\n assert best_before >= best_energy, '(%s !>= %s) the optimized energy is not euqal-smaller than the energy before' % (best_before, best_energy)\n assert self.totalEnergy(cpoints_.values()) == best_energy, '(%s != %s) the new calculated energy does not equal the best calculated energy' % (self.totalEnergy(cpoints_.values()), best_energy)\n return cpoints_", "def maximize(self):\n self.abstract_obj.maximize()", "def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])", "def greedy_next_action(self, state):\n max_val = float('-inf')\n if self.verbose:\n cells = []\n max_candidates = {}\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n val = self.q_value((state, (i, j)))\n if val >= max_val:\n max_val = val\n max_move = (i, j)\n max_candidates[max_move] = val\n if self.verbose:\n cells.append('{0:.3f}'.format(val).center(6))\n elif self.verbose:\n cells.append(state[i][j].center(6))\n if self.verbose:\n self.logger.info(BOARD.format(*cells))\n possible_actions = [k for k, v in max_candidates.items() if v == max_val]\n action = random.choice(possible_actions) if len(possible_actions) > 0 else None\n return action", "def move(self, direction, max_height):\n if direction > 0:\n self.y_pos -= self.SPEED\n elif direction < 0:\n self.y_pos += self.SPEED\n\n if self.y_pos >= max_height - 40:\n self.y_pos = max_height - 40", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def action(self, direction: str, baby_position: tuple) -> None:\n assert direction in constants.BABY_MOVEMENTS\n # First move to pick up baby is they are adjacent\n if baby_position[0] == self.position[0]:\n if baby_position[1] == self.position[1] - 1:\n self.position[1] -= 1\n return\n elif baby_position[1] == self.position[1] + 1:\n self.position[1] += 1\n return\n elif baby_position[1] == self.position[1]:\n if baby_position[0] == self.position[0] - 1:\n self.position[0] -= 1\n return\n 
elif baby_position[0] == self.position[0] + 1:\n self.position[0] += 1\n return\n\n # not adjacent\n if np.random.random() < self.movement_probability:\n if self.dumb:\n if direction == \"N\":\n if self.position[0] != 0:\n self.position[0] -= 1\n elif direction == \"E\":\n if self.position[1] != self.board_dimensions[1] - 1:\n self.position[1] += 1\n elif direction == \"S\":\n if self.position[0] != self.board_dimensions[0] - 1:\n self.position[0] += 1\n elif direction == \"W\":\n if self.position[1] != 0:\n self.position[1] -= 1\n else:\n # Find out whether the baby is further away row-wise\n # or column-wise to decide movement\n dad_pos = self.position.copy()\n row_diff = baby_position[0] - dad_pos[0]\n col_diff = baby_position[1] - dad_pos[1]\n # Move in the direction with greatest difference\n if abs(row_diff) > abs(col_diff):\n if row_diff > 0:\n self.position[0] += 1\n else:\n self.position[0] -= 1\n return\n elif abs(row_diff) < abs(col_diff):\n if col_diff > 0:\n self.position[1] += 1\n else:\n self.position[1] -= 1\n return\n elif abs(row_diff) == abs(col_diff):\n if np.random.random() < 0.5:\n if row_diff > 0:\n self.position[0] += 1\n else:\n self.position[0] -= 1\n else:\n if col_diff > 0:\n self.position[1] += 1\n else:\n self.position[1] -= 1", "def move(self, state):\n \n self.depth_limit=1\n self.best_utility=-2\n action=None\n while not self.is_time_up():\n self.terminal=True\n self.cache={}\n action=self.alpha_beta_search(state,0)\n if self.terminal==True:\n break\n self.depth_limit=self.depth_limit+1\n \n return action", "def isFeasible(self):\n if self.function.constraints(self.position[0],self.position[1]) == False:\n self.position = np.array([random.uniform(-50,50), random.uniform(-50,50)]) \n self.velocity = np.array([random.uniform(-1,1), random.uniform(-1,1)])", "def opt_settlement(player, board, gains, goal=\"default\"):\n goal_index = goal_list.get(goal, 0)\n vertex_score = lambda t: vertex_eval(player, board, t[0], gains, goal_index)\n vertex_list = [(v, board.get_vertex_location(v)) for v in range(board.max_vertex+1) \\\n if board.if_can_build(\"settlement\", *(board.get_vertex_location(v)))]\n return max(vertex_list, key = vertex_score, default=(None, None))", "def findHeuristic(self, _, __):\n popSize = 100\n retain = 0.25\n random_select = 0.1\n mutate = 0.1\n\n popList = self.populationList(popSize)\n\n solved = False\n count = 0\n while not solved:\n # evolves current\n popList = (self.evolve(popList, retain, random_select, mutate))\n# print(popList) # for troubleshooting\n for i in popList:\n if (self.fitness(i) == 0):\n print(\"solution: \", i)\n solved = True\n break\n # if plateus at a local minima, then end after 50 generations\n if count >= 50:\n if (self.fitness(i) <= 10):\n print(\"solution: \", i)\n solved = True\n break\n if solved is True:\n break\n print(\"-----------------\")\n\n # will modify mutation, random_select and retain values to help leave a\n # local minima. 
More randomness the longer it takes up to specific points\n if count % 3 == 0:\n if mutate < 0.2:\n mutate += 0.01\n if random_select < 0.3:\n random_select += 0.01\n count += 1\n\n return exit(0)", "def qlearn(self, num_simulations):\n initial_maze_loc = self.maze.location\n for i in range(num_simulations):\n curr_coord = self.maze.location\n new_epsilon = round(1 - (i+1)/num_simulations, 2)\n self.epsilon = new_epsilon if new_epsilon > 0 else self.epsilon\n\n while (self.grid[curr_coord[0]][curr_coord[1]] != 'G' and\n self.grid[curr_coord[0]][curr_coord[1]] != 'E'):\n rand_num = round(random.random(), 2)\n\n move = (0,0)\n if rand_num < self.epsilon: # exploration\n move = random.choice(self.maze.moves())\n else: # exploitation\n possible_moves = self.maze.moves()\n best_next_move_q = 0\n for pmove in possible_moves:\n if (self.qtable[curr_coord[0]+pmove[0]][curr_coord[1]+pmove[1]] >=\n best_next_move_q):\n move = pmove\n best_next_move_q = (\n self.qtable[curr_coord[0]+pmove[0]][curr_coord[1]+pmove[1]])\n\n self.q(curr_coord, move)\n curr_coord = (curr_coord[0]+move[0], curr_coord[1]+move[1])\n self.maze.location = curr_coord\n self.maze.location = initial_maze_loc\n #print(f\"Simulation {i+1} of {num_simulations} complete.\")", "def Maximize(self):\r\n\r\n return self.SetFlag(self.optionMaximized, True)", "def direction_correction(self):\n self.directions.monster = random.uniform(self.directions.monster * self.get_monster_sensitivity(),\n self.directions.monster * (1 + (1 - self.get_monster_sensitivity())))\n self.directions.food = random.uniform(self.directions.food * self.get_food_sensitivity(),\n self.directions.food * (1 + (1 - self.get_food_sensitivity())))\n self.directions.water = random.uniform(self.directions.water * self.get_water_sensitivity(),\n self.directions.water * (1 + (1 - self.get_water_sensitivity())))" ]
[ "0.5255628", "0.51518804", "0.50815153", "0.5040505", "0.50042754", "0.49726215", "0.48932496", "0.4887833", "0.486175", "0.48460725", "0.4834822", "0.4807731", "0.48042133", "0.47982746", "0.47937402", "0.4761865", "0.4752347", "0.4741787", "0.47137696", "0.46898678", "0.46607664", "0.46577516", "0.4634375", "0.46226537", "0.46104294", "0.46091238", "0.46086308", "0.4607482", "0.45969924", "0.45945126" ]
0.7806484
0
Return the complement of a given set w.r.t. the set of mapped constraints.
def complement(self, aset):
    return self.all_n.difference(aset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complement(self) -> 'RangeSet':\n return RangeSet(Range()) - self", "def get_complement(seta):\n\n complement_set = set()\n\n for elem in seta:\n new_elem_tuple = (elem[0], float(D('1.0') - D(str(elem[1]))))\n complement_set.add(new_elem_tuple)\n\n return complement_set", "def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)", "def complement(self):\n N = self._size + 1\n new_covers = [[N - i[0], N - i[1]] for i in self._poset.cover_relations_iterator()]\n return TamariIntervalPoset(N - 1, new_covers)", "def only_diff_elements(set_1, set_2):\n return (set_1 ^ set_2)", "def _complement(self):\n assert self._.d == 2, \"the complement is only defined for two classes\"\n kargs = {\"complement\": self}\n if self._has(\"p\"):\n kargs[\"p\"] = self._.p.reorder([0, 2, 1], inplace=False)\n elif self._has(\"q\"):\n kargs[\"q\"] = self._.q.reorder([0, 2, 1], inplace=False)\n elif self._has(\"P\"):\n kargs[\"P\"] = self._.P[[0, 2, 1], [0, 2, 1]]\n elif self._has(\"Q\"):\n kargs[\"Q\"] = self._.Q[[0, 2, 1], [0, 2, 1]]\n return ASParameters(**kargs)", "def symmetric_difference(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> 'RangeSet':\n # convert to a RangeSet\n rng_set = RangeSet._to_rangeset(rng_set)\n # get union and then remove intersections\n union = self.union(rng_set)\n intersection = self.intersection(rng_set)\n union.difference_update(intersection)\n return union", "def complement(self):\n assert self._.d == 2, \"the complement is only defined for two classes\"\n return self._.complement", "def vcf_complement_set_snvs(vcf_input: str, set_negative:set, vcf_output:str):\n with pysam.VariantFile(vcf_input) as input_vcf_handle:\n with pysam.VariantFile(vcf_output,header=input_vcf_handle.header,mode='w') as out:\n for record in input_vcf_handle:\n\n if len(record.ref)!=1:\n continue\n\n if len(record.alts[0])!=1:\n continue\n\n\n if not (record.chrom,record.pos) in set_negative:\n out.write(record)", "def subtract(wb, wl):\n return set(wb) - set(wl)", "def union_of_non_none_sets(sets):\r\n return functools.reduce(lambda x, y: x.union(y), filter(lambda z: z is not\\\r\n None, sets), set())", "def prune_sequence(sequence_set, extended_set):\n tmp_set = set()\n for seq in sequence_set:\n # se una sotto-sequenza e' trovata viene ignorata, altrimenti e' aggiunta al set temporaneo\n found = False\n for ext in extended_set:\n if seq1_in_seq2(seq, ext, 0): # eps e' 0 perche' le sequenze sono identiche\n found = True\n break\n if not found:\n tmp_set.add(seq)\n # alla fine aggiungi tutto il set esteso, si puo' includere nel ciclo precedente\n for ext in extended_set:\n tmp_set.add(ext)\n return tmp_set", "def complement(G):\n\n nset = set(G.nodes())\n n_nodes = G.order()\n n_edges = n_nodes * (n_nodes - 1) - G.size() + 1\n \n cmp_edges = ((u, v) for u in G.nodes()\n\t\t for v in nset - set(G.successors(u)))\n deg = make_deg(n_nodes, cmp_edges)\n cmp_edges = ((u, v) for u in G.nodes()\n\t\t for v in nset - set(G.successors(u)))\n H = make(n_nodes, n_edges, cmp_edges, deg)\n return H", "def __sub__(self, vs):\n return [v for v in self.__elements if tuple(v) not in map(tuple, vs)]", "def complement(self):\n comp = self.__class__(self.name, complement(self.seq),\n start=self.start, end=self.end)\n comp.comp = False if self.comp else True\n return comp", "def unlabeled_set(self):\n # unlabeled set is the query set minus the preselected set\n unlabeled_tag_bitmask = self._query_tag_bitmask - self._preselected_tag_bitmask\n return 
unlabeled_tag_bitmask.masked_select_from_list(\n self.api_workflow_client.filenames_on_server\n )", "def remove_super_sets(sub_set, set_of_sets):\n return [x for x in set_of_sets if not set(x).issuperset(set(sub_set))]", "def apply_to_sets(cls, sets):\n for sq_set in sets:\n sqs_with_val = {}\n sqs_by_bitmask = {}\n for sq in iter(sq_set):\n for sq2 in iter(sq_set):\n if sq2.known_value:\n sq.eliminate(sq2)\n\n pvals = sq.possible_values()\n\n if sq.bitmask not in sqs_by_bitmask:\n sqs_by_bitmask[sq.bitmask] = []\n sqs_by_bitmask[sq.bitmask].append(sq)\n\n for val in pvals:\n if val not in sqs_with_val:\n sqs_with_val[val] = []\n sqs_with_val[val].append(sq)\n\n for val, sqs in sqs_with_val.iteritems():\n if len(sqs) == 1:\n sqs[0].set_value(val)\n\n for bm, sqs in sqs_by_bitmask.iteritems():\n if len(sqs) > 1:\n pvals = list(SudokuSquare.bitmask_to_possible_values(bm))\n if len(sqs) == len(pvals):\n for sq in iter(sq_set):\n if sq not in sqs:\n sq.eliminate(sqs[0])", "def constraint_not_adjacent(m, n) :\n return not constraint_adjacent(m,n)", "def Res(K: Set[CNFClause]) -> Set[CNFClause]:\n K_list = list(K)\n res = set()\n for i in range(len(K_list) - 1):\n for j in range(i + 1, len(K_list)):\n for literal in list(K_list[i].literals):\n if - literal in K_list[j].literals:\n resolute = (K_list[i] | K_list[j]) - CNFClause({literal, -literal})\n if not resolute.is_tautology():\n res.add(resolute)\n break\n return K | res", "def _complement(self, k, p):\n assert self._.d == 2, \"the complement is only defined for two classes\"\n if checkPos(self._.b[0] - self._.c[2]):\n return self._get_class()((k[2], p[2, 2, 1]),\n (Integer(1), p[1, 2, 2]),\n complement=self)\n else:\n return ASParameters._complement(self)", "def notInSet(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(rhs, Set)\n return lhs not in rhs", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def complement(self, **kwargs):\n self._data.switch_complement(whether=True, **kwargs)\n return self", "def filter(self, new_set):\n for old_set in self.itervalues():\n for feat in old_set.iterkeys():\n if feat not in new_set:\n del old_set[feat]\n return self", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def complement(self, universe):\n return Complement(universe, self)", "def __xor__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__xor__', other)", "def remove_elements_from_set(s: set, *args) -> set:\n for _ in args:\n s.remove(_)\n return s" ]
[ "0.6654452", "0.6576406", "0.63831866", "0.62827057", "0.58516365", "0.5725783", "0.56848663", "0.5537928", "0.5518822", "0.54672354", "0.5461112", "0.5455276", "0.5429003", "0.5420453", "0.5417606", "0.54041857", "0.5387203", "0.5379959", "0.53690493", "0.5363554", "0.5359956", "0.53369766", "0.53304964", "0.53043926", "0.5292924", "0.5292573", "0.5292573", "0.5283176", "0.5259491", "0.5236802" ]
0.76911646
0
Add a given clause to the Map solver.
def add_clause(self, clause):
    self.solver.add_clause(clause)
    if self.dump is not None:
        self.dump.write(" ".join(map(str, clause)) + " 0\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_clause(self, clause):\n self.abstract_clauses.append(clause)", "def tell (self, clause):\n self.clauses.add(clause)", "def add_clause(self, clause, soft=False):\n\n # first, map external literals to internal literals\n # introduce new variables if necessary\n cl = list(map(lambda l: self._map_extlit(l), clause))\n\n if not soft:\n # the clause is hard, and so we simply add it to the SAT oracle\n self.oracle.add_clause(cl)\n else:\n self.soft.append(cl)\n\n # soft clauses should be augmented with a selector\n sel = cl[0]\n if len(cl) > 1 or cl[0] < 0:\n self.topv += 1\n sel = self.topv\n\n self.oracle.add_clause(cl + [-sel])\n\n self.sels.append(sel)", "def AddUseClause(self, use_clause):\n assert _IsValidUseClause(use_clause), use_clause\n self.use_clauses.append(use_clause)", "def add_clause(self, lits):\n\n self.nclauses += 1\n self.base_clauses.append(lits)", "def add_where_clause(self, clause):\r\n if not isinstance(clause, WhereClause):\r\n raise StatementException(\"only instances of WhereClause can be added to statements\")\r\n clause.set_context_id(self.context_counter)\r\n self.context_counter += clause.get_context_size()\r\n self.where_clauses.append(clause)", "def add_statement(rq_dict, statement, result_data_contents=\"graph\"):\n rq_dict[\"statements\"].append({\"statement\": statement})\n rq_dict[\"statements\"][-1][\"resultDataContents\"] = [result_data_contents]", "def add_assignment_clause(self, clause):\r\n if not isinstance(clause, AssignmentClause):\r\n raise StatementException(\"only instances of AssignmentClause can be added to statements\")\r\n clause.set_context_id(self.context_counter)\r\n self.context_counter += clause.get_context_size()\r\n self.assignments.append(clause)", "def parse_and_add_clause(self, line):\n clause = list()\n for literal in line.split():\n negated = 1 if literal.startswith('!') else 0\n variable = literal[negated:]\n if variable not in self.variable_table:\n self.variable_table[variable] = len(self.variables)\n self.variables.append(variable)\n encoded_literal = self.variable_table[variable] << 1 | negated\n clause.append(encoded_literal)\n self.clauses.append(tuple(clause))", "def add_change_clause(sv, nod, tree, vlu):\r\n clau=((Change, tree, None), vlu) \r\n if not clau in nod.clauses: nod.clauses+=[clau] # avoid duplicates\r", "def _add(\n self,\n instruction_name: str,\n qubits: Tuple[int, ...],\n entry: CalibrationEntry,\n ):\n self._map[instruction_name][qubits] = entry\n self._qubit_instructions[qubits].add(instruction_name)", "def add_criterion(self, criterion):\n self.criterion_entries.add(criterion)\n return self", "def handle_clause(clause,i,lit_to_clauses):\n for s in clause.split():\n l = int(s)\n if (l !=0):\n lit_to_clauses[dimacs2index(l)].append(i)\n link_literal_to_clause(l,i)", "def add_phrase(self, phrase: Phrase) -> None:\n self.phrase_string_map[phrase.phrase_string] = phrase\n self.phrase_type[phrase.phrase_string].add(\"phrase\")\n self.phrase_index[phrase.phrase_string] = phrase\n self.phrase_length_index[len(phrase.phrase_string)].add(phrase.phrase_string)\n self._index_phrase_words(phrase)\n self._index_phrase_tokens(phrase)", "def adder(where, what, value):\n if what in where:\n pass\n else:\n where[what] = value", "def AddCondition(self, name, expression):\n self.conditions[name] = expression", "def addConstraint(constraint, problem):\n problem += constraint", "def addConstraint(self, constraint: Constraint, /) -> None:\n ...", "def get_or_insert(cls, name):\n\t\tid = 
cls.normalize_name(name)\n\t\treturn super(Clause, cls).get_or_insert(id, name=name)", "def get_clause(self, clause_letter, clause_id = None):\n clause_type = {\"A\":\"subject\", \"B\":\"predicate\", \"C\":\"outcome\"}[clause_letter]\n if not clause_id:\n clause_id = get_random_id(self.soup, clause_type)\n clause_text = self.soup.find(clause_type, number=clause_id).description.text\n exec(\"self.\"+clause_letter+\"_clause = clause_text\")\n exec(\"self.\"+clause_letter+\"_clause_id = clause_id\")", "def log_clause(clause):\n t = str(clause)\n params = clause.compile().params\n\n def token(m):\n return repr(params[m.group(1)])\n\n logger.debug(re.compile(r':(\\w+)').sub(token, t))", "def add_constraint(self, constraint):\n self.constraints.append(constraint)", "def add_optimizer(self, optimizer):\n assert isinstance(optimizer, torch.optim.Optimizer)\n setattr(self, 'optimizer'+str(self._optimizer_counter), optimizer)\n self._optimizer_counter += 1\n # optimizer indexing : optimizer 0 is the optimizer for layer 0", "def addConstraintsPyOpt(self, optProb):\n if self.addToPyOpt:\n optProb.addCon(\n self.name + \"_thick\", lower=self.thickLower, upper=self.thickUpper, wrt=self.DVGeo.getVarNames()\n )\n\n optProb.addCon(\n self.name + \"_MAC\", lower=self.MACFracLower, upper=self.MACFracUpper, wrt=self.DVGeo.getVarNames()\n )", "def add(self, name, expression, level):\n assert isinstance(level, EnvironmentLevel)\n index = len(level.expressions)\n level.bindings[name] = index\n level.expressions.append(expression)", "def Add(self, ADD_Constraint):\r\n key = ADD_Constraint.conid\r\n if key in self.add_constraints:\r\n print(\"already has key...key=%s\\n%s\" % (key, str(ADD_Constraint)))\r\n else:\r\n self.add_constraints[key] = ADD_Constraint", "def _add_hints(self, **hints):\n self._hints.update(hints)", "def add_solver(library):\n def _add_solver(solver):\n if library:\n solvers.append(solver)\n return solver\n return _add_solver", "def add_word(self, word):\n word = self.map_word(word)\n super(InvariantLanguage, self).add_word(word)", "def add(self, pt, update_limits=True):\n self._add(pt)\n if update_limits:\n self.update_limits()" ]
[ "0.7232271", "0.68253154", "0.64435804", "0.64024657", "0.6194685", "0.589618", "0.55395997", "0.549732", "0.5440252", "0.5343942", "0.5326947", "0.5320732", "0.52594405", "0.5190137", "0.5155156", "0.50672406", "0.49388227", "0.48930344", "0.48519868", "0.48471695", "0.48246765", "0.47988698", "0.4788916", "0.4788251", "0.47707832", "0.47665048", "0.47170556", "0.47126248", "0.46920457", "0.4686941" ]
0.75571716
0
Sends a POST request containing `data` to url. `auth` should be a tuple containing (username, password).
def post(url, data, auth=None, retries=10):
    if not url.startswith('http://'):
        url = 'http://' + url
    request = urllib2.Request(url)
    if auth:
        request.add_header('Authorization', 'Basic %s' % b64encode('%s:%s' % auth))
    params = urllib.urlencode(data)
    response = urllib2.urlopen(request, params)
    return response.read()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_post_request(self, url, data):\n auth = (self.AUTH_ID, self.AUTH_TOKEN)\n headers = {'content-type': 'application/json'}\n return requests.post(url, data=data, auth=auth, headers=headers)", "def _post(self, url, data=None):\n if data is not None:\n data = urllib.urlencode(data)\n return self._request(url, method='POST', payload=data)", "def post(self, data):\n req = self._new_http_request()\n req.add_data(self._prepare_data(data))\n\n return self._urllib2.urlopen(req)", "def post(self, url, data):\n return self.app.post(get_url(url), data=data, follow_redirects=True)", "def request(conn, data):\n headers = {'Authorization': 'Basic %s' % conn.auth_string}\n r = requests.post(conn.endpoint, data=data, headers=headers)\n return parse_response(r.content)", "def post(self, data):\n return requests.post(self.url, headers=self.headers, data=data)", "def post(self, url, data, token=None):\n return self.app.post(url,\n data=json.dumps(data),\n headers=_token_header(token),\n content_type='application/json')", "def httpPost(self, url='', data='', params={}, headers={}):\n\n return self.httpRequest('POST', url, data, params, headers)", "def _http_post(self, path, data):\n # Prepare the request path\n if path[0] == '/':\n path = path[1:]\n path = urljoin(self.servlet_path, path)\n\n # Request the end points\n conn = httplib.HTTPConnection(\"localhost\", self.port)\n conn.request(\"POST\", path, data, {\"Content-Type\": \"application/json\"})\n result = conn.getresponse()\n data = result.read()\n conn.close()\n\n # Convert the response to a string\n return result.status, to_str(data)", "def post(self, url, data):\r\n print(f\"POST {url}\")\r\n print(\"data:\")\r\n self.pp.pprint(data)\r\n response = self.session.post(url, data=data)\r\n print(f\"STATUS {response.status_code}\")\r\n self.print_cookies()\r\n return response", "def post(self, url, data):\n\t\treturn self.session.post(url, data, headers=self.headers)", "def _post(self, url, data=None):\n url = urljoin(self.base_url, url)\n try:\n r = self._make_request(**dict(\n method='POST',\n url=url,\n json=data,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n\n if r.status_code == 204:\n return None\n return r.json()", "def __http_post(self, data, url_path, with_authentication = True):\n\n res = requests.post(self.__http_build_url(url_path), json = data, headers = self.__http_build_headers(with_authentication))\n res.raise_for_status()\n return res.json()", "def post(path: str, data={}):\n token = get_token()\n headers = {\n \"Authorization\": f\"Bearer {token}\"\n }\n return requests.post(get_base_url() + path, headers=headers, json=data)", "def send_post(self, data):\n\n self.post_data = data\n self.post_data['csrfmiddlewaretoken'] = self.csrftoken\n self.response = self.client.post(self.url, self.post_data, cookies=self.cookies)", "def post_collection(self, data: bytes) -> requests.Response:\n r = requests.post(\n self.config.endpoint,\n auth=(self.config.username, self.config.password),\n data=data,\n )\n return r", "def httpPost(self, url, post_parameters=None):\r\n return self.auth.post(url, post_parameters)", "def _submit(self, endpoint, data):\n full_url = self._prepare_url(endpoint)\n req = self._request(full_url, self._username, self._apikey)\n req.post(data)", "def post(self, url, data=None):\r\n response = self.requestHelper.post(url, 
data=data)\r\n return self.process(response)", "def post(url, data=None, json=None, **kwargs):\n\n return request('post', url, data=data, json=json, **kwargs)", "def post(self, url, user=None, data=None):\n if user:\n token = self.login(user)\n response = requests.post(\n url_root + url, headers={\"access-token\": token}, json=data\n )\n else:\n response = requests.post(url_root + url, json=data)\n return response.json(), response.status_code", "def make_post_request(client, endpoint, data):\n return client.post(endpoint, data=data)", "async def post(self, url, params=None, json_data=None):\n if self._authenticated:\n return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)", "def post(url, to_error=_default_to_error, data=None, json=None, **kwargs):\n\n return request('post',\n url, to_error=to_error, data=data, json=json, **kwargs)", "def postTo(self,conn,data):\n #log(\"postTo: \"+str(conn))\n conn.request(self.command,self.path,data,self.headers)\n resp = conn.getresponse()\n log(\"postTo: \"+str(resp.status)+\", \"+str(resp.reason)+\", \"+str(resp.version))\n return resp", "def api_post(self, path, data):\n return self._api_request(path, 'POST', data)", "def post_data(data):\n\n # Load Userpass CSV File (InfluxDB)\n # Format: server,port,db,user,pass\n with open('userpass_influx', 'r') as f:\n line = f.readline()\n line = line.strip().split(',')\n host = line[0]\n port = line[1]\n db = line[2]\n user = line[3]\n pswd = line[4]\n\n # Post\n url = \"http://%s:8086/write?db=%s&precision=s\" % (host, db)\n auth = requests.auth.HTTPBasicAuth(\"%s\" % user, \"%s\" % pswd)\n r = requests.post(\"%s\" % url, auth=auth, data=\"%s\" % data)\n\n # Debug\n # print r.status_code\n # print r.headers\n # print r.content", "def _post(self, path, data=None):\n headers = {'content-type': 'application/json'}\n if data:\n data = json.dumps(data)\n r = requests.post(self._url(path), data=data, headers=headers)\n assert r.status_code == 200\n return r", "def post(self, url, data=None, params=None):\n return self.session.post(url=self.base_url + url, data=data,\n params=params)", "def send_request(url, user, passwd, payload):\n response = requests.post(url,\n data=json.dumps(payload),\n auth=(user, passwd),\n verify=False,\n timeout=30)\n\n if response.status_code != 200:\n print(\"Status code {}\".format(response.status_code))\n return ERR_STATUS_CODE\n\n try:\n print(json.dumps(response.json(), indent = 4, sort_keys=True))\n except ValueError:\n print(\"{}\".format(response.text))\n return ERR_WRONG_JSON\n\n return SUCCESS" ]
[ "0.6842728", "0.6454791", "0.63290054", "0.6314501", "0.62772477", "0.6190568", "0.61195064", "0.6063945", "0.60278213", "0.5984815", "0.5982844", "0.5974754", "0.59532285", "0.5944688", "0.5935827", "0.59314847", "0.5888572", "0.58549273", "0.5854561", "0.58424336", "0.5775395", "0.57697403", "0.57646114", "0.5763307", "0.57305205", "0.5728581", "0.56576097", "0.5620381", "0.5609589", "0.5602404" ]
0.7495922
0
Collect characters while within a source record
def characters(self, content):
    if self.in_source:
        self.chars += content
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def characters(self, data):\n pass", "def cleanup_raw_data(buf):\n raw = str(buf, encoding='iso-8859-1').strip()\n records = raw.splitlines()\n return records", "def extractCharacters(self):\n \n length, high=self.getSize() ##geting size of LineFrame object - high and length\n vHisto = self.vLinesHistogram()\n spaceLength = findSpaceLength(vHisto,high) ##finding of expected length of Space in line\n position = 0 ##position, from where findChar is serching for character\n Line=[] ##list of words in line\n Word=[] ##list of characters in word\n correction=0\n End = False\n while not End: ##while not End of the line, search for characters\n position, char, correction = self.findChar(position, spaceLength+correction)\n if type(char) == str: #check if returned CharFrame object or repor\n if char == \"Space\": #Space was finded in line, end of word, Word list append to Line list, and new Word list started\n Line.append(Word)\n Word=[]\n elif char == \"Enter\": ##Finden end of line, Wor list closed and appened to Line list, end of method, returned Line list\n Line.append(Word)\n #for i in range(0,len(Line)):\n #for j in range(0, len(Line[i])):\n #Line[i][j].savePicture(str(i)+\"kafel\"+str(j)+\".bmp\",\"BMP\")\n return Line\n else: ## Character finden in line, append CharFrame object to Word list\n Word.append(char)", "def read_chars(self):\n char_data = []\n for word in self.train_data:\n if word == self.eos or word == self.sos:\n continue\n if self.unit == \"oracle\":\n if '+' in word:\n tags = word.split('+')\n word_tag = tags[0].split(':')\n word = word_tag[1]\n if self.unit == \"morpheme\":\n word = re.sub(\"@@\", \"\", word)\n char_data.extend([ch for ch in word])\n return char_data", "def _read_characters(self):\n\n # Read the character information table\n for c in range(self.smallest_character_code, self.largest_character_code + 1):\n self._process_char(c)", "def parseC(self, field, data):\r\n return str(data.rstrip(b'\\0 '), self.encoding, errors='replace')", "def _translate_string(self, data):\n data = data.encode('iso-8859-1', errors='replace')\n\n for index, char in enumerate(data):\n yield self._meta.characters - 1 - self._ct[char]", "def _characters(self):\n self.characters = list(\n set([item for sublist in self.grid for item in sublist])\n )\n return self.characters", "def _peek_char(self):\n if self.read_pos > self.length:\n return \"\"\n\n return self.data[self.read_pos]", "def characters(self, in_chars):\n self.char_buffer.append(in_chars)", "def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. 
For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()", "def read_until(self, chars):\n\n start_index = self.index\n\n while self.index < self.length and self.xtext[self.index] not in chars:\n self.index += 1\n\n assert self.index < self.length\n\n return self.xtext[start_index:self.index]", "def get_characters(self):\n return self.characters", "def dissect(self, text):", "def filter_record(self, record):\n if len(record) >= self.max_length:\n return record[:self.max_length]\n else:\n return record", "def _grab_unascii(self):\r\n unascii = \"\"\r\n while self._char != -1 and not self._char in \"\\x00\\t\\r\\n\":\r\n unascii += self._char\r\n self._get_char()\r\n return unascii", "def clean_substr(self, match_obj):\n x = MLStripper()\n x.feed(match_obj.group(1).strip())\n return x.get_fed_data()", "def _read_char(self):\n if self.read_pos >= len(self.data):\n self.char = \"\"\n else:\n self.char = self.data[self.read_pos]\n\n self.pos = self.read_pos\n self.read_pos += 1", "def buffer_before_token(self):\n r = \"\".join(i for i in map(lambda x: x.decode(\"utf-8\"), self.buffer))\n self.buffer = []\n return r", "def characters(self, content):\n if self._current_tag:\n self._buffer.append(content)", "def read_chars(self, snapshot: Bug, location: FileLocationRange) -> str:\n # logger.debug(\"Reading characters at %s in snapshot, %s\",\n # location, snapshot.name)\n filename = location.filename\n contents_file = self.read_file(snapshot, filename)\n\n start_at = self.line_col_to_offset(snapshot,\n filename,\n location.start.line,\n location.start.column)\n stop_at = self.line_col_to_offset(snapshot,\n filename,\n location.stop.line,\n location.stop.column)\n\n contents = contents_file[start_at:stop_at + 1]\n # logger.debug(\"Read characters at %s in snapshot, %s: %s\",\n # location, snapshot.name, contents)\n return contents", "def _char_data_handler(data):\r\n current.text = data", "def filter_chars(accepted_chars,target):\n while True:\n c = (yield)\n if c.lower() in accepted_chars:\n target.send(c.lower())", "def _parse_till_unescaped_char(stream, chars):\n rv = \"\"\n while True:\n escaped = False\n for c in chars:\n if EscapeCharToken.starts_here(stream, c):\n rv += stream.next() + stream.next()\n escaped = True\n if not escaped:\n c = stream.next()\n if c in chars: break\n rv += c\n return rv, c", "def prepseq(self, seq):\n\n wtf = re.sub(r'\\*$', '', seq)\n return wtf", "def char_to_seq( self, uchar ):\n\t\t\n\t\tlstParts = self._char39[ uchar ].split( '+' ) # [ 'nb', 'ns', 'nb', ... 
]\n\t\t# Force evaluation with globals definition and local object definition (say on self.x)\n\t\treturn [ eval( '_'+code, globals(), self.__dict__ ) for code in lstParts ]", "def decode_fn(s_in):\r\n s_out = []\r\n for w in s_in:\r\n if w == '<s>':\r\n continue\r\n elif w=='</s>':\r\n break\r\n s_out.append(w)\r\n s_out = ' '.join(s_out)\r\n return s_out", "def handle_charref(self, number):\n codepoint = int(number[1:], 16) if number[0] in ('x', 'X') else int(number)\n text = six.unichr(codepoint)\n self.result.append(text)\n return text", "def extract_data(self):\r\n self.parse()\r\n lst = []\r\n for i in self.table.text.split(\"\\n\")[3:]:\r\n if i != \"\" and bool(re.search(r'\\d', i)):\r\n lst.append(i.replace(u'\\xa0', ''))\r\n single = lst.pop(-3)\r\n lst = [i + \" \" + j for i, j in zip(lst[::2], lst[1::2])]\r\n lst.append(single)\r\n return lst[0:28]", "def gen_chars(self, lines_str_list):\n char_index_counter = 0\n chars = VGroup()\n for line_no in range(lines_str_list.__len__()):\n chars.add(VGroup())\n chars[line_no].add(\n *self.lines_text.chars[\n char_index_counter : char_index_counter\n + lines_str_list[line_no].__len__()\n + 1\n ]\n )\n char_index_counter += lines_str_list[line_no].__len__() + 1\n return chars" ]
[ "0.63615435", "0.5877636", "0.564212", "0.5450437", "0.53484285", "0.5325434", "0.5275199", "0.52268296", "0.51683784", "0.51639074", "0.5162519", "0.5138824", "0.5114489", "0.51047087", "0.50685495", "0.50610465", "0.5054689", "0.5049034", "0.50311816", "0.50303805", "0.5023495", "0.502008", "0.5017473", "0.50035", "0.49966088", "0.49799478", "0.4971176", "0.4947623", "0.49475273", "0.49458337" ]
0.63518363
1
Create connection line constraint between item's handle and the port.
def constraint(self, item, handle, glue_item):
    start = MatrixProjection(self.start, glue_item.matrix_i2c)
    end = MatrixProjection(self.end, glue_item.matrix_i2c)
    point = MatrixProjection(handle.pos, item.matrix_i2c)
    cx = EqualsConstraint(point.x, start.x)
    cy = BetweenConstraint(point.y, start.y, end.y)
    return MultiConstraint(start, end, point, cx, cy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_fixed_distance_to_line_constraint():\n return FixedDistanceToLineConstraint()", "def test_connect(self):\n line, head = self._get_line()\n self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n self.assertTrue(cinfo is not None)\n self.assertEquals(self.box1, cinfo.connected)\n self.assertTrue(cinfo.port is self.box1.ports()[0],\n 'port %s' % cinfo.port)\n self.assertTrue(isinstance(cinfo.constraint, LineConstraint))\n # No default callback defined:\n self.assertTrue(cinfo.callback is None)\n\n line, head = self._get_line()\n self.tool.connect(line, head, (90, 50))\n cinfo2 = self.canvas.get_connection(head)\n self.assertTrue(cinfo is not cinfo2, cinfo2)\n self.assertTrue(cinfo2 is None, cinfo2)", "def test_item_and_port_glue(self):\n\n ports = self.box1.ports()\n\n # glue to port nw-ne\n sink = self.tool.glue(self.line, self.head, (120, 50))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[0], sink.port)\n\n # glue to port ne-se\n sink = self.tool.glue(self.line, self.head, (140, 70))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[1], sink.port)\n\n # glue to port se-sw\n sink = self.tool.glue(self.line, self.head, (120, 90))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[2], sink.port)\n\n # glue to port sw-nw\n sink = self.tool.glue(self.line, self.head, (100, 70))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[3], sink.port)", "def ioLineDrag(self, startItem, pos0, pos1, done=False):\n assert isinstance(startItem, PortItem)\n assert isinstance(pos0, QPointF)\n assert isinstance(pos1, QPointF)\n assert isinstance(done, bool)\n\n if self._draggedLineItem is None:\n self._draggedLineItem = DraggedLineItem(pos0, pos1)\n self.addItem(self._draggedLineItem)\n else:\n self._draggedLineItem.setEndpoint(pos1)\n\n vaildItem = None\n\n if QLineF(pos0, pos1).length() > 5.0:\n # Check if line is over other PortItem\n for item in self.items(pos1):\n if isinstance(item, PortItem):\n vaildItem = item\n print item.name()\n break\n\n self._draggedLineItem.showEndpoint(vaildItem is not None)\n\n if done:\n self.removeItem(self._draggedLineItem)\n self._draggedLineItem = None\n\n if vaildItem is not None:\n # Request connection creation\n name1 = startItem.fullname()\n name2 = vaildItem.fullname()\n self.sigCreateConnection.emit(name1, name2)", "def test_port_create_with_binding_information(self):\n network, segments, subnets = self._create_test_segments_with_subnets(3)\n\n # Map the host to the middle segment (by mocking host/segment mapping)\n self._setup_host_mappings([\n (segments[1]['segment']['id'], 'fakehost'),\n (segments[1]['segment']['id'], 'otherhost'),\n (segments[0]['segment']['id'], 'thirdhost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n res = self.deserialize(self.fmt, response)\n self._validate_immediate_ip_allocation(res['port']['id'])\n\n # Since host mapped to middle segment, IP must come from middle subnet\n self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])", "def net_acl_iptables_rule(item):\n # defaults\n fmt = {\n 'chain': '-A INPUT',\n 'device': '',\n 'protocol': ' -p tcp',\n 'state': '',\n 'identifier': ' -m comment --comment \"20CACL {}\"'.format(item['name']),\n 'target': ' -j ACCEPT',\n }\n\n if item.get('device', None):\n fmt['device'] = ' -i 
{}'.format(item.device)\n if item.get('protocol', None):\n fmt['protocol'] = ' -p {}'.format(item.protocol)\n # FIXME parse for false\n if item.get('stateful', False) == True:\n fmt['state'] = ' -m state --state NEW'\n if not item.get('ports', None):\n raise ValueError(\"missing ports\")\n else:\n fmt['ports'] = ' -m multiport --dports={}'.format(','.join(map(str, item['ports'])))\n\n line = \"{chain}{device}{protocol}{state}{ports}{identifier}{target}\".format(**fmt)\n\n return line", "def test_reconnect_same(self):\n line, head = self._get_line()\n self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n item = cinfo.connected\n port = cinfo.port\n constraint = cinfo.constraint\n\n assert item == self.box1\n assert item != self.box2\n\n # connect to box1 again, handle's connected item and port should be\n # the same but connection constraint will differ\n connected = self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n self.assertEqual(self.box1, cinfo.connected)\n self.assertEqual(self.box1.ports()[0], cinfo.port)\n self.assertNotEqual(constraint, cinfo.constraint)", "def slot_constraint(self, item, role_spec):\n return self.kb.slot_value(\n logic.expr(item),\n CONSTRAINT_EXPR,\n logic.expr(role_spec))", "def __init__(self, srcNode, destNode):\r\n super(NodeConnection, self).__init__()\r\n \r\n self.setSrcNode(srcNode)\r\n self.setDestNode(destNode)\r\n \r\n self._srcPt = None\r\n self._destPt = None\r\n self.setArrowSize(10)\r\n \r\n self.Adjust()\r\n self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)", "def _set_constraint(self):\n pass", "def create_connection(\n self,\n from_id: str,\n to_id: str\n ):\n raise NotImplementedError", "def addConnection(self, port1Name, port2Name, connItem):\n assert isinstance(connItem, ConnectionItem)\n\n # Ensure port1Name and port2Name are str, not QString\n port1Name = str(port1Name)\n port2Name = str(port2Name)\n\n node1Name = port1Name.split(':')[0]\n node2Name = port2Name.split(':')[0]\n\n if node1Name == node2Name:\n return False\n\n node1 = self.nodeFromName(node1Name)\n node2 = self.nodeFromName(node2Name)\n\n if node1.isConnected(port1Name) or node2.isConnected(port2Name):\n return False\n\n self.addItem(connItem)\n node1.addConnection(port1Name, connItem)\n node2.addConnection(port2Name, connItem)\n\n assert connItem.startPortName() is not None\n assert connItem.endPortName() is not None\n return True", "def _setup_create_firewall_rule_with_all_params(self, protocol='tcp'):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.CreateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n source_ip = '192.168.1.0/24'\r\n destination_ip = '192.168.2.0/24'\r\n source_port = '0:65535'\r\n destination_port = '0:65535'\r\n action = 'allow'\r\n tenant_id = 'my-tenant'\r\n my_id = 'myid'\r\n args = ['--description', description,\r\n '--shared',\r\n '--protocol', protocol,\r\n '--source-ip-address', source_ip,\r\n '--destination-ip-address', destination_ip,\r\n '--source-port', source_port,\r\n '--destination-port', destination_port,\r\n '--action', action,\r\n '--enabled',\r\n '--admin-state-up',\r\n '--tenant-id', tenant_id]\r\n position_names = []\r\n position_values = []\r\n if protocol == 'any':\r\n protocol = None\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description, shared=True,\r\n 
protocol=protocol,\r\n source_ip_address=source_ip,\r\n destination_ip_address=destination_ip,\r\n source_port=source_port,\r\n destination_port=destination_port,\r\n action=action, enabled=True,\r\n tenant_id=tenant_id)", "def setup_rule(self, client):\n pass", "def setup_rule(self, client):\n pass", "def _add_line(self, key, info):\n info = copy.deepcopy(info)\n anticipated_bus = self._get_df_with_new_elements(\"bus\")\n new_lines = []\n required = {\"from_bus_id\", \"to_bus_id\"}\n xor_sets = {(\"capacity\", \"Pmax\"), (\"capacity\", \"Pmin\")}\n optional = {\"Pmin\"}\n for i, line in enumerate(info):\n self._check_entry_keys(line, i, key, required, xor_sets, optional)\n start = line[\"from_bus_id\"]\n end = line[\"to_bus_id\"]\n if start not in anticipated_bus.index:\n raise ValueError(\n \"No bus with the following id for line #%d: %d\" % (i + 1, start)\n )\n if end not in anticipated_bus.index:\n raise ValueError(\n \"No bus with the following id for line #%d: %d\" % (i + 1, end)\n )\n if start == end:\n raise ValueError(f\"to/from buses of line #{i + 1} must be different\")\n if \"capacity\" in line:\n if not isinstance(line[\"capacity\"], (int, float)):\n raise ValueError(\"'capacity' must be a number (int/float)\")\n if line[\"capacity\"] < 0:\n raise ValueError(\"capacity of line #%d must be positive\" % (i + 1))\n # Everything looks good, let's translate this to Pmin/Pmax\n line[\"Pmax\"] = line[\"capacity\"]\n line[\"Pmin\"] = -1 * line[\"capacity\"]\n del line[\"capacity\"]\n elif {\"Pmin\", \"Pmax\"} < set(line.keys()):\n if key == \"new_branch\":\n err_msg = \"Can't independently set Pmin & Pmax for AC branches\"\n raise ValueError(err_msg)\n for p in {\"Pmin\", \"Pmax\"}:\n if not isinstance(line[p], (int, float)):\n raise ValueError(f\"'{p}' must be a number (int/float)\")\n if line[\"Pmin\"] > line[\"Pmax\"]:\n raise ValueError(\"Pmin cannot be greater than Pmax\")\n else:\n raise ValueError(\"Must specify either 'capacity' or Pmin and Pmax\")\n if (\n key == \"new_branch\"\n and anticipated_bus.interconnect[start]\n != anticipated_bus.interconnect[end]\n ):\n raise ValueError(\n \"Buses of line #%d must be in same interconnect\" % (i + 1)\n )\n elif (\n anticipated_bus.lat[start] == anticipated_bus.lat[end]\n and anticipated_bus.lon[start] == anticipated_bus.lon[end]\n ):\n raise ValueError(\"Distance between buses of line #%d is 0\" % (i + 1))\n new_lines.append(line)\n\n if key not in self.ct:\n self.ct[key] = []\n self.ct[key] += new_lines", "def cmd_CONNECTION(self, line):\r\n config = ConnectionOptions(self.terminal)\r\n\r\n try:\r\n config.parseOptions(line)\r\n cmd = config.subCommand\r\n opts = config.subOptions if hasattr(config, 'subOptions') else {}\r\n except usage.UsageError as errortext:\r\n self.terminal.write(\"BUG in usage: {0}\".format(errortext))\r\n else:\r\n if cmd == 'add':\r\n if opts['tag1'] and opts['tag2']:\r\n self.callToUser('addConnection', 'robot', opts['tag1'],\r\n opts['tag2'])\r\n elif cmd == 'remove':\r\n if opts['tag1'] and opts['tag2']:\r\n self.callToUser('removeConnection', 'robot', opts['tag1'],\r\n opts['tag2'])", "def build_connection(self, src, tgt) -> NoReturn:\n # If src and tgt are the same node, src not in node_collection or\n # tgt not in node_collection,\n # then skip this edge.\n if src == tgt or src not in self._nodes_collection or tgt not in self._nodes_collection:\n if src.split(':')[0] not in self._nodes_collection:\n warnings.warn(f\"Graph construct a self-loop node {src}. 
Ignored.\")\n return\n\n if tgt not in self._nodes_collection[src.split(':')[0]].successor_nodes:\n self._nodes_collection[src.split(':')[0]].successor_nodes.append(tgt)\n if src not in self._nodes_collection[tgt].precursor_nodes:\n self._nodes_collection[tgt.split(':')[0]].precursor_nodes.append(src)", "def cmd_port (self, line):\r\n info = line[1].split (',')\r\n ip = '.'.join (info[:4])\r\n port = int(info[4])*256 + int(info[5])\r\n # how many data connections at a time?\r\n # I'm assuming one for now...\r\n # TODO: we should (optionally) verify that the\r\n # ip number belongs to the client. [wu-ftpd does this?]\r\n self.client_addr = (ip, port)\r\n self.respond ('200 PORT command successful.')", "def connect(self, handle: Handle, port: Port) -> bool:\n pin = self.pin\n if not pin.subject:\n pin.subject = pin.model.create(\n UML.InputPin if isinstance(pin, InputPinItem) else UML.OutputPin\n )\n\n assert isinstance(pin.subject, (UML.InputPin, UML.OutputPin))\n pin.subject.opaqueAction = self.action.subject\n\n # This raises the item in the item hierarchy\n pin.change_parent(self.action)\n\n return True", "def test_port_create_with_binding_and_no_subnets(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n res = self.deserialize(self.fmt, response)\n\n # No subnets, so no allocation. But, it shouldn't be an error.\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def _connection_maker(\n self,\n first_device,\n first_port,\n second_device,\n second_port):\n if first_port is None:\n return self.network.make_connection(\n first_device.id, None,\n second_device.id, second_port.id)\n else:\n return self.network.make_connection(\n first_device.id, first_port.id,\n second_device.id, second_port.id)", "def allow(self, handle, port):\n assert self.canvas\n\n line = self.line\n element = self.element\n\n # Check if no other items are connected\n connections = self.canvas.get_connections(connected=element)\n connected_items = [\n c\n for c in connections\n if isinstance(c.item, TransitionItem) and c.item is not line\n ]\n if handle is line.head and not any(connected_items):\n return super().allow(handle, port)\n else:\n return None", "def addConstraint(self, constraint: Constraint, /) -> None:\n ...", "def add_connection(self, switch_name, port1, port2, bidir=False):\n raise NotImplementedError()", "def createConstraint(*argv):", "def create_port_item(self, port_spec, is_connected, is_optional,\n is_visible, is_editable, parent=None):\n return PortItem(port_spec, is_connected, True, False, False, parent)", "def constraint(self, c):\n self.add_constraint(c)", "def create_port_forward_rule(self, ipaddressid, protocol, virtualmachineid,\n privateport, privateendport,\n publicport, publicendport): \n params = {'command':'createPortForwardingRule',\n 'ipaddressid':ipaddressid,\n 'protocol':protocol,\n 'privateport':privateport,\n 'privateendport':privateendport,\n 'publicport':publicport,\n 'publicendport':publicendport,\n 'virtualmachineid':virtualmachineid,\n 'openfirewall':False} \n\n try:\n response = self.send_request(params)\n res = 
json.loads(response)\n clsk_job_id = res['createportforwardingruleresponse']['jobid']\n self.logger.debug('Start job - createPortForwardingRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def test_port_create_with_binding_information_fallback(self):\n with self.network() as network:\n with self.subnet(network=network,\n ip_version=constants.IP_VERSION_6,\n cidr='2001:db8:0:0::/64') as subnet:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n self._validate_l2_adjacency(network['network']['id'], is_adjacent=True)\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n\n res = self.deserialize(self.fmt, response)\n self._validate_immediate_ip_allocation(res['port']['id'])\n\n # Since the subnet is not on a segment, fall back to it\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])" ]
[ "0.5818294", "0.5687332", "0.5500008", "0.52568966", "0.5220818", "0.5207515", "0.52057487", "0.505119", "0.5035412", "0.5020907", "0.49766943", "0.4966835", "0.49285123", "0.492827", "0.492827", "0.49215764", "0.49072984", "0.49011382", "0.4897819", "0.48977235", "0.4889161", "0.4876178", "0.4873932", "0.48451567", "0.48292992", "0.48227188", "0.47957954", "0.47898212", "0.47815078", "0.4751471" ]
0.6178213
0
Draw lifeline. We always draw the lifeline's head. We only draw the lifeline's lifetime when the lifetime is visible.
def draw_lifeline(self, box, context, bounding_box):
    cr = context.cairo
    cr.rectangle(0, 0, self.width, self.height)
    stroke(context)

    if (
        context.hovered
        or context.focused
        or context.dropzone
        or self._lifetime.visible
    ):
        bottom = self._lifetime.bottom
        cr = context.cairo
        with cairo_state(cr):
            cr.set_dash((7.0, 5.0), 0)
            x = self._handles[SW].pos.x
            top = self._lifetime.top
            cr.move_to(top.pos.x - x, top.pos.y)
            cr.line_to(bottom.pos.x - x, bottom.pos.y)
            stroke(context, dash=False)

        # draw destruction event
        if self.is_destroyed:
            d1 = 8
            d2 = d1 * 2
            cr.move_to(bottom.pos.x - d1, bottom.pos.y - d2)
            cr.line_to(bottom.pos.x + d1, bottom.pos.y)
            cr.move_to(bottom.pos.x - d1, bottom.pos.y)
            cr.line_to(bottom.pos.x + d1, bottom.pos.y - d2)
            cr.stroke()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_line():\n\n # Small Size Line\n glLineWidth(0.1)\n glColor3f(0.5, 1.0, 0.9)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n length += 10\n wid += 50\n # Medium Size Line\n glLineWidth(2.0)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n length += 50\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n wid += 50\n # Main Line\n # ordinat\n glLineWidth(1.5)\n glColor3f(0.5, 0.4, 0.8)\n glBegin(GL_LINES)\n glVertex3f(height / 2, 0, 0.0)\n glVertex3f(height / 2, width, 0)\n glEnd()\n # absis\n glBegin(GL_LINES)\n glVertex3f(0, width / 2, 0.0)\n glVertex3f(height, width / 2, 0)\n glEnd()", "def draw_reticle(self):\n glColor3d(0, 0, 0)\n self.reticle.draw(GL_LINES)", "def __draw_line(display, color, ball_pos, dx, dy):\n pygame.draw.line(display, color, ball_pos, (ball_pos[0] + dx, ball_pos[1] + dy), 2)", "def display_line_map(self):\n lh_count = len(flatten(self.lh_data))\n print('{} horizontal line mapping: {} hline draw calls. {} bytes'.format(\n self.char,\n lh_count,\n len(list(self._stream_lhmap()))\n ))\n print('v' * len(''.join([str(i) for i in range(self.width)])), ' y [(x, length)]')\n for y in range(self.height):\n for x in range(self.width):\n space = ' ' if x < 10 else ' '\n char = space if self.pixels[y * self.width + x] else x\n print(char, end='')\n print(' ', '%2d' % y, self.lh_data[y])\n print()\n\n lv_count = len(flatten(self.lv_data))\n print('{} vertical line mapping: {} vline draw calls. 
{} bytes'.format(\n self.char,\n lv_count,\n len(list(self._stream_lvmap()))\n ))\n print('>' * len(''.join([str(i) for i in range(self.height)])), ' x [(y, length)]')\n for x in range(self.width)[::-1]:\n for y in range(self.height):\n space = ' ' if y < 10 else ' '\n char = space if self.pixels[y * self.width + x] else y\n print(char, end='')\n print(' ', '%2d' % x, self.lv_data[x])\n print()\n\n print('selecting {} mapping for {} char\\n'.format(\n 'lhmap horizontal' if self.is_char_lhmap() else 'lvmap vertical',\n self.char\n ))", "def do_draw_network(self, line):\n self.fibbing.root.lsdb.graph.draw(line)", "def _draw_line(self, event):\n if not self.obstacle_creation_mode:\n return\n\n if self.previous_coordinates is None:\n self.previous_coordinates = event.x, event.y\n self.new_obstacle.append([event.x, event.y])\n return\n\n x1, y1 = event.x, event.y\n\n if self._is_closing_shape(x1, y1, self.new_obstacle):\n x1, y1 = self.new_obstacle[0]\n else:\n self.new_obstacle.append([x1, y1])\n\n x0, y0 = self.previous_coordinates\n self.canvas.create_line(x0, y0, x1, y1, **self.LINE_OPTIONS)\n self.previous_coordinates = x1, y1", "def drawLines(self):\n\t\tintersections = [[], []]\n\t\tfor l in self.lines:\n\t\t\tif l.direction == 'v':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + int((self.width - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.width / 100) if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[0].append(position)\n\t\t\t\tfor yPos in range(1, self.height - 2):\n\t\t\t\t\tself.wts(yPos, position, '│', self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(0, position, '┬',self._borderColor)\n\t\t\t\tself.wts(self.height - 2, position, '┴', self._borderColor)\n\t\t\telif l.direction == 'h':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + ((self.height - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.height / 100) - 1 if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[1].append(position)\n\t\t\t\tself.wts(position, 1, '─' * (self.width - 2), self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(position, 0, '├', self._borderColor)\n\t\t\t\tself.wts(position, self.width - 1, '┤', self._borderColor)\n\t\t# draw intersections\n\t\tfor x in intersections[1]:\n\t\t\tfor y in intersections[0]:\n\t\t\t\tself.wts(x, y, '┼', self._borderColor)\n\t\tself.verticalBoundaries = intersections[0]\n\t\tif self.screenBorder:\n\t\t\tself.verticalBoundaries.append(self.width)", "def draw_line(color, start_pos, end_pos, width=1):\n pygame.draw.line(screen, color, start_pos, end_pos, width)", "def draw_lines(self):\n for x_cord in range(0, Dimension.SCREEN_WIDTH.value, Dimension.SQUARE_WIDTH.value):\n pg.draw.line(self.window, Colors.BLACK.value, (x_cord, 0), (x_cord, Dimension.SCREEN_HEIGHT.value))\n\n for y_cord in range(0, Dimension.SCREEN_HEIGHT.value, Dimension.SQUARE_HEIGHT.value):\n pg.draw.line(self.window, Colors.BLACK.value, (0, y_cord), (Dimension.SCREEN_WIDTH.value, y_cord))\n\n pg.display.update()", "def draw(self):\n glColor3f(1.0, 0.0, 0.0)\n glBegin(GL_LINES)\n for vertex in self.edges[0]:\n glVertex3fv(self.vertices[vertex])\n glColor3f(0.0, 1.0, 0.0)\n for vertex in self.edges[1]:\n glVertex3fv(self.vertices[vertex])\n glColor3f(0.0, 0.0, 1.0)\n for vertex in self.edges[2]:\n glVertex3fv(self.vertices[vertex])\n glEnd()", "def draw(self):\n # s1 = ShowPoint(self.cnv, self.p1.xpt, self.p1.ypt)\n # s2 = ShowPoint(self.cnv, self.p2.xpt, self.p2.ypt)\n # s1.draw()\n # # 
s2.draw()\n self.cnv.create_line(self.p1.xpt, self.p1.ypt, self.p2.xpt, self.p2.ypt)", "def update_line(self):\n self._draw_line_text()\n self._draw_status()\n self._line_listbox.set_focus(self.model.l_index)", "def _render_horizontal(self, gc, lx, ly, rx, ry, mx, my):\n\n with gc:\n gc.set_line_width(20)\n gc.set_stroke_color(self._get_border_color())\n tee_h(gc, lx, ly, mx, my, ry)\n\n gc.set_line_width(10)\n self.set_fill_color(gc)\n tee_h(gc, lx, ly, mx, my, ry)", "def _draw_line_text(self):\n self._line_text.set_text(self.model.get_current_line())", "def display(self, screen: pygame.Surface, line_thickness=3):\n\t\tfor p1, p2 in self.__calculate_points():\n\t\t\tpygame.draw.line(screen, Color(255).get(), p1.get_int(), p2.get_int(), line_thickness)", "def draw_lines(self):\n # draw x lines\n y = self.step_y\n while y <= self.height:\n x = 0\n while x <= self.width:\n self.canvas.create_line(x, y, x+3.5, y)\n self.canvas.update()\n x += 3.5\n y += self.step_y\n \n # draw y lines\n x = self.step_x\n while x <= self.width:\n y = 0\n while y <= self.height:\n self.canvas.create_line(x, y, x, y+3.5)\n self.canvas.update()\n y += 3.5\n x += self.step_x\n \n self.is_operating = False", "def draw(self, camera):\n for line in self._polyline.lines:\n camera.draw_line(line.begin, line.end, self.color, self.width)", "def hline(self, x, y, length, color):\n self.fill_rect(x, y, length, 1, color)", "def draw_bullet(self):\n pygame.draw.rect(self.__screen, self.__color, self.rect)", "def create_line(self):\n if self.hosts and self.line:\n self.msg(\"There is a line here already.\")\n self.display_line()\n return\n self.line = []\n other_hosts = [self.caller.search(arg) for arg in self.lhslist]\n other_hosts = [ob for ob in other_hosts if ob and ob.player]\n other_hosts.append(self.caller)\n self.hosts = other_hosts\n if \"loop\" in self.switches:\n self.toggle_loop()\n self.display_line()", "def _newLine(self, usePos=True):\n if len(self.currentLine) > 1:\n self.screen._drawline(self.currentLineItem, self.currentLine,\n self._pencolor, self._pensize)\n self.currentLineItem = self.screen._createline()\n self.items.append(self.currentLineItem)\n else:\n self.screen._drawline(self.currentLineItem, top=True)\n self.currentLine = []\n if usePos:\n self.currentLine = [self._position]", "def draw_line(self, x0, y0, x1, y1, color=Color['white']):\n pygame.draw.line(self.display, color, (x0, y0), (x1, y1))", "def draw_laser(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "def drawReference(x, y, z, l):\r\n\r\n glPushMatrix()\r\n\r\n glColor3f(1.0, 0.0, 0.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x + l, y, z)\r\n glEnd()\r\n\r\n glColor3f(0.0, 1.0, 0.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x, y + l, z)\r\n glEnd()\r\n\r\n glColor3f(0.0, 0.0, 1.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x, y, z + l)\r\n glEnd()\r\n\r\n glPopMatrix()", "def draw_bullet(self):\r\n pygame.draw.rect(self.screen, self.color, self.rect)", "def draw_line(self, DISP, side:str, indizes:tuple, pink = False):\r\n offset = 1 #< Just to draw the line nicely\r\n pos = (indizes[0] - 1) * self.grid_size, indizes[1] * self.grid_size\r\n # Check if it's a pink line\r\n if pink:\r\n start_pos = pos[0], pos[1] + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size, pos[1] + self.grid_size // 2\r\n # Check if the line should be vertically. 
u for up\r\n elif side == 'u':\r\n start_pos = pos[0] + self.width - offset + self.grid_size // 2, pos[1] + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size + offset + self.grid_size // 2 - self.width, pos[1] + self.grid_size // 2\r\n # Check if the line should be horizontally. l for left\r\n elif side == 'l':\r\n start_pos = pos[0] + self.grid_size // 2, pos[1] + self.width - offset + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size // 2, pos[1] - self.width + self.grid_size + offset + self.grid_size // 2\r\n if not pink:\r\n pg.draw.line(DISP, Colors.colors['BLACK'], start_pos,end_pos, self.width + 2 * offset) \r\n else:\r\n pg.draw.line(DISP, Colors.colors['PINK'], start_pos,end_pos, self.width + 2 * offset)", "def add_line(self, text):\n\t\twidth, height = self.font.size(text)\n\t\tpos = (self.rect.left + 10, self.rect.bottom - height- 5)\n\t\trend = self.font.render(text, True, BLACK)\n\t\t# Move all already existing lines up\n\t\tfor i in range(len(self.all_lines)):\n\t\t\toldsurf, oldpos = self.all_lines[i]\n\t\t\tself.all_lines[i] = self.lift_line(oldsurf, height, oldpos)\n\t\t\tcopy = oldsurf.copy()\n\t\t\tcopy.fill(BG_COLOR)\n\t\t\tself.image.blit(copy, oldpos)\n\t\tself.all_lines.append([rend, pos])\n\t\tself.image.blit(rend, pos)", "def draw(self):\r\n pygame.draw.rect(self.screen, self.background_color, self.bounds)\r\n line_window = self.lines[self.scroll_window_top:self.scroll_window_bottom]\r\n for idx,line in enumerate(line_window):\r\n text = self.font.render(line, True, self.foreground_color)\r\n x,y = self._get_x_y_from_pos(self.position[0], self.position[1]+idx)\r\n self.screen.blit(text,(x,y))\r\n \r\n if self.cursor_visible and self.scroll_window_bottom == len(self.lines):\r\n x,y = self._get_x_y_from_pos(len(line_window[-1]), len(line_window))\r\n cursor_rect = pygame.Rect(x,y,\r\n self.text_width,self.text_height)\r\n pygame.draw.rect(self.screen, self.foreground_color, cursor_rect)", "def draw_line(tick_length, tick_label=''):\n line = \"_\" * tick_length\n if tick_label:\n line += ' ' + tick_label\n print(line)", "def draw(self):\n if len(self.__points) >= 2:\n self._total_length = 0\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n coords = self.__line_segment(p1, p2)\n if not coords is None:\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 1, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )\n coords = self.__line_cap(p2)\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 0, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )" ]
[ "0.6687818", "0.6577214", "0.65438086", "0.64897895", "0.63934726", "0.63052434", "0.6266094", "0.6217813", "0.61868715", "0.61665386", "0.6159552", "0.6142421", "0.6132305", "0.60893184", "0.60786086", "0.60565233", "0.60377926", "0.60295296", "0.6012153", "0.59879833", "0.59606403", "0.595874", "0.59582007", "0.59281206", "0.5923167", "0.59134054", "0.5903696", "0.5900028", "0.5886914", "0.5865786" ]
0.6821605
0
Load data from CSV files and return them as numpy arrays. The use_labels parameter indicates whether one should read the first column (containing class labels). If false, return all 0s.
def load_data(filename, use_labels=True): # load column 1 to 8 (ignore last one) data = np.loadtxt(open( filename), delimiter=',', usecols=range(1, 9), skiprows=1) if use_labels: labels = np.loadtxt(open( filename), delimiter=',', usecols=[0], skiprows=1) else: labels = np.zeros(data.shape[0]) return labels, data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self, path=\"../data_set/final-train-dataset.csv\", shuffle=False,\n onlyLabelToUse=None, useOnlyBestIndicators=False, binary=False):\n data = []\n labels = []\n\n with open(path) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n skip = True\n\n for row in reader:\n if skip:\n skip = False\n continue\n\n entries = []\n\n for i in range(SAMPLES_OF_DATA_TO_LOOK_AT):\n # Get one time point's data, including indicators:\n if useOnlyBestIndicators:\n # entries.append(\n # [float(row[i + j * SAMPLES_OF_DATA_TO_LOOK_AT]) for\n #\n entries.append(\n [float(row[i + j * SAMPLES_OF_DATA_TO_LOOK_AT]) for\n j in [0, 1, 2, 5, 7]])\n else:\n entries.append(\n [float(row[i + j * SAMPLES_OF_DATA_TO_LOOK_AT]) for\n j in range(INPUT_CHANNELS)])\n\n data.append(np.array(entries))\n\n if onlyLabelToUse is not None:\n if binary:\n label = [int(float(row[SAMPLES_OF_DATA_TO_LOOK_AT * TOTAL_INPUTS_IN_DATASET + onlyLabelToUse]) > 0.5)]\n else:\n label = [float(row[SAMPLES_OF_DATA_TO_LOOK_AT * TOTAL_INPUTS_IN_DATASET + onlyLabelToUse])]\n else:\n if binary:\n label = [int(float(row[SAMPLES_OF_DATA_TO_LOOK_AT * TOTAL_INPUTS_IN_DATASET + j]) > 0.5) for j in range(OUTPUT_CHANNELS)]\n else:\n label = [float(row[SAMPLES_OF_DATA_TO_LOOK_AT * TOTAL_INPUTS_IN_DATASET + j]) for j in range(OUTPUT_CHANNELS)]\n\n labels.append(label)\n\n if shuffle:\n indices = [i for i in range(len(data))]\n np.random.shuffle(indices)\n labels = np.array([labels[i] for i in indices])\n data = np.array([data[i] for i in indices])\n else:\n data = np.array(data)\n labels = np.array(labels)\n\n return data, labels", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype = str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0' \n y[y==label1]='1' \n y[y==label2]='2'\n y.astype(np.float) \n return X, y", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype=str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0'\n y[y==label1]='1'\n y[y==label2]='2'\n y=y.astype(np.float)\n return X, y", "def load_labeled_data(files):\n\tx = []\n\ty = []\n\tfor filename in files:\n\t\tdata = []\n\t\twith open(filename) as infile:\n\t\t\tlabel = int(infile.readline())\n\t\t\tfor line in infile:\t\n\t\t\t\tdata.append(dna_string_to_array(line.strip()))\n\t\ty += [label]*len(data)\n\t\tx += data\n\n\treturn (np.array(x), np.array(y))", "def load_data_and_labels(data_file, labels_file):\r\n x_text = []\r\n y = []\r\n \r\n with open(data_file, encoding = \"utf-8\") as csvFile:\r\n readCSV = csv.reader(csvFile, delimiter = \",\")\r\n for row in readCSV:\r\n row = \"\".join(row)\r\n x_text.append(row) \r\n \r\n with open(labels_file, encoding = \"utf-8\") as csvFile2:\r\n readCSV = csv.reader(csvFile2, delimiter = \",\")\r\n for row in readCSV:\r\n d = defaultdict(list)\r\n for k,va in [(v,i) for i,v in enumerate(row)]:\r\n d[k].append(va)\r\n \r\n for k in range(len(d.get(\"1.0\"))):\r\n index = d.get(\"1.0\")[k]\r\n row[index] = 1\r\n for k in range(len(d.get(\"0.0\"))):\r\n index = d.get(\"0.0\")[k]\r\n row[index] = 0\r\n \r\n# print(len(row))\r\n y.append(row)\r\n \r\n\r\n\r\n\r\n \r\n print(\"x = {}\".format(len(x_text)))\r\n print(\"y = {}\".format(len(y)))\r\n \r\n return x_text, y", "def load_csv_data(data_path):\n print(\"LOADING CSV FILE FROM {}\".format(data_path))\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=[1])\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n ids = x[:, 0].astype(np.int)\n 
input_data = x[:, 2:]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y == 'b')] = -1\n\n return yb, input_data, ids", "def load_csv(fname = data_indoor):\n \n reader = csv.reader(open(fname, 'r'))\n \n # Blank list\n data = []\n \n # Don't read the zeroth element of each row (image name), convert to float.\n for row in reader:\n data.append(map(float, row[1:]))\n \n # Convert list to array \n d = np.array(data)\n \n # Seperate labels from features\n Y = d[:,0]\n X = d[:,1:]\n \n return X,Y", "def loader(filename,sep=',',rowskip=[], colskip=[], axis=1,names=1,fromstring=0):\n\n #manages excpetions to the csv file incase of missing data\n if (type(filename)==str) and (fromstring==1):\n iterable=filename.strip('\\n').split('\\n')\n content=np.array([i for i in csv.reader(iterable,delimiter=sep)])\n elif type(filename)==np.ndarray:\n content=filename\n else:\n content=np.array([i for i in\\\n csv.reader(open(filename,'r'),delimiter=sep)])\n #content=np.genfromtxt(filename,delimiter=sep,dtype=str)\n\n if rowskip:\n #rowskip.sort(reverse=True)\n content=np.delete(content,rowskip,0)\n #for i in rowskip: content.pop(i)\n\n if colskip:\n #colskip.sort(reverse=True)\n content=np.delete(content,colskip,1)\n #for i in colskip: content.pop(i)\n\n if axis==0: # if the file oriented column-wise\n #content=list(map(list,zip(*content)))\n content=content.T\n\n\n\n if names is 0:\n variables=np.arange(content.shape[1]).tolist()\n offset=0\n else:\n variables=content[0].tolist()\n offset=1\n\n try:\n content=np.array([conv_col(col) for col in\n content[offset:].T],dtype='object')\n arity=np.array([np.unique(i).size for i in content])\n return dataset(variables,content.T,arity)\n except ValueError: \n print( 'Data could not be loaded, failed converting to float.')\n return content", "def load_dataset(csv_path, label_col='y', add_intercept=False):\n\n def add_intercept_fn(x):\n global add_intercept\n return add_intercept(x)\n\n # Validate label_col argument\n allowed_label_cols = ('y', 't')\n if label_col not in allowed_label_cols:\n raise ValueError('Invalid label_col: {} (expected {})'\n .format(label_col, allowed_label_cols))\n\n # Load headers\n with open(csv_path, 'r') as csv_fh:\n headers = csv_fh.readline().strip().split(',')\n\n # Load features and labels\n x_cols = [i for i in range(len(headers)) if headers[i].startswith('x')]\n l_cols = [i for i in range(len(headers)) if headers[i] == label_col]\n inputs = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=x_cols)\n labels = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=l_cols)\n\n if inputs.ndim == 1:\n inputs = np.expand_dims(inputs, -1)\n\n if add_intercept:\n inputs = add_intercept_fn(inputs)\n\n return inputs, labels", "def load_data(fl=\"data.csv\"):\n data = np.loadtxt(fl, delimiter=\",\")\n y1 = data[:, 0]\n y2 = data[:, 1]\n return y1, y2", "def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), axis=1)\n\n scaler = 
StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset", "def load_data(self):\n\n data_pd = pd.read_csv(self.filename)\n return np.array(data_pd)", "def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features", "def load_csv_data(data_path, sub_sample=False):\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=1)\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n labels = open(data_path,'r').readline()\n\n\n ids = x[:, 0].astype(np.int)\n input_data = x[:, 2:]\n labels = labels.strip().split(\",\")\n del labels[0]\n del labels[0]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y == 'b')] = -1\n\n # sub-sample\n if sub_sample:\n yb = yb[::50]\n input_data = input_data[::50]\n ids = ids[::50]\n\n return yb, input_data, ids, labels", "def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def loadCSVSeeds(self, csvFilePath):\n labels = []\n with open(csvFilePath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row 
in reader:\n labels.append([row[0], row[1], [float(row[2]), float(row[3]), float(row[4]) ]])\n print(csvFilePath + \": labels loaded\")\n return labels", "def read_labels(labels_path):\n with open(labels_path, 'r') as file:\n data = file.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n return data", "def read_label(filepath, read_scalars=False):\n label_array = np.loadtxt(filepath, dtype=np.int, skiprows=2, usecols=[0])\n if read_scalars:\n scalar_array = np.loadtxt(filepath, skiprows=2, usecols=[-1])\n return label_array, scalar_array\n return label_array", "def load_train_data():\n\n # Load X_train\n with open('X_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TRAIN_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_train = np.array(feature_string_matrix)\n # Load Y_train\n with open('y_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n y_string = []\n for row in reader:\n y_value = [float(row['y'])]\n y_string.append(y_value)\n y_train = np.array(y_string)\n return X_train, y_train", "def loadTestData():\n path = raw_input(\"Enter the path of Test Data: \")\n data = np.genfromtxt(path, delimiter=',', dtype=int)\n\n labels = data[:, -1]\n\n unwantedLabels = [4, 5, 6, 7, 8, 9]\n listToDelete = []\n for i, line in enumerate(range(len(data))):\n if labels[i] in unwantedLabels:\n listToDelete.append(i)\n\n actualData = np.delete(data, listToDelete, axis=0)\n\n # print(actualData.shape)\n # Separating the labels and data into different arrays\n actualLabels = actualData[:, -1]\n actualData = actualData[:, :-1]\n\n actualData = pre.scale(actualData)\n\n # Change the label vector to label matrix\n # If Label is 2 then it becomes [0, 1, 0]\n labelMatrix = np.zeros((actualLabels.shape[0], 4))\n for j in range(len(actualLabels)):\n if actualLabels[j] == 0:\n labelMatrix[j][0] = 1\n if actualLabels[j] == 1:\n labelMatrix[j][1] = 1\n if actualLabels[j] == 2:\n labelMatrix[j][2] = 1\n if actualLabels[j] == 3:\n labelMatrix[j][3] = 1\n\n return actualData, actualLabels", "def load_csv(data_file_path, class_index=-1):\n\n handle = open(data_file_path, 'r')\n contents = handle.read()\n handle.close()\n rows = contents.split('\\n')\n out = np.array([[float(i) for i in r.split(',')] for r in rows if r])\n\n if class_index == -1:\n classes = map(int, out[:, class_index])\n features = out[:, :class_index]\n return features, classes\n\n elif class_index == 0:\n classes = map(int, out[:, class_index])\n features = out[:, 1:]\n return features, classes\n\n else:\n return out", "def load_data(csv_filename):\n data = np.genfromtxt(csv_filename, delimiter=\";\", skip_header=1, usecols=range(11))\n return data", "def load_samples_and_labels(data_path, header=True, col=1, train=True):\n if header:\n start_index = 1\n else:\n start_index = 0\n\n with open(data_path, 'r', encoding='utf-8') as f:\n lines = f.read().splitlines()[start_index:]\n samples = [line.split(',')[col] for line in lines]\n samples = [sample.split() for sample in samples]\n\n if train:\n labels = [int(line.split(',')[3]) for line in lines]\n else:\n labels = []\n\n return samples, labels", "def _read_labels_csv_file(self, csv_file_path, image_file_paths):\n\n self.__logger.debug('[Get Labels]')\n self.__logger.debug('Read CSV Labels ( %s ) 
...' % csv_file_path)\n\n image_file_names = self.get_file_names_from_file_paths(file_paths=image_file_paths)\n\n labels = []\n\n with open(csv_file_path, newline='') as csvfile:\n read_image_files = 0 # numbers of image files read\n rows = csv.reader(csvfile)\n\n for row in rows:\n file_name = row[0]\n # make file name from '00030183_004.png' to '00030183_004'\n file_name = file_name.split('.')\n file_name = file_name[0]\n\n # if csv file name matches image file name, the label of the former will be stored in labels (list)\n if file_name == image_file_names[read_image_files]: # image_file_name has to remove str '.jpg'\n label = row[1].split('|')\n label_id = []\n for i in range(len(label)):\n label_id.append(Xray_class_id[label[i]])\n labels.append(label_id) # store the label\n\n read_image_files += 1\n if read_image_files == len(image_file_names): # if numbers of image files read equals numbers of\n # batch images, then break\n break\n\n self.__logger.debug('Done !')\n\n return labels", "def get_labels_df():\n labels_df = pd.read_csv('data/train/truth_train.csv', header=None)\n return labels_df", "def read_csv(path_to_file):\n position = []\n classification = []\n with open(path_to_file, 'r') as csv_file:\n reader = csv.reader(csv_file)\n next(reader, None) # skip the header\n\n for row in reader:\n position.append(np.array([float(row[0]), float(row[1])]))\n classification.append(float(row[2]))\n\n return np.array(position), np.array(classification, dtype='uint8')", "def load_csv_data(data_path, sub_sample=False):\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=1)\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n ids = x[:, 0].astype(np.int)\n input_data = x[:, 2:]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y=='b')] = -1\n \n # sub-sample\n if sub_sample:\n yb = yb[::50]\n input_data = input_data[::50]\n ids = ids[::50]\n\n return yb, input_data, ids", "def load_csv_data(data_path, sub_sample=False):\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=1)\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n ids = x[:, 0].astype(np.int)\n input_data = x[:, 2:]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y=='b')] = -1\n \n # sub-sample\n if sub_sample:\n yb = yb[::50]\n input_data = input_data[::50]\n ids = ids[::50]\n\n return yb, input_data, ids", "def load_data(fname, skip_header=0, delimiter=','):\n\n data = np.genfromtxt(fname, dtype=str, comments=None, delimiter=delimiter, skip_header=skip_header)\n\n pathes = data[:, 0]\n labels = data[:, 1]\n\n return pathes, labels" ]
[ "0.706477", "0.68677425", "0.6859685", "0.6782772", "0.67529887", "0.6746655", "0.67292655", "0.66810787", "0.66234106", "0.65906125", "0.6552061", "0.65014195", "0.64909846", "0.646308", "0.6431648", "0.6414293", "0.6412473", "0.6407337", "0.63972247", "0.6389596", "0.63569885", "0.6344617", "0.634075", "0.63401765", "0.6335867", "0.6291479", "0.6288128", "0.62760293", "0.62760293", "0.6273798" ]
0.7765821
0
Given a vector of predictions, save results in CSV format.
def save_results(predictions, filename): with open(filename, 'w') as f: f.write("id,ACTION\n") for i, pred in enumerate(predictions): f.write("%d,%f\n" % (i + 1, pred))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_results(file_path, predictions):\n with open(file_path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\",\")\n writer.writerow([\"Id\", \"Bound\"])\n for id, bound in enumerate(predictions):\n writer.writerow([id, bound])", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(zip(y_pred))\n out.close()", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()", "def write_predictions(pred, filename=\"pred.csv\"):\n output_file = open(filename, \"wb\")\n writer = csv.writer(output_file)\n datetimes = get_datetimes(\"test.csv\")\n\n writer.writerow([\"datetime\", \"count\"])\n\n for index, count in enumerate(pred):\n writer.writerow([datetimes[index], int(count)])\n\n output_file.close()", "def write_results_to_csv(ids,\n sentiments_actuals,\n sentiments_predictions,\n filename):\n output = pd.DataFrame(data={\n \"id\": ids,\n \"sentiment_actual\": sentiments_actuals,\n \"sentiment_predicted\": sentiments_predictions})\n output.to_csv(filename, index=False, quoting=3)", "def write_predictions_to_file(predictor, testDataFname, enc, outputFname, features=None):\n\n testData, _, testDataIds, _ = make_data(testDataFname, features=features, enc=enc)\n\n dt = datetime.now()\n predictions = predictor.predict(testData)\n print 'predicting took', datetime.now() - dt\n\n featureSelectionOutput = np.transpose(np.vstack((testDataIds, predictions.round().astype(int))))\n\n with open(outputFname, 'wb') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow(['id', 'loss'])\n writer.writerows(featureSelectionOutput)", "def store_classes_and_predictions(output_file_path, classes, predictions):\n with open(output_file_path, mode='a', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['true', 'predicted'])\n for i in range(len(classes)):\n csvwriter.writerow([classes.iloc[i], predictions.iloc[i]])", "def write_results(self, results):\n predictions = open('hmm_results.csv', 'w')\n predictions.write(\"Type,Prediction\")\n for type in results:\n if type == 'O':\n continue\n predictions.write(\"\\n\" + str(type) + \",\")\n for interval in results[type]:\n predictions.write(str(interval) + \" \")\n predictions.close()", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':r1,'Prediction':round(r2)})", "def write_predictions(prediction_dic, result_path):\n with open(result_path, 'wb') as outfile:\n outfile.write(bytes('Patient_ID,HPV/p16_status\\n', 'UTF-8'))\n for patient_id, pred in prediction_dic.items():\n outfile.write(bytes(str(patient_id) + ',' + str(pred) + '\\n', 'UTF-8'))", "def predictions_to_csv(outstream, decomposition: FreeWilsonDecomposition, predictions):\n writer = None\n for pred in predictions:\n if not writer:\n rgroups = set()\n for rgroup in decomposition.rgroups:\n rgroups.add(rgroup)\n rgroups = sorted(rgroups, key=_rgroup_sort)\n\n lookup = {}\n for i, rg in 
enumerate(rgroups):\n lookup[rg] = i\n writer = csv.writer(outstream)\n header = ['smiles', 'prediction'] + [f\"{rg}_smiles\" for rg in list(rgroups)]\n writer.writerow(header)\n rg = [\"\"] * len(lookup)\n for s in pred.rgroups:\n rg[lookup[s.rgroup]] = s.smiles\n\n row = [pred.smiles, repr(pred.prediction)] + rg\n writer.writerow(row)\n return header", "def log_inference(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8', \r\n\t\t\t\tfloat_format='%.3f', index=False)\r\n\r\n\t\twith open(path + \"-predictions.csv\", \"w\") as f:\r\n\t\t\tresults[[\"tag\", \"y_hat\"]].to_csv(\r\n\t\t\t\tf, index=False, float_format='%.3f', header=False)", "def save_predictions(gtfilename, loss_type, probs, preds, outfile):\n\n # 1. get file ids\n liste_fileids = []\n targets = []\n passFirstLine=True\n with open(gtfilename, 'r') as fh:\n for line in fh:\n if passFirstLine:\n passFirstLine = False\n continue\n tmp = line.rstrip().split(',')\n liste_fileids.append(tmp[0])\n targets.append(tmp[1])\n\n print 'liste_fileids', len(liste_fileids)\n # 2. save preds\n import csv\n with open(outfile, 'w') as csvfile:\n # fieldnames = ['itemid', 'hasbird', 'pred', 'gt']\n fieldnames = ['itemid', 'hasbird']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n if loss_type == 'categorical_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i, 1], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i, 1]})\n elif loss_type == 'binary_hinge' or loss_type == 'binary_crossentropy' or loss_type == 'weighted_binary_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i][0], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i][0]})\n\n print \"INFO: predictions (positive class probas) saved to file:\", outfile", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id': int(r1), 'Prediction': int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer 
= csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def save_results(self):\n results = pd.concat([\n pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)", "def exportEvaluation(self,results,url):\n profbox()\n if not os.path.exists(url):\n open(url, 'w').close()\n myfile = open(url, 'a')\n\n wr = csv.writer(myfile)\n r = numpy.array(results)\n if len(r.shape) == 1:\n wr.writerow(results)\n else:\n wr.writerows(results)", "def save_prediction(self, meta, y_pred, y, filename):\n df = pd.DataFrame(meta)\n df['y_pred'] = y_pred\n df['y'] = y\n print(df)\n df.loc[:, 'id'] = df.index\n self.df_to_csv(df, filename, store_header=False)", "def create_csv_submission(ids, y_pred, name):\n # negative class has to be labelled -1 on AIcrowd\n y_pred[y_pred == 0] = -1\n\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def __write_csv(self, prediction_probs, n, filename):\n d = {'Id': pd.Series([i for i in xrange(1, n + 1)]),\n 'Action': pd.Series(prediction_probs)}\n df = pd.DataFrame(d)\n df = df[['Id', 'Action']]\n df.to_csv(filename, sep=',', encoding='utf-8',\n index=False)", "def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)", "def writeResultsToDisk(header, results, data, filename):\n for i, datum in enumerate(data):\n np.append(data, results[i])\n header.append(\"diagnosis\")\n np.insert(data, 0, header)\n with open(filename, 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for row in data:\n writer.writerow(row)\n return header, np.array(data)", "def write_results(results):\n with RESULTS_PATH.open(\"w\") as writer:\n csvwriter = csv.writer(writer)\n 
csvwriter.writerows(results)", "def save_predictions(battle_name: str, data: str, predictions: List):\n path = './data_reader/data/predictions/' + data + '.' + battle_name\n with open(path, 'w') as outfile:\n for prediction in predictions:\n outfile.write(str(prediction) + '\\n')" ]
[ "0.7925112", "0.75827205", "0.75710183", "0.75710183", "0.7489846", "0.74501824", "0.73730016", "0.7322112", "0.70706344", "0.6982982", "0.69460446", "0.6911982", "0.6833308", "0.6815102", "0.67553407", "0.6745336", "0.6745336", "0.6745336", "0.6745336", "0.6745336", "0.6745336", "0.6735976", "0.67324305", "0.67095983", "0.6690879", "0.6612334", "0.6602958", "0.6601568", "0.65663135", "0.6558502" ]
0.78001994
1
The following function is used to format the numbers. First, the suffixes "th, st, nd, rd" are removed.
def clean_numbers(self, x): # remove "th" after a number matches = re.findall(r'\b\d+\s*th\b', x) if len(matches) != 0: x = re.sub(r'\s*th\b', " ", x) # remove "rd" after a number matches = re.findall(r'\b\d+\s*rd\b', x) if len(matches) != 0: x = re.sub(r'\s*rd\b', " ", x) # remove "st" after a number matches = re.findall(r'\b\d+\s*st\b', x) if len(matches) != 0: x = re.sub(r'\s*st\b', " ", x) # remove "nd" after a number matches = re.findall(r'\b\d+\s*nd\b', x) if len(matches) != 0: x = re.sub(r'\s*nd\b', " ", x) # replace standalone numbers higher than 10 by # # this function does not touch numbers linked to words like "G-20" matches = re.findall(r'^\d+\s+|\s+\d+\s+|\s+\d+$', x) if len(matches) != 0: x = re.sub('^[0-9]{5,}\s+|\s+[0-9]{5,}\s+|\s+[0-9]{5,}$', ' ##### ', x) x = re.sub('^[0-9]{4}\s+|\s+[0-9]{4}\s+|\s+[0-9]{4}$', ' #### ', x) x = re.sub('^[0-9]{3}\s+|\s+[0-9]{3}\s+|\s+[0-9]{3}$', ' ### ', x) x = re.sub('^[0-9]{2}\s+|\s+[0-9]{2}\s+|\s+[0-9]{2}$', ' ## ', x) # we do include the range from 1 to 10 as all word-vectors include them # x = re.sub('[0-9]{1}', '#', x) return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _format_numbers(smth: any) -> any:\n if isinstance(smth, int):\n return float(smth)\n elif smth == 'N.V.':\n return 0.0 # meaning, wine is of type 'non-vintage' and is made of grapes from more than one harvest\n else:\n return smth", "def ordinal_filter(value):\n digit = value % 10\n if 10 < value < 20:\n o = 'th'\n elif digit is 1:\n o = 'st'\n elif digit is 2:\n o = 'nd'\n elif digit is 3:\n o = 'rd'\n else:\n o = 'th'\n return '%d%s' % (value, o)", "def format(number):\n number = compact(number)\n return '-'.join([\n number[:2],\n number[2:6],\n number[6:13],\n number[13:]])", "def format(number):\n number = compact(number)\n return ' '.join((number[:2], number[2:5], number[5:8], number[8:]))", "def transform(s):\r\n return 'digit ' + str(s)", "def numerize():\n pass", "def thou(n):\n if pthou:\n return \"{:,d}\".format(n)\n return \"{:d}\".format(n)", "def format_number(separator, n):\n n_s = str(n)\n if len(n_s) <= 3:\n return n_s\n else:\n upper = n_s[:-3]\n lower = n_s[-3:]\n return format_number(separator, upper) + separator + lower", "def _remove_digit_blocks(self, text: str) -> str:\n return re.sub(r\"\\b\\d+\\b\", \" \", str(text))", "def transform(s):\n return 'digit ' + str(s)", "def filter_format_number(val, places: Optional[int] = None, grouping: bool = True) -> str:\n if not isinstance(val, (int, float)):\n return val\n if places is not None:\n format_str = f'%.{places}f'\n elif isinstance(val, int):\n format_str = '%d'\n else:\n format_str = '%.02f'\n\n locale.setlocale(locale.LC_ALL, '')\n return locale.format_string(format_str, val, grouping)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def _num2str(self, num):\n q, mod = divmod(num, 10)\n suffix = \"th\" if q == 1 else self.SUFFIX_DICT[mod]\n return f\"{num}{suffix}\"", "def clean_numbers(text):\n return regex.sub(\"\\d+\", ' NUM', text)", "def tweet_clean_numbers(word):\n if not re.search(r'[0-9]+', word):\n return word\n if len(word)==4 and re.search(r'[0-9]{4}', word) and 1900 < int(word) < 2019:\n return word\n word = re.sub(r'^([0-9]|[\\+\\-%/\\*\\.:])+[0-9%/\\+\\*\\.x:]*$', '<number>', word)\n return word", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return ' '.join(new_words)", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return ' '.join(new_words)", "def textualize(num):\n if isinstance(num, float):\n num = int(num)\n # special case\n if num == 0:\n return 'zero'\n\n # if the number is negative, we put the word\n # 'negative' in front of it.\n is_negative = False\n if num < 0:\n is_negative = True\n num = -1 * num\n\n num = str(num)\n # pad with zeroes\n while len(num) % 3 != 0:\n num = ''.join([ '0', num ])\n\n # as groups are textualized, their strings will be\n # appended to this list\n num_string = []\n group_counter = 0\n while len(num) > 0:\n group = num[-3:]\n num = num[:-3]\n text = _textualize_group(group)\n\n # thousand, million, etc.\n if group_counter > 0 and text:\n group_name = group_names[group_counter]\n text = ' '.join([ text, group_name ])\n\n if text:\n num_string.insert(0, text)\n\n 
group_counter += 1\n\n if is_negative:\n num_string.insert(0, 'negative')\n\n return ' '.join(num_string)", "def suffix(d): \n return \"th\" if 11<=d<=13 else {1:\"st\",2:\"nd\",3:\"rd\"}.get(d%10, \"th\")", "def remove_numbers_fun(self):\n self.doc = re.sub(\"[0-9]\", \"\", self.doc)", "def format_number(num):\n result = \" \" + str(num) + \" \"\n if num < 10:\n result = result + \" \"\n return result", "def strip_numbers(s):\n if s:\n s = u' '.join([x for x in s.split(' ') if not x.isdigit()])\n return s", "def compact(number):\n number = clean(number, ' ').upper().strip()\n if number.startswith('AL'):\n number = number[2:]\n if number.startswith('(AL)'):\n number = number[4:]\n return number", "def compact(number):\n return clean(number, ' -./,').strip()", "def scinotation(self, num):\n num = num.replace(\"D\", \"e\")\n return f\"{decimal.Decimal(num):.9e}\"", "def _remove_digits(self, text: str) -> str:\n return re.sub(r\"\\d+\", \" \", str(text))", "def formatted_number(number):\n try:\n number = int(number)\n if number < 0:\n return '-' + formatted_number(-number)\n result = ''\n while number >= 1000:\n number, number2 = divmod(number, 1000)\n result = \",%03d%s\" % (number2, result)\n return \"%d%s\" % (number, result)\n except Exception:\n return \"\"", "def formatter(t: tuple):\n s = 'The {} numbers are: ' + '{}, '*(len(t)-1) + '{}'\n return s.format(len(t),*t)" ]
[ "0.67999727", "0.62027526", "0.61682063", "0.6136683", "0.6127746", "0.6096326", "0.60839456", "0.6068743", "0.6052151", "0.5988611", "0.5986997", "0.59576005", "0.59576005", "0.59576005", "0.5921066", "0.59175247", "0.59080505", "0.59045625", "0.59045625", "0.58787817", "0.58733374", "0.5869161", "0.58451086", "0.5836542", "0.5835291", "0.5827486", "0.5820193", "0.577612", "0.57738763", "0.5761904" ]
0.7203597
0
This function is used to replace "yr, yrs" with "year" and "hr, hrs" with "hour".
def year_and_hour(self, text): # Find matches for "yr", "yrs", "hr", "hrs" matches_year = re.findall(r'\b\d+\s*yr\b', text) matches_years = re.findall(r'\b\d+\s*yrs\b', text) matches_hour = re.findall(r'\b\d+\s*hr\b', text) matches_hours = re.findall(r'\b\d+\s*hrs\b', text) # replace all matches accordingly if len(matches_year) != 0: text = re.sub(r'\b\d+\s*yr\b', "year", text) if len(matches_years) != 0: text = re.sub(r'\b\d+\s*yrs\b', "year", text) if len(matches_hour) != 0: text = re.sub(r'\b\d+\s*hr\b', "hour", text) if len(matches_hours) != 0: text = re.sub(r'\b\d+\s*hrs\b', "hour", text) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_time(text, ori):\n r = ori\n if '**' in text:\n r = 'xxhour'\n else:\n try:\n # handle exceptions with custom rules\n f, s = text.split()\n s = 'am' if s[0] == 'a' else 'pm'\n l, r = f.split(':')\n if l == '' or l == '00':\n if r == '':\n r = str(0).zfill(2)\n l = str(12)\n if int(l) > 12:\n l = str(int(l) % 12)\n f = ':'.join([l, r])\n text = ' '.join([f, s])\n\n d = datetime.strptime(text, '%I:%M %p')\n if d.hour >= 0 and d.hour < 4:\n r = 'xxmidngt'\n elif d.hour >= 4 and d.hour < 8:\n r = 'xxdawn'\n elif d.hour >= 8 and d.hour < 12:\n r = 'xxfore'\n elif d.hour >= 12 and d.hour < 16:\n r = 'xxafter'\n elif d.hour >=16 and d.hour <20:\n r = 'xxdusk'\n else:\n r = 'xxngt'\n except ValueError:\n pass\n return r", "def convert_24hr_12ampm(military_hr): \n\t\n if military_hr == 0:\n hour_ampm_str = \"12am\"\n elif military_hr == 12:\n hour_ampm_str = \"12pm\"\n elif military_hr > 12:\n hour_ampm_str = str(military_hr - 12) + \"pm\"\n else:\n hour_ampm_str = str(military_hr) + \"am\"\n # end of if block\n \n return hour_ampm_str", "def time_input():\n \n year = 2020\n month = 3 # number \n day = 12 # number in month\n hour = 12 # integer between 9 (= 9:00AM) and 17 (= 4:00PM) ## CHECK THIS\n minute = 0 # float between 0 (= 0 min) to 0.983 = 59 min)\n \n date=dt.datetime(year,month,day)\n time = date.timetuple().tm_yday\n time = time + hour/24 + minute/24/60\n \n return year, time", "def normalise_time(time_str):\n\n hour = time_str.split(\":\")[0]\n if int(hour) >= 24:\n normalised_hour = int(hour) % 24\n return time_str.replace(hour, f\"{normalised_hour:02}\")\n\n return time_str", "def normalise_two_digit_year(y):\r\n if y[0] == \"'\":\r\n y = y[1:]\r\n if int(y) < 39:\r\n return '%04d' % (int(y) + 2000)\r\n elif int(y) < 100:\r\n return '%04d' % (int(y) + 1900)\r\n else:\r\n return '%04d' % int(y[:4])", "def test_short_format_contains_year(self):\n locale = {\n 'timeformat': '%H:%M',\n 'dateformat': '%Y-%m-%d',\n 'longdateformat': '%Y-%m-%d',\n 'datetimeformat': '%Y-%m-%d %H:%M',\n 'longdatetimeformat': '%Y-%m-%d %H:%M',\n }\n assert (dt.datetime(2017, 1, 1), dt.datetime(2017, 1, 2), True) == \\\n guessrangefstr('2017-1-1 2017-1-1', locale=locale)", "def do_ry(self, arg):\n self.do_timesheet('report year')", "def readdate(line):\n splitted = line.split('::') \n \n # Convert the date\n date = dt.datetime.strptime(splitted[1].strip(), '%a %b %d %H:%M:%S')\n correctdate = date.replace(year=YEAR)\n return correctdate", "def extract(d):\n \n Y, M, D, W, H = (None for _ in range(5))\n \n def get_hour(groups):\n H, m, s = (int(x) for x in groups[4:7])\n if groups[8] == 'am' and H == 12:\n H = 0\n if groups[8] == 'pm' and 0 < H < 12:\n H += 12\n return H + m/60 + s/3600\n \n if type(d) == str:\n d = d.lower()\n match = re.match(r'^(\\d+)/(\\d+)/(20\\d+)( (\\d+):(\\d+):(\\d+)( (am|pm))?)?', d)\n if match is None:\n match = re.match(r'^(\\d+)-([a-z]+)-(\\d+)( (\\d+):(\\d+):(\\d+)( (am|pm))?)?', d)\n if match is None:\n return\n else:\n month = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']\n D = int(match.group(1))\n M = month.index(match.group(2)) + 1\n Y = 2000 + int(match.group(3))\n W = datetime.date(Y, M, D).timetuple()[6]\n if match.group(4) is None:\n H = -1\n else:\n H = get_hour(match.groups())\n else:\n M, D, Y = (int(x) for x in (match.groups())[:3])\n W = datetime.date(Y, M, D).timetuple()[6]\n if match.group(4) is None:\n H = -1\n else:\n H = get_hour(match.groups())\n return (Y, M, D, W, H)", "def year_expand(s):\n regex = 
r\"^((?:19|20)\\d{2})?(\\s*-\\s*)?((?:19|20)\\d{2})?$\"\n try:\n start, dash, end = re.match(regex, ustr(s)).groups()\n start = start or 1900\n end = end or 2099\n except AttributeError:\n return 1900, 2099\n return (int(start), int(end)) if dash else (int(start), int(start))", "def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second", "def year(cls, year: typing.Union[int, str])->str:\n yearstr: str\n if isinstance(year, int):\n yearstr = str(year)\n else:\n yearstr = year\n return cls.DATE_AND_TIMES_SIGIL + yearstr + \"-01-01T00:00:00/9\"", "def build_convert_to_hours(time_units):\n if time_units not in VALID_TIME_UNITS:\n raise ValueError('Time units must be one of', VALID_TIME_UNITS)\n \n if time_units == 'min':\n return lambda x: x/60\n elif time_units == 'h':\n return lambda x: x", "def test_evaluate_year_expression(self):\n for f, r in (\n (\"year\", 2013),\n (\"month\", 9),\n (\"day\", 1),\n (\"hour\", 10),\n (\"minute\", 56),\n (\"second\", 0)):\n value = self.evaluate_common(\"%s(datetime'2013-09-01T10:56')\" % f)\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value == r)\n try:\n value = self.evaluate_common(\n \"%s(datetimeoffset'2013-09-01T10:56:12-05:00')\" % f)\n self.fail(\"datetimeoffset %s\" % f)\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\n \"%s(datetime'2013-09-01T10:56',\"\n \"datetime'2013-09-01T10:57')\" % f)\n self.fail(\"2 parameters\")\n except odata.EvaluationError:\n pass", "def test_get_date_format_code(self):\n\n test_format = self.test_format\n start, end = custom_date.get_date_code_span(\"Y\", test_format)\n self.assertEqual(start, 5)\n self.assertEqual(end, 9)\n\n start, end = custom_date.get_date_code_span(\"H\", test_format)\n self.assertEqual(start, 15)\n self.assertEqual(end, 17)", "def converttime(time, currentformat, newformat):\n\n # Define conversion dictionary\n conversions = {\n \"milliseconds\": {\n \"milliseconds\": \"time\",\n \"seconds\": \"time / 1000\",\n \"minutes\": \"time / 1000 / 60\",\n \"hours\": \"time / 1000 / 60 / 60\",\n \"days\": \"time / 1000 / 60 / 60 / 24\",\n \"weeks\": \"time / 1000 / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 1000 / 60 / 60 / 24 / 14\",\n \"years\": \"time / 1000 / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 1000 / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 1000 / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 1000 / 60 / 60 / 24 / 365 / 1000\"\n },\n \"seconds\": {\n \"milliseconds\": \"time * 1000\",\n \"seconds\": \"time\",\n \"minutes\": \"time / 60\",\n \"hours\": \"time / 60 / 60\",\n \"days\": \"time / 60 / 60 / 24\",\n \"weeks\": \"time / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 60 / 24 / 14\",\n \"years\": \"time / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 60 / 24 / 365 / 1000\"\n },\n \"minutes\": {\n \"milliseconds\": \"time * 60 * 1000\",\n \"seconds\": \"time * 60\",\n \"minutes\": \"time\",\n \"hours\": \"time / 60\",\n \"days\": \"time / 60 / 24\",\n \"weeks\": \"time / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 24 / 14\",\n \"years\": \"time / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 24 / 365 / 1000\"\n },\n \"hours\": {\n 
\"milliseconds\": \"time * 60 * 60 * 1000\",\n \"seconds\": \"time * 60 * 60\",\n \"minutes\": \"time * 60\",\n \"hours\": \"time\",\n \"days\": \"time / 24\",\n \"weeks\": \"time / 24 / 7\",\n \"fortnights\": \"time / 24 / 14\",\n \"years\": \"time / 24 / 365\",\n \"decades\": \"time / 24 / 365 / 10\",\n \"centuries\": \"time / 24 / 365 / 100\",\n \"millenniums\": \"time / 24 / 365 / 1000\"\n },\n \"days\": {\n \"milliseconds\": \"time * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 24 * 60 * 60\",\n \"minutes\": \"time * 24 * 60\",\n \"hours\": \"time * 24\",\n \"days\": \"time\",\n \"weeks\": \"time / 7\",\n \"fortnights\": \"time / 14\",\n \"years\": \"time / 365\",\n \"decades\": \"time / 365 / 10\",\n \"centuries\": \"time / 365 / 100\",\n \"millenniums\": \"time / 365 / 1000\"\n },\n \"weeks\": {\n \"milliseconds\": \"time * 7 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 7 * 24 * 60 * 60\",\n \"minutes\": \"time * 7 * 24 * 60\",\n \"hours\": \"time * 7 * 24\",\n \"days\": \"time * 7\",\n \"weeks\": \"time\",\n \"fortnights\": \"time / 2\",\n \"years\": \"time / 52\",\n \"decades\": \"time / 52 / 10\",\n \"centuries\": \"time / 52 / 100\",\n \"millenniums\": \"time / 52 / 1000\"\n },\n \"fortnights\": {\n \"milliseconds\": \"time * 14 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 14 * 24 * 60 * 60\",\n \"minutes\": \"time * 14 * 24 * 60\",\n \"hours\": \"time * 14 * 24\",\n \"days\": \"time * 14\",\n \"weeks\": \"time * 2\",\n \"fortnights\": \"time\",\n \"years\": \"time / 26\",\n \"decades\": \"time / 26 / 10\",\n \"centuries\": \"time / 26 / 100\",\n \"millenniums\": \"time / 26 / 1000\"\n },\n \"years\": {\n \"milliseconds\": \"time * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 256 * 24 * 60\",\n \"hours\": \"time * 256 * 24\",\n \"days\": \"time * 256\",\n \"weeks\": \"time * 52\",\n \"fortnights\": \"time * 26\",\n \"years\": \"time\",\n \"decades\": \"time / 10\",\n \"centuries\": \"time / 100\",\n \"millenniums\": \"time / 1000\"\n },\n \"decades\": {\n \"milliseconds\": \"time * 10 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 10 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 10 * 256 * 24 * 60\",\n \"hours\": \"time * 10 * 256 * 24\",\n \"days\": \"time * 10 * 256\",\n \"weeks\": \"time * 10 * 52\",\n \"fortnights\": \"time * 10 * 26\",\n \"years\": \"time * 10\",\n \"decades\": \"time\",\n \"centuries\": \"time / 10\",\n \"millenniums\": \"time / 100\"\n },\n \"centuries\": {\n \"milliseconds\": \"time * 100 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 100 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 100 * 256 * 24 * 60\",\n \"hours\": \"time * 100 * 256 * 24\",\n \"days\": \"time * 100 * 256\",\n \"weeks\": \"time * 100 * 52\",\n \"fortnights\": \"time * 100 * 26\",\n \"years\": \"time * 100\",\n \"decades\": \"time * 10\",\n \"centuries\": \"time\",\n \"millenniums\": \"time / 10\"\n },\n \"millenniums\": {\n \"milliseconds\": \"time * 1000 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 1000 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 1000 * 256 * 24 * 60\",\n \"hours\": \"time * 1000 * 256 * 24\",\n \"days\": \"time * 1000 * 256\",\n \"weeks\": \"time * 1000 * 52\",\n \"fortnights\": \"time * 1000 * 26\",\n \"years\": \"time * 1000\",\n \"decades\": \"time * 100\",\n \"centuries\": \"time * 10\",\n \"millenniums\": \"time\"\n }\n }\n\n # Return evaluated value\n return eval(conversions[currentformat][newformat])", "def times_filter(d, times, 
meets_criteria=matches_timestr):\n mapping = map(type, times)\n if [ str, type(None), type(None) ] == mapping and meets_criteria(times[0]):\n d1 = doytimestr_to_datetime('%d:%s:00' % (d[0].year, times[0].replace('/',':')))\n #return '%s' % d1\n return d1, d1, 0\n elif [ str, str, type(None) ] == mapping and meets_criteria(times[0]) and meets_criteria(times[1]):\n d1 = doytimestr_to_datetime('%d:%s:00' % (d[0].year, times[0].replace('/',':')))\n d2 = doytimestr_to_datetime('%d:%s:00' % (d[1].year, times[1].replace('/',':')))\n #return '%s to %s' % (d1, d2)\n return d1, d2, timedelta_hours(d2-d1)\n else:\n #return ''\n return None, None, None", "def test_short_format_contains_year(self):\n locale = {\n 'timeformat': '%H:%M',\n 'dateformat': '%Y-%m-%d',\n 'longdateformat': '%Y-%m-%d',\n 'datetimeformat': '%Y-%m-%d %H:%M',\n 'longdatetimeformat': '%Y-%m-%d %H:%M',\n }\n assert (dt.datetime(2017, 1, 1), True) == guessdatetimefstr(\n '2017-1-1'.split(), locale=locale, default_day=dt.datetime.today())\n assert (dt.datetime(2017, 1, 1, 16, 30), False) == guessdatetimefstr(\n '2017-1-1 16:30'.split(), locale=locale, default_day=dt.datetime.today())", "def sec_to_hm(t):\n t = int(t)\n s = t % 60\n t //= 60\n m = t % 60\n t //= 60\n return t, m, s", "def scaledTime():\n #return (time.gmtime().tm_wday, time.gmtime().tm_hour)\n epoch = time.strptime(\"2013-02-21 11:30:00\", \"%Y-%m-%d %H:%M:%S\")\n timeInSec = time.mktime(time.gmtime()) - time.mktime(epoch)\n hourSince = timeInSec / Predictor.hourScale\n day = int(hourSince / 24 % 7)\n hour = int(hourSince % 24)\n return (day, hour)", "def check_hour_range(self, hour):\n if 0 <= hour <= 5:\n return 'Early Morning'\n if 6 <= hour <= 11:\n return 'Day Time'\n if 12 <= hour <= 17:\n return 'Afternoon'\n if 18 <= hour <= 23:\n return 'Evening'", "def _set_time(line, old_time, swap_time):\n line = re.sub(str(old_time), str(swap_time), line, 2)\n return line", "def convert_times(value):\r\n day_patern = re.compile('\\d{4}-\\d{2}-\\d{2}')\r\n week_pattern = re.compile('\\d{4}-W\\d{2}')\r\n month_pattern = re.compile('\\d{4}-\\d{2}')\r\n year_pattern = re.compile('\\d{4}')\r\n\r\n if re.match(day_patern, value):\r\n date = datetime.strptime(value, '%Y-%m-%d')\r\n end = date + timedelta(days=1)\r\n return date, end\r\n elif re.match(week_pattern, value):\r\n date = datetime.strptime(value + '-1', '%Y-W%W-%w')\r\n end = date + timedelta(days=7)\r\n return date, end\r\n elif re.match(month_pattern, value):\r\n date = datetime.strptime(value, '%Y-%m')\r\n if date.month == 12:\r\n end = date.replace(year=date.year + 1, month=1)\r\n else:\r\n end = date.replace(month=date.month + 1)\r\n return date, end\r\n elif re.match(year_pattern, value):\r\n date = datetime.strptime(value, '%Y')\r\n end = date.replace(year=date.year + 1)\r\n return date, end\r\n else:\r\n raise ValueError('Date not recognised')", "def test_date_by_yr(self):\n spi_search = \"find date 2002\"\n inv_search = \"year:2002\"\n self._compare_searches(inv_search, spi_search)", "def sanitize(time_string): # Fix non-uniformity in the athletes data to enable sorting\n if '-' in time_string:\n splitter = '-'\n (mins, secs) = time_string.split(splitter)\n elif ':' in time_string:\n splitter = ':'\n (mins, secs) = time_string.split(splitter)\n else:\n return time_string\n return '{0}.{1}'.format(mins, secs)", "def fix_time_fields(self):\n time_fields = {\"Time of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in 
range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)", "def year_tracker(words):\n new_words = []\n for w in words:\n new_word = re.sub(r\"^[1][789][0-9]{2}$\", \"jahreszahl\", w) # for 1700-1999\n new_word = re.sub(r\"^[2][01][0-9]{2}$\", \"jahreszahl\", new_word) # for 2000-2199\n new_words += [new_word]\n return new_words", "def get_year(parameters_dictionary):\n if \"start-year\" in parameters_dictionary.keys():\n year = int(parameters_dictionary[\"start-year\"])\n return str(year) + str(year + 1)\n elif \"end-year\" in parameters_dictionary.keys():\n year = int(parameters_dictionary[\"end-year\"])\n return str(year - 1) + str(year)\n else:\n return str(THIS_YEAR - 1) + str(THIS_YEAR)", "def parse_time(expr):\n # first deal with hour\n hsp = expr.lower().split('h')\n if len(hsp) > 1: h = int(hsp[0])\n else: h = 0\n # now hour is out of the way\n expr = hsp[-1]\n msp = expr.lower().split('m')\n if len(msp) > 1: m = int(msp[0])\n else: m = 0\n return f\"{h:02d}:{m:02d}:00\"", "def translate_years(val):\n if val.find(\"-\") > 0:\n tokens = re.findall(\"[0-9]+\", val)\n one = int(tokens[0])\n two = int(tokens[1])\n one = (1900 + one) if one > 50 else (2000 + one)\n two = (1900 + two) if two > 50 else (2000 + two)\n return range(one, two + 1)\n tokens = re.findall(\"[0-9]+\", val)\n return [int(f\"{'19' if int(t) > 50 else '20'}{t}\") for t in tokens]" ]
[ "0.61535865", "0.54928744", "0.54547983", "0.5405917", "0.5310719", "0.5267757", "0.52620614", "0.5259947", "0.51755303", "0.5144724", "0.51272285", "0.5085674", "0.50707275", "0.50623596", "0.5054484", "0.505168", "0.5038544", "0.50103486", "0.500148", "0.498647", "0.49656528", "0.49499217", "0.49470493", "0.49417716", "0.49321747", "0.49294227", "0.49093542", "0.48932773", "0.489069", "0.48901463" ]
0.7474295
0
Performs an HTTP request set in 'method'. Returns a requests object. The method will try to catch some of the typical errors and gather error messages from the Newrelic API. Each known error has a corresponding exception. All exceptions are inherited from the generic NewRelicException. If the HTTP return code is not known, a generic NewRelicException is raised.
def _request(self, method, *args, **kwargs): try: r = getattr(requests, method)(*args, **kwargs) except AttributeError: raise NewRelicException( 'Method {} is unsupported by requests module' .format(method) ) except requests.exceptions.Timeout: raise Timeout('Request timed out after {} seconds' .format(self.timeout)) if r.status_code < 200 or r.status_code > 299: # Try to work out all known errors into separate exceptions if r.status_code == 401: try: error_message = r.json()['error']['title'] except (KeyError, ValueError): raise UnathorizedError( 'User is not authorized to perform requested operation' ) else: raise UnathorizedError(error_message) if r.status_code == 402: raise ChecksLimitExceeded( "Creating the monitor will increase your scheduled checks " "past your account's purchased check limit." ) elif r.status_code == 404: try: error_message = r.json()['error']['title'] except (KeyError, ValueError): raise ItemNotFoundError( 'Requested item not found. ' 'No error message was provided by server.' ) else: raise ItemNotFoundError(error_message) else: # If we don't know what to do with specific error code # ( most likely it's 400 ) # We at least try to get error message from the response try: response_errors = r.json()['errors'] raise NewRelicException( "The following errors were returned by server:\n{}" .format('\n' .join( [x['error'] for x in response_errors] )) ) # Sometimes API does not return any useful information. # In this case that's just an HTML page # reporting 400 instead of JSON. # We will just return an error code in this case. except ValueError: raise NewRelicException( 'Got unexpected response code {}. ' 'No additional information provided by server.' .format(r.status_code) ) return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_request(method, url, data=None, headers=None):\n try:\n if method == 'GET':\n resp = requests.get(url, headers=headers)\n return resp\n elif method == 'POST':\n resp = requests.post(url, json=data, headers=headers)\n return resp\n elif method == 'PATCH':\n resp = requests.patch(url, json=data, headers=headers)\n return resp\n except Exception, e:\n print \"Retry {} with {}, {}\".format(str(e), url, data)\n raise e", "def make_http_request(url, method='get', **kwargs):\n try:\n r = getattr(requests, method)(url, data=kwargs, verify=False)\n except AttributeError:\n r = requests.get(url, data=kwargs, verify=False)\n if 200 < r.status_code < 300:\n raise HTTPError(u'Expected HTTP response code \"2xx\" but received \"{}\"'.format(r.status_code))\n return r.content", "def _make_request(self):\n try:\n self.response = requests.request(\n method=self.method,\n url=self.url,\n params=self.params,\n data=self.data,\n )\n\n logger.debug(f\"Request URL: {self.response.url}\")\n\n self.response.raise_for_status()\n\n # wrap all `requests` library error and serve as custom application error\n except RequestException as e:\n logger.error(e.__str__(), exc_info=True)\n raise ExternalAPIError(\n \"Error while communication with External API\"\n )", "def request( # pylint: disable=arguments-differ\n self, method: str, url: str, **kwargs\n ) -> object:\n if self.base_url is not None and not url.startswith('https'):\n url = f'{self.base_url}{url}'\n\n # this kwargs value is used to signal 429 handling that this is a retry, but the super\n # method doesn't expect it so it needs to be removed.\n tc_is_retry = kwargs.pop('tc_is_retry', False)\n\n response: Response = super().request(method, url, **kwargs)\n\n if response.status_code == 429 and not tc_is_retry:\n too_many_requests_handler = self.too_many_requests_handler\n time.sleep(too_many_requests_handler(response))\n kwargs['tc_is_retry'] = True\n return self.request(method, url, **kwargs)\n\n # APP-79 - adding logging of request as curl commands\n if not response.ok or self.log_curl:\n try:\n self.log.debug(\n self.requests_to_curl.convert(\n response.request,\n mask_body=self.mask_body,\n mask_headers=self.mask_headers,\n mask_patterns=self.mask_patterns,\n proxies=self.proxies,\n verify=self.verify,\n )\n )\n except Exception: # nosec\n pass # logging curl command is best effort\n\n self.log.debug(\n f'feature=external-session, request-url={response.request.url}, '\n f'status_code={response.status_code}, elapsed={response.elapsed}'\n )\n\n return response", "def request(self, method, url, **kwargs):\n kwargs.setdefault(\"headers\", kwargs.get(\"headers\", {}))\n kwargs['headers'].setdefault('Accept', 'application/json')\n kwargs[\"headers\"][\"User-Agent\"] = self.user_agent\n if self.original_ip:\n kwargs[\"headers\"][\"Forwarded\"] = \"for=%s;by=%s\" % (\n self.original_ip, self.user_agent)\n if self.timeout is not None:\n kwargs.setdefault(\"timeout\", self.timeout)\n kwargs.setdefault(\"verify\", self.verify)\n if self.cert is not None:\n kwargs.setdefault(\"cert\", self.cert)\n self.serialize(kwargs)\n\n self._http_log_req(method, url, kwargs)\n if self.timings:\n start_time = time.time()\n resp = self.http.request(method, url, **kwargs)\n if self.timings:\n self.times.append((\"%s %s\" % (method, url),\n start_time, time.time()))\n self._http_log_resp(resp)\n\n if resp.status_code >= 400:\n LOG.debug(\n \"Request returned failure status: %s\",\n resp.status_code)\n raise exceptions.from_response(resp, method, url)\n\n return resp", "def 
make(self, method, extras=None):\n query = self.url_for_request(method, extras)\n logging.info(query)\n\n req = urllib2.Request(query)\n if self.shouldGzip:\n req.add_header('Accept-encoding', 'gzip')\n req.add_header('User-agent', 'Last.fm Explorer')\n\n result = { 'success' : False }\n\n max_retries = 2 \n attempt = 0\n\n while not result['success'] and attempt < max_retries:\n attempt += 1\n try:\n r = urllib2.urlopen(req, timeout=60).read()\n result['data'] = self.__unzip(r) if self.shouldGzip else r\n result['success'] = True\n if self.saveResponses:\n self.__save_response(method, extras, result['data'])\n\n except urllib2.HTTPError, e:\n logging.error(\"Requestor errored accessing \" + query + \" - \" + str(e.code))\n result['error'] = { 'code' : e.code, 'message' : e.msg }\n\n except urllib2.URLError, e:\n logging.error(\"Requestor failed to fetch \" + query + ' - URLError.')\n result['error'] = { 'message' : e.reason }\n\n except BadStatusLine:\n logging.error(\"Requestor caught BadStatusLine, attempt %d\" % (attempt,))\n result['error'] = { 'message' : \"Request gave BadStatusLine\" }\n\n except IOError, e:\n logging.error(\"Requestor caught IOError, attempt %d\" % (attempt,))\n result['error'] = { 'message' : \"Request gave IOError: \" + str(e) }\n\n except Exception as instance:\n logging.error(\"Requestor caught unknown exception for request \" + query + \" - \" + str(type(instance)))\n logging.error(traceback.format_exc())\n result['error'] = { 'messasge' : \"Unknown problem\" }\n\n return result", "def make_request(method, url, headers=None, data=None, retries=3):\n no_retry_status = [404, 401, 403]\n may_retry_status = [408, 500, 502, 503]\n\n if not retries:\n return requests.request(method=method,\n url=url,\n headers=headers,\n data=data)\n\n while retries:\n r = requests.request(method=method,\n url=url,\n headers=headers,\n data=data)\n if r.status_code in no_retry_status:\n return r\n\n elif r.status_code in may_retry_status:\n retries -= 1\n time.sleep(1)\n\n if retries == 0:\n return r\n continue\n\n else:\n return r", "def _issue_http_request(method, path, headers):\n http_client = _obtain_http_client()\n try:\n http_client.request(method, path, headers=headers)\n return http_client.getresponse()\n except (socket.error, http.client.HTTPException):\n LOGGER.exception('Error occurred while issuing http request.')\n raise errors.MetadataServerHttpError", "def _request(self, method, uri, **kwargs):\n url = self.url + uri\n self.logger.debug(\"Requesting {} on {}\".format(method, url))\n response = requests.request(method, url, verify=self.verify, **kwargs)\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n self.logger.error(\"Bad http code {} requesting Clair\".format(response.status_code))\n if response.reason == \"Not Found\":\n raise ResourceNotFoundException(\"Resource not found\")\n raise ClairConnectionError(response)\n return response", "def _request(self, method, url, retries=None, **kwargs):\n if retries is None:\n retries = self.retries\n\n try:\n LOG.debug(\"Attempting: %s %s\", method, kwargs)\n if 'SSL_CA' in os.environ:\n return method(url, verify=os.environ['SSL_CA'], **kwargs)\n else:\n return method(url, **kwargs)\n except (requests.exceptions.SSLError, OpenSSL.SSL.Error):\n if 'SSL_CA' in os.environ:\n LOG.info(\"SSL verification failed, trying default certs.\")\n return method(url, **kwargs)\n else:\n LOG.error(\"SSL verification failed.\")\n raise\n except Exception:\n if retries > 0:\n self._request(method, url, 
retries=retries-1, **kwargs)\n else:\n raise", "def _make_request(self, url: str, parameters: dict = None,\n method: str = 'GET', *args, **kwargs):\n response = requests.request(\n method=method,\n url=build_url(\n self.BASE_API_URL, url, parameters\n ),\n headers={\n 'Authorization': 'Bearer {}'.format(self._access_token)\n }, **kwargs\n )\n if response.ok:\n return response.json()\n raise MondoApiException(response.json()['message'])", "def request(self, *args, **kwargs):\n try:\n return self._http.request(*args, timeout=TIMEOUT, **kwargs)\n except Exception as exc:\n raise RequestException(exc, args, kwargs)", "def _make_request(self, method: str, params: Dict) -> Dict:\n\n # Define a new session.\n request_session = requests.Session()\n request_session.verify = True\n\n # Define a new request.\n request_request = requests.Request(\n method=method.upper(),\n url=self.bea_url,\n params=params\n ).prepare()\n\n # Send the request.\n response: requests.Response = request_session.send(\n request=request_request\n )\n\n # Close the Session\n request_session.close()\n\n print(response.url)\n\n # If the response is OK then return it.\n if response.ok and self._format == 'JSON':\n return response.json()\n elif response.ok and self._format == 'XML':\n return response.text\n else:\n raise requests.ConnectionError()", "def http_request(method, url, params=None):\n if method.lower() not in _request_methods:\n raise NotImplementedError(\"HTTP request method not implemented\")\n\n\n return _request_methods[method.lower()](url, params)", "def _request(self, method, url, params=None, data=None, request_type=PRIVATE, headers={}):\n self._is_valid_request_option(request_type=request_type)\n\n request_headers = copy.deepcopy(self.BASE_HEADERS)\n request_headers.update(headers)\n\n response = getattr(requests, method.lower())(\n url,\n headers=request_headers,\n params=params,\n data=data\n )\n\n return self._handle_response(response)", "def request(self, *args, **kwargs):\n\n ratelimit_retries, temporary_error_retries, ident_retries = 0, 0, {}\n\n while True:\n try:\n try:\n return self._request(*args, **kwargs)\n except Exception as exc:\n self.error_processor(exc)\n raise\n\n except Retry as exc:\n ident_retries.setdefault(exc.retry_ident, 0)\n ident_retries[exc.retry_ident] += 1\n if ident_retries[exc.retry_ident] <= exc.retry_count:\n self.logger.warning('Retry(%s) after calls(%s/%s) since(%s) on: %s',\n ident_retries[exc.retry_ident], self.calls_count,\n self.calls_elapsed_seconds, self.first_call_time,\n exc.retry_ident)\n if exc.wait_seconds:\n self.sleep(exc.wait_seconds,\n log_reason='retry request: {}'.format(exc.retry_ident))\n else:\n raise self.RetryExceeded(\n exc.result, retry_ident=exc.retry_ident, retry_count=exc.retry_count)\n\n except RatelimitError as exc:\n ratelimit_retries += 1\n if ratelimit_retries <= self.ratelimit_retries:\n self.logger.warning('Retry(%s) after calls(%s/%s) since(%s) on error: %r',\n ratelimit_retries, self.calls_count,\n self.calls_elapsed_seconds, self.first_call_time, exc)\n self.sleep(exc.wait_seconds is not None and exc.wait_seconds\n or self.ratelimit_wait_seconds,\n log_reason='ratelimit wait')\n else:\n if ratelimit_retries - 1:\n raise self.RetryExceeded(exc, retry_count=ratelimit_retries - 1)\n raise\n\n except TemporaryError as exc:\n temporary_error_retries += 1\n if temporary_error_retries <= self.temporary_error_retries:\n self.logger.debug('Retry(%s) after calls(%s/%s) since(%s) on error: %r',\n temporary_error_retries, self.calls_count,\n 
self.calls_elapsed_seconds, self.first_call_time, exc)\n self.sleep(exc.wait_seconds is not None and exc.wait_seconds\n or self.temporary_error_wait_seconds,\n log_reason='temporary error wait')\n else:\n if temporary_error_retries - 1:\n raise self.RetryExceeded(exc, retry_count=temporary_error_retries - 1)\n raise", "def get_request(self):\n url = self.get_url()\n r = requests.get(url)\n r.raise_for_status()\n return r", "def _request(self, url, params, base_url=None, first_request_time=None, verbose=False, requests_kwargs=None):\n\n if not first_request_time:\n first_request_time = datetime.now()\n\n if base_url is None:\n base_url = self.base_url\n\n elapsed = datetime.now() - first_request_time\n # TODO: to catch timeouts\n # if elapsed > self.retry_timeout:\n # raise TimeOutException()\n\n # create url :: self._generate_query_url(url, params)\n query_url = url\n\n # url encoding of params\n # TODO: use urlencoding here on params\n\n requests_kwargs = requests_kwargs or {}\n final_requests_kwargs = dict(self.requests_kwargs, **requests_kwargs)\n\n # method\n requests_method = self.session.get\n\n try:\n response = requests_method(\n base_url + query_url,\n params=params,\n **final_requests_kwargs)\n\n # temporary, for logging\n if verbose:\n pretty_print_POST(response.request)\n\n except requests.exceptions.Timeout:\n raise TimeOutException()\n except Exception as e:\n raise TransportError(e)\n\n result = self._get_body(response)\n\n return result", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDuration': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 资源定位信息\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 
'sdk_version': '1.8.95',\n '_prod_code': 'BOT',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "def requests_get(*args, **kwargs):\n\n logger = kwargs.pop('logger', None)\n s = requests.Session()\n s.headers[\n 'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'\n\n try:\n return s.get(*args, **kwargs)\n except RequestException as exc:\n if logger:\n logger.warning('Request failed (%s). Retrying ...', exc)\n return s.get(*args, **kwargs)", "def http_request(self, method: str, url_suffix: str, params: dict = None, json_data: dict = None, **kwargs):\n response = self._http_request(method, url_suffix, params=params, json_data=json_data, resp_type=\"response\",\n ok_codes=[200, *list(HTTP_ERRORS.keys())], raise_on_status=False, **kwargs)\n if response.status_code == 400 and response.json() and response.json().get('Message'):\n raise DemistoException(\n HTTP_ERRORS[response.status_code].format(\"Message:\" + response.json().get(\"Message\")))\n elif response.status_code in list(HTTP_ERRORS.keys()):\n raise DemistoException(HTTP_ERRORS[response.status_code])\n return response.json()", "def make_request(self,\n method, # type: str\n url, # type: str\n retry=False, # type: Union[bool, int]\n cookies=None, # type: Optional[AnyCookiesContainer]\n headers=None, # type: Optional[AnyHeadersContainer]\n **kwargs, # type: Any\n ): # type: (...) 
-> AnyResponseType\n retries = int(retry) if retry is not None else 0\n cookies = CaseInsensitiveDict(cookies or {})\n headers = CaseInsensitiveDict(headers or {})\n cookies.update(self.get_auth_cookies())\n headers.update(self.headers.copy())\n headers.update(self.get_auth_headers())\n response = request_extra(method, url=url, settings=self.settings, retries=retries,\n headers=headers, cookies=cookies, **kwargs)\n return response", "def request(self, uri, method=\"GET\", body=None, headers=None,\n max_redirects=None, connection_type=None):\n if max_redirects is None:\n max_redirects = self.max_redirects\n if headers is None:\n headers = {}\n # Prepare headers\n headers.pop('cookie', None)\n req = DummyRequest(uri, headers)\n self.cookiejar.lock.acquire()\n try:\n self.cookiejar.add_cookie_header(req)\n finally:\n self.cookiejar.lock.release()\n headers = req.headers\n\n # Wikimedia squids: add connection: keep-alive to request headers\n # unless overridden\n headers['connection'] = headers.pop('connection', 'keep-alive')\n\n # determine connection pool key and fetch connection\n (scheme, authority, request_uri,\n defrag_uri) = httplib2.urlnorm(httplib2.iri2uri(uri))\n conn_key = scheme + \":\" + authority\n\n connection = self.connection_pool.pop_connection(conn_key)\n if connection is not None:\n self.connections[conn_key] = connection\n\n # Redirect hack: we want to regulate redirects\n follow_redirects = self.follow_redirects\n self.follow_redirects = False\n pywikibot.debug(u\"%r\" % (\n (uri.replace(\"%7C\", \"|\"), method, body,\n headers, max_redirects,\n connection_type),\n ), _logger)\n try:\n (response, content) = httplib2.Http.request(\n self, uri, method, body, headers,\n max_redirects, connection_type\n )\n except Exception as e: # what types?\n # return exception instance to be retrieved by the calling thread\n return e\n self.follow_redirects = follow_redirects\n\n # return connection to pool\n self.connection_pool.push_connection(conn_key,\n self.connections[conn_key])\n del self.connections[conn_key]\n\n # First write cookies\n self.cookiejar.lock.acquire()\n try:\n self.cookiejar.extract_cookies(DummyResponse(response), req)\n finally:\n self.cookiejar.lock.release()\n\n # Check for possible redirects\n redirectable_response = ((response.status == 303) or\n (response.status in [300, 301, 302, 307] and\n method in [\"GET\", \"HEAD\"]))\n if (self.follow_redirects and (max_redirects > 0) and\n redirectable_response):\n (response, content) = self._follow_redirect(\n uri, method, body, headers, response, content, max_redirects)\n\n return response, content", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDuration': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 
'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 创建凭证One\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.6.10',\n '_prod_code': 'SHUZIWULIU',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDurationMillis': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': 
UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 无分组设备\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.0.45'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "def _api_request(self, path, method, data=None, query=None):\n\n url = request_url(\n self.config['secure'],\n self.config['hostname'],\n self.config['port'],\n path,\n query,\n )\n\n try:\n resp = request(\n url,\n method,\n self._headers(),\n data,\n self.config['timeout'],\n )\n\n return Response(\n resp.get('meta', {}),\n # Response info may have 'object' or 'objects' key, depending\n # on whether there are 1 or multiple results.\n resp.get('object', resp.get('objects'))\n )\n except HTTPError as e:\n response = e.read()\n fallback = '{0} {1}'.format(e.code, e.msg)\n\n if isinstance(response, bytes):\n data = response.decode('utf8')\n else:\n data = response\n\n error = json.loads(data).get('error', {})\n message = error.get('message', fallback)\n raise HTTPResponseError(message, status_code=e.code, cause=e)", "def _doRequest(self, httpClientMethod, *args):\n try:\n resp = httpClientMethod(*args)\n return resp.json()\n except RequestException as e:\n raise checkedError(e)", "def _do_request(self, url: str):\n\n self.debug.ok('method', self.method)\n\n if self.client.fake_response_path:\n with open(self.client.fake_response_path, 'r') as f:\n return constants.ResponseCode.OK, f.read()\n\n elif 
self.method == constants.RequestConst.GET:\n response = requests.get(\n url, headers=self._headers(), timeout=self._timeout\n )\n\n self.debug.ok(\n constants.RequestConst.QUERY_PARAMETERS,\n self.parameters[constants.RequestConst.QUERY]\n )\n self.debug.ok(constants.ResponseConst.RESPONSE_OBJECT, response)\n\n return response.status_code, response.text\n\n elif self.method in [\n constants.RequestConst.POST,\n constants.RequestConst.PUT,\n constants.RequestConst.DELETE\n ]:\n if self.method == constants.RequestConst.POST:\n send_request = requests.post\n elif self.method == constants.RequestConst.PUT:\n send_request = requests.put\n elif self.method == constants.RequestConst.DELETE:\n send_request = requests.delete\n\n response = send_request(\n url, json=self.parameters[constants.RequestConst.QUERY],\n headers=self._headers(), timeout=self._timeout\n )\n\n self.debug.ok('payload', self.parameters[\n constants.RequestConst.QUERY\n ])\n self.debug.ok(constants.ResponseConst.RESPONSE_OBJECT, response)\n\n return response.status_code, response.text\n\n else:\n return constants.ResponseCode.NOT_FOUND, {}", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDuration': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 金额\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.1.2',\n '_prod_code': 'DEFINCASHIER',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': 
UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDuration': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 身份\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.0.212',\n '_prod_code': 'DEMO',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'centre-openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n 
_request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)" ]
[ "0.62948996", "0.62826276", "0.62717605", "0.6218818", "0.61214685", "0.6097778", "0.6028783", "0.59454954", "0.5928931", "0.5927472", "0.5919466", "0.5915387", "0.584488", "0.58434975", "0.58159757", "0.5748989", "0.57105803", "0.57093656", "0.5705746", "0.5681902", "0.5676401", "0.56446207", "0.5643871", "0.5628112", "0.56215066", "0.5618409", "0.56130373", "0.5610854", "0.56055844", "0.5594453" ]
0.7337592
0
Wrapper for requests GET method
def _get(self, *args, **kwargs): return self._request('get', *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, *args, **kwargs):\n self.request(\"get\", *args, **kwargs)", "def http_method_get():\n return 'GET'", "def get(self, *path, **data):\n\t\treturn self.request('GET', *path, **data)", "def _get(self, url):\n return self._request(url)", "def get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "def get(self, *args, **kw):\n kw['method'] = 'GET'\n return self.open(*args, **kw)", "def get(self, *args, **kwargs):\n return self._hit(\"GET\", *args, **kwargs)", "def do_GET(self):\n self.http_method = 'GET'\n self.response()", "def get(self, *args, **kwargs):", "def _get(self, request_obj):\n return self._execute_action(request_obj, 'GET')", "def get(self, *args, **kwargs):\n url = urljoin(self.instance(), args[0])\n return self._requests_call(util.requests_get, url, *args[1:], **kwargs)", "def get(self, request):\n pass", "def get(self, method, uri, query_param, request_param, headers, **kwargs):\n raise NotImplementedError", "def do_GET(self):\n self.log.debug('do_GET called')\n self.HeadGet('GET')", "def __get(self, url, headers=None):\n return self.__req(url, \"GET\", headers=headers)", "def aget(url, **kwargs):\n return requests.get(url, **kwargs)", "def _get(self, path=\"\", query={}, **kwargs):\n qs = urllib.urlencode(query)\n uri = force_json(self.uri + path) + \"?\" + qs\n return self.client.request(uri, method=\"GET\", **kwargs)", "def get(self):\n self.get_or_post(method='GET')", "def test_get(self):\n return self.doRequest(self.url, method=\"GET\", body=self.input)", "def do_GET(self):\r\n self._send_handler_response('GET')", "def get(self, *args, **kwargs):\n return self.handle_get_request()", "def get(self, path, req = None, **kwargs):\n req = req or []\n return self.route(path, req=req+[filter_method(['GET'])], **kwargs)", "def get(self, *args, **kwargs):\n return Response({'foo': 'bar'})", "def get(self, url):\n return self._request('GET', url)", "def get(self, *args):", "def get(self, *args, **kwargs):\n if len(args) != 1:\n raise TypeError('wrong number of arguments')\n return self._geturl.get(*args, **kwargs)", "def get(url, to_error=_default_to_error, **kwargs):\n\n return request('get', url, to_error=to_error, **kwargs)", "def get(self, path):\n return self.request(path, method='GET')", "def get(self, *args, **kwargs):\n pass", "def get(self, *args, **kwargs):\n pass" ]
[ "0.7811461", "0.7689053", "0.7667969", "0.75441927", "0.753764", "0.75005716", "0.7481838", "0.746681", "0.7408591", "0.7339474", "0.7280342", "0.7256964", "0.7247393", "0.72263336", "0.7189114", "0.718059", "0.7164386", "0.7148469", "0.71383345", "0.71329045", "0.7128152", "0.7112754", "0.7109763", "0.7102752", "0.70882744", "0.70575804", "0.70422906", "0.703662", "0.70077103", "0.70077103" ]
0.79950804
0
Wrapper for requests POST method
def _post(self, *args, **kwargs): return self._request('post', *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self, *args, **kwargs):\n return self._requests_call(util.requests_post, *args, **kwargs)", "def http_method_post():\n return 'POST'", "def http_post(self, **kwargs):\n return self.rabjcallable.post(**kwargs)", "def post(self, *args, **kwargs):\n self.request(\"post\", *args, **kwargs)", "def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)", "def post(self, *path, **data):\n\t\treturn self.request('POST', *path, **data)", "def _post(self, request_obj):\n return self._execute_action(request_obj, 'POST')", "def post(self):", "def post(self, *args, **kwargs):\n return self._hit(\"POST\", *args, **kwargs)", "def _post(self, url, **kwargs):\n return self._call('POST', url, kwargs)", "def _post_request(url, params):\n data = dumps(params).encode(\"utf-8\")\n request = requests.post(url, data=data)\n return request", "def do_POST(self,):\n self.http_method = 'POST'\n self.response()", "def json_post(method):\n def wrap(*args, **kwargs):\n # idx is the position of the data\n idx = 0\n if not isinstance(args[0], webob.Request):\n idx = 1\n\n json_data = json.loads(args[idx].body)\n kwargs['post_data'] = json_data\n\n #print \"JP:\", repr(args), repr(kwargs)\n\n return method(*args, **kwargs)\n \n return json_return(wrap)", "def _post(self, url, data=None):\n if data is not None:\n data = urllib.urlencode(data)\n return self._request(url, method='POST', payload=data)", "def post(self, *args, **kw):\n kw['method'] = 'POST'\n return self.open(*args, **kw)", "def post(self, request):\n pass", "def simulate_post(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'POST', path, **kwargs)", "def post(self, *args, **kwargs):\n return self.handle_post_request()", "def post(self, path, **post_args):\n return self.request(path, data=post_args, method='POST')", "def apost(url, **kwargs):\n return requests.post(url, **kwargs)", "def _createPostRequest(self, postBody: dict) -> object:\n request = HttpRequest()\n request.method = \"POST\"\n for name,value in postBody.items():\n request.POST[name]= value\n return request", "def make_post_request(client, endpoint, data):\n return client.post(endpoint, data=data)", "def post_algorithm():\n try:\n request_json = request.get_json()\n result = json.dumps([])\n response = app.response_class(\n response=result,\n status=200,\n mimetype='application/json')\n except ValueError as e:\n response = app.response_class(\n status=400,\n response=str(e)\n )\n return response", "def post_required(func):\n def post_wrapper(request,*args,**kwds):\n res = http.ResponseBuilder()\n if request.method != 'POST':\n return res.error(\"post is required\").build_json()\n return func(request,*args,**kwds)\n return post_wrapper", "def post(self, *args, **kwargs):\n headers = self.post_headers\n headers.update(kwargs.get('headers', {}))\n kwargs['headers'] = headers\n return self._request('post', *args, **kwargs)", "def post():\n pass", "def make_post_request(url:str, post_params:dict, **kwargs):\n\n print(\"Making call to '{}'...\".format(url))\n resp = requests.post(url, data=post_params, **kwargs)\n print(\"Received response.\")\n\n if not resp.ok:\n return False, resp.status_code, json.loads(resp.content)\n\n return True, resp.status_code, json.loads(resp.content)", "def post(self):\n self.get_or_post(method='POST')", "def post(self, path, req = None, **kwargs):\n req = req or []\n return self.route(path, req=req+[filter_method(['POST'])], **kwargs)", "def post(self, request, *args, **kwargs):\n return self.get(request, *args, 
**kwargs)" ]
[ "0.7969932", "0.746994", "0.73935425", "0.73165405", "0.72691715", "0.7221908", "0.7146746", "0.713131", "0.71067417", "0.707506", "0.7030268", "0.7027897", "0.7024082", "0.7005868", "0.69719", "0.6945976", "0.69432557", "0.6938295", "0.6930771", "0.68950063", "0.6866864", "0.6850542", "0.68133587", "0.6799788", "0.6766979", "0.67667234", "0.6736962", "0.6731275", "0.6716904", "0.67100143" ]
0.7983252
0
Wrapper for requests PUT method
def _put(self, *args, **kwargs): return self._request('put', *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def http_method_put():\n return 'PUT'", "def put(self, *args, **kwargs):\n self.request(\"put\", *args, **kwargs)", "def put(self, request, pk=None):\n\n return Response({'method': 'put'})", "def do_PUT(self,):\n self.http_method = 'PUT'\n # Nothing to do for now.\n pass", "def put(self, *args, **kw):\n kw['method'] = 'PUT'\n return self.open(*args, **kw)", "def http_put(self, **kwargs):\n return self.rabjcallable.put(**kwargs)", "def put(self, request, pk=None):\n return Response({'method': 'PUT'})", "def put(self, request, pk=None):\n return Response({'method': 'PUT'})", "def put(self, request, pk=None):\n return Response({'method': 'PUT'})", "def put(self, *args, **kwargs):\n return self.handle_put_request()", "def _put(self, path=\"\", **kwargs):\n uri = force_json(self.uri + path)\n return self.client.request(uri, method=\"PUT\", **kwargs)", "def put(self,request, pk =None):\n return Response({'method': 'PUT'})", "def put(self ,request, pk = None):\r\n\r\n return Response({'method ': 'put'})", "def api_put(self, *args, **kwargs):\n return self.api_put_with_response(*args, **kwargs)[0]", "def simulate_put(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'PUT', path, **kwargs)", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def _put(self, url, **kwargs):\n return self._call('PUT', url, kwargs)", "def put(self, request, pk=None):\n return Response({'method': 'patch'})", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def aput(url, **kwargs):\n return requests.put(url, **kwargs)", "def put(self, url, body, headers={}):\n return self.request(url, \"PUT\", body, headers)", "def put(self, request, *args, **kwargs):\n verify_secure(request)\n return super().put(request, args, kwargs)", "def put(self, request, *args, **kwargs):\n verify_secure(request)\n return super().put(request, args, kwargs)", "def _put(self, path, data=None):\n headers = {'content-type': 'application/json'}\n if data:\n data = json.dumps(data)\n r = requests.put(self._url(path), data=data, headers=headers)\n assert r.status_code == 200\n return r", "def simulate_put(self, path='/', **kwargs) -> _ResultBase:\n return self.simulate_request('PUT', path, **kwargs)", "def test_put_method(self):\n self.getPage('/blah', method='PUT')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')\n self.assertBody('{\"mystring\": \"blah\"}')", "def put(self, path, request):\n\n try:\n data = json_decode(request.body)\n self.interface_data.set(path, data)\n response = self.interface_data.get(path, False)\n status_code = 200\n except MetadataParameterError as e:\n response = {'error': str(e)}\n status_code = 400\n except (TypeError, ValueError) as e:\n response = {'error': 'Failed to decode PUT request body: {}'.format(str(e))}\n status_code = 400\n return ApiAdapterResponse(response, status_code=status_code)", "def simulate_put(self, path='/', **kwargs):\n return self.simulate_request('PUT', path, **kwargs)", "async def simulate_put(self, path='/', **kwargs) -> _ResultBase:\n return await self.simulate_request('PUT', path, **kwargs)", "def put(self,request,pk=None):\n return Response({'method':'Put'})" ]
[ "0.7939011", "0.792131", "0.78856367", "0.7825104", "0.7805697", "0.77722734", "0.76859236", "0.76859236", "0.76859236", "0.7676738", "0.7652082", "0.7603654", "0.7537342", "0.7523113", "0.74227804", "0.7417111", "0.74126923", "0.7345135", "0.7330537", "0.73288274", "0.72923994", "0.7239812", "0.7239812", "0.72376925", "0.7212337", "0.71974623", "0.7172811", "0.7167863", "0.7164798", "0.7163827" ]
0.82278174
0
Wrapper for requests DELETE method
def _delete(self, *args, **kwargs): return self._request('delete', *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def http_delete(self, **kwargs):\n return self.rabjcallable.delete(**kwargs)", "def _delete(self, url):\n return self._request(url, method=\"DELETE\")", "def _delete(self, url, **kwargs):\n return self._call('DELETE', url, kwargs)", "def delete(self, method, uri, query_param, request_param, headers, **kwargs):\n raise NotImplementedError", "def delete(url, **kwargs):\n\n return request('delete', url, **kwargs)", "def httpDelete(self, url='', data='', params={}, headers={}):\n\n return self.httpRequest('DELETE', url, data, params, headers)", "def _delete(self, path=\"\", **kwargs):\n uri = force_json(self.uri + path)\n return self.client.request(uri, method=\"DELETE\", **kwargs)", "def delete(self):\r\n request = http.Request('DELETE', self.get_url())\r\n\r\n return request, parsers.parse_empty", "def delete(self):\r\n return http.Request('DELETE', '{0}'.format(\r\n self.get_url())), parsers.parse_json", "def do_DELETE(self,):\n self.http_method = 'DELETE'\n self.response()", "def delete(self, url):\n return self.request(url, \"DELETE\")", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def __delete(self, url, headers=None):\n return self.__req(url, \"DELETE\", headers=headers)", "def delete(url, to_error=_default_to_error, **kwargs):\n\n return request('delete', url, to_error=to_error, **kwargs)", "def api_delete(self, *args, **kwargs):\n return self.api_delete_with_response(*args, **kwargs)[0]", "def adel(url, **kwargs):\n return requests.delete(url, **kwargs)", "def delete(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"delete\"), kwargs)", "def delete(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"delete\"), kwargs)", "def delete(self, url):\n return self._request('DELETE', url)", "def base_delete(url_path):\n response = requests.delete(url=settings.URL_API + url_path)\n return response", "def delete(self):\r\n self.require_collection()\r\n request = http.Request('DELETE', self.get_url())\r\n\r\n return request, parsers.parse_empty", "def _delete(self, url, **kwargs):\n return self._http.delete(self.cluster + url, timeout=self.timeout, **kwargs)", "def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def test_delete_method(self):\n self.getPage('/blah', method='PUT')\n self.getPage('/', method='DELETE')\n self.assertStatus('204 No Content')\n self.assertHeader('Content-Type', 'application/json')", "def api_delete(self, path):\n return self._api_request(path, 'DELETE')", "def _delete(self, url):\n url = urljoin(self.base_url, url)\n try:\n r = self._make_request(**dict(\n method='DELETE',\n url=url,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n if r.status_code == 204:\n return\n return r.json()", "def destroy(self, request, pk=None):\n\n return Response({'http_method': 'DELETE'})", "def simulate_delete(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'DELETE', path, **kwargs)", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()" ]
[ "0.80676454", "0.805195", "0.7881113", "0.78649473", "0.7842438", "0.78090477", "0.78055274", "0.7791732", "0.77519745", "0.76926935", "0.7613614", "0.7541842", "0.7470509", "0.7459804", "0.74210435", "0.7401509", "0.73857987", "0.73857987", "0.7379348", "0.7372078", "0.7366816", "0.73357296", "0.7335039", "0.73283875", "0.73261374", "0.73103845", "0.72918665", "0.72834533", "0.7273474", "0.7267811" ]
0.8166634
0
Load JSON as a protobuf (pb2) object. Any calls to load protobuf objects from JSON in this repository should be through this function. Returns `None` if the loading failed.
def open_pbobject(path, pb_class):
    assert path.endswith(".json"), 'File extension for {} needs to be json.'.format(path)
    if path.startswith('s3://'):
        return open_remote_pb_object(path, pb_class)
    assert os.path.exists(path), f'Path not found: {path}'
    with open(path, 'r', encoding='UTF-8') as json_file:
        pb_object = Parse(json_file.read(), pb_class())
    return pb_object
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))", "def load_json(json_str):\n return _api_internal._load_json(json_str)", "def _localloadjson(path: str) -> JSONType:\n with open(path, encoding=\"utf-8\") as fh:\n return json.load(fh)", "def load(self):\n data = None\n try:\n with open(self.__filepath, 'r') as file:\n text = file.read()\n data = jsonpickle.decode(text)\n except FileNotFoundError:\n data = None\n except IOError as e:\n print(e)\n return data", "def json_loader(filename):\n\n with open(filename, \"r\", encoding=\"UTF-8\") as source:\n data = json.load(source, object_hook=object_decode)\n return data", "def _remoteloadjson(path: str) -> JSONType:\n return json.loads(request.urlopen(path).read())", "def _load(self, json_str, filepath):\n # pylint: disable=protected-access\n return self.json_o._load(json_str, filepath)", "def load_json(self, json_path=None):\n if json_path is None:\n json_path = self.json_path\n with open(json_path, encoding='utf-8', mode='r') as f:\n data = json.load(f)\n return data", "def load_json(self):\n\n self.load_json_str(self.get_json_str())", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def object_from_protobuf(pb, model_type=None):\n key = None\n if isinstance(pb, entity_pb2.Entity):\n pb = pb._pb\n\n if pb.HasField(\"key\"): # Message field (Key)\n key = CustomIterator.key_from_protobuf(pb.key)\n key._type = SubclassMap.get()[key.kind]\n\n entity_props = {}\n\n for prop_name, value_pb in pb.properties.items():\n value = CustomIterator._get_value_from_value_pb(value_pb)\n entity_props[prop_name] = value\n\n obj = model_type._dotted_dict_to_object(entity_props, key)\n return obj", "def from_json(cls, b):\n return cls.from_dict(json.loads(b))", "def load_json(json_string):\n return json.loads(json_string)", "def load(path: str) -> \"DataDescriptor\":\n\n\t\twith open(path, \"r\") as f:\n\t\t\tinfo_dict = json.load(f)\n\n\t\treturn DataDescriptor(\n\t\t\tn_gram_size=int(info_dict[\"n_gram_size\"]),\n\t\t\tcaseless=bool(info_dict[\"caseless\"]),\n\t\t\tignore_punctuation=bool(info_dict[\"ignore_punctuation\"]),\n\t\t\tadd_pos_tags=bool(info_dict[\"add_pos_tags\"]),\n\t\t\tuses_lemma=bool(info_dict[\"uses_lemma\"]),\n\t\t\tuses_sentences=bool(info_dict[\"uses_sentences\"])\n\t\t)", "def util_load_json(path):\n with io.open(path, mode=\"r\", encoding=\"utf-8\") as f:\n return json.loads(f.read())", "def loadFromFile(self, filename):\n with open(filename, 'r') as file:\n raw_data = file.read()\n # data = json.loads(raw_data, encoding='utf-8') # python 3.9 suppression de encoding\n try:\n data = json.loads(raw_data)\n self.deserialize(data)\n self.has_been_modified = False\n except json.JSONDecodeError:\n raise InvalidFile(f'{os.path.basename(filename)} is not a valid JSON file')\n except Exception as e:\n dumpException(e)", "def load_from_json(path):\n fh = open(path)\n contents = fh.read()\n fh.close()\n\n return json.loads(contents)", "def load_json_obj(path: str) -> RAW_CFG:\n with fsspec.open(path) as json_file:\n return json.load(json_file)", "def load_json(filepath: str):\n if not filepath:\n return None\n\n abs_path = _resolve_relative_path(filepath)\n with open(abs_path) as f:\n raw_json = f.read()\n\n return json.loads(raw_json)", "def load_from_json_file(filename):\n with open(filename, 'r') as f:\n obj = json.loads(f.read())\n return obj", "def from_json(_json):\n if 
isinstance(_json, (str, Path)):\n return from_json_dict(json.loads(_json))\n else: # assume a file-like object\n return from_json_dict(json.load(_json))", "def json_load(fp):\n with _iotools.open_file(fp, \"r\") as f:\n return json.load(f, cls=DataDecoder)", "def load_from_json(filename):\n\n with open(filename, 'r') as file:\n return json.load(file)", "def load(self, loadpath=None):\n\n if loadpath:\n with open(loadpath, mode='r') as f:\n self.update(json.load(f))", "def load_json(value):\n try:\n return json.loads(value)\n except json.JSONDecodeError as e:\n raise JSONDecodeError(e) from e", "def _load_message(self,\n message_pb: descriptor_pb2.DescriptorProto,\n address: metadata.Address,\n path: Tuple[int],\n resources: Mapping[str, wrappers.MessageType],\n ) -> wrappers.MessageType:\n address = address.child(message_pb.name, path)\n\n # Load all nested items.\n #\n # Note: This occurs before piecing together this message's fields\n # because if nested types are present, they are generally the\n # type of one of this message's fields, and they need to be in\n # the registry for the field's message or enum attributes to be\n # set correctly.\n nested_enums = self._load_children(\n message_pb.enum_type,\n address=address,\n loader=self._load_enum,\n path=path + (4,),\n resources=resources,\n )\n nested_messages = self._load_children(\n message_pb.nested_type,\n address=address,\n loader=self._load_message,\n path=path + (3,),\n resources=resources,\n )\n\n oneofs = self._get_oneofs(\n message_pb.oneof_decl,\n address=address,\n path=path + (7,),\n )\n\n # Create a dictionary of all the fields for this message.\n fields = self._get_fields(\n message_pb.field,\n address=address,\n path=path + (2,),\n oneofs=oneofs,\n )\n fields.update(self._get_fields(\n message_pb.extension,\n address=address,\n path=path + (6,),\n oneofs=oneofs,\n ))\n\n # Create a message correspoding to this descriptor.\n self.proto_messages[address.proto] = wrappers.MessageType(\n fields=fields,\n message_pb=message_pb,\n nested_enums=nested_enums,\n nested_messages=nested_messages,\n meta=metadata.Metadata(\n address=address,\n documentation=self.docs.get(path, self.EMPTY),\n ),\n oneofs=oneofs,\n )\n return self.proto_messages[address.proto]", "def testLoadProtojsonWithValidJsonModule(self):\n sys.modules['json'] = ValidJsonModule\n\n # This will cause protojson to reload with the default json module\n # instead of simplejson.\n reload(protojson)\n self.assertEquals('json', protojson.json.name)", "def load_from_json(self, json_fp: str):\n # TODO:\n pass", "def load(filename):\n\n try:\n with open(filename) as data:\n return json.load(data)\n except:\n return None", "def load(cls, path):\n\n with open(path) as f:\n d = json.load(f, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)" ]
[ "0.57719094", "0.57548875", "0.5741168", "0.568899", "0.5675841", "0.55961937", "0.5520938", "0.55098826", "0.55086243", "0.5459598", "0.54020417", "0.5388325", "0.5387577", "0.5386068", "0.5349237", "0.533662", "0.53339887", "0.53334725", "0.5324513", "0.5323394", "0.53115505", "0.5281063", "0.52729297", "0.52476245", "0.5238931", "0.522012", "0.5214781", "0.5202667", "0.5200718", "0.51952773" ]
0.5887734
0
Like open_pbobject but source can be a path or a bytestring
def parse_pbobject(source, pb_class): if isinstance(source, str): return open_pbobject(source, pb_class) elif isinstance(source, bytes): pb_object = pb_class() pb_object.ParseFromString(source) return pb_object else: logging.error(f'cannot parse type {type(source)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def source(self) -> str | Path:\n ...", "def open_pds(source):\n\t# if isinstance(source, file):\n\t# \treturn source\n\tif hasattr(source, \"read\"):\n\t\t# sys.stderr.write(\"Identified a file-like object by read() method existence\\n\")\n\t\treturn source\n\n\ttry:\n\t\t# For universal newlines -- i.e. newlines are automatically converted to \"\\n\", use mode \"U\".\n\t\t# For preserved newlines -- e.g. \"\\r\", \"\\r\\n\", \"\\n\", use mode \"rb\".\n\t\t# PDS style newlines are \"\\r\\n\", however, http://pds.jpl.nasa.gov/documents/qs/sample_image.lbl uses \"\\n\".\n\t\t# Check if hasattr(open, 'newlines') to verify that universal newline support is enabeled.\n\t\tf = open(source, \"rb\")\n\t\treturn f\n\texcept (IOError, OSError):\n\t\t# sys.stderr.write(\"Could not open source\\n\")\n\t\traise\n\telse:\n\t\t# sys.stderr.write(\"Opened source\\n\")\n\t\t# Re-raise to catch something hairy.\n\t\traise\n\tfinally:\n\t\tpass\n\t\t# sys.stderr.write(\"Closing previously opened file\\n\")\n\t\t# f.close()\n\t\t\n\tif isinstance(source, str):\n\t\ttry:\n\t\t\timport cStringIO as StringIO\n\t\texcept ImportError:\n\t\t\timport StringIO\n\t\telse:\n\t\t\t# sys.stderr.write(\"Making a file-like object from string source\\n\")\n\t\t\treturn StringIO.StringIO(str(source))\n\t\t\t\n\t# try:\n\t# \timport urllib\n\t# \tf = urllib.urlopen(source)\n\t# \treturn f\n\t# except (IOError, OSError):\n\t# \tpass\n\t# else:\n\t# \t# Re-raise to catch something hairy.\n\t# \traise\n\t# finally:\n\t# \tpass", "def load_stream(source):\n raise NotImplementedError(\"not implemented yet\")", "def test_prepare_source(source):\n assert isinstance(PseudoPotentialData.prepare_source(source), io.BytesIO)\n\n if isinstance(source, io.BytesIO):\n # If we pass a bytestream, we should get the exact same back\n assert PseudoPotentialData.prepare_source(source) is source", "def make(self, source):\n if isinstance(source, str):\n return copy(self.get(source))\n elif self.PB_CLASS and isinstance(source, self.PB_CLASS):\n item = copy(self.get(source.name))\n item._pb = source\n return item\n else:\n return copy(source)", "def _source_path_reader(self, src, encoding=\"utf-8\"):\n if src is None:\n return src\n if isinstance(src, dict) and \"content\" in src:\n with tempfile.NamedTemporaryFile(mode=\"w\", encoding=encoding, delete=False) as fp:\n fp.write(src[\"content\"])\n return fp.name\n elif isinstance(src, dict) and \"file\" in src:\n if os.path.exists(src[\"file\"]) is False:\n raise FileNotFound(src)\n return src[\"file\"]\n else:\n raise InvalidParameter(\"The parameter is invalid.\")", "def Sourceify(path):\n return path", "def test_simple_source_constructor():\n TESTPATH = \"/usr/local/share/testfile.mp3\"\n test01 = Source(path=TESTPATH)\n debug(test01)\n assert(test01.path == TESTPATH)\n assert(test01.fname == \"testfile.mp3\")\n assert(test01.root == \"testfile\")\n assert(test01.ext == \".mp3\")\n assert(test01.isValidExtension(\".mp3\") is True)", "def __init__(self, source):\n self._source = source", "def __init__(self, source):\n self.source = source", "def __init__(self, source):\n self.source = source", "def __init__(self, source):\r\n self.source = source", "def load(source_file):\n return loads(source_file.read())", "def source(request, filepath_pseudos):\n filepath_pseudo = pathlib.Path(filepath_pseudos()) / 'Ar.upf'\n\n if request.param is str:\n return str(filepath_pseudo)\n\n if request.param is pathlib.Path:\n return filepath_pseudo\n\n return io.BytesIO(filepath_pseudo.read_bytes())", "def 
get_source(self):", "def __init__(self, source):\n self.source = source\n try:\n self._stream = open(source, \"r\" + self.mode)\n except TypeError: # not a path, assume we received a stream\n if self.mode == \"t\":\n if source.read(0) != \"\":\n raise StreamModeError(\n f\"{self.fmt} files must be opened in text mode.\"\n ) from None\n elif self.mode == \"b\":\n if source.read(0) != b\"\":\n raise StreamModeError(\n f\"{self.fmt} files must be opened in binary mode.\"\n ) from None\n else:\n raise ValueError(f\"Unknown mode '{self.mode}'\") from None\n self._stream = source\n self._read_header(self._stream)", "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "def getSource():", "def getsource(object):\r\n lines, lnum = getsourcelines(object)\r\n return string.join(lines, '')", "def read_raw(self, name, source, test_data=''):\n self.m.path.assert_absolute(source)\n step_test_data = lambda: self.test_api.read_raw(test_data)\n result = self._run(name, ['copy', source, self.m.raw_io.output()],\n step_test_data=step_test_data)\n return result.raw_io.output", "def load(source, **kwargs):\n with open(source, \"rb\") as f:\n return torch.load(f, **kwargs)", "def build_from_source(obj):\n if (obj.method == 'robot'):\n print(\"TODO: build obo and owl\")\n elif (obj.method == 'jenkins-archive'):\n print(\"TODO: download and unzip\")\n elif (obj.method == 'github-archive'):\n print(\"TODO: download and unzip\")\n elif (obj.method == 'svn-co'):\n print(\"TODO: run svn\")\n else:\n print(\"UNKNOWN METHOD:\"+obj.method)", "def getsource(object):\n lines, lnum = getsourcelines(object)\n return string.join(lines, '')", "def _openSource(self, source, params=None):\n with self._lastOpenSourceLock:\n if (hasattr(self, '_lastOpenSource') and\n self._lastOpenSource['source'] == source and\n self._lastOpenSource['params'] == params):\n return self._lastOpenSource['ts']\n if not len(large_image.tilesource.AvailableTileSources):\n large_image.tilesource.loadTileSources()\n if ('sourceName' not in source or\n source['sourceName'] not in large_image.tilesource.AvailableTileSources):\n openFunc = large_image.open\n else:\n openFunc = large_image.tilesource.AvailableTileSources[source['sourceName']]\n origParams = params\n if params is None:\n params = source.get('params', {})\n ts = openFunc(source['path'], **params)\n with self._lastOpenSourceLock:\n self._lastOpenSource = {\n 'source': source,\n 'params': origParams,\n 'ts': ts,\n }\n return ts", "def get_source(self, source, driver_name=None):\n if not driver_name:\n driver_name = self.driver_name\n driver = ogr.GetDriverByName(driver_name)\n return driver.Open(source, 0)", "def BootstrapSource (name, source, filename):\n source = binascii.b2a_base64 (zlib.compress (source.encode ('utf-8'))).strip ().decode ('utf-8')\n return source_payload.format (name = name, filename = filename, source = '\\\\\\n'.join (textwrap.wrap (source, 78)))", "def open_pbobject(path, pb_class):\n assert path.endswith(\".json\"), 'File extension for {} needs to be json.'.format(path)\n if path.startswith('s3://'):\n return open_remote_pb_object(path, pb_class)\n assert os.path.exists(path), f'Path not found: {path}'\n with open(path, 'r', encoding='UTF-8') as json_file:\n pb_object = Parse(json_file.read(), pb_class())\n return pb_object", "async def source(ctx):\n await ctx.send(\"The source can be found here: \" +\n 
\"https://github.com/FrederikBolding/CryptoBot\")" ]
[ "0.6619778", "0.6433512", "0.62496525", "0.61425763", "0.59727526", "0.58300316", "0.57773453", "0.57492805", "0.5733119", "0.5724798", "0.5724798", "0.5724405", "0.57121646", "0.5667463", "0.5616308", "0.55945593", "0.55895805", "0.55895805", "0.55895805", "0.55640024", "0.5558145", "0.5548597", "0.55305743", "0.5526982", "0.55262035", "0.55226374", "0.5479248", "0.5472544", "0.54600084", "0.5425108" ]
0.6567848
1
Save protobuf (pb2) object to JSON file with our standard indent, key ordering, and other settings. Any calls to save protobuf objects to JSON in this repository should be through this function.
def save_pbobject_as_json(pb_object, save_path): if os.path.isdir(save_path): save_path = os.path.join(save_path, generate_uid_from_pbobject(pb_object) + ".json") assert save_path.endswith(".json"), 'File extension for {} needs to be json.'.format(save_path) with open(save_path, "w", encoding='UTF-8') as _f: json.dump( MessageToDict(pb_object, including_default_value_fields=True, preserving_proto_field_name=True), _f, indent=2, sort_keys=True ) return save_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)", "def save(self):\n with open(self.file_path, 'w', encoding=Config.ENCODING) as file:\n json.dump(self.data, file, indent=2, ensure_ascii=False)", "def save(self):\n d1 = {}\n with open(self.__file_path, mode=\"w\") as f:\n for k, v in self.__objects.items():\n d1[k] = v.to_dict()\n json.dump(d1, f)", "def save_data(file_to_save, object_to_serialize):\r\n with open(file_to_save, \"w\", encoding=\"utf-8\") as f:\r\n f.write(json.dumps(object_to_serialize, indent=2, ensure_ascii=False))", "def save_to_json_file(my_obj, filename):\n with open(filename, \"w\", encoding=\"utf-8\") as opening:\n json.dump(my_obj, opening)", "def save(self):\n\n\t\tdirectory = os.path.dirname(self.path)\n\n\t\tif not os.path.exists(directory):\n\t\t\tos.makedirs(directory)\n\n\t\twith open(self.path, \"w\") as f:\n\t\t\tf.write(\n\t\t\t\tjson.dumps(\n\t\t\t\t\tself.dump(),\n\t\t\t\t\tindent=4,\n\t\t\t\t\tsort_keys=True\n\t\t\t\t)\n\t\t\t)", "def save_to_json_file(my_obj, filename):\n with open(filename, mode=\"w\", encoding=\"utf-8\") as writer:\n json.dump(my_obj, writer)", "def save_to_json_file(my_obj, filename):\n with open(filename, 'w', encoding='utf-8') as file:\n return file.write(json.dumps(my_obj))", "def save(self):\n a_copy = FileStorage.__objects\n obj_dict = {obj: a_copy[obj].to_dict() for obj in a_copy.keys()}\n with open(FileStorage.__file_path, \"w\") as f:\n json.dump(obj_dict, f)", "def save_to_json_file(my_obj, filename):\n import json\n with open(filename, mode='w', encoding='utf-8') as f:\n json.dump(my_obj, f)", "def write(self, _filepath=None):\n _json_txt = json.dumps(self.json_dict, indent=2)\n self._write_json_text(_json_txt, _filepath)", "def save(self, config_file: typing.TextIO):\n json.dump(self.to_dict(), config_file, indent=4)", "def save(self) -> None:\n with open(dict_path, 'w', encoding='utf-8') as dictionary_file:\n json.dump(self.data, dictionary_file, indent=2, separators=(',', ':'), ensure_ascii=False)", "def save(self):\n with open(FileStorage.__file_path, 'w') as saves:\n copy_dict = {key: self.__objects[key].to_dict()\n for key in self.__objects}\n json.dump(copy_dict, saves)", "def save_to_json_file(my_obj, filename):\n with open(filename, 'w') as file:\n json.dump(my_obj, file)", "def save(self):\n\n with open(FileStorage.__file_path, \"w\") as file:\n dictionary = {}\n for a, b in FileStorage.__objects.items():\n dictionary[a] = b.to_dict()\n ink = json.dumps(dictionary)\n file.write(ink)", "def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent = 4)", "def save_to_json(filename, struct):\n with open(filename, 'w') as outfile:\n json.dump(struct, outfile, sort_keys=True, indent=4)", "def save(self, filepath):\n with open(filepath, 'w') as f:\n json.dump(self, f, indent=2)", "def save_to_json_file(my_obj, filename):\n import json\n with open(filename, 'w') as file:\n json.dump(my_obj, file)", "def save_to_json_file(my_obj, filename):\n with open(filename, 'w+') as json_file:\n json.dump(my_obj, json_file)", "def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)", "def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)", "def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)", "def save(self, filename):\n content = self.to_dict()\n with open(filename, 'w') as f:\n 
json.dump(content, f)", "def _write_json(self):\n with open(self._file_path, 'w') as f:\n json.dump(self._content, f, indent=4, separators=None,\n encoding='utf-8', sort_keys=False)", "def save(self, path: str):\n with open(path, 'w', encoding='utf-8') as f:\n f.write(self.to_json())", "def saveToFile(self, filename: str):\n with open(filename, 'w') as file:\n serialized = self.serialize()\n file.write(json.dumps(serialized, indent=4))\n self.print('saving to ', filename, ' was successful')\n\n self.has_been_modified = False", "def save_to_json_file(my_obj, filename):\n with open(filename, \"w\") as myfile:\n return myfile.write(json.dumps(my_obj))", "def save(self, filename):\n with open(filename, \"w\") as f:\n m = {\n \"order\": self.order,\n \"pad\": self.pad,\n \"records\": {str(k): v for k, v in self.records.items()}\n }\n json.dump(m, f)" ]
[ "0.64825857", "0.63305324", "0.62367463", "0.61235774", "0.61045235", "0.6103396", "0.60486585", "0.59887636", "0.5981807", "0.5977813", "0.59710145", "0.5964725", "0.5957643", "0.5951203", "0.5950735", "0.59460485", "0.59422106", "0.5939334", "0.592283", "0.59114516", "0.5911261", "0.59078157", "0.59078157", "0.59078157", "0.589008", "0.5878089", "0.5871514", "0.5866423", "0.5866291", "0.5859703" ]
0.72667795
0
Open ontology objects, first attempt to open V2 before trying V1.
def open_ontology_pbobject(ontology_file): try: ontology = parse_pbobject(ontology_file, OntologyV2Pb2) if ontology is not None: logging.info('Successfully loaded Ontology V2 spec.') return ontology except Exception: logging.error('Failed to load ontology file with V2 spec, trying V1 spec.') try: ontology = parse_pbobject(ontology_file, OntologyV1Pb2) if ontology is not None: logging.info('Successfully loaded Ontology V1 spec.') return ontology except Exception: if isinstance(ontology_file, str): logging.error('Failed to load ontology file' + ontology_file + 'with V1 spec also, returning None.') else: logging.error('Failed to load ontology file with V1 spec also, returning None.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open(self):\n\n self._key_generator = KeyGenerator()\n\n # A map from LOD to LODHistory instance for all LODs that have\n # been referenced so far:\n self._lod_histories = {}\n\n # This corresponds to the 'nodes' table in a Subversion fs. (We\n # don't need a 'representations' or 'strings' table because we\n # only track file existence, not file contents.)\n self._node_db = _NodeDatabase()\n\n # Start at revision 0 without a root node.\n self._youngest = 0", "def open_feature_ontology_pbobject(ontology_file):\n try:\n ontology = open_pbobject(ontology_file, FeatureOntologyPb2)\n if ontology is not None:\n logging.info('Successfully loaded FeatureOntology spec.')\n return ontology\n except Exception:\n logging.error('Failed to load ontology file' + ontology_file + '.')", "def test1_loading(self):\n\t\tprint \"\\nTEST 1: Loading ontologies from %s folder.\\n=================\" % DATA_FOLDER\n\t\t\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... >\", f\t\t\n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\t\n\t\t\t\tself.assertEqual(type(o), ontospy.Ontology)\n\t\t\t\tprint \"Success.\"", "def Open(self, file_object):", "def open(self) -> None:", "def open(self) -> None:", "def open(self) -> None:", "def os_open_graph( self, ):\r\n pass", "def get_ontology(base_iri='emmo-inferred.owl', verbose=False, name=None):\n\n if (not base_iri.endswith('/')) and (not base_iri.endswith('#')):\n base_iri = '%s#'%base_iri\n if base_iri in default_world.ontologies:\n onto = default_world.ontologies[base_iri]\n else:\n onto = MyOntology(default_world, base_iri, name=name)\n onto._verbose = verbose\n return onto", "def open(self, version, force=False, representation=None,\n reference_depth=0, skip_update_check=False):\n version_full_path = os.path.normpath(version.absolute_full_path)\n\n # delete all the comps and open new one\n #comps = self.fusion.GetCompList().values()\n #for comp_ in comps:\n # comp_.Close()\n\n self.fusion.LoadComp(version_full_path.encode())\n\n rfm = RecentFileManager()\n rfm.add(self.name, version.absolute_full_path)\n\n # set the project_directory\n #self.project_directory = os.path.dirname(version.absolute_path)\n\n # TODO: file paths in different OS'es should be replaced with the current one\n # Check if the file paths are starting with a string matching one of\n # the OS'es project_directory path and replace them with a relative one\n # matching the current OS\n\n # replace paths\n #self.replace_external_paths()\n\n # return True to specify everything was ok and an empty list\n # for the versions those needs to be updated\n return empty_reference_resolution()", "def __init__(self, obo_file=OBO_FILE, optional_attrs=None):\n self.optobj = self._init_optional_attrs(optional_attrs) # OboOptionalAttrs or None\n self.format_version = None # e.g., \"1.2\" of \"format-version:\" line\n self.data_version = None # e.g., \"releases/2016-07-07\" from \"data-version:\" line\n self.typedefs = {}\n\n # True if obo file exists or if a link to an obo file exists.\n print(\"obo_file:\")\n print(obo_file)\n if os.path.isfile(obo_file):\n self.obo_file = obo_file\n # GOTerm attributes that are necessary for any operations:\n else:\n raise Exception(\"COULD NOT READ({OBO})\\n\"\n \"download obo file first\\n \"\n \"[http://geneontology.org/ontology/\"\n \"go-basic.obo]\".format(OBO=obo_file))", "def addOntologyToObject(self, obj):\n i = -1\n for item in obj.ontologyItems.items:\n i = i + 1\n ana = vsdModels.ObjectOntology(\n 
type=vsdModels.OntologyItem(**item).type,\n position=i,\n ontologyItem=vsdModels.APIBase(selfUrl=vsdModels.OntologyItem(**item).selfUrl),\n object=vsdModels.APIBase(selfUrl=obj.selfUrl)\n )\n print(ana.to_struct())\n self.postRequest(\n 'object-ontologies/{0}'.format(\n vsdModels.OntologyItem(**item).type\n ),\n data=ana.to_struct())", "def open(self):\n raise NotImplementedError", "def open(self):\n raise NotImplementedError", "def open(self) -> None:\n\n raise NotImplementedError", "def open( self ):\n pass", "def on_ontology_parse(self, ctx):\n return None", "def importAovs(self):\n\t\tLayersInfo = pickle.load( open( self.aovsPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tfor ao in LayersInfo.keys():\n\t\t\taov.create( ao, LayersInfo[ao]['name'], LayersInfo[ao]['type'], LayersInfo[ao]['enabled'] )\n\t\tmc.refresh( su = 0 )", "def load_gene_ontology(self, file_path):\n\t\tpass", "def test_import_wc2(self):\r\n tree = self.wc2_tree\r\n root = tree.getroot()\r\n assert importer.put_objects(root) == True", "def open_input_files(self):\n self.dictionaryFile = open(self.dictionaryFile, 'r', encoding=self.encoding)\n\n if self.annotationFile :\n self.annotationFile = open(self.annotationFile, 'r', encoding=self.encoding)\n elif self.annotationFile is None:\n try:\n self.annotationFile = open(os.path.join(self.dictionaryPath, self.dictionaryName + '.ann'), 'r', encoding=self.encoding)\n except FileNotFoundError:\n if self.verbose >= 2:\n sys.stdout.write (\"Warning: annotation file is not found.\\n\")\n\n if self.abbreviationsFile :\n self.abbreviationsFile = open(self.abbreviationsFile, 'r', encoding=self.encoding)\n elif self.abbreviationsFile is None:\n try:\n self.abbreviationsFile = open(os.path.join(self.dictionaryPath, self.dictionaryName + '_abrv.dsl'), 'r', encoding=self.encoding)\n except FileNotFoundError:\n if self.verbose >= 2:\n sys.stdout.write (\"Warning: abbreviations file is not found.\\n\")", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self) -> None:\n pass", "def open(self):\n self.solenoid.set(self.OPEN)" ]
[ "0.56189865", "0.5365172", "0.52742535", "0.5039581", "0.50218326", "0.50218326", "0.50218326", "0.49638537", "0.48817602", "0.48285356", "0.47886187", "0.47444418", "0.47354096", "0.47354096", "0.47251382", "0.47234756", "0.4720916", "0.4704863", "0.46936986", "0.4692322", "0.46841657", "0.4680763", "0.4680763", "0.4680763", "0.4680763", "0.4680763", "0.4680763", "0.4680763", "0.4676635", "0.46510923" ]
0.6786061
0
Open feature ontology objects.
def open_feature_ontology_pbobject(ontology_file): try: ontology = open_pbobject(ontology_file, FeatureOntologyPb2) if ontology is not None: logging.info('Successfully loaded FeatureOntology spec.') return ontology except Exception: logging.error('Failed to load ontology file' + ontology_file + '.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_ontology_db_xrefs(self, feature):\n ontology = collections.defaultdict(dict) # type: dict\n db_xrefs = []\n # these are keys are formatted strangely and require special parsing\n for key in (\"go_process\", \"go_function\", \"go_component\"):\n ontology_event_index = self._create_ontology_event(\"GO\")\n for term in feature.get(key, []):\n sp = term.split(\" - \")\n ontology['GO'][sp[0]] = [ontology_event_index]\n self.ontologies_present['GO'][sp[0]] = self.ont_mappings['GO'].get(sp[0], '')\n\n # CATH terms are not distinct from EC numbers so myst be found by key\n for term in feature.get('cath_funfam', []) + feature.get('cath', []):\n for ref in term.split(','):\n ontology['CATH'][ref] = [self._create_ontology_event(\"CATH\")]\n self.ontologies_present['CATH'][ref] = self.ont_mappings['CATH'].get(ref, '')\n\n search_keys = ['ontology_term', 'db_xref', 'dbxref', 'product_source', 'tigrfam', 'pfam',\n 'cog', 'go', 'po', 'ko']\n ont_terms = [] # type: list\n # flatten out into list of values\n for key in search_keys:\n if key in feature:\n ont_terms += [x for y in feature[key] for x in y.split(',')]\n\n for ref in ont_terms:\n if ref.startswith('GO:'):\n ontology['GO'][ref] = [self._create_ontology_event(\"GO\")]\n self.ontologies_present['GO'][ref] = self.ont_mappings['GO'].get(ref, '')\n elif ref.startswith('PO:'):\n ontology['PO'][ref] = [self._create_ontology_event(\"PO\")]\n self.ontologies_present['PO'][ref] = self.ont_mappings['PO'].get(ref, '')\n elif ref.startswith('KO:'):\n ontology['KO'][ref] = [self._create_ontology_event(\"KO\")]\n self.ontologies_present['KO'][ref] = self.ont_mappings['KO'].get(ref, '')\n elif ref.startswith('COG'):\n ontology['COG'][ref] = [self._create_ontology_event(\"COG\")]\n self.ontologies_present['COG'][ref] = self.ont_mappings['COG'].get(ref, '')\n elif ref.startswith('PF'):\n ontology['PFAM'][ref] = [self._create_ontology_event(\"PFAM\")]\n self.ontologies_present['PFAM'][ref] = self.ont_mappings['PFAM'].get(ref, '')\n elif ref.startswith('TIGR'):\n ontology['TIGRFAM'][ref] = [self._create_ontology_event(\"TIGRFAM\")]\n self.ontologies_present['TIGRFAM'][ref] = self.ont_mappings['TIGRFAM'].get(ref, '')\n elif \":\" not in ref:\n db_xrefs.append(tuple([\"Unknown_Source\", ref]))\n else:\n db_xrefs.append(tuple(ref.split(\":\", 1)))\n return dict(ontology), db_xrefs", "def add_ontology(metadata):\n metadata = add_surface_ontology(metadata)\n metadata = add_place_ontology(metadata)\n return metadata", "def open_ontology_pbobject(ontology_file):\n try:\n ontology = parse_pbobject(ontology_file, OntologyV2Pb2)\n if ontology is not None:\n logging.info('Successfully loaded Ontology V2 spec.')\n return ontology\n except Exception:\n logging.error('Failed to load ontology file with V2 spec, trying V1 spec.')\n try:\n ontology = parse_pbobject(ontology_file, OntologyV1Pb2)\n if ontology is not None:\n logging.info('Successfully loaded Ontology V1 spec.')\n return ontology\n except Exception:\n if isinstance(ontology_file, str):\n logging.error('Failed to load ontology file' + ontology_file + 'with V1 spec also, returning None.')\n else:\n logging.error('Failed to load ontology file with V1 spec also, returning None.')", "def open_shapefile(file_path):\n datasource = ogr.Open(file_path)\n layer = datasource.GetLayerByIndex(0)\n print(\"Opening {}\".format(file_path))\n print(\"Number of features: {}\".format(layer.GetFeatureCount()))\n return datasource", "def open(self):\n\n self._key_generator = KeyGenerator()\n\n # A map from LOD to LODHistory 
instance for all LODs that have\n # been referenced so far:\n self._lod_histories = {}\n\n # This corresponds to the 'nodes' table in a Subversion fs. (We\n # don't need a 'representations' or 'strings' table because we\n # only track file existence, not file contents.)\n self._node_db = _NodeDatabase()\n\n # Start at revision 0 without a root node.\n self._youngest = 0", "def findFeatures(self):\n\t\tpass", "def addOntologyToObject(self, obj):\n i = -1\n for item in obj.ontologyItems.items:\n i = i + 1\n ana = vsdModels.ObjectOntology(\n type=vsdModels.OntologyItem(**item).type,\n position=i,\n ontologyItem=vsdModels.APIBase(selfUrl=vsdModels.OntologyItem(**item).selfUrl),\n object=vsdModels.APIBase(selfUrl=obj.selfUrl)\n )\n print(ana.to_struct())\n self.postRequest(\n 'object-ontologies/{0}'.format(\n vsdModels.OntologyItem(**item).type\n ),\n data=ana.to_struct())", "def test1_loading(self):\n\t\tprint \"\\nTEST 1: Loading ontologies from %s folder.\\n=================\" % DATA_FOLDER\n\t\t\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... >\", f\t\t\n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\t\n\t\t\t\tself.assertEqual(type(o), ontospy.Ontology)\n\t\t\t\tprint \"Success.\"", "def show(data_objects, **options):\n if not is_loaded():\n return data_objects\n\n # (else)\n if not hasattr(data_objects, '__iter__'):\n data_objects = [data_objects]\n\n # print(data_objects)\n scene = pygeojs.scene(**options)\n scene.createLayer('osm')\n\n if not data_objects:\n print('No data objects')\n return scene\n\n # feature_layer = scene.createLayer('feature')\n feature_layer = None\n\n combined_bounds = None\n # Reverse order so that first item ends on top\n for data_object in reversed(data_objects):\n if data_object._getdatatype() == gaia.types.VECTOR:\n # print('Adding vector object')\n # Special handling for vector datasets:\n # First, make a copy of the geopandas frame\n df = geopandas.GeoDataFrame.copy(data_object.get_data())\n\n # Convert to lon-lat if needed\n epsg = data_object.get_epsg()\n if epsg and str(epsg) != '4326':\n print('Converting crs')\n df[df.geometry.name] = df.geometry.to_crs(epsg='4326')\n\n # Strip any z coordinates (force to z = 1)\n df.geometry = df.geometry.scale(zfact=0.0).translate(zoff=1.0)\n # df.to_file('/home/john/temp/df.pandas')\n # print(df)\n # print(df.geometry)\n\n # Calculate bounds\n geopandas_bounds = df.geometry.total_bounds\n xmin, ymin, xmax, ymax = geopandas_bounds\n meta_bounds = [\n [xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]\n ]\n\n # Add map feature\n if feature_layer is None:\n feature_layer = scene.createLayer('feature')\n\n # Use __geo_interface__ to get the geojson\n feature_layer.readGeoJSON(df.__geo_interface__)\n # print(df.__geo_interface__)\n else:\n # Get bounds, in order to compute overall bounds\n meta = data_object.get_metadata()\n # print('meta: {}'.format(meta))\n # print(meta)\n raster_bounds = meta.get('bounds').get('coordinates')[0]\n # print(meta_bounds)\n assert raster_bounds, 'data_object missing bounds'\n\n # meta bounds inconsistent between sources, so compute brute force\n xvals, yvals = zip(*raster_bounds)\n xmin, xmax = min(xvals), max(xvals)\n ymin, ymax = min(yvals), max(yvals)\n meta_bounds = [\n [xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]\n ]\n\n # Bounds format is [xmin, ymin, xmax, ymax]\n bounds = [\n meta_bounds[0][0], meta_bounds[0][1],\n meta_bounds[2][0], meta_bounds[2][1]\n ]\n\n # print(bounds)\n if combined_bounds is 
None:\n combined_bounds = bounds\n else:\n combined_bounds[0] = min(combined_bounds[0], bounds[0])\n combined_bounds[1] = min(combined_bounds[1], bounds[1])\n combined_bounds[2] = max(combined_bounds[2], bounds[2])\n combined_bounds[3] = max(combined_bounds[3], bounds[3])\n\n # print('options:', options)\n rep = options.get('representation')\n if rep == 'outline':\n # Create polygon object\n rect = [\n [bounds[0], bounds[1]],\n [bounds[2], bounds[1]],\n [bounds[2], bounds[3]],\n [bounds[0], bounds[3]],\n [bounds[0], bounds[1]],\n ]\n geojs_polygon = geojson.Polygon([rect])\n properties = {\n 'fillColor': '#fff',\n 'fillOpacity': 0.1,\n 'stroke': True,\n 'strokeColor': '#333',\n 'strokeWidth': 2\n }\n geojson_feature = geojson.Feature(\n geometry=geojs_polygon, properties=properties)\n geojson_collection = geojson.FeatureCollection([geojson_feature])\n # print(geojson_collection)\n\n if feature_layer is None:\n feature_layer = scene.createLayer('feature')\n\n feature_layer.createFeature(\n 'geojson', geojson_collection, **options)\n\n elif data_object.__class__.__name__ == 'GirderDataObject':\n if data_object._getdatatype() == 'raster':\n # Use large-image display\n # Todo - verify that it is installed\n tiles_url = data_object._get_tiles_url()\n # print('tiles_url', tiles_url)\n opacity = 1.0\n if hasattr(data_object, 'opacity'):\n opacity = data_object.opacity\n scene.createLayer(\n 'osm', url=tiles_url, keepLower=False, opacity=opacity)\n else:\n raise GaiaException(\n 'Cannot display GirderDataObject with data type {}'.format(\n data_object._getdatatype()))\n\n elif data_object._getdatatype() == gaia.types.VECTOR:\n pass # vector objects handled above\n else:\n msg = 'Cannot display dataobject, type {}'.format(\n data_object.__class__.__name__)\n raise GaiaException(msg)\n\n # Send custom message to (javascript) client to set zoom & center\n rpc = {'method': 'set_zoom_and_center', 'params': combined_bounds}\n scene.send(rpc)\n return scene", "def add_features(self, obj, annotation):\n if annotation['problem']:\n obj.add(folia.Feature, subset='problem', cls=annotation['problem'])\n if annotation['pos']:\n obj.add(folia.Feature, subset='pos', cls=annotation['pos'])", "def __init__(self, features=None, **kwargs):\n super(FeatureIO, self).__init__(**kwargs)\n self.features = features", "def __init__(self, feature):\n\n super(OsmpFeature, self).__init__(geometry=feature.geometry,\n attributes=feature.attributes)\n #self.id = db_id\n #self.id_field_name = id_field_name\n\n # Memoize for later use.\n self._latlon_coords = None\n self._name = None", "def export_representations(self):\n\n dbpath, config = self._start()\n self.logger.msg1(\"Loading ontology\")\n obo_path = check_file(config.obo, dbpath, \"obo\")\n self.obo = MinimalObo(obo_path, True)\n self._export_reference_representations()\n self._export_model_representations(config)\n self._end()", "def __init__(self):\r\n\t\tself.label = \"Linked Data Location Linkage Exploration\"\r\n\t\tself.description = \"\"\"This Tool enables the users to explore the linkages between locations in wikidata. \r\n\t\tGiven an input feature class, this tool gets all properties whose objects are also locations. \r\n\t\tThe output is another feature class which contains the locations which are linked to the locations of input feature class.\"\"\"\r\n\t\tself.canRunInBackground = False", "def __init__(self, features=None):\n self.features = features", "def feature_to_open511_element(feature):\n\n # Using a hash of the geometry for an ID. 
For proper production use,\n # there'll probably have to be some code in the importer\n # that compares to existing entries in the DB to determine whether\n # this is new or modified...\n geom_hash = hashlib.md5(feature.geom.wkt).hexdigest()\n id = JURISDICTION + ':' + geom_hash\n while id in ids_seen:\n id += 'x'\n ids_seen.add(id)\n\n elem = E.RoadEvent(id=id)\n\n def set_val(tag, val):\n if val not in (None, ''):\n e = etree.Element(tag)\n e.text = unicode(val)\n elem.append(e)\n\n set_val('Title', feature.get('Name').decode('utf8'))\n\n blob = lxml.html.fragment_fromstring(feature.get('Description').decode('utf8'),\n create_parent='content')\n\n description_label = blob.xpath('//strong[text()=\"Description\"]')\n if description_label:\n description_bits = []\n el = description_label[0].getnext()\n while el.tag == 'p':\n description_bits.append(_get_el_text(el))\n el = el.getnext()\n set_val('Description', '\\n\\n'.join(description_bits))\n\n localisation = blob.cssselect('div#localisation p')\n if localisation:\n set_val('AffectedRoads', '\\n\\n'.join(_get_el_text(el) for el in localisation))\n\n try:\n set_val('ExternalURL', blob.cssselect('#avis_residants a, #en_savoir_plus a')[0].get('href'))\n except IndexError:\n pass\n\n facultatif = blob.cssselect('div#itineraire_facult p')\n if facultatif:\n set_val('Detour', '\\n\\n'.join(_get_el_text(el) for el in facultatif))\n\n if blob.cssselect('div#dates strong'):\n try:\n start_date = blob.xpath(u'div[@id=\"dates\"]/strong[text()=\"Date de d\\xe9but\"]')[0].tail\n end_date = blob.xpath(u'div[@id=\"dates\"]/strong[text()=\"Date de fin\"]')[0].tail\n if start_date and end_date:\n set_val('StartDate', _fr_string_to_date(start_date))\n set_val('EndDate', _fr_string_to_date(end_date))\n except IndexError:\n pass\n\n elem.append(E.Geometry(\n geom_to_xml_element(feature.geom)\n ))\n\n return elem", "def getFeatureInfo(self,feature):\n geomRef = feature.GetGeometryRef()\n nameIndex = feature.GetFieldIndex(\"OBJNAM\")\n featureName = \"NO OBJNAM\"\n if(nameIndex != -1 and feature.GetFieldAsString(nameIndex) != \"\" ):\n featureName = feature.GetFieldAsString(nameIndex)\n featureInfo = (featureName, feature.GetFID(), geomRef.GetX(), geomRef.GetY())\n # rospy.loginfo(featureInfo)\n return featureInfo", "def open(self) -> None:", "def open(self) -> None:", "def open(self) -> None:", "def composeWorkplaceOntology():\n\n import ossPyFuncs \n import pandas as pd\n \n #mysql query to extract full table from government organizations\n #certian table columns feature capital letters which cases uproblems\n postgreSql_selectQuery=\"SELECT * FROM us_gov_manual.us_govman_2019 ;\"\n #pass querry and obtain table\n govTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #mysql query to obtain academic instutions\n postgreSql_selectQuery=\"SELECT institution FROM hipolabs.universities ;\"\n #pass querry and obtain table\n univTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2018_us1000;\"\n businesses1=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2019_us1000;\"\n businesses2=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2020_global2000;\"\n businesses3=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #combine theinsitutions into a vector\n 
combinedSeries=[govTable['AgencyName'],univTable['institution'],businesses1['company'],businesses2['company'],businesses3['company']]\n #turn the multi item vector into a single series\n fullWordbank=pd.concat(combinedSeries)\n #turn that series into a pd dataframe\n wordbankTable=pd.DataFrame(fullWordbank.unique())\n\n return wordbankTable", "def ontology() -> Ontology:\n return Ontology()", "def os_open_graph( self, ):\r\n pass", "def open(self):", "def get_go():\n # decompress obo file if it wasn't yet\n if not os.path.exists(OBO_FILE):\n _decompress_obofile()\n # create global variable\n if __GO__[0] is None:\n __GO__[0] = onto.Ontology(OBO_FILE, with_rels=True, include_alt_ids=False)\n return __GO__[0]", "def Open(self, file_object):", "def load_features(self, features):\n pass\n # self.features = features", "def __init__(self):\r\n self.label = \"Batch OVL to Feature\"\r\n self.description = \"Batch OVL to Feature searches a folder for OVL files from CPOF, C2PC, GCCS or similar system and converts it to a series of Feature Class for Point, Line, and Polygons.\"\r\n self.canRunInBackground = False", "def load_gene_ontology(self, file_path):\n\t\tpass", "def get_features(item, GP):\n contents_url = '%s/contents' % item['url']\n\n # scrape readme\n gf.get_readme_length(contents_url, GP)\n\n # scrape file-by-file stats\n digest_repo(contents_url, GP)\n\n # scrape commit history\n gf.get_repo_commit_history(item, GP)\n\n # scrape stargazers\n GP.n_stars = item['stargazers_count']\n\n # scrape forks\n GP.n_forks = item['forks_count']\n\n return GP" ]
[ "0.5661019", "0.55700505", "0.5532482", "0.5513073", "0.5394623", "0.53802025", "0.53731954", "0.53539014", "0.5332702", "0.53029007", "0.53000814", "0.52954465", "0.52883095", "0.5224929", "0.522275", "0.5220456", "0.5214841", "0.5178848", "0.5178848", "0.5178848", "0.51736695", "0.51729804", "0.51715726", "0.51650476", "0.5158309", "0.51329446", "0.5130598", "0.51106197", "0.50905573", "0.5075711" ]
0.7093566
0
From a list of 'scene.json' and/or 'scene_.json' paths in s3, return a Scene object for the one with the latest timestamp.
def get_latest_scene(s3_scene_jsons): # Fetch all 'scene*.json' files and load Scenes scenes = [open_remote_pb_object(scene_json, Scene) for scene_json in s3_scene_jsons] # Find Scene with latest creation timestamp creation_ts = [_s.creation_date.ToMicroseconds() for _s in scenes] index = creation_ts.index(max(creation_ts)) return scenes[index], s3_scene_jsons[index]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_latest_year_month_day_prefix(s3_path):\n latest = date.min\n keys = get_contents_of_directory(s3_path)\n\n for key in keys:\n search = re.search(r'.*year=(\\d{4}).*month=(\\d{2}).*day=(\\d{2})', key)\n if search:\n year, month, day = search.groups()\n bucket_date = date(int(year), int(month), int(day))\n if bucket_date > latest:\n latest = bucket_date\n\n if latest == date.min:\n return None\n return latest", "def read_s3_file(date):\n \"\"\" history from S3 \"\"\"\n bucket = os.getenv(\"SPOTIFY_BUCKET_NAME\")\n path = os.getenv(\"SPOTIFY_BUCKET_PATH\")\n s3 = boto3.resource('s3')\n try:\n s3.Object(bucket, \"%s/%s.json\" % (path, date)).load()\n except botocore.exceptions.ClientError as e:\n logger.info(\"No existing history file found for %s, %s\" %\n (date, e.response['Error']['Code']))\n if e.response['Error']['Code'] == '404':\n return []\n else:\n logger.warning(\"Unexpected error code returned!\")\n return []\n else:\n logger.info(\"Reading history file for %s\" % date)\n content_object = s3.Object(bucket, \"%s/%s.json\" % (path, date))\n file_content = content_object.get()['Body'].read().decode('utf-8')\n json_content = json.loads(file_content)\n return json_content", "def latest_archive_zip_revision(doi_id, s3_keys, journal, status):\n s3_key_name = None\n\n name_prefix_to_match = (journal + '-' + utils.pad_msid(doi_id)\n + '-' + status + '-v')\n\n highest = 0\n for key in s3_keys:\n if key[\"name\"].startswith(name_prefix_to_match):\n version_and_date = None\n try:\n parts = key[\"name\"].split(name_prefix_to_match)\n version = parts[1].split('-')[0]\n date_formatted = dateutil.parser.parse(key[\"last_modified\"])\n date_part = date_formatted.strftime(utils.S3_DATE_FORMAT)\n version_and_date = int(version + date_part)\n except:\n pass\n if version_and_date and version_and_date > highest:\n s3_key_name = key[\"name\"]\n highest = version_and_date\n\n return s3_key_name", "def SELECT_LATEST_FILE_JSON(directory=LOCAL_DIRECTORY_OF_SENSOR_DATA):\n latest_time = None\n latest_path = None\n first_loop = True\n for file_name in os.listdir(directory):\n file_path_json = os.path.join(directory, file_name)\n if os.path.isfile(file_path_json):\n current_time = os.stat(file_path_json)\n if not first_loop and int(current_time.st_mtime) > int(latest_time.st_mtime) and \\\n file_path_json[-len('.json'):] == '.json':\n latest_time = os.stat(file_path_json)\n latest_path = file_path_json\n elif first_loop:\n latest_time = os.stat(file_path_json)\n latest_path = file_path_json\n first_loop = False\n return latest_path", "def load_archives_from_s3(self):\n s3_bucket = S3Backend(self.conf).bucket\n try:\n k = Key(s3_bucket)\n k.key = self.backup_key\n\n return json.loads(k.get_contents_as_string())\n except S3ResponseError, exc:\n log.error(exc)\n return {}", "def get_latest_file_name(bucket_name,prefix):\n s3_client = boto3.client('s3')\n objs = s3_client.list_objects_v2(Bucket=bucket_name)['Contents']\n shortlisted_files = dict() \n for obj in objs:\n key = obj['Key']\n timestamp = obj['LastModified']\n # if key starts with folder name retrieve that key\n if key.startswith(prefix): \n # Adding a new key value pair\n shortlisted_files.update( {key : timestamp} ) \n latest_filename = max(shortlisted_files, key=shortlisted_files.get)\n print('Lastest File Name: ' + latest_filename)\n return latest_filename", "def load_s3_njson(bucket, prefix, key_list, honorary_list):\n # Get list of files in bucket and with prefix:\n s3_file_list = list_s3_files(bucket, prefix)\n \n # Load data from all 
files:\n structured_data = []\n for s3_file in s3_file_list:\n structured_data = structured_data + s3_file_to_dict_list(bucket, s3_file, key_list, honorary_list)\n \n return structured_data", "def get_most_recent_folder_from_s3_bucket():\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(BUCKET_NAME)\n result = bucket.meta.client.list_objects(Bucket=bucket.name, Delimiter='/')\n folders = []\n date_pattern = re.compile(r\"[0-9_]+\")\n for o in result.get('CommonPrefixes'):\n folder_name = o.get('Prefix')\n if re.match(date_pattern, folder_name):\n folders.append(folder_name)\n folders.sort(reverse=True)\n return folders[0]", "def get_archive(katfilenames):\n\timport requests\n\n\tfile_refs = []\n\tfor filename in katfilenames:\n\t\tif filename.startswith('s3'):\n\t\t\tres = requests.post(S3_URL, headers=S3_HEAD, data='{\"s3_ref\":\"%s\",\"ref_key\":\"Nope\"}'%(filename,))\n\t\t\turl = res.json()['url']\n\t\t\tres1 = requests.get(url)\n\t\t\toutfile = filename.split('/')[-1]\n\t\t\topen(outfile, 'wb').write(res1.content)\n\t\t\tfile_refs.append(outfile)\n\t\telse:\n\t\t\tfile_refs.append(filename)\n\treturn file_refs", "def get_old_new(s3, cdc_prefixes: typing.Optional[list] = None, full_load_prefixes: typing.Optional[list] = None,\n old_info: typing.Optional[str] = None):\n if not cdc_prefixes and full_load_prefixes:\n raise ValueError(\"cdc_info and full_load_info cannot both be null. One must be specified\")\n\n if old_info:\n old_bucket, old_prefix = get_bucket_key(old_info)\n s3.download_file(old_bucket, old_prefix, 'old_info.json')\n old_file = open(\"old_info.json\", \"r\")\n old = json.loads(old_file.read())\n old_file.close()\n os.remove('old_info.json')\n new_run_id = old['run_id'] + 1\n else:\n # Assumes that there are no previous runs/no previously processed files\n old = {'cdc_files': {}}\n new_run_id = 0\n\n if cdc_prefixes:\n new_cdc = {}\n # Add any newly added identifiers, update previous prefixes, drop missing ones\n for prefix in cdc_prefixes:\n old_cdc = old['cdc_files']\n old_files = old_cdc.get(prefix, {}).get('files', [])\n since = old_cdc.get(prefix, {}).get('max_ts', \"1970-01-01 00:00:00.000\")\n files, max_ts = find_latest(old_files, s3_list(s3, prefix, ListType.full), since)\n new_cdc[prefix] = {'files': files, 'max_ts': max_ts}\n else:\n new_cdc = {}\n\n if full_load_prefixes:\n new_full = {}\n for prefix in full_load_prefixes:\n files = s3_list(s3, prefix, ListType.full)\n new_full[prefix] = {'files': [x[0] for x in files]}\n else:\n new_full = {}\n\n output = {\n 'cdc_files': new_cdc,\n 'full_load_files': new_full,\n 'run_id': new_run_id\n }\n return output", "def get_last_modified_from_first_matching_file(key_list, framework_slug, prefix):\n path_starts_with = '{}/{}'.format(framework_slug, prefix)\n return next((key for key in key_list if key.get('path').startswith(path_starts_with)), {}).get('last_modified')", "def ingest_latests(last_timestamp, file_list):\n def _iterator(file_name):\n # Is a radar image file\n if re.match(r'cag01est2400\\d{4}-\\d{2}-\\d{2}_\\d{2}:\\d{2}:\\d{2}.png', file_name):\n file_timestamp = datetime.datetime.strptime(\n file_name, 'cag01est2400%Y-%m-%d_%H:%M:%S.png')\n if file_timestamp > last_timestamp:\n return True\n else:\n return False\n else:\n return False\n\n return list(filter(_iterator, file_list))", "def get_gzipped_s3_objects_from_dict(session, event):\n return get_s3_objects_from_dict(\n session, event, default_unzip_s3_object_handler_function\n )", "def download_json_metadata_from_s3(bucket_name, prefix=\"\", 
num_threads=20):\n\n # simple method for threads to pull from a queue and download JSON files\n def download_object(queue):\n while True:\n obj = queue.get()\n if obj is None:\n break\n obj.Object().download_file(obj.key.replace(prefix, ''))\n queue.task_done()\n\n # create a directory to store downloaded metadata\n cwd = Path.cwd()\n data_dir = cwd / 'data'\n json_dir = data_dir / 'json'\n # try:\n os.makedirs(json_dir, exist_ok=True)\n # except FileExistsError:\n # shutil.rmtree(json_dir)\n # os.makedirs(json_dir)\n os.chdir(json_dir)\n\n # create a queue for objects that need to be downloaded\n # and spawn threads to download them concurrently\n download_queue = Queue(maxsize=0)\n workers = []\n for worker in range(num_threads):\n worker = Thread(target=download_object, args=(download_queue, ))\n worker.setDaemon(True)\n worker.start()\n workers.append(worker)\n\n # loop through the files in the bucket and filter for JSON metadata\n # files for only labeled images; add them to the queue\n s3 = boto3.resource(\"s3\")\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.filter(Prefix=prefix):\n if obj.key.endswith(\"meta.json\"):\n download_queue.put(obj)\n\n # wait for the queue to be empty, then join all threads\n download_queue.join()\n for _ in range(num_threads):\n download_queue.put(None)\n for worker in workers:\n worker.join()\n\n os.chdir(cwd)", "def _get_s3_object(self, s3_path):\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n return self.s3_resource.Object(bucket_name, key)", "def get_pickle_from_s3(path):\n return load_pickle_from_s3(*load_bucket_and_path(path))", "def data_pull_s3(self):\n year = self.month_year[0]\n month = self.month_year[1]\n self.s3 = boto3.resource('s3',aws_access_key_id=self.creds_data['key_id'],\n aws_secret_access_key=self.creds_data['key_access'])\n bucket = self.s3.Bucket('himatdata')\n home = os.getcwd()\n file_path = os.path.join(*[home, 'Trmm/', self.output_folder, year + '_' + month])\n print(file_path)\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n for obj in bucket.objects.filter(Delimiter='', Prefix='Trmm/{}{}_{}'.format(self.output_folder, year, month)):\n if obj.key.endswith('.nc4'):\n bucket.download_file(obj.key,os.path.join(os.path.join(home, obj.key)))\n logging.info(\"Done with Year Month: %s\", month_year)", "def get_latest_data(bucket, dir):\n # get all the scraped json files in the directory in the bucket\n files = client.list_objects_v2(Bucket=BUCKET,\n Prefix=DIR)['Contents']\n # read the data from the object\n str_file = client.get_object(\n Bucket=BUCKET, Key=files[-1]['Key'])['Body'].read().decode('UTF-8')\n data = json.loads(str_file)\n return data", "def get_radar_from_aws(site, datetime_t):\n\n # First create the query string for the bucket knowing\n # how NOAA and AWS store the data\n my_pref = datetime_t.strftime('%Y/%m/%d/') + site\n\n # Connect to the bucket\n conn = S3Connection(anon = True)\n bucket = conn.get_bucket('noaa-nexrad-level2')\n\n # Get a list of files\n bucket_list = list(bucket.list(prefix = my_pref))\n\n # we are going to create a list of keys and datetimes to allow easy searching\n keys = []\n datetimes = []\n\n # populate the list\n for i in range(len(bucket_list)):\n this_str = str(bucket_list[i].key)\n if 'gz' in this_str:\n endme = this_str[-22:-4]\n fmt = '%Y%m%d_%H%M%S_V0'\n dt = datetime.strptime(endme, fmt)\n datetimes.append(dt)\n keys.append(bucket_list[i])\n\n if this_str[-3::] == 'V06':\n endme = this_str[-19::]\n fmt = '%Y%m%d_%H%M%S_V06'\n dt = 
datetime.strptime(endme, fmt)\n datetimes.append(dt)\n keys.append(bucket_list[i])\n\n # find the closest available radar to your datetime\n closest_datetime = _nearestDate(datetimes, datetime_t)\n index = datetimes.index(closest_datetime)\n\n localfile = tempfile.NamedTemporaryFile()\n keys[index].get_contents_to_filename(localfile.name)\n radar = pyart.io.read(localfile.name)\n return radar", "def from_s3(cls, *, bucket_name, prefix, suffix='.mos.xml', allow_incomplete=False):\n mos_file_keys = s3.get_mos_files(\n bucket_name=bucket_name,\n prefix=prefix,\n suffix=suffix,\n )\n logger.info(\"Making MosCollection from %s S3 files\", len(mos_file_keys))\n mos_readers = sorted([\n mr\n for mr in [MosReader.from_s3(bucket_name, key) for key in mos_file_keys]\n if mr is not None\n ])\n return cls(mos_readers, allow_incomplete=allow_incomplete)", "def latest(self, key, **args):\n record = self.storage.latest(key)\n if record is None:\n return self.klass(**args)\n return self.klass.from_json(record)", "def get_amazon_adj_cls_from_s3(s3_resource, bucket_name, prefix='') -> dict:\n amzn_filename = \"AMZN.json\"\n complete_path = os.path.join(prefix, amzn_filename)\n json_object = s3_resource.Object(bucket_name, complete_path)\n file_content = json_object.get()['Body'].read().decode('utf-8')\n json_content = json.loads(file_content)\n return json_content", "def create_data_schema_from_s3_path(s3_path):\n # We should have only directories at the first level of this S3 path:\n fs = s3fs.S3FileSystem()\n components = fs.ls(s3_path)\n \n # Loops through each subdirectory found in the root dir:\n DATASET_COMPONENT_FIELDS_MAP = dict()\n for subsystem in components:\n # The first tag should always be Timestamp\n subsystem_tags = ['timestamp']\n \n # Opens the first file (they have the same structure):\n files = fs.ls(subsystem)\n for file in files:\n if file[-1] != '/':\n break\n\n current_subsystem_df = pd.read_csv(f's3://{file}', nrows=1)\n subsystem_tags = subsystem_tags + current_subsystem_df.columns.tolist()[1:]\n \n DATASET_COMPONENT_FIELDS_MAP.update({subsystem.split('/')[-1]: subsystem_tags})\n\n # Generate the associated JSON schema:\n schema = create_data_schema(DATASET_COMPONENT_FIELDS_MAP)\n \n return schema", "def get_exports(client, bucket, prefix, latest=True):\n keys = client.list_objects_v2(\n Bucket=bucket, Prefix=prefix, Delimiter='/').get('CommonPrefixes', [])\n found = []\n years = []\n for y in keys:\n part = y['Prefix'].rsplit('/', 2)[-2]\n if not part.isdigit():\n continue\n year = int(part)\n years.append(year)\n\n if not years:\n return []\n\n years.sort(reverse=True)\n if latest:\n years = [years[0]]\n\n for y in years:\n keys = client.list_objects_v2(\n Bucket=bucket, Prefix=\"%s/%d/\" % (prefix.strip('/'), y),\n Delimiter='/').get('CommonPrefixes', [])\n months = []\n for m in keys:\n part = m['Prefix'].rsplit('/', 2)[-2]\n if not part.isdigit():\n continue\n month = int(part)\n date_key = (y, month)\n months.append(month)\n months.sort(reverse=True)\n if not months:\n continue\n if latest:\n months = [months[0]]\n for m in months:\n keys = client.list_objects_v2(\n Bucket=bucket, Prefix=\"%s/%d/%s/\" % (\n prefix.strip('/'), y, ('%d' % m).rjust(2, '0')),\n Delimiter='/').get('CommonPrefixes', [])\n for d in keys:\n part = d['Prefix'].rsplit('/', 2)[-2]\n if not part.isdigit():\n continue\n day = int(part)\n date_key = (y, m, day)\n found.append(date_key)\n found.sort(reverse=True)\n if latest:\n found = [found[0]]\n return found", "def collect_s3(self):\n print('Collecting 
artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket))\n self.s3 = boto3.resource('s3')\n self.s3_bucket = self.s3.Bucket(s3_bucket)\n self.s3_client = boto3.client('s3')\n for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):\n self.collect_single(item.get('Key'))\n\n for a in self.artifacts:\n a.download()", "def from_s3(cls, bucket_name, mos_file_key):\n xml = s3.get_file_contents(bucket_name, mos_file_key)\n return cls.from_string(xml)", "def get_s3_objects_from_dict(session, event, object_handler_function):\n\n objects = []\n s3 = session.client(\"s3\")\n # Get the object from the event and show its content type\n for record in event.get(\"Records\", []):\n bucket = record[\"s3\"][\"bucket\"][\"name\"]\n unprocessed_key = record[\"s3\"][\"object\"][\"key\"]\n # urllib changes structure and encoding is different\n # between python 2 and 3\n key = (\n urllib.parse.unquote_plus(unprocessed_key)\n # if sys.version_info[0] >= 3\n # else urllib.unquote_plus(unprocessed_key.encode(\"utf-8\"))\n )\n logging.info(\"Bucket: %s. Key: %s\", bucket, key)\n\n # get S3 object and add it to return list\n response = s3.get_object(Bucket=bucket, Key=key)\n objects.append(object_handler_function(response))\n return objects", "def objs_with_prefix(bucket, log_type, query_time):\n prefix = get_prefix(log_type, query_time)\n # S3 guarantees to return objects in ascending key order based on the UTF-8\n # binary representation of the key. Unfortunately the server-side filtering\n # is quite limited; we can't specify the sort order or the sort key.\n objs = list(bucket.objects.filter(Prefix=prefix))\n logging.info('Found %s files with prefix %s',\n 'no' if not objs else len(objs), prefix)\n return objs", "def lastThree(catalog):\n return model.lastThree(catalog)", "def get_matching_s3_keys(bucket, prefix=\"\", suffix=\"\"):\n for obj in get_matching_s3_objects(bucket, prefix, suffix):\n yield obj[\"Key\"]\n\n def download_froms3(myfile, env='prod'):\n # session = boto3.Session(profile_name=PROFILE)\n boto_s3_session = boto3.Session(profile_name=env)\n s3 = boto_s3_session.resource('s3')\n s3client = boto_s3_session.client('s3', region_name='eu-west-2')\n try:\n file_name = unquote(myfile.split('/')[-1])\n oparse = urlparse(myfile, allow_fragments=False)\n print(oparse)\n S3_SRC_BUCKET_NAME = oparse.netloc\n key = oparse.path[1:]\n download_path = '{0}{1}'.format(BASE_PATH, file_name)\n print(f'Downloading from {S3_SRC_BUCKET_NAME} , {key} to {download_path} ')\n # s3.Bucket(S3_SRC_BUCKET_NAME).download_file(key, download_path)\n # s3.Bucket(S3_SRC_BUCKET_NAME).download_file(file_name, download_path)\n s3client.download_file(S3_SRC_BUCKET_NAME, key, download_path)\n print('File Downloaded')\n except botocore.exceptions.ClientError as err:\n if err.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\", err)\n else:\n # raise\n error = str(err)\n print(error)\n\n return myfile" ]
[ "0.61708826", "0.5877487", "0.55048525", "0.5477997", "0.54031044", "0.5294818", "0.5293787", "0.52524304", "0.5156226", "0.5150664", "0.5150446", "0.512073", "0.49706817", "0.49576333", "0.4952994", "0.49373975", "0.4913797", "0.4860639", "0.48384994", "0.4826581", "0.4820257", "0.4753907", "0.47462967", "0.4735426", "0.4728382", "0.47009814", "0.46539104", "0.46241525", "0.46203536", "0.4601871" ]
0.7924387
0
It builds the configuration space with the needed hyperparameters. It is easy to implement different types of hyperparameters. Besides float hyperparameters on a log scale, it is also able to handle categorical input parameters.
def get_configspace(): cs = CS.ConfigurationSpace() # Learning rate hyperparameter lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True) # Stochastic gradient descent momentum as parameter. sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False) cs.add_hyperparameters([lr, sgd_momentum]) # Optimizer hyperparameters. #optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD']) #cs.add_hyperparameters([optimizer]) # Only add the sgd_momentum hyperparameter if the optimizer is stochastic gradient descent. Otherwise, it doesn't make sense. #cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD') #cs.add_condition(cond) ''' The below is commented out because we're not fiddling with architecture in this optimization.''' #num_new_fc_layers = CSH.UniformIntegerHyperparameter('num_new_fc_layers', lower=0, upper=3, default_value=0, log=False) #num_els_new_1 = CSH.UniformIntegerHyperparameter('num_els_new_1', lower=128, upper=4096, default_value = 1000, log=True) #num_els_new_2 = CSH.UniformIntegerHyperparameter('num_els_new_2', lower=128, upper=4096, default_value = 1000, log=True) #num_els_new_3 = CSH.UniformIntegerHyperparameter('num_els_new_3', lower=128, upper=4096, default_value = 1000, log=True) #freeze0_old = CSH.UniformIntegerHyperparameter('freeze0_cat', lower = 0, upper = 1, default_value = 1, log=False) #freeze1_old = CSH.UniformIntegerHyperparameter('freeze1_cat', lower=0, upper=1, default_value=1, log=False) #cs.add_hyperparameters([num_new_fc_layers, num_els_new_1, num_els_new_2, num_els_new_3, freeze0_old, freeze1_old, batchsize]) dropout_rate = CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False) cs.add_hyperparameters([dropout_rate]) return cs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
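As a worked illustration of what the query and document above describe, the sketch below builds a small ConfigSpace with a float hyperparameter sampled on a log scale, a categorical hyperparameter, and a condition that activates the SGD momentum only when SGD is chosen. The names, ranges, and seed are illustrative assumptions, not values taken from the record.

    import ConfigSpace as CS
    import ConfigSpace.hyperparameters as CSH

    cs = CS.ConfigurationSpace(seed=1)

    # Float hyperparameter sampled on a log scale.
    lr = CSH.UniformFloatHyperparameter(
        'lr', lower=1e-6, upper=1e-1, default_value=1e-2, log=True)

    # Categorical hyperparameter.
    optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD'])

    # Momentum only makes sense for SGD, so it is attached via a condition.
    sgd_momentum = CSH.UniformFloatHyperparameter(
        'sgd_momentum', lower=0.0, upper=0.99, default_value=0.9)

    cs.add_hyperparameters([lr, optimizer, sgd_momentum])
    cs.add_condition(CS.EqualsCondition(sgd_momentum, optimizer, 'SGD'))

    print(cs.sample_configuration())

Sampling from such a space then yields only valid combinations: sgd_momentum appears in a drawn configuration only when optimizer is 'SGD'.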
[ "def get_configspace():\r\n cs = CS.ConfigurationSpace()\r\n\r\n lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)\r\n\r\n # For demonstration purposes, we add different optimizers as categorical hyperparameters.\r\n # To show how to use conditional hyperparameters with ConfigSpace, we'll add the optimizers 'Adam' and 'SGD'.\r\n # SGD has a different parameter 'momentum'.\r\n optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD'])\r\n\r\n sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False)\r\n\r\n cs.add_hyperparameters([lr, optimizer, sgd_momentum])\r\n\r\n\r\n\r\n num_conv_layers = CSH.UniformIntegerHyperparameter('num_conv_layers', lower=1, upper=3, default_value=2)\r\n\r\n num_filters_1 = CSH.UniformIntegerHyperparameter('num_filters_1', lower=4, upper=64, default_value=16, log=True)\r\n num_filters_2 = CSH.UniformIntegerHyperparameter('num_filters_2', lower=4, upper=64, default_value=16, log=True)\r\n num_filters_3 = CSH.UniformIntegerHyperparameter('num_filters_3', lower=4, upper=64, default_value=16, log=True)\r\n\r\n cs.add_hyperparameters([num_conv_layers, num_filters_1, num_filters_2, num_filters_3])\r\n\r\n\r\n dropout_rate = CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False)\r\n num_fc_units = CSH.UniformIntegerHyperparameter('num_fc_units', lower=8, upper=256, default_value=32, log=True)\r\n\r\n cs.add_hyperparameters([dropout_rate, num_fc_units])\r\n\r\n\r\n # The hyperparameter sgd_momentum will be used,if the configuration\r\n # contains 'SGD' as optimizer.\r\n cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD')\r\n cs.add_condition(cond)\r\n\r\n # You can also use inequality conditions:\r\n cond = CS.GreaterThanCondition(num_filters_2, num_conv_layers, 1)\r\n cs.add_condition(cond)\r\n\r\n cond = CS.GreaterThanCondition(num_filters_3, num_conv_layers, 2)\r\n cs.add_condition(cond)\r\n\r\n return cs", "def _build_space(self, param_grid):\n if self.verbose>9:\n 'Building param space...'\n \n _warnings.filterwarnings('ignore')\n \n param_grid = param_grid.copy()\n space = {}\n for key in param_grid.keys():\n params = param_grid[key]\n \n if self.verbose>9:\n print('\\tinput:',key, params)\n \n type_str = str(type(params[0]))\n\n if 'float' in type_str or 'int' in type_str:\n \n min_ = min(params)\n max_ = max(params)\n log10_min_ = _np.log10(min_)\n log10_max_ = _np.log10(max_)\n\n if round(log10_max_)-round(log10_min_)>1 and round(log10_max_)-round(log10_min_)!=_np.inf: # use uniform distribution on log spacing \n \n space['log10.'+key] = _hyperopt.hp.uniform(key, log10_min_, log10_max_)\n \n if self.verbose>9:\n print('\\toutput:','log10.'+key, 'uniform', log10_min_, log10_max_)\n \n else:\n if 'int' in type_str:\n space[key] = _hyperopt.hp.quniform(key, min_, max_, 1)\n \n if self.verbose>9:\n print('\\toutput:',key, 'quniform', min_, max_)\n \n elif 'float' in type_str:\n space[key] = _hyperopt.hp.uniform(key, min_, max_)\n \n if self.verbose>9:\n print('\\toutput:',key, 'uniform', min_, max_)\n \n \n elif 'str' in type_str:\n space[key] = _hyperopt.hp.choice(key, [i for i in range(len(params))])\n \n if self.verbose>9:\n print('\\toutput:',key, 'choice', [i for i in range(len(params))])\n\n else:\n raise Exception('type(params[0]) is '+type_str+'. 
This type of hyperparameter is not yet supported.')\n\n assert(len(space.keys())==len(param_grid.keys())), 'len(space.keys())='+str(len(space.keys()))+', which is not equal to len(param_grid.keys())='+str(len(param_grid.keys()))\n \n if self.verbose>9:\n print('...finished building space')\n \n _warnings.filterwarnings('default')\n\n return space", "def get_configspace() -> CS.Configuration:\n cs = CS.ConfigurationSpace(seed=0)\n # START TODO ################\n lr_hp = CS.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value=1e-2, log=True)\n optimizer_hp = CSH.CategoricalHyperparameter(name='optimizer', choices=['Adam', 'SGD', 'RMSprop'])\n sgd_momentum_hp = CS.UniformFloatHyperparameter('sgd_momentum', lower=0.00, upper=0.99, default_value=0.9)\n\n rms_momentum_hp = CS.UniformFloatHyperparameter('rms_momentum', lower=0.00, upper=0.99, default_value=0.9)\n rms_alpha_hp = CS.UniformFloatHyperparameter('rms_alpha', lower=0.00, upper=0.99, default_value=0.99)\n\n scheduler_hp = CSH.CategoricalHyperparameter(name='scheduler',\n choices=['CosineAnnealingLR', 'CosineAnnealingWarmRestarts'])\n cosine_max_t_hp = CS.UniformIntegerHyperparameter(name='cosine_max_t', lower=50, upper=300, default_value=150)\n cosine_warm_hp = CS.UniformIntegerHyperparameter(name='warm_t_0', lower=50, upper=300, default_value=150)\n\n sgd_cond = CS.EqualsCondition(sgd_momentum_hp, optimizer_hp, 'SGD')\n rms_cond1 = CS.EqualsCondition(rms_momentum_hp, optimizer_hp, 'RMSprop')\n rms_cond2 = CS.EqualsCondition(rms_alpha_hp, optimizer_hp, 'RMSprop')\n cosine_warm_cond = CS.EqualsCondition(cosine_warm_hp, scheduler_hp, 'CosineAnnealingWarmRestarts')\n cosine_cond = CS.EqualsCondition(cosine_max_t_hp, scheduler_hp, 'CosineAnnealingLR')\n cs.add_hyperparameters([lr_hp, optimizer_hp, sgd_momentum_hp, rms_momentum_hp,\n rms_alpha_hp, scheduler_hp, cosine_max_t_hp, cosine_warm_hp])\n cs.add_conditions([sgd_cond, rms_cond1, rms_cond2, cosine_cond, cosine_warm_cond])\n # END TODO ################\n return cs", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 1e-7\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 0.3\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n 
##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_configspace():\n configspace = cs.ConfigurationSpace()\n\n memory = cs.hyperparameters.UniformIntegerHyperparameter(name='memory', lower=2, upper=25)\n configspace.add_hyperparameter(hyperparameter=memory)\n\n batch_size = cs.hyperparameters.UniformIntegerHyperparameter(\n name='batch_size', lower=32, upper=8192, log=True\n )\n configspace.add_hyperparameter(hyperparameter=batch_size)\n\n frequency = cs.hyperparameters.UniformFloatHyperparameter(\n name='frequency', lower=3e-2, upper=1.0, log=True\n )\n configspace.add_hyperparameter(hyperparameter=frequency)\n\n learning_rate = cs.hyperparameters.UniformFloatHyperparameter(\n name='learning_rate', lower=1e-5, upper=3e-2, log=True\n )\n configspace.add_hyperparameter(hyperparameter=learning_rate)\n\n horizon = cs.hyperparameters.UniformIntegerHyperparameter(\n name='horizon', lower=1, upper=50\n )\n configspace.add_hyperparameter(hyperparameter=horizon)\n\n discount = cs.hyperparameters.UniformFloatHyperparameter(\n name='discount', lower=0.8, upper=1.0, log=True\n )\n configspace.add_hyperparameter(hyperparameter=discount)\n\n ratio_based = cs.hyperparameters.CategoricalHyperparameter(\n name='ratio_based', choices=('no', 'yes')\n )\n configspace.add_hyperparameter(hyperparameter=ratio_based)\n\n clipping_value = cs.hyperparameters.UniformFloatHyperparameter(\n name='clipping_value', lower=0.05, upper=0.5\n )\n configspace.add_hyperparameter(hyperparameter=clipping_value)\n\n baseline = cs.hyperparameters.CategoricalHyperparameter(\n name='baseline',\n choices=('no', 'auto', 'same-network', 'same-policy', 'same-policy-noopt')\n )\n configspace.add_hyperparameter(hyperparameter=baseline)\n\n baseline_learning_rate = cs.hyperparameters.UniformFloatHyperparameter(\n name='baseline_learning_rate', lower=1e-5, upper=3e-2, log=True\n )\n configspace.add_hyperparameter(hyperparameter=baseline_learning_rate)\n\n estimate_advantage = cs.hyperparameters.CategoricalHyperparameter(\n name='estimate_advantage', choices=('no', 'yes')\n )\n configspace.add_hyperparameter(hyperparameter=estimate_advantage)\n\n entropy_regularization = cs.hyperparameters.UniformFloatHyperparameter(\n name='entropy_regularization', lower=1e-5, upper=1.0, log=True\n )\n configspace.add_hyperparameter(hyperparameter=entropy_regularization)\n\n configspace.add_condition(\n condition=cs.EqualsCondition(child=clipping_value, parent=ratio_based, value='yes')\n )\n\n configspace.add_condition(\n condition=cs.NotEqualsCondition(\n child=baseline_learning_rate, parent=baseline, value='no'\n )\n )\n\n configspace.add_condition(\n condition=cs.NotEqualsCondition(\n child=estimate_advantage, parent=baseline, value='no'\n )\n )\n\n return configspace", "def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 
'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_configspace(self):\n cd = self.cd\n sp_dict = {}\n sp_dict['epochs'] = int(cd['epochs'])\n sp_dict['gamma'] = self._get_range_uniform('gamma', cd)\n sp_dict['multilabel'] = self._get_atomic('multilabel', cd)\n sp_dict['lr'] = self._get_range_uniform('lr', cd)\n sp_dict['optimizer'] = self._get_categorical('optimizer', cd)\n sp_dict['n_latent'] = self._get_range_integer('n_latent',cd)\n sp_dict['enc_hidden_dim'] = self._get_range_integer('enc_hidden_dim', cd)\n sp_dict['batch_size'] = self._get_range_integer('batch_size', cd)\n sp_dict['coherence_loss_wt'] = self._get_range_uniform('coherence_loss_wt', cd) or 0.0\n sp_dict['redundancy_loss_wt'] = self._get_range_uniform('redundancy_loss_wt', cd) or 0.0\n sp_dict['num_enc_layers'] = self._get_range_integer('num_enc_layers', cd) or 1\n sp_dict['enc_dr'] = self._get_range_uniform('enc_dr', cd) or 0.0\n sp_dict['covar_net_layers'] = self._get_range_integer('covar_net_layers', cd) or 1\n sp_dict['classifier_dropout'] = self._get_range_uniform('classifier_dropout', cd) or 0.1\n\n embedding_types = cd['embedding']\n embedding_space = [] \n for et in embedding_types:\n if et['source'] == 'random':\n embedding_space.append(ag.space.Dict(**{'source': 'random', 'size': self._get_range_integer('size', et)}))\n else:\n fixed_assigned = et.get('fixed')\n if fixed_assigned is None:\n embedding_space.append(ag.space.Dict(**{'source': et['source'], 'fixed': ag.space.Bool()}))\n else:\n embedding_space.append(ag.space.Dict(**{'source': et['source'], 'fixed': fixed_assigned.lower()}))\n sp_dict['embedding'] = 
ag.space.Categorical(*embedding_space)\n\n latent_types = cd['latent_distribution']\n latent_space = []\n for lt in latent_types:\n dist_type = lt['dist_type']\n if dist_type == 'vmf':\n latent_space.append(ag.space.Dict(**{'dist_type': 'vmf', 'kappa': self._get_range_uniform('kappa', lt)}))\n elif dist_type == 'logistic_gaussian':\n latent_space.append(ag.space.Dict(**{'dist_type': 'logistic_gaussian', 'alpha': self._get_range_uniform('alpha', lt)}))\n else:\n latent_space.append(ag.space.Dict(**{'dist_type': 'gaussian'}))\n sp_dict['latent_distribution'] = ag.space.Categorical(*latent_space)\n return sp_dict", "def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def get_param_grid():\n layer_width = [32, 64, 128, 256, 512]\n layers = [2, 3, 4, 5, 6]\n epochs = [10, 25, 50, 75, 100]\n batch_size = [32, 64, 96, 128, 160, 192, 224, 256]\n activation = ['softmax', 'softplus', 'softsign', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']\n init_mode = ['uniform', 'lecun_uniform', 'normal', 'zero', 'glorot_normal', 'glorot_uniform', 'he_normal',\n 'he_uniform']\n dropout_rate = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n optimizer = ['adam', 'sgd', 'adadelta', 'adagrad', 'adamax', 'ftrl', 'nadam', 'rmsprop']\n\n grid = {'layer_width': layer_width,\n 'layers': layers,\n 'epochs': epochs,\n 'batch_size': batch_size,\n 'activation': activation,\n 'init_mode': init_mode,\n 'dropout_rate': dropout_rate,\n 'optimizer': optimizer}\n\n return grid", "def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def get_hyperparameter_configuration(cat_hparam, num_hparam, layers_hparam, combinations, n, random_state=420):\n np.random.seed(seed=random_state)\n configuration = dict.fromkeys(range(n))\n for ind in range(n):\n configuration[ind] = {'hparams': None}\n configuration[ind]['hparams'] = dict.fromkeys(\n [*cat_hparam.keys(), *num_hparam.keys(), 'list_hidden_layer']\n )\n if len(layers_hparam['num_hidden_layer']) == 3:\n try:\n distribution = eval(\n layers_hparam['num_hidden_layer'][2].replace(\"-\", \"\"))\n num_hidden_layer = int(distribution.rvs(\n layers_hparam['num_hidden_layer'][0], layers_hparam['num_hidden_layer'][1]-layers_hparam['num_hidden_layer'][0]))\n except NameError:\n logging.warning(\n f'WARNING: Distribution 
{layers_hparam[\"num_hidden_layer\"][2]} not found, generating random number uniformly.')\n num_hidden_layer = randint.rvs(\n layers_hparam['num_hidden_layer'][0], layers_hparam['num_hidden_layer'][1]+1)\n else:\n num_hidden_layer = randint.rvs(\n layers_hparam['num_hidden_layer'][0], layers_hparam['num_hidden_layer'][1]+1)\n\n if len(layers_hparam['num_neuron']) == 3:\n try:\n distribution = eval(\n layers_hparam['num_neuron'][2].replace(\"-\", \"\"))\n configuration[ind]['hparams']['list_hidden_layer'] = distribution.rvs(\n layers_hparam['num_neuron'][0], layers_hparam['num_neuron'][1]-layers_hparam['num_neuron'][0], size=num_hidden_layer).astype(int).tolist()\n except NameError:\n logging.warning(\n f'WARNING: Distribution {layers_hparam[\"num_neuron\"][2]} not found, generating random number uniformly.')\n configuration[ind]['hparams']['list_hidden_layer'] = randint.rvs(\n layers_hparam['num_neuron'][0], layers_hparam['num_neuron'][1]+1, size=num_hidden_layer).tolist()\n else:\n configuration[ind]['hparams']['list_hidden_layer'] = randint.rvs(\n layers_hparam['num_neuron'][0], layers_hparam['num_neuron'][1]+1, size=num_hidden_layer).tolist()\n\n if len(cat_hparam):\n cat_combination_num = random.randint(\n 0, len(combinations)-1)\n for hparam in cat_hparam.keys():\n configuration[ind]['hparams'][hparam] = combinations.loc[cat_combination_num, hparam]\n\n if len(num_hparam):\n for hparam in num_hparam.keys():\n if len(num_hparam[hparam]) == 3:\n try:\n distribution = eval(\n num_hparam[hparam][2].replace(\"-\", \"\"))\n if (type(num_hparam[hparam][0]) == int) and (type(num_hparam[hparam][1]) == int):\n configuration[ind]['hparams'][hparam] = int(distribution.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]-num_hparam[hparam][0]))\n else:\n configuration[ind]['hparams'][hparam] = distribution.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]-num_hparam[hparam][0])\n except NameError:\n logging.warning(\n f'WARNING: Distribution {num_hparam[hparam][2]} not found, generating random number uniformly.')\n if (type(num_hparam[hparam][0]) == int) and (type(num_hparam[hparam][1]) == int):\n configuration[ind]['hparams'][hparam] = randint.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]+1)\n else:\n configuration[ind]['hparams'][hparam] = uniform.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]-num_hparam[hparam][0])\n else:\n if (type(num_hparam[hparam][0]) == int) and (type(num_hparam[hparam][1]) == int):\n configuration[ind]['hparams'][hparam] = randint.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]+1)\n else:\n configuration[ind]['hparams'][hparam] = uniform.rvs(\n num_hparam[hparam][0], num_hparam[hparam][1]-num_hparam[hparam][0])\n\n return configuration", "def build_param_grid(self, C_list:list=[0.1, 1, 10, 100], gamma_list:list=[1, 0.1, 0.01, 0.001], kernel_list:list=['rbf']):\n ans = {}\n ans['C'] = C_list\n ans['gamma'] = gamma_list\n ans['kernel'] = kernel_list\n self.param_grid = ans\n return ans", "def __init__(self, input_dim=(3, 32, 32), hidden_dims_CNN = ((32, 5, 1, 1), (2, 2, 2)),\n hidden_dims_FC = ((1024), (0.5)), num_classes=10, weight_scale=1e-3, \n reg=0.0, dtype=np.float32):\n self.params = {}\n self.fix_params = {}\n self.reg = reg\n self.dtype = dtype\n \n C_input, H_input, W_input = input_dim\n pre_C = C_input \n pre_H = H_input\n pre_W = W_input\n \n num_CNN = len(hidden_dims_CNN)\n num_FC = len(hidden_dims_FC)\n\n for i in range(0, num_CNN):\n W_name = \"W\" + str(i)\n b_name = \"b\" + str(i)\n conv_param_name = \"conv_param\" + str(i)\n gamma_name = 
\"gamma\" + str(i)\n beta_name = \"beta\" + str(i)\n bn_param_name = \"bn_param\" + str(i)\n pool_param_name = \"pool_param\" + str(i)\n\n if num_CNN == 1:\n num_filters, filter_size, stride, pad = hidden_dims_CNN[0] # (F, filter_size, stride, pad)\n pool_stride, pool_height, pool_width = hidden_dims_CNN[1] # (pooling_stride, pooling_size)\n else:\n num_filters, filter_size, stride, pad = hidden_dims_CNN[i][0] # (F, filter_size, stride, pad)\n pool_stride, pool_height, pool_width = hidden_dims_CNN[i][1] # (pooling_stride, pooling_size)\n \n if weight_scale == -1:\n self.params[W_name] = np.random.randn(num_filters, pre_C, filter_size, filter_size) / np.sqrt(filter_size * filter_size * pre_C)\n else: \n self.params[W_name] = np.random.randn(num_filters, pre_C, filter_size, filter_size) * weight_scale\n self.params[b_name] = np.zeros(num_filters)\n self.fix_params[conv_param_name] = {'stride': stride, 'pad': pad}\n \n self.params[gamma_name] = np.random.randn(num_filters)\n self.params[beta_name] = np.random.randn(num_filters)\n self.fix_params[bn_param_name] = {'mode': 'train'}\n\n self.fix_params[pool_param_name] = {'pool_height': pool_height, 'pool_width': pool_width, 'stride': pool_stride}\n \n pre_H, pre_W = cnn_out_shape(pre_H, pre_W, filter_size, filter_size, stride, pad)\n pre_C = num_filters \n pre_H, pre_W = pool_out_shape(pre_H, pre_W, pool_height, pool_width, pool_stride)\n\n pre_fc_dim = pre_H * pre_W * pre_C\n\n for i in range(0, num_FC):\n W_name = \"W\" + str(i + num_CNN)\n b_name = \"b\" + str(i + num_CNN)\n gamma_name = \"gamma\" + str(i + num_CNN)\n beta_name = \"beta\" + str(i + num_CNN)\n bn_param_name = \"bn_param\" + str(i + num_CNN)\n drop_name = \"drop_ratio\" + str(i + num_CNN)\n \n if num_FC == 1 :\n fc_num = hidden_dims_FC[0]\n drop_ratio = hidden_dims_FC[1]\n else:\n fc_num = hidden_dims_FC[i][0]\n drop_ratio = hidden_dims_FC[i][1]\n\n if weight_scale == -1:\n self.params[W_name] = np.random.randn(pre_fc_dim, fc_num) / np.sqrt(pre_fc_dim)\n else:\n self.params[W_name] = np.random.randn(pre_fc_dim, fc_num) * weight_scale\n self.params[b_name] = np.zeros(fc_num)\n\n self.params[gamma_name] = np.random.randn(fc_num)\n self.params[beta_name] = np.random.randn(fc_num)\n self.fix_params[bn_param_name] = {'mode': 'train'}\n\n self.fix_params[drop_name] = {'mode': 'train', 'p': drop_ratio}\n\n pre_fc_dim = fc_num\n\n total_layer = num_CNN + num_FC\n W_name = \"W\" + str(total_layer)\n b_name = \"b\" + str(total_layer)\n if weight_scale == -1:\n self.params[W_name] = np.random.randn(pre_fc_dim, num_classes) / np.sqrt(pre_fc_dim)\n else:\n self.params[W_name] = np.random.randn(pre_fc_dim, num_classes) * weight_scale\n self.params[b_name] = np.zeros(num_classes)\n\n\n self.num_CNN = num_CNN\n self.num_FC = num_FC\n self.total_layer = num_CNN + num_FC\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def get_hyperparams(self):", "def customize_experiment_config(self, config):\n # TODO: use ConfigList from Coach launcher, and share customization code.\n hyperparams_dict = json.loads(os.environ.get(\"SM_HPS\", \"{}\"))\n\n # Set output dir to intermediate\n # TODO: move this to before customer-specified so they can override\n hyperparams_dict[\"rl.training.local_dir\"] = \"/opt/ml/output/intermediate\"\n\n self.hyperparameters = ConfigurationList() # TODO: move to shared\n for name, value in hyperparams_dict.items():\n # self.map_hyperparameter(name, val) #TODO\n if name.startswith(\"rl.\"):\n # self.apply_hyperparameter(name, value) #TODO\n 
self.hyperparameters.store(name, value)\n # else:\n # raise ValueError(\"Unknown hyperparameter %s\" % name)\n\n self.hyperparameters.apply_subset(config, \"rl.\")\n return config", "def build(dynamic_hyperparams_config, is_training):\n if not isinstance(dynamic_hyperparams_config,\n hyperparams_pb2.Hyperparams):\n raise ValueError('dynamic_hyperparams_config not of type '\n 'hyperparams_pb.Hyperparams.')\n\n batch_norm = None\n batch_norm_params = None\n if dynamic_hyperparams_config.HasField('batch_norm'):\n batch_norm = slim.batch_norm\n batch_norm_params = _build_batch_norm_params(\n dynamic_hyperparams_config.batch_norm, is_training)\n\n affected_ops = [dynamic_conv2d]\n with slim.arg_scope(\n affected_ops,\n activation_fn=_build_activation_fn(dynamic_hyperparams_config.activation),\n normalizer_fn=batch_norm,\n normalizer_params=batch_norm_params) as sc:\n return sc", "def _init_hyperparam(self, **p_par):\r\n \r\n try:\r\n p_input_size = self._input_space.get_num_dim()\r\n p_output_size = self._output_space.get_num_dim()\r\n except:\r\n raise ParamError('Input size and/or output size of the network are not defined.')\r\n \r\n if 'p_update_rate' not in p_par:\r\n p_par['p_update_rate'] = 1\r\n elif p_par.get('p_update_rate') < 1:\r\n raise ParamError(\"p_update_rate must be equal or higher than 1.\")\r\n \r\n if 'p_num_hidden_layers' not in p_par:\r\n raise ParamError(\"p_num_hidden_layers is not defined.\")\r\n \r\n if 'p_output_activation_fct' not in p_par:\r\n p_par['p_output_activation_fct'] = None\r\n \r\n if 'p_optimizer' not in p_par:\r\n raise ParamError(\"p_optimizer is not defined.\")\r\n \r\n if 'p_loss_fct' not in p_par:\r\n raise ParamError(\"p_loss_fct is not defined.\")\r\n\r\n if 'p_test_data' not in p_par:\r\n p_par['p_test_data'] = 0.3\r\n\r\n if 'p_batch_size' not in p_par:\r\n p_par['p_batch_size'] = 100\r\n\r\n if 'p_seed_buffer' not in p_par:\r\n p_par['p_seed_buffer'] = 1\r\n\r\n if 'p_learning_rate' not in p_par:\r\n p_par['p_learning_rate'] = 3e-4\r\n \r\n if 'p_hidden_size' not in p_par:\r\n raise ParamError(\"p_hidden_size is not defined.\")\r\n try:\r\n if len(p_par['p_hidden_size']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of p_hidden_size list must be equal to p_num_hidden_layers or an integer.\")\r\n except:\r\n p_par['p_hidden_size'] = [int(p_par['p_hidden_size'])] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_activation_fct' not in p_par:\r\n raise ParamError(\"p_activation_fct is not defined.\")\r\n try:\r\n if len(p_par['p_activation_fct']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n except:\r\n if isinstance(p_par['p_activation_fct'], list):\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n else:\r\n p_par['p_activation_fct'] = [p_par['p_activation_fct']] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_weight_bias_init' not in p_par:\r\n p_par['p_weight_bias_init'] = True\r\n \r\n if p_par['p_weight_bias_init']:\r\n if 'p_weight_init' not in p_par:\r\n p_par['p_weight_init'] = torch.nn.init.orthogonal_\r\n \r\n if 'p_bias_init' not in p_par:\r\n p_par['p_bias_init'] = lambda x: torch.nn.init.constant_(x, 0)\r\n \r\n if 'p_gain_init' not in p_par:\r\n p_par['p_gain_init'] = np.sqrt(2)\r\n \r\n self._hyperparam_space.add_dim(HyperParam('p_input_size','Z'))\r\n 
self._hyperparam_space.add_dim(HyperParam('p_output_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_update_rate','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_num_hidden_layers','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_hidden_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_activation_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_output_activation_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_optimizer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_loss_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_test_data'))\r\n self._hyperparam_space.add_dim(HyperParam('p_batch_size'))\r\n self._hyperparam_space.add_dim(HyperParam('p_seed_buffer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_learning_rate'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_gain_init'))\r\n self._hyperparam_tuple = HyperParamTuple(self._hyperparam_space)\r\n \r\n ids_ = self.get_hyperparam().get_dim_ids()\r\n self.get_hyperparam().set_value(ids_[0], p_input_size)\r\n self.get_hyperparam().set_value(ids_[1], p_output_size)\r\n self.get_hyperparam().set_value(ids_[2], p_par['p_update_rate'])\r\n self.get_hyperparam().set_value(ids_[3], p_par['p_num_hidden_layers'])\r\n self.get_hyperparam().set_value(ids_[4], p_par['p_hidden_size'])\r\n self.get_hyperparam().set_value(ids_[5], p_par['p_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[6], p_par['p_output_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[7], p_par['p_optimizer'])\r\n self.get_hyperparam().set_value(ids_[8], p_par['p_loss_fct'])\r\n self.get_hyperparam().set_value(ids_[9], p_par['p_test_data'])\r\n self.get_hyperparam().set_value(ids_[10], p_par['p_batch_size'])\r\n self.get_hyperparam().set_value(ids_[11], p_par['p_seed_buffer'])\r\n self.get_hyperparam().set_value(ids_[12], p_par['p_learning_rate'])\r\n self.get_hyperparam().set_value(ids_[13], p_par['p_weight_bias_init'])\r\n self.get_hyperparam().set_value(ids_[14], p_par['p_weight_init'])\r\n self.get_hyperparam().set_value(ids_[15], p_par['p_bias_init'])\r\n self.get_hyperparam().set_value(ids_[16], p_par['p_gain_init'])", "def create_hparams(experiment):\n hparams = {}\n\n # General parameters.\n hparams['batch_size'] = 64\n hparams['eval_batch_size'] = 64\n hparams['learning_rate_warmup_steps'] = 2000\n hparams['learning_rate_constant'] = 1\n hparams['learning_rate'] = 0.001\n hparams['train_epoches'] = 200\n hparams['steps_per_epoch'] = 30\n hparams['train_steps'] = 1000 * 1000\n hparams['eval_steps'] = 100\n hparams['caption_optimizer'] = 't2t'\n hparams['clip_norm'] = 5.0\n hparams['train_files'] = ''\n hparams['eval_files'] = ''\n hparams['train_buffer_size'] = 2000\n hparams['eval_buffer_size'] = 500\n hparams['train_pixel_encoder'] = True\n hparams['debug'] = False\n hparams['distribution_strategy'] = 'mirrored'\n\n # Embedding parameters.\n hparams['embedding_file'] = ''\n hparams['word_vocab_path'] = ''\n hparams['glove_trainable'] = True\n hparams['vocab_size'] = 10000\n\n # View hierarchy encoder parameters.\n hparams['max_pixel_pos'] = 100\n hparams['max_dom_pos'] = 500\n hparams['screen_encoder'] = 'pixel_transformer'\n hparams['screen_embedding_feature'] = ['text', 'type', 'pos', 'click', 'dom']\n hparams['obj_text_aggregation'] = 'max'\n hparams['synthetic_screen_noise'] = 0.\n\n # General 
parameters.\n hparams['num_hidden_layers'] = 2\n hparams['hidden_size'] = 2\n hparams['filter_size'] = 2\n hparams['num_heads'] = 2\n hparams['dropout'] = 0.2\n hparams['layer_prepostprocess_dropout'] = 0.2\n hparams['attention_dropout'] = 0.2\n hparams['relu_dropout'] = 0.2\n\n transformer_hparams = model_params.BASE_PARAMS\n\n # Add parameters from transformer model.\n hparams.update(transformer_hparams)\n\n # Rewrite all the parameters from command-line flags.\n config = screen2words_experiment_config.experiments[experiment]\n hparams.update(config)\n\n return hparams", "def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def _starting_hparams():\n hparams = contrib_training.HParams()\n hparams.add_hparam('batch_style', 'bucket')\n hparams.add_hparam('gradient_clipping_decay', 0.9999)\n hparams.add_hparam('learning_rate', 0.0005)\n hparams.add_hparam('lr_decay_rate', .997)\n hparams.add_hparam('lr_decay_steps', 1000)\n hparams.add_hparam('lr_warmup_steps', 3000)\n hparams.add_hparam('model_type', 'cnn')\n hparams.add_hparam('resnet_bottleneck_factor', 0.5)\n hparams.add_hparam('decision_threshold', 0.5)\n hparams.add_hparam('denominator_power', 1.0) # Standard mean-pooling.\n return hparams", "def set_hyperparams(self, params):", "def create_hparams(hparam_string=None):\n hparams = tf.contrib.training.HParams(\n # The name of the architecture to use.\n arch='resnet',\n lrelu_leakiness=0.2,\n batch_norm_decay=0.9,\n weight_decay=1e-5,\n normal_init_std=0.02,\n generator_kernel_size=3,\n discriminator_kernel_size=3,\n\n # Stop training after this many examples are processed\n # If none, train indefinitely\n num_training_examples=0,\n\n # Apply data augmentation to datasets\n # Applies only in training job\n augment_source_images=False,\n augment_target_images=False,\n\n # Discriminator\n # Number of filters in first layer of discriminator\n num_discriminator_filters=64,\n discriminator_conv_block_size=1, # How many convs to have at each size\n discriminator_filter_factor=2.0, # Multiply # filters by this each layer\n # Add gaussian noise with this stddev to every hidden layer of D\n discriminator_noise_stddev=0.2, # lmetz: Start seeing results at >= 0.1\n # If true, add this gaussian noise to input images to D as well\n discriminator_image_noise=False,\n discriminator_first_stride=1, # Stride in first conv of discriminator\n discriminator_do_pooling=False, # If true, replace stride 2 with avg pool\n discriminator_dropout_keep_prob=0.9, # keep probability for dropout\n\n # DCGAN Generator\n # Number of filters in generator decoder last layer (repeatedly halved\n # from 1st layer)\n num_decoder_filters=64,\n # Number of filters in generator encoder 1st layer (repeatedly doubled\n # after 1st layer)\n num_encoder_filters=64,\n\n # This is the shape to which the noise vector is projected (if we're\n # transferring from noise).\n # Write this way instead of [4, 4, 64] for hparam search flexibility\n projection_shape_size=4,\n projection_shape_channels=64,\n\n # Indicates the method by which we enlarge the spatial representation\n # of an image. 
Possible values include:\n # - resize_conv: Performs a nearest neighbor resize followed by a conv.\n # - conv2d_transpose: Performs a conv2d_transpose.\n upsample_method='resize_conv',\n\n # Visualization\n summary_steps=500, # Output image summary every N steps\n\n ###################################\n # Task Classifier Hyperparameters #\n ###################################\n\n # Which task-specific prediction tower to use. Possible choices are:\n # none: No task tower.\n # doubling_pose_estimator: classifier + quaternion regressor.\n # [conv + pool]* + FC\n # Classifiers used in DSN paper:\n # gtsrb: Classifier used for GTSRB\n # svhn: Classifier used for SVHN\n # mnist: Classifier used for MNIST\n # pose_mini: Classifier + regressor used for pose_mini\n task_tower='doubling_pose_estimator',\n weight_decay_task_classifier=1e-5,\n source_task_loss_weight=1.0,\n transferred_task_loss_weight=1.0,\n\n # Number of private layers in doubling_pose_estimator task tower\n num_private_layers=2,\n\n # The weight for the log quaternion loss we use for source and transferred\n # samples of the cropped_linemod dataset.\n # In the DSN work, 1/8 of the classifier weight worked well for our log\n # quaternion loss\n source_pose_weight=0.125 * 2.0,\n transferred_pose_weight=0.125 * 1.0,\n\n # If set to True, the style transfer network also attempts to change its\n # weights to maximize the performance of the task tower. If set to False,\n # then the style transfer network only attempts to change its weights to\n # make the transferred images more likely according to the domain\n # classifier.\n task_tower_in_g_step=True,\n task_loss_in_g_weight=1.0, # Weight of task loss in G\n\n #########################################\n # 'simple` generator arch model hparams #\n #########################################\n simple_num_conv_layers=1,\n simple_conv_filters=8,\n\n #########################\n # Resnet Hyperparameters#\n #########################\n resnet_blocks=6, # Number of resnet blocks\n resnet_filters=64, # Number of filters per conv in resnet blocks\n # If true, add original input back to result of convolutions inside the\n # resnet arch. If false, it turns into a simple stack of conv/relu/BN\n # layers.\n resnet_residuals=True,\n\n #######################################\n # The residual / interpretable model. #\n #######################################\n res_int_blocks=2, # The number of residual blocks.\n res_int_convs=2, # The number of conv calls inside each block.\n res_int_filters=64, # The number of filters used by each convolution.\n\n ####################\n # Latent variables #\n ####################\n # if true, then generate random noise and project to input for generator\n noise_channel=True,\n # The number of dimensions in the input noise vector.\n noise_dims=10,\n\n # If true, then one hot encode source image class and project as an\n # additional channel for the input to generator. This gives the generator\n # access to the class, which may help generation performance.\n condition_on_source_class=False,\n\n ########################\n # Loss Hyperparameters #\n ########################\n domain_loss_weight=1.0,\n style_transfer_loss_weight=1.0,\n\n ########################################################################\n # Encourages the transferred images to be similar to the source images #\n # using a configurable metric. 
#\n ########################################################################\n\n # The weight of the loss function encouraging the source and transferred\n # images to be similar. If set to 0, then the loss function is not used.\n transferred_similarity_loss_weight=0.0,\n\n # The type of loss used to encourage transferred and source image\n # similarity. Valid values include:\n # mpse: Mean Pairwise Squared Error\n # mse: Mean Squared Error\n # hinged_mse: Computes the mean squared error using squared differences\n # greater than hparams.transferred_similarity_max_diff\n # hinged_mae: Computes the mean absolute error using absolute\n # differences greater than hparams.transferred_similarity_max_diff.\n transferred_similarity_loss='mpse',\n\n # The maximum allowable difference between the source and target images.\n # This value is used, in effect, to produce a hinge loss. Note that the\n # range of values should be between 0 and 1.\n transferred_similarity_max_diff=0.4,\n\n ################################\n # Optimization Hyperparameters #\n ################################\n learning_rate=0.001,\n batch_size=32,\n lr_decay_steps=20000,\n lr_decay_rate=0.95,\n\n # Recomendation from the DCGAN paper:\n adam_beta1=0.5,\n clip_gradient_norm=5.0,\n\n # The number of times we run the discriminator train_op in a row.\n discriminator_steps=1,\n\n # The number of times we run the generator train_op in a row.\n generator_steps=1)\n\n if hparam_string:\n tf.logging.info('Parsing command line hparams: %s', hparam_string)\n hparams.parse(hparam_string)\n\n tf.logging.info('Final parsed hparams: %s', hparams.values())\n return hparams", "def build(self):\n return self.hyperparams.items()", "def make_params(config):\n params = copy.deepcopy(config.view.params)\n params.t2bins = np.arange(0, params.t2bin_max + 1e-4, params.t2bin_stepsize)\n params.out = make_Bunch(\"State and output of detection processing\") # outputs are not parameters, maybe separate \n return params", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)" ]
[ "0.7177107", "0.68647355", "0.66281545", "0.6522021", "0.6518543", "0.651809", "0.651809", "0.6490908", "0.64341015", "0.6414481", "0.6403292", "0.63531125", "0.6241054", "0.62265354", "0.62035", "0.6195846", "0.61952543", "0.61585575", "0.6135327", "0.6123834", "0.61211634", "0.611494", "0.6112285", "0.6109049", "0.6108987", "0.6056004", "0.60485643", "0.60371566", "0.6032182", "0.6009671" ]
0.713302
1
Method computes lookup tables of the cumulative ``galprop`` PDF defined by ``input_galaxy_table``.
def build_one_point_lookup_table(self, **kwargs):
    galaxy_table = kwargs['input_galaxy_table']
    prim_galprop_bins = kwargs['prim_galprop_bins']

    self.one_point_lookup_table = np.zeros(
        len(prim_galprop_bins)+1, dtype=object)

    binned_prim_galprop = np.digitize(
        galaxy_table[self.prim_galprop_key],
        self.prim_galprop_bins)

    for i in range(len(self.one_point_lookup_table)):
        idx_bini = np.where(binned_prim_galprop == i)[0]
        if model_helpers.custom_len(idx_bini) > self.minimum_sampling:
            gals_bini = galaxy_table[idx_bini]
            abcissa = np.arange(len(gals_bini))/float(len(gals_bini)-1)
            ordinates = np.sort(gals_bini[self.galprop_key])
            self.one_point_lookup_table[i] = (
                model_helpers.custom_spline(abcissa, ordinates, k=2)
                )

    # For all empty lookup tables, fill them with the nearest lookup table
    unfilled_lookup_table_idx = np.where(
        self.one_point_lookup_table == 0)[0]
    filled_lookup_table_idx = np.where(
        self.one_point_lookup_table != 0)[0]

    if len(unfilled_lookup_table_idx) > 0:
        msg = ("When building the one-point lookup table from input_galaxy_table, " +
            "there were some bins of prim_galprop_bins that contained fewer than " +
            str(self.minimum_sampling) + " galaxies. In such cases, the lookup table " +
            "of the nearest sufficiently populated bin will be chosen.")
        warn(msg)
        for idx in unfilled_lookup_table_idx:
            closest_filled_idx_idx = array_utils.find_idx_nearest_val(
                filled_lookup_table_idx, idx)
            closest_filled_idx = filled_lookup_table_idx[closest_filled_idx_idx]
            self.one_point_lookup_table[idx] = (
                self.one_point_lookup_table[closest_filled_idx])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
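To make the idea in the document above concrete: within each bin of the primary property, the lookup table is essentially an inverse CDF, that is, a spline through the sorted galprop values against their cumulative rank in [0, 1]. The toy sketch below reproduces that construction with plain numpy/scipy; the synthetic data, the bin edges, and the use of scipy's InterpolatedUnivariateSpline in place of the model_helpers.custom_spline wrapper are all assumptions for illustration.

    import numpy as np
    from scipy.interpolate import InterpolatedUnivariateSpline

    rng = np.random.default_rng(0)
    prim_galprop = rng.uniform(10., 15., size=1000)          # toy primary property
    galprop = rng.normal(loc=prim_galprop - 2., scale=0.3)   # toy secondary property

    bins = np.linspace(10., 15., 6)
    bin_index = np.digitize(prim_galprop, bins)

    lookup = {}
    for i in np.unique(bin_index):
        ordinates = np.sort(galprop[bin_index == i])
        abscissa = np.arange(len(ordinates)) / float(len(ordinates) - 1)
        # Quadratic spline through (cumulative rank, sorted value): an inverse CDF.
        lookup[i] = InterpolatedUnivariateSpline(abscissa, ordinates, k=2)

    # Drawing the median galprop value of, e.g., the third bin:
    print(lookup[3](0.5))

Under-populated bins would then be patched with the spline of the nearest populated bin, exactly as the warning branch in the document does.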
[ "def _mc_galprop(self, seed=None, **kwargs):\n model_helpers.update_param_dict(self, **kwargs)\n self._set_correlation_strength()\n\n if ('galaxy_table' in kwargs.keys()) & ('halos' in kwargs.keys()):\n msg = (\"The mc_\"+self.galprop_key+\" method accepts either \" + \n \"a halos keyword argument, or a galaxy_table keyword argument\" + \n \" but never both.\")\n raise KeyError(msg)\n elif 'galaxy_table' in kwargs.keys():\n galaxy_table = kwargs['galaxy_table']\n operative_sec_haloprop_key = (\n model_defaults.host_haloprop_prefix + self.sec_haloprop_key)\n elif 'halos' in kwargs.keys():\n galaxy_table = kwargs['halos']\n operative_sec_haloprop_key = self.sec_haloprop_key\n else:\n msg = (\"The mc_\"+self.galprop_key+\" requires either \" + \n \"a halos keyword argument, or a galaxy_table keyword argument\")\n raise KeyError(msg)\n\n self.add_new_haloprops(galaxy_table)\n\n # All at once, draw all the randoms we will need\n np.random.seed(seed=seed)\n all_randoms = np.random.random(len(galaxy_table)*2)\n galprop_cumprob = all_randoms[0:len(galaxy_table)]\n galprop_scatter = all_randoms[len(galaxy_table):]\n\n # Initialize the output array\n output_galprop = np.zeros(len(galaxy_table))\n\n # Determine binning and loop range\n if 'galaxy_table_slice_array' not in kwargs.keys():\n binned_prim_galprop = np.digitize(\n galaxy_table[self.prim_galprop_key], \n self.prim_galprop_bins)\n prim_galprop_loop_range = set(binned_prim_galprop)\n else:\n prim_galprop_loop_range = range(len(self.one_point_lookup_table))\n\n for i in prim_galprop_loop_range:\n\n # Determine the slice corresponding to the i^th prim_galprop bin\n if 'galaxy_table_slice_array' not in kwargs.keys():\n idx_bini = np.where(binned_prim_galprop==i)[0]\n num_bini = len(idx_bini)\n else:\n idx_bini = kwargs['galaxy_table_slice_array'][i]\n num_bini = len(galaxy_table[idx_bini])\n\n if len(idx_bini) > 0:\n # Fetch the appropriate number of randoms\n # for the i^th prim_galprop bin\n galprop_cumprob_bini = galprop_cumprob[idx_bini]\n galprop_scatter_bini = galprop_scatter[idx_bini]\n\n # Fetch the halos in the i^th prim_galprop bin, \n # and determine how they are sorted\n haloprop_bini = galaxy_table[idx_bini][operative_sec_haloprop_key]\n idx_sorted_haloprop_bini = np.argsort(haloprop_bini)\n\n galprop_bini = self._condition_matched_galprop(\n haloprop_bini[idx_sorted_haloprop_bini], \n galprop_cumprob_bini, i, galprop_scatter_bini, self.tol)\n\n # Assign the final values to the \n # appropriately sorted subarray of output_galprop\n output_galprop[idx_bini[idx_sorted_haloprop_bini]] = galprop_bini\n\n return output_galprop", "def make_derived_table(filename):\r\n column_keys, get_data = get_csv(filename)\r\n\r\n year_column = column_keys[1:].index('Year')\r\n pcg_column = column_keys[1:].index('PrimaryConditionGroup')\r\n \r\n #pcg_keys = list(PCG_LUT.keys())\r\n \r\n t0 = time.clock()\r\n \r\n NUM_GROUPS = 100\r\n num_rows = 0\r\n for group in range(NUM_GROUPS):\r\n derived_dict = {'ALL':{}, 'Y1':{}, 'Y2':{}, 'Y3':{}}\r\n print 'group=%d' % group\r\n _, get_data = get_csv(filename)\r\n for k,v in get_data():\r\n if (int(k) % NUM_GROUPS) != group:\r\n continue\r\n year = v[year_column]\r\n pcg = get_pcg_index(v[pcg_column])\r\n #if not v[pcg_column] in pcg_keys:\r\n # pcg_keys.append(v[pcg_column])\r\n #print '>', v[pcg_column]\r\n #print '\"%s\" => %d' % (v[pcg_column], pcg)\r\n \r\n if num_rows and num_rows % 10000 == 0:\r\n t = time.clock() - t0\r\n eta = int(t * (2668990 - num_rows)/num_rows)\r\n print ' %8d row (%4.1f%%) %7.1f 
sec, %4d rows/sec, eta = %6d sec' % (num_rows, \r\n 100.0 * num_rows/2668990, t, int(num_rows/t), eta) \r\n\r\n for y in (year, 'ALL'):\r\n if not k in derived_dict[y].keys():\r\n derived_dict[y][k] = [0, {}] \r\n derived_dict[y][k][0] += 1\r\n derived_dict[y][k][1][pcg] = derived_dict[y][k][1].get(pcg, 0) + 1 \r\n \r\n num_rows += 1\r\n \r\n print 'Coallescing' \r\n for year in derived_dict:\r\n for k in derived_dict[year].keys():\r\n if int(k) % NUM_GROUPS != group:\r\n continue\r\n derived_dict[year][k][1] = get_max_key(derived_dict[year][k][1]) \r\n pickled_path = make_group_name(group) \r\n pkl_file = open(pickled_path , 'wb')\r\n pickle.dump(derived_dict, pkl_file, -1) # Pickle the data using the highest protocol available.\r\n pkl_file.close() \r\n\r\n derived_dict = {'ALL':{}, 'Y1':{}, 'Y2':{}, 'Y3':{}} \r\n for group in range(NUM_GROUPS):\r\n pickled_path = make_group_name(group) \r\n pkl_file = open(pickled_path , 'rb')\r\n part_dict = pickle.load(pkl_file) \r\n pkl_file.close()\r\n for y,d in part_dict.items():\r\n for k,v in d.items():\r\n derived_dict[y][k] = (part_dict[y][k][0], part_dict[y][k][1]) \r\n\r\n if False:\r\n print '-' *80\r\n for k in pcg_keys:\r\n print \" '%s',\" % k \r\n exit() \r\n \r\n for year in derived_dict:\r\n derived_filename = '%s%s_%s' % (DERIVED_PREFIX, year, filename)\r\n data_writer = csv.writer(open(derived_filename , 'wb'), delimiter=',', quotechar='\"')\r\n data_writer.writerow(DERIVED_COLUMN_KEYS)\r\n for k in sorted(derived_dict[year].keys()):\r\n v = derived_dict[year][k]\r\n #print ' ', derived_dict[year][k], v2\r\n data_writer.writerow([k, str(v[0]), str(v[1])])", "def compute_lookuptable(self):\n\n if self.uselookuptable:\n # Evaluation lookup tables \n self.action_isok = np.zeros( ( self.nodes_n , self.actions_n ) , dtype = bool )\n self.x_next = np.zeros( ( self.nodes_n , self.actions_n , self.DS.n ) , dtype = float ) # lookup table for dynamic\n \n # For all state nodes \n for node in range( self.nodes_n ): \n \n x = self.nodes_state[ node , : ]\n \n # For all control actions\n for action in range( self.actions_n ):\n \n u = self.actions_input[ action , : ]\n \n # Compute next state for all inputs\n x_next = self.DS.fc( x , u ) * self.dt + x\n \n # validity of the options\n x_ok = self.DS.isavalidstate(x_next)\n u_ok = self.DS.isavalidinput(x,u)\n \n self.x_next[ node, action, : ] = x_next\n self.action_isok[ node, action] = ( u_ok & x_ok )", "def tabulate_pdf(self):\n\n from mitsuba.core import Float, Vector2f, ScalarVector2f\n\n extents = self.bounds.extents()\n endpoint = self.bounds.max - extents / ScalarVector2f(self.res)\n\n # Compute a set of nodes where the PDF should be evaluated\n x, y = ek.meshgrid(\n ek.linspace(Float, self.bounds.min.x, endpoint.x, self.res.x),\n ek.linspace(Float, self.bounds.min.y, endpoint.y, self.res.y)\n )\n\n endpoint = extents / ScalarVector2f(self.res)\n eps = 1e-4\n nx = ek.linspace(Float, eps, endpoint.x * (1 - eps), self.ires)\n ny = ek.linspace(Float, eps, endpoint.y * (1 - eps), self.ires)\n wx = [1 / (self.ires - 1)] * self.ires\n wy = [1 / (self.ires - 1)] * self.ires\n wx[0] = wx[-1] = wx[0] * .5\n wy[0] = wy[-1] = wy[0] * .5\n\n integral = 0\n\n self.histogram_start = time.time()\n for yi, dy in enumerate(ny):\n for xi, dx in enumerate(nx):\n xy = self.domain.map_forward(Vector2f(x + dx, y + dy))\n pdf = self.pdf_func(xy)\n integral = ek.fmadd(pdf, wx[xi] * wy[yi], integral)\n self.histogram_end = time.time()\n\n self.pdf = integral * (ek.hprod(extents / ScalarVector2f(self.res))\n * 
self.sample_count)\n\n # A few sanity checks\n pdf_min = ek.hmin(self.pdf) / self.sample_count\n if not pdf_min >= 0:\n self._log('Failure: Encountered a cell with a '\n 'negative PDF value: %f' % pdf_min)\n self.fail = True\n\n self.pdf_sum = ek.hsum(self.pdf) / self.sample_count\n if self.pdf_sum > 1.1:\n self._log('Failure: PDF integrates to a value greater '\n 'than 1.0: %f' % self.pdf_sum)\n self.fail = True", "def create_exgauss_lookup_table(self):\n return self.exgauss_cdf_nparray(range(self.xmin,self.xmax, self.dx)).tolist(), range(self.xmin,self.xmax, self.dx)", "def pc_project(\n mt: hl.MatrixTable,\n loadings_ht: hl.Table,\n loading_location: str = \"loadings\",\n af_location: str = \"pca_af\",\n) -> hl.Table:\n mt = pc_hwe_gt(mt, loadings_ht, loading_location, af_location)\n mt = mt.annotate_cols(scores=hl.agg.array_sum(mt.pca_loadings * mt.GTN))\n return mt.cols().select(\"scores\")", "def create_smarter_lookup_table(self, y=0.95):\n # First determine an approximate starting point for the lookup taqble by halving the max value till the point \n # where the cdf value is less than the cdf value we are looking for\n xold = self.xmax\n xnew = self.xmax\n y_calc = self.exgauss_cdf(xnew)\n while y_calc > y:\n xold = xnew\n xnew = xnew/2.\n y_calc = self.exgauss_cdf(xnew)\n \n # Make sure the interval over which this is being constructed is okay\n npts = 10. # Number of data pts in case the interval xold-xnew is smaller than self.dx\n if xold-xnew < self.dx:\n dx = int((xold-xnew)/npts)\n else: \n dx = self.dx\n # Now start building the lookup table from the value of x\n return self.exgauss_cdf_nparray(range(int(xnew),int(xold), dx)).tolist(), range(int(xnew),int(xold), dx)", "def _sql_gen_intermediate_pi_aggregate(params, table_name=\"df_e\"):\n\n gamma_cols_expr = \", \".join(params._gamma_cols)\n\n sql = f\"\"\"\n select {gamma_cols_expr}, sum(match_probability) as expected_num_matches, sum(1- match_probability) as expected_num_non_matches, count(*) as num_rows\n from {table_name}\n group by {gamma_cols_expr}\n \"\"\"\n return sql", "def _compute_pTable(self, expand=False, factor=False,\n simplify=False):\n if self._has(\"p\"):\n return\n if not self._has(\"k\"):\n self.kTable(expand=expand, factor=factor, simplify=simplify)\n if not self._has(\"m\"):\n self.multiplicities(expand=expand, factor=factor,\n simplify=simplify)\n p = Array3D(self._.d + 1)\n self._compute_parameters(p, self._.P, self._.m, integral=True,\n name=PARAMETER, sym=SYMBOL)\n self._.p = p\n self.check_handshake()", "def _engprop(l): # {{{1\n print(\" \\\\begin{tabular}[t]{rcrrl}\")\n print(\" \\\\multicolumn{4}{c}{\\\\small\"\n \"\\\\textbf{Laminate stacking}}\\\\\\\\[0.1em]\")\n print(\" \\\\toprule %% \\\\usepackage{booktabs}\")\n print(\" Layer & Weight & Angle & vf & Fiber type\\\\\\\\\")\n print(\" & [g/m$^2$] & [$\\\\circ$] & [\\\\%]\\\\\\\\\")\n print(\" \\\\midrule\")\n for ln, la in enumerate(l.layers, start=1):\n s = \" {} & {:4.0f} & {:5.0f} & {:.3g} & {}\\\\\\\\\"\n texfname = la.fiber.name.replace('_', '\\_')\n print(s.format(ln, la.fiber_weight, la.angle, la.vf*100, texfname))\n print(\" \\\\bottomrule\")\n print(\" \\\\end{tabular}\\\\hspace{0.02\\\\textwidth}\")\n print(\" \\\\begin{tabular}[t]{rrl}\")\n print(\" \\\\multicolumn{3}{c}{\\\\small\\\\textbf{Engineering\"\n \" properties}}\\\\\\\\[0.1em]\")\n print(\" \\\\toprule\")\n print(\" Property & Value & Dimension\\\\\\\\\")\n print(\" \\\\midrule\")\n print(\" $\\\\mathrm{{v_f}}$ & {:.3g} &\\\\%\\\\\\\\\".format(l.vf*100))\n print(\" 
$\\\\mathrm{{w_f}}$ & {:.3g} &\\\\%\\\\\\\\\".format(l.wf*100))\n print(\" thickness & {:.3g} & mm\\\\\\\\\".format(l.thickness))\n print(\" density & {:.3g} & g/cm$^3$\\\\\\\\\".format(l.ρ))\n s = \" weight & {:.0f} & g/m$^2$\\\\\\\\\"\n print(s.format(l.fiber_weight+l.resin_weight))\n print(\" resin & {:.0f} & g/m$^2$\\\\\\\\\".format(l.resin_weight))\n print(\" \\\\midrule\")\n print(\" $\\\\mathrm{{E_x}}$ & {:8.0f} & MPa\\\\\\\\\".format(l.Ex))\n print(\" $\\\\mathrm{{E_y}}$ & {:8.0f} & MPa\\\\\\\\\".format(l.Ey))\n print(\" $\\\\mathrm{{G_{{xy}}}}$ & {:8.0f} & MPa\\\\\\\\\".format(l.Gxy))\n print(\" $\\\\mathrm{{\\\\nu_{{xy}}}}$ & {:g} &-\\\\\\\\\".format(l.νxy))\n print(\" $\\\\mathrm{{\\\\nu_{{yx}}}}$ & {:g} &-\\\\\\\\\".format(l.νyx))\n s = \" $\\\\mathrm{{\\\\alpha_x}}$ & {:g} & K$^{{-1}}$\\\\\\\\\"\n print(s.format(l.αx))\n s = \" $\\\\mathrm{{\\\\alpha_y}}$ & {:g} & K$^{{-1}}$\\\\\\\\\"\n print(s.format(l.αy))\n print(\" \\\\bottomrule\")\n print(\" \\\\end{tabular}\")", "def do_latex_table_middle(self, tabletype, injkey):\n if tabletype == 'fiducial_fit_params':\n h0_params, h1_params = self.get_resulting_hypo_params(\n injkey=injkey\n )\n data_params = self.get_injected_params()\n for param in h0_params.keys():\n # Get the units for this parameter\n val, param_units = self.parse_pint_string(\n pint_string=h0_params[param]\n )\n # Get priors if they exists\n if 'gaussian' in self.all_params['h0_params'][param]['prior']:\n h0stddev, h0maximum = self.extract_gaussian(\n prior_string=self.all_params['h0_params'][\n param]['prior'],\n units=param_units\n )\n else:\n h0stddev = None\n h0maximum = None\n if 'gaussian' in self.all_params['h1_params'][param]['prior']:\n h1stddev, h1maximum = self.extract_gaussian(\n prior_string=self.all_params['h1_params'][\n param]['prior'],\n units=param_units\n )\n else:\n h1stddev = None\n h1maximum = None\n # Include injected parameter, fitted parameters and\n # differences with appropriate formatting.\n if data_params is not None:\n tableline = \" \"\n tableline += \"%s \"%self.tex_axis_label(param)\n if param == 'deltam31':\n tableline += r\" / $10^{-3}$ \"\n if param_units != 'dimensionless':\n tableline += \"(%s) &\"%self.tex_axis_label(param_units)\n else:\n tableline += \"&\"\n if param in data_params.keys():\n dataval = self.extract_paramval(\n injparams=data_params,\n systkey=param\n )\n if param == 'deltam31':\n dataval *= 1000.0\n if (np.abs(dataval) < 1e-2) and (dataval != 0.0):\n tableline += \"%.2e &\"%dataval\n else:\n tableline += \"%.3g &\"%dataval\n # If no injected parameter, show this and the\n # deltas with a line\n else:\n dataval = '--'\n tableline += \"%s &\"%dataval\n h0val = self.extract_paramval(\n injparams=h0_params,\n systkey=param\n )\n if param == 'deltam31':\n h0val *= 1000.0\n tableline += self.format_table_line(\n val=h0val,\n dataval=dataval,\n stddev=h0stddev,\n maximum=h0maximum\n )\n h1val = self.extract_paramval(\n injparams=h1_params,\n systkey=param\n )\n if param == 'deltam31':\n h1val *= 1000.0\n tableline += self.format_table_line(\n val=h1val,\n dataval=dataval,\n stddev=h1stddev,\n maximum=h1maximum,\n last=True\n )\n tableline += r\" \\\\\\\\ \\hline\\n\"\n self.texfile.write(tableline)\n # If no injected parameters it's much simpler\n else:\n h0val = self.extract_paramval(\n injparams=h0_params,\n systkey=param\n )\n h1val = self.extract_paramval(\n injparams=h1_params,\n systkey=param\n )\n if (np.abs(h0val) < 1e-2) and (h0val != 0.0):\n self.texfile.write(r\" %s & %.2e & %.2e\\n\"%(\n 
self.tex_axis_label(param), h0val, h1val))\n else:\n self.texfile.write(r\" %s & %.3g & %.3g\\n\"%(\n self.tex_axis_label(param), h0val, h1val))\n elif tabletype == \"fiducial_fit_metrics\":\n h0_fid_metric = self.fid_values[injkey][\n 'h0_fit_to_%s'%(self.labels.dict['data'])]['metric_val']\n h1_fid_metric = self.fid_values[injkey][\n 'h1_fit_to_%s'%(self.labels.dict['data'])]['metric_val']\n\n # Need the type of metric here. Doesn't matter which\n # fit that comes from so just choose h0_fit_to_h0_fid\n # since it will always exist.\n metric_type = self.values[injkey][\n 'h0_fit_to_h0_fid']['metric_val']['type']\n # In the case of likelihood, the maximum metric is the better fit.\n # With chi2 metrics the opposite is true, and so we must multiply\n # everything by -1 in order to apply the same treatment.\n if 'chi2' not in metric_type:\n logging.info(\n \"Converting likelihood metric to chi2 equivalent.\"\n )\n h0_fid_metric *= -1\n h1_fid_metric *= -1\n\n # If truth is known, report the fits the correct way round\n if self.labels.dict['data_name'] is not None:\n if self.labels.dict['data_name'] in \\\n self.labels.dict['h0_name']:\n delta = h1_fid_metric-h0_fid_metric\n elif self.labels.dict['data_name'] in \\\n self.labels.dict['h1_name']:\n delta = h0_fid_metric-h1_fid_metric\n else:\n logging.warning(\n \"Truth is known but could not be identified in \"\n \"either of the hypotheses. The difference between\"\n \" the best fit metrics will just be reported as \"\n \"positive and so will not necessarily reflect if \"\n \"the truth was recovered.\"\n )\n if h1_fid_metric > h0_fid_metric:\n delta = h0_fid_metric-h1_fid_metric\n else:\n delta = h1_fid_metric-h0_fid_metric\n # Else just report it as delta between best fits\n else:\n if h1_fid_metric > h0_fid_metric:\n delta = h0_fid_metric-h1_fid_metric\n else:\n delta = h1_fid_metric-h0_fid_metric\n # Write this in the file\n newline = \" %.3g \"%h0_fid_metric\n newline += \"& %.3g \"%h1_fid_metric\n newline += \"& %.3g \"%delta\n newline += r\"\\\\\\\\ \\hline\\n\"\n self.texfile.write(newline)\n else:\n raise ValueError(\n \"This function is only for adding the content to metric\"\n \" or fit param tables in LaTeX. Got type %s\"%tabletype\n )", "def calc_table(resolution, i_dc_max, u_pn_max, lut_fn, log_fn=None):\n grid_res = [resolution, resolution, resolution]\n\n if log_fn is not None:\n log_file = open(log_fn, mode='w')\n else:\n log_file = sys.stderr\n\n i_dc_range = np.linspace(0, i_dc_max, num=grid_res[0])\n u_pn_range = np.linspace(0, u_pn_max, num=grid_res[1])\n u_bc_range = np.linspace(0, 0.5, num=grid_res[2])\n\n opt_mode = np.zeros(grid_res) # optimizer return code (error code, 0 means success)\n grid_res.append(4)\n sw_times = np.zeros(grid_res)\n n_not_solved = 0\n\n log_file.write('resolution: {}\\n'.format(resolution))\n log_file.write('i_dc_max: {}\\n'.format(i_dc_max))\n log_file.write('u_pn_max: {}\\n'.format(u_pn_max))\n\n time.clock()\n total_pts = len(i_dc_range) * len(u_pn_range) * len(u_bc_range)\n pts_done = 0\n\n # sweep the 3D grid, u_bc must be the inner most loop for convergence reasons\n for (k1, i_dc) in enumerate(i_dc_range):\n log_file.write('---------------------\\n')\n for (k2, u_pn) in enumerate(u_pn_range):\n log_file.write('--------\\n')\n log_file.write('k1={0:} k2={1:}\\n'.format(k1,k2))\n\n last_t_opt = []\n\n # traverse starting with u2=05 for which we operate like a conventional DAB were we have a closed\n # analytic solution. 
This is then used as starting point for the next point\n for (k3, u_bc) in reversed(list(enumerate(u_bc_range))):\n u_ac = 1 # this is our normalization ref voltage\n u_ab = u_ac - u_bc\n u = [u_ab, u_bc, u_pn]\n log_file.write('u={0:} i_dc={1:.7f}\\n'.format(u, i_dc))\n\n t_opt, m = calc_t_opt(u, i_dc, i_dc, last_t_opt, do_print=False)\n\n if m == 0:\n # double check the validity of the obtained solution\n m = check_solution(u, t_opt, i_dc)\n\n opt_mode[k1, k2, k3] = m\n sw_times[k1, k2, k3, 0:4] = t_opt\n\n if m != 0:\n n_not_solved += 1\n log_file.write('^ not solved\\n')\n # mark point in table so the user can investigate the problem\n else :\n last_t_opt = t_opt # keep a copy of our initial conditions\n # show a progress bar in the terminal\n pts_done = pts_done + 1\n suffix = 'elapsed: {}s'.format(int(time.clock()))\n print_progress(pts_done, total_pts, prefix='Progress', suffix=suffix, decimals=1, bar_length=80)\n\n log_file.write('\\nnumber of points not solved: {}\\n'.format(n_not_solved))\n if log_fn is not None:\n log_file.close()\n sys.stderr.write('\\nnumber of points not solved: {}\\n'.format(n_not_solved))\n # write LUT data to file\n export_csv(lut_fn, grid_res, i_dc_range, u_pn_range, u_bc_range, sw_times)", "def table_gen(NamesL_pairs, p_pL, m_mL, p_mL, m_pL, p_valsL, p_vals_BonferoniL, RatiosL, p_valsL_divergent_convergent,\n p_valsL_divergent_convergent_BonferoniL, RatiosL_divergent_convergent, output_table):\n datafile = open(output_table, \"w\")\n datafile.write(\n \"Feature_1\" + '\\t' + \"Feature_2\" + \"\\t\" + \"plus_plus\" + '\\t' + \"minus_minus\" + '\\t' + \"plus_minus\" + '\\t' + \"minus_plus\" + '\\t' + \"p_value_same_opposite\" + '\\t' + \"p-value_same_opposite_Bonferoni_corrected\" + '\\t' + \"Ratio_same_opposite\" + '\\t' + \"p_value_divergent_convergent\" + '\\t' + \"p_value_divergent_convergent Bonferoni corrected\" + '\\t' + \"Ratio divergent convergent\" + '\\n')\n for i in range(len(NamesL_pairs)):\n datafile.write(\n NamesL_pairs[i][0] + '\\t' + NamesL_pairs[i][1] + '\\t' + str(p_pL[i]) + '\\t' + str(m_mL[i]) + '\\t' + str(\n p_mL[i]) + '\\t' + str(m_pL[i]) + '\\t' + str(p_valsL[i]) + '\\t' + str(p_vals_BonferoniL[i]) + '\\t' + str(\n RatiosL[i]) + '\\t' + str(p_valsL_divergent_convergent[i]) + '\\t' + str(\n p_valsL_divergent_convergent_BonferoniL[i]) + '\\t' + str(RatiosL_divergent_convergent[i]) + '\\n')\n datafile.close()\n return", "def calc_prior_path_prob(self, output_filenm=\"\"):\n logger.info(\"Calculating prior map\")\n programs_map = {}\n unique_cluster_ids = set() # have to do this since the assigned cluster ids doesnt seems to be contiguous or start from 0 or end at K-1\n for c in self.args.cluster_assignments:\n unique_cluster_ids.add(c)\n for c in unique_cluster_ids:\n for _, ((e1, r), e2_list) in enumerate(tqdm((self.train_map.items()))):\n if self.args.cluster_assignments[self.entity_vocab[e1]] != c:\n # if this entity does not belong to this cluster, don't consider.\n continue\n if c not in programs_map:\n programs_map[c] = {}\n if r not in programs_map[c]:\n programs_map[c][r] = {}\n all_paths_around_e1 = self.all_paths[e1]\n nn_answers = e2_list\n for nn_ans in nn_answers:\n programs = self.get_programs(e1, nn_ans, all_paths_around_e1)\n for p in programs:\n p = tuple(p)\n if len(p) == 1:\n if p[0] == r: # don't store query relation\n continue\n if p not in programs_map[c][r]:\n programs_map[c][r][p] = 0\n programs_map[c][r][p] += 1\n for c, r in programs_map.items():\n for r, path_counts in programs_map[c].items():\n 
sum_path_counts = 0\n for p, p_c in path_counts.items():\n sum_path_counts += p_c\n for p, p_c in path_counts.items():\n programs_map[c][r][p] = p_c / sum_path_counts\n\n if not output_filenm:\n dir_name = os.path.join(args.data_dir, \"data\", self.args.dataset_name, \"linkage={}\".format(self.args.linkage))\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n output_filenm = os.path.join(dir_name, \"path_prior_map.pkl\")\n\n logger.info(\"Dumping path prior pickle at {}\".format(output_filenm))\n with open(output_filenm, \"wb\") as fout:\n pickle.dump(programs_map, fout)", "def map_cell_property(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n counter = 0\n fignum = 1\n if p.gal_index == 'all':\n for gal_index in range(GR.N_gal):\n\n if counter == 0:\n fig, axes = plt.subplots(3, 3, figsize=(20,15))\n axs = [axes[0,0],axes[0,1],axes[0,2],axes[1,0],axes[1,1],axes[1,2],axes[2,0],axes[2,1],axes[2,2]]\n counter = 9\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n print('Now mapping %s' % gal_ob.name)\n isrf_ob = gal.isrf(gal_index)\n\n # Load SKIRT output\n wavelengths,bin_width = aux.read_probe_wavelengths(isrf_ob.name)\n N_start,N_stop = aux.FUV_index(wavelengths)\n image_data,units = isrf_ob._get_cut_probe(orientation=p.orientation)\n\n # Plot\n ax1 = axs[9 - counter]\n if p.prop == 'FUV':\n # FUV_xy_image = np.array([np.trapz(image_data[N_start:N_stop,:,:],x=wavelengths[N_start:N_stop]) \\\n # for i in range(len(df))])\n FUV_xy_image = image_data[N_start:N_stop,:,:].sum(axis=0) * 4 * np.pi\n FUV_xy_image = ndimage.rotate(FUV_xy_image, 0, reshape=True)\n # FUV_xy_image = np.fliplr(FUV_xy_image)\n FUV_xy_image[FUV_xy_image <= 0] = np.min(FUV_xy_image[FUV_xy_image > 0])\n im = ax1.imshow(np.log10(FUV_xy_image),\\\n extent=[-isrf_ob.radius,isrf_ob.radius,-isrf_ob.radius,isrf_ob.radius],\\\n vmin=p.vmin,\\\n cmap='twilight')\n lab = 'FUV flux [W/m$^2$/micron]'\n\n # pdb.set_trace()\n\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-0.8*gal_ob.radius,0.8*gal_ob.radius])\n ax1.set_ylim([-0.8*gal_ob.radius,0.8*gal_ob.radius])\n if p.prop == 'm':\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n counter -= 1\n\n\n if counter == 0:\n cbar = fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.95, label=lab)\n # fig.colorbar(im,shrink=0.8,label=lab)\n\n if counter == 0 or gal_index == GR.N_gal-1:\n figname = p.d_plot + 'cell_data/map_%s_%s_gals_%s_%i.png' % (p.prop,p.z1,p.orientation,fignum)\n print('Saving in ' + figname)\n # plt.tight_layout()\n plt.savefig(figname, format='png', dpi=250, facecolor='w')\n fignum += 1\n pdb.set_trace()\n else:\n fig, ax1 = plt.subplots(figsize=(10,10))\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type='cell_data')\n print(simgas.keys())\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n\n # Plot\n Rmax = max_scale/2\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D = np.log10(map2D)\n if not p.log: map2D[map2D < p.vmin] = p.vmin/2 #np.min(map2D[map2D > 0])\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,cmap=p.cmap)\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n ax1.set_ylim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n 
fig.colorbar(im,shrink=0.8,ax=ax1,label=lab)\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n\n print('Saving in ' + p.d_plot + 'sim_data/map_%s_G%i.png' % (p.prop,p.gal_index))\n if not os.path.isdir(p.d_plot + 'cell_data/'): os.mkdir(p.d_plot + 'cell_data/')\n plt.savefig(p.d_plot + 'cell_data/map_%s_G%i.png' % (p.prop,p.gal_index), format='png', dpi=250, facecolor='w')", "def dp_cal_and_pro_only(foods, cal_goal, pro_goal):\n macros = init_two_d_array((cal_goal, pro_goal), 999999999)\n foods_used = init_two_d_array((cal_goal, pro_goal), {})\n\n for i in range(cal_goal):\n for j in range(pro_goal):\n for n in range(len(foods)):\n food = foods[n]\n if (int(food['calories']) > i and int(food['protein']) > j):\n continue\n if (macros[i - int(food['calories'])]\n [j - int(food['protein'])]\n == 999999999):\n prev_cost = 0\n prev_foods_used = {}\n else:\n prev_cost = (macros[i - int(food['calories'])]\n [j - int(food['protein'])])\n prev_foods_used = \\\n (foods_used[i - int(food['calories'])]\n [j - int(food['protein'])]).copy()\n new_cal = calories(foods, prev_foods_used) + food['calories']\n new_pro = protein(foods, prev_foods_used) + food['protein']\n if (macros[i][j] > prev_cost + food['serving_cost']\n and new_cal > i - 50 and new_cal < i + 10\n and new_pro > j - 5 and new_pro < j + 5):\n macros[i][j] = prev_cost + food['serving_cost']\n try:\n prev_foods_used[n] += 1\n except KeyError:\n prev_foods_used[n] = 1\n foods_used[i][j] = prev_foods_used\n return foods_used[cal_goal - 1][pro_goal - 1]", "def computeProp(self):\n self.chem = {}\n for key in self.config.C:\n if key in ['P', 'T', 'Z', 'DZ']:\n continue\n self.chem[key] = chemistry.ConstituentProperties(key)\n\n # nAtm = len(self.gas[self.config.C['P']])\n self.property = []\n for op in self.config.LP:\n self.property.append([])\n zOffset = 0.0\n iOffset = 0\n psep = 1.0E6\n for i, zv in enumerate(self.gas[self.config.C['Z']]): # find the nearest z value at p_ref\n P = self.gas[self.config.C['P']][i]\n if abs(P - self.config.p_ref) < psep:\n psep = abs(P - self.config.p_ref)\n iOffset = i\n zOffset = self.gas[self.config.C['Z']][iOffset]\n z_at_p_ref = self.config.Req\n\n for i, zv in enumerate(self.gas[self.config.C['Z']]):\n T = self.gas[self.config.C['T']][i]\n P = self.gas[self.config.C['P']][i]\n self.property[self.config.LP['P']].append(P)\n self.property[self.config.LP['Z']].append(zv)\n rr = z_at_p_ref + zv - zOffset\n # note that this is the \"actual\"z along equator referenced to planet center (aka radius)\n self.property[self.config.LP['R']].append(rr)\n # ##set mean amu\n amulyr = 0.0\n for key in self.chem:\n amulyr += self.chem[key].amu * self.gas[self.config.C[key]][i]\n self.property[self.config.LP['AMU']].append(amulyr)\n # ##set GM pre-calc (normalized further down) and get lapse rate\n if not i:\n self.property[self.config.LP['GM']].append(0.0)\n self.property[self.config.LP['LAPSE']].append(0.0)\n self.property[self.config.LP['LAPSEP']].append(0.0)\n else:\n rho = (amulyr * P) / (chemistry.R * T)\n dr = abs(zv - self.gas[self.config.C['Z']][i - 1])\n dV = 4.0 * np.pi * (rr**2) * dr\n dM = 1.0e11 * rho * dV\n GdM = self.property[self.config.LP['GM']][i - 1] + chemistry.GravConst * dM\n # in km3/s2\n # mass added as you make way into atmosphere by radius r (times G)\n self.property[self.config.LP['GM']].append(GdM)\n dT = abs(T - self.gas[self.config.C['T']][i - 1])\n dP = abs(P - self.gas[self.config.C['P']][i - 1])\n self.property[self.config.LP['LAPSE']].append(dT / dr)\n 
self.property[self.config.LP['LAPSEP']].append(dT / dP)\n # ##set refractivity and index of refraction\n refrlyr = 0.0\n for key in self.chem:\n refrlyr += self.chem[key].refractivity(T=T) * self.gas[self.config.C[key]][i]\n refrlyr = refrlyr * P * (293.0 / T)\n self.property[self.config.LP['REFR']].append(refrlyr)\n nlyr = refrlyr / 1.0E6 + 1.0\n self.property[self.config.LP['N']].append(nlyr)\n\n # ##Now need to normalize GM to planet and calculate scale height (H)\n GMnorm = self.property[self.config.LP['GM']][iOffset] # G*(Mass added by p_ref)\n for i, mv in enumerate(self.property[self.config.LP['GM']]):\n gm = self.config.GM_ref - (mv - GMnorm)\n self.property[self.config.LP['GM']][i] = gm\n little_g = gm / self.property[self.config.LP['R']][i]**2\n m_bar = self.property[self.config.LP['AMU']][i]\n T = self.gas[self.config.C['T']][i]\n self.property[self.config.LP['H']].append((chemistry.R * T) /\n (little_g * m_bar) / 1000.0)\n self.property[self.config.LP['g']].append(little_g)\n self.property = np.array(self.property)", "def lookup_capacity(lookup, environment, ant_type, frequency,\n bandwidth, generation):\n if (environment, ant_type, frequency, bandwidth, generation) not in lookup:\n raise KeyError(\"Combination %s not found in lookup table\",\n (environment, ant_type, frequency, bandwidth, generation))\n\n density_capacities = lookup[\n (environment, ant_type, frequency, bandwidth, generation)\n ]\n\n return density_capacities", "def get_table(casedata, controldata, locus):\n import numpy, pandas\n tables = [] # - a list of lists\n for casecol,controlcol in pairs.items():\n # get ploidy of pop\n pop = casecol.split('.FREQ')[0]\n pop_ploidy = ploidy[pop]\n\n # get case-control frequencies of ALT allele\n case_freq = get_freq(casedata.loc[locus, casecol])\n cntrl_freq = get_freq(controldata.loc[locus, controlcol])\n\n # see if either freq is np.nan, if so, skip this pop\n if sum([x!=x for x in [case_freq, cntrl_freq]]) > 0:\n continue\n\n # collate info for locus (create contingency table data)\n t = []\n for freq in [cntrl_freq, case_freq]:\n t.extend([(1-freq)*pop_ploidy,\n freq*pop_ploidy])\n tables.append(t)\n # return contingency tables (elements of list) for this locus stratified by population (list index)\n return [numpy.reshape(x.tolist(), (2, 2)) for x in numpy.asarray(tables)]", "def compute_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n probabilities[possible_target_location] = integrate.quad(\n integral_function,\n -np.inf, np.inf,\n args=(possible_target_location,Dprime_map[fixation]),\n epsabs=0,\n limit=100,\n full_output=1\n )[0] #MOD Dprime_map deleted\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted", "def lookup_cost(lookup, strategy, environment):\n if (strategy, environment) not in lookup:\n raise KeyError(\"Combination %s not found in lookup table\",\n (strategy, environment))\n\n density_capacities = lookup[\n (strategy, environment)\n ]\n\n return density_capacities", "def set_lookup_qn(diagram, p_cm, p_max, gammas, skip=True, verbose=0):\n\n lookup_p = set_lookup_p(p_max, p_cm, diagram, skip)\n lookup_g = set_lookup_g(gammas, diagram)\n\n # TODO: A more elegant solution for combining lookup_p and lookup_g is welcome\n # maybe Multiindex.from_product()\n tmp = it.product(lookup_p, lookup_g)\n lookup_qn = []\n for t in tmp:\n lookup_qn.append(t[0]+t[1])\n lookup_qn = DataFrame(lookup_qn, 
columns=['p_{so}', 'p_{si}', '\\gamma_{so}', '\\gamma_{si}'])\n# lookup_qn['p_{so}'] = qn['p_{so}'].apply(np.array)\n# lookup_qn['p_{si}'] = qn['p_{si}'].apply(np.array)\n \n return lookup_qn", "def _get_feature_tables_for_protein(feature_table, accession) -> str:\n if not feature_table:\n return \"\"\n\n if accession not in feature_table:\n return \"\"\n\n ft_str = \"\"\n for key in feature_table[accession].keys():\n if key == \"VARIANT\":\n for ft_var in feature_table[accession][key]:\n if len(ft_var[0]) == 3: # CASE Replacement\n ft_str += (\n ('''\\nFT VARIANT {position}\\n''' +\n '''FT /note=\"{from_aa} -> {to_aa} (in GEN_BY_PG; {desc})\"\\n''' +\n '''FT /id=\"CUSTOM_{id}\"''').format(\n position=ft_var[0][2], from_aa=ft_var[0][0], to_aa=ft_var[0][1],\n desc=ft_var[1], id=ft_var[2]\n )\n )\n elif len(ft_var[0]) == 2: # CASE Replacement\n ft_str += (\n ('''\\nFT VARIANT {position}\\n''' +\n '''FT /note=\"Missing (in GEN_BY_PG; {desc})\"\\n''' +\n '''FT /id=\"CUSTOM_{id}\"''').format(\n position=ft_var[0][1],\n desc=ft_var[1], id=ft_var[2]\n )\n )\n\n return ft_str", "def compute_perlin(self, x, y, permutation_table):\n\n xi, yi = x.astype(int), y.astype(int)\n xg, yg = x - xi, y - yi\n xf, yf = self.compute_fade(xg), self.compute_fade(yg)\n\n p00 = permutation_table[permutation_table[xi] + yi]\n p01 = permutation_table[permutation_table[xi] + yi + 1]\n p10 = permutation_table[permutation_table[xi + 1] + yi]\n p11 = permutation_table[permutation_table[xi + 1] + yi + 1]\n\n n00 = self.compute_gradient(p00, xg, yg)\n n01 = self.compute_gradient(p01, xg, yg - 1)\n n10 = self.compute_gradient(p10, xg - 1, yg)\n n11 = self.compute_gradient(p11, xg - 1, yg - 1)\n\n x1 = self.compute_lerp(n00, n10, xf)\n x2 = self.compute_lerp(n01, n11, xf)\n return self.compute_lerp(x1, x2, yf)", "def merge_tables():\r\n filename = \"ppxf_results_best.dat\"\r\n s1 = np.genfromtxt(filename, usecols=(0,), dtype=None).tolist()\r\n sref = s1[:]\r\n sref.sort()\r\n x, y = get_positions(sref).T\r\n r = np.sqrt(x * x + y * y)\r\n pa = np.rad2deg(np.arctan2(x, y))\r\n pa[pa < 0.] += 360.\r\n data1 = np.loadtxt(filename, usecols=np.arange(1, 11))\r\n ##########################################################################\r\n # Account for difference in resolution\r\n # Not used anymore because the resolution is now matched in pPXF\r\n # fwhm_dif = (2.5 - 2.1) * c / 5500. 
/ 2.3548\r\n # data1[:,2] = np.sqrt(data1[:,2]**2 - fwhm_dif**2)\r\n ##########################################################################\r\n data1 = match_data(s1, sref, data1)\r\n results = np.column_stack((sref, x, y, r, pa, data1))\r\n header = ['FILE', \"X[kpc]\", \"Y[kpc]\",\r\n \"R[kpc]\", \"PA\",\r\n 'V', 'dV', 'S', 'dS', 'h3', 'dh3',\r\n 'h4', 'dh4', 'chi/DOF', 'S/N']\r\n with open(outtable, \"w\") as f:\r\n for i, field in enumerate(header):\r\n print \"# {0} : {1}\\n\".format(i, field)\r\n f.write(\"# {0} : {1}\\n\".format(i, field))\r\n np.savetxt(f, results, fmt=\"%s\")\r\n return", "def PDF(gal_index,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n # PDF PLACEHOLDER\n lognHs = np.linspace(-5,8,200)\n total_PDF = np.zeros(len(lognHs))\n\n # READ CELL DATA\n gal_ob = gal.galaxy(gal_index)\n df = gal_ob.cell_data.get_dataframe()\n\n bins = 50\n\n # READ FIT PARAMS OF PDF\n if '_arepoPDF' in p.table_ext:\n fit_params_SFR = np.load(p.d_table+'fragment/PDFs%s_%ipc.npy' % (p.table_ext,p.res),allow_pickle=True).item()\n fit_params = fit_params_SFR['fit_params']\n\n # OPTIONAL : SELECT PART OF FITS\n # fit_params_SFR['SFR_bins'] = fit_params_SFR['SFR_bins'][0:-2]\n # fit_params = fit_params[:,0:-2,:]\n # fit_params_collapse = fit_params_collapse[:,0:-2,:]\n\n fit_lognH_bins = fit_params_SFR['n_vw_bins'] # log\n fit_nSFR_bins = fit_params_SFR['SFR_bins'] # log\n fit_lognH_bins_c = fit_lognH_bins[0:-1] + (fit_lognH_bins[-1]-fit_lognH_bins[-2])/2\n fit_nSFR_bins_c = fit_nSFR_bins[0:-1] + (fit_nSFR_bins[-1]-fit_nSFR_bins[-2])/2\n lognSFR_bins = fit_nSFR_bins#np.linspace(fit_nSFR_bins.min(),fit_nSFR_bins.max(),bins)\n print('log nH bins:')\n print(fit_lognH_bins_c)\n print('log SFR bins:')\n print(fit_nSFR_bins_c)\n if '_arepoPDF' not in p.table_ext:\n lognSFR_bins = np.linspace(-10,1,bins)\n\n # BIN CELL DATA TO REDUCE COMPUTATION TIME\n lognH_bins = np.linspace(-8,2,bins)\n lognH_bins_c = lognH_bins[0:-1] + (lognH_bins[1] - lognH_bins[0])/2\n lognSFR_bins_c = lognSFR_bins[0:-1] + (lognSFR_bins[1] - lognSFR_bins[0])/2\n\n # ADD THIS LOWER VALUE TO INCLUDE ALL CELLS (except density = 0)\n lognH_bins[0] = -30\n lognSFR_bins[0] = -30\n lognSFR_bins[-1] = 10\n\n df.SFR_density[df.SFR_density <= 10.**lognSFR_bins.min()] = 10.**(lognSFR_bins.min()+1)\n df.SFR_density[np.isnan(df.SFR_density)] = 10.**(lognSFR_bins.min()+1)\n\n if not p.add:\n fig = plt.figure(figsize=(15,6))\n ax = fig.add_subplot(1,2,1)\n\n print('Number of cells: ',len(df))\n if p.ow == False:\n try:\n PDF = pd.read_pickle(p.d_XL_data + 'data/cell_data/PDFs/%s%s_%s%s_%s' % (p.sim_name,p.sim_run,gal_ob.name,p.table_ext,p.res))\n total_PDF = PDF['total_PDF'].values\n lognHs = PDF['lognHs'].values\n except:\n p.ow = True\n if p.ow == True:\n print('Re-calculating PDF')\n i = 0\n poly1 = 0\n N_cells = 0\n \n for i_lognH in range(len(lognH_bins)-1):\n for i_lognSFR in range(len(lognSFR_bins)-1):\n \n df_cut = df[(df.nH >= 10**(lognH_bins[i_lognH])) & \\\n (df.nH < 10**(lognH_bins[i_lognH+1]))].reset_index(drop=True)\n if i_lognSFR > 0:\n # (for the first bin in nSFR, doesn't matter if cell has no nSFR)\n df_cut = df_cut[(df_cut.SFR_density >= 10**(lognSFR_bins[i_lognSFR])) & \\\n (df_cut.SFR_density < 10**(lognSFR_bins[i_lognSFR+1]))].reset_index(drop=True)\n N_cells += len(df_cut)\n lognH_mean, lognSFR = lognH_bins_c[i_lognH], lognSFR_bins_c[i_lognSFR]\n \n if '_arepoPDF' in p.table_ext:\n # print(lognH_mean,lognSFR,len(df_cut))\n if (lognH_bins[i_lognH] >= fit_lognH_bins[0]):\n 
print(lognH_bins[i_lognH],len(df_cut))\n i_fit_lognH_bins = np.argmin(np.abs(fit_lognH_bins_c - lognH_mean))\n i_fit_lognSFR_bins = np.argmin(np.abs(fit_nSFR_bins_c - lognSFR))\n fit_params_1 = fit_params[i_fit_lognH_bins,i_fit_lognSFR_bins,:]\n print(lognH_mean,lognSFR,fit_params_1)\n \n if np.sum(fit_params_1) != 0:\n PDF_integrated = 10.**aux.parametric_PDF(lognHs,lognH_mean,fit_params_1[1],fit_params_1[2])\n if fit_params_1[2] == -1.5:\n PDF_integrated = 10.**aux.parametric_PDF(lognHs,fit_params_1[0],fit_params_1[1],fit_params_1[2])\n poly1 += 1\n \n if np.sum(fit_params_1) == 0:\n print('uhoh',lognH_mean,lognSFR)\n PDF_integrated = aux.lognormal_PDF(10.**lognHs,10.**lognH_mean,Mach=1)\n \n if (lognH_mean < fit_lognH_bins[0]):\n PDF_integrated = aux.lognormal_PDF(10.**lognHs,10.**lognH_mean,Mach=10)\n PDF_integrated[np.isnan(PDF_integrated)] = 0\n if (lognH_mean < -4):\n PDF_integrated = aux.lognormal_PDF(10.**lognHs,10.**lognH_mean,Mach=1)\n PDF_integrated[np.isnan(PDF_integrated)] = 0\n \n if p.table_ext == '_M10':\n PDF_integrated = aux.lognormal_PDF(10.**lognHs,10.**lognH_mean,Mach=10)\n PDF_integrated[np.isnan(PDF_integrated)] = 0\n \n # Add to total PDF, weigthed by the mass of that cell\n total_PDF += PDF_integrated * np.sum(df_cut.m)/np.sum(df.m)\n if not p.add: ax.plot(10.**lognHs,PDF_integrated * np.sum(df_cut.m)/np.sum(df.m),color='grey',lw=1,alpha=0.3)\n if np.isnan(np.sum(total_PDF)):\n print(np.sum(df_cut.m)/np.sum(df.m),PDF_integrated)\n pdb.set_trace()\n i += 1\n # if i == 10: pdb.set_trace()\n \n print('Total number of cells processed: ',N_cells)\n print('Total number of bins: ',bins**2)\n print('Number of bins with parametric PDFs: %i' % (poly1))\n total_PDF = total_PDF / np.sum(total_PDF)\n PDF = pd.DataFrame({'lognHs':lognHs,'total_PDF':total_PDF})\n PDF.to_pickle(p.d_XL_data + 'data/cell_data/PDFs/%s%s_%s%s_%s' % (p.sim_name,p.sim_run,gal_ob.name,p.table_ext,p.res))\n\n print('TEST!!!')\n total_PDF = total_PDF[(lognHs >= -4) & (lognHs <= 7)]\n lognHs = lognHs[(lognHs >= -4) & (lognHs <= 7)]\n total_PDF = total_PDF / np.sum(total_PDF)\n if not p.add:\n # First figure: One panel of individual binned PDFs and one panel of total PDF\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel(getlabel('lnH'))\n ax.set_ylabel('dM/dlognH')\n ax.set_ylim([1e-12,1e-1])\n ax.set_xlim([1e-4,1e7])\n \n ax2 = fig.add_subplot(1,2,2)\n ax2.plot(10.**lognHs,total_PDF)\n ax2.set_xscale('log')\n ax2.set_yscale('log')\n ax2.set_xlabel(getlabel('lnH'))\n ax2.set_ylabel('dM/dlognH')\n ax2.set_ylim([1e-4,1e-1])\n ax2.set_xlim([1e-4,1e5])\n \n if not os.path.isdir(p.d_plot + 'cell_data/PDFs/'): os.mkdir(p.d_plot + 'cell_data/PDFs/') \n plt.savefig(p.d_plot + 'cell_data/PDFs/PDF_%s%s_%s.png' % (gal_ob.name,p.table_ext,p.res), format='png', dpi=250, facecolor='w')\n\n labels = {'_M10':'Mach = 10','_arepoPDF_M51':'AREPO-M51 parametrized PDF','_arepoPDF_CMZ':'AREPO-CMZ parametrized PDF'}\n\n # New figure: One panel of PDF and cumulative mass function (optional)\n if p.add:\n ax1 = p.ax#plt.gca()\n else:\n fig,ax1 = plt.subplots(figsize=(8,6))\n ax1.plot(lognHs,total_PDF,ls=p.ls,lw=2.5,color=p.color,label=labels[p.table_ext])\n ax1.set_yscale('log')\n if not p.add:\n ax1.set_xlabel('log nH [cm$^{-3}$]')\n ax1.set_ylabel('Mass fraction per bin')\n ax1.set_xlim([-4,7])\n ax1.set_ylim([1e-4,1e-1])\n ax1.grid(axis='x')\n #if p.add: ax1.legend()\n if not p.add:\n ax2 = ax1.twinx()\n ax2.plot(lognHs,np.cumsum(total_PDF),'--')\n ax2.grid(axis='y')\n ax2.set_ylim([0,1])\n ax2.set_ylabel('Cumulative 
mass fraction')\n ax2.text(0.4,0.1,'Mass fraction at nH > 1e3: %.1f %%' % (100*np.sum(total_PDF[lognHs >= 3])),\\\n transform=ax1.transAxes,fontsize=15,bbox=dict(facecolor='white', alpha=0.7))\n if not os.path.isdir(p.d_plot + 'cell_data/PDFs'): os.mkdir(p.d_plot + 'cell_data/PDFs') \n if not p.add: plt.savefig(p.d_plot + 'cell_data/PDFs/simple_PDF_%s%s_%s.png' % (gal_ob.name,p.table_ext,p.res), format='png', dpi=250, facecolor='w')\n\n # pdb.set_trace()", "def calc_resources(self):\n self.popula = self.energy = self.popula_used = self.energy_used = 0\n self.cnt_public = self.cnt_shop = self.cnt_1 = self.cnt_2 = self.cnt_3 = self.cnt_4 = self.cnt_5 = self.cnt_office = 0\n self.popula += self.extra_pop\n for i in range(20):\n b = self.b[i]\n if b == 'T':\n self.popula += self.f[i] * 2\n self.energy_used += 1\n elif b == 'O':\n self.popula_used += 1\n self.energy_used += 1\n self.cnt_office += self.f[i]\n elif b == 'U':\n self.popula_used += 1\n self.cnt_public += 1\n elif b == 'S':\n self.energy_used += 1\n self.cnt_shop += 1\n elif b == '1':\n self.popula += 1\n self.energy += 1\n self.popula_used += 1\n self.cnt_1 += 1\n elif b == '2':\n self.popula_used += 1\n self.cnt_2 += 1\n elif b == '3':\n self.popula_used += 1\n self.cnt_3 += 1\n elif b == '4':\n self.popula += 2\n self.popula_used += 1\n self.cnt_4 += 1\n elif b == '5':\n self.energy += 2\n self.popula_used += 1\n self.cnt_5 += 1\n elif b == 'A':\n self.energy += 2\n self.popula_used += 1\n elif b == 'F':\n self.energy += 3\n self.popula_used += 1\n elif b == 'G':\n self.popula += 1\n if 'tvst' in args.exp:\n self.popula += self.cnt_shop\n if 'ward' in args.exp:\n self.popula += 3\n if 'elec' in args.exp:\n self.energy += 3\n if 'capi' in args.exp:\n self.popula_used += 2\n if 'fire' in args.exp:\n self.popula_used += 1\n if 'park' in args.exp:\n self.popula_used += 1", "def set_lookup_g(gammas, diagram):\n\n if diagram == 'C20':\n lookup_so = it.product([g for gamma in gammas for g in gamma[:-1]])\n lookup_so, lookup_si = it.tee(lookup_so, 2)\n elif diagram == 'C2+':\n lookup_so = it.product([5])\n lookup_so, lookup_si = it.tee(lookup_so, 2)\n elif diagram == 'C3+':\n lookup_so = it.product([5], [5]) \n lookup_si = it.product([g for gamma in gammas for g in gamma[:-1]])\n elif diagram.startswith('C4'):\n lookup_so = it.product([5], [5]) \n lookup_so, lookup_si = it.tee(lookup_so, 2)\n else:\n print 'in set_lookup_g: diagram unknown! 
Quantum numbers corrupted.'\n return\n# indices = [[1,2,3],[10,11,12],[13,14,15]]\n# lookup_g2 = [list(it.product([i[j] for i in indices], repeat=2)) for j in range(len(indices[0]))]\n# lookup_g = [item for sublist in lookup_g2 for item in sublist]\n\n lookup_g = it.product(lookup_so, lookup_si)\n return list(lookup_g)", "def cloudy_table_map(x_index='lognHs',y_index='lognSFRs',**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n lookup_table = cloudy_library._restore_lookup_table()\n print(lookup_table.nH_mw.min())\n print(lookup_table.nH_mw.max())\n\n fig,ax = plt.subplots(figsize=(8,5))\n\n key_const1, key_const2, key_const3, key_const4 = list(p.keep_const.keys())[0],list(p.keep_const.keys())[1],list(p.keep_const.keys())[2],list(p.keep_const.keys())[3]\n value_const1, value_const2, value_const3, value_const4 = list(p.keep_const.values())[0],list(p.keep_const.values())[1],list(p.keep_const.values())[2],list(p.keep_const.values())[3]\n\n # for key, value in p.cloudy_param.items():\n # key = key\n # value = value\n\n # cloudy_parameters = np.array(['logNHs','lognHs','lognSFRs','logZs','logFUVs'])\n\n # x_index = cloudy_parameters[(cloudy_parameters != key) & (cloudy_parameters != 'Machs')][0]\n # y_index = cloudy_parameters[(cloudy_parameters != key) & (cloudy_parameters != 'Machs')][1]\n\n print('%s table values:' % key_const1)\n print(np.unique(lookup_table[key_const1]))\n print('kept fixed at %f' % value_const1)\n\n print('%s table values:' % key_const2)\n print(np.unique(lookup_table[key_const2]))\n print('kept fixed at %f' % value_const2)\n\n print('%s table values:' % key_const3)\n lookup_table[key_const3] = np.round(lookup_table[key_const3]*10.)/10.\n print(np.unique(lookup_table[key_const3]))\n print('kept fixed at %f' % value_const3)\n\n print('%s table values:' % key_const4)\n print(np.unique(lookup_table[key_const4]))\n print('kept fixed at %f' % value_const4)\n\n lookup_table_cut = lookup_table[(lookup_table[key_const1] == value_const1) & \\\n (lookup_table[key_const2] == value_const2) & \\\n (lookup_table[key_const3] == value_const3) & \\\n (lookup_table[key_const4] == value_const4)]\n x, y = lookup_table_cut[x_index].values, lookup_table_cut[y_index].values\n\n X, Y = np.meshgrid(np.unique(x), np.unique(y))\n print(lookup_table_cut.nH_mw.min())\n print(lookup_table_cut.nH_mw.max())\n\n\n if p.line == '[CII]158_CO(1-0)':\n line_lum = 10.**lookup_table_cut['[CII]158'].values / 10.**lookup_table_cut['CO(1-0)'].values\n line_lum = np.log10(line_lum)\n if p.line == 'alpha_CO':\n line_lum = 1e4 / aux.Lsun_to_K_km_s_pc2(10.**lookup_table_cut['CO(1-0)'].values,'CO(1-0)') \n try:\n line_lum = lookup_table_cut[p.line].values\n except:\n pass\n\n lum = line_lum.reshape([len(np.unique(x)), len(np.unique(y))]).T\n\n vmin = np.min(lum)\n vmax = np.max(lum)\n print(vmin,vmax)\n if p.zlim:\n vmin = p.zlim[0]\n vmax = p.zlim[1]\n lum[lum < vmin] = vmin\n lum[lum > vmax] = vmax\n if p.log: \n print('AAAA')\n lum = np.log10(lum)\n vmin,vmax = np.log10(vmin),np.log10(vmax)\n\n print('Highest and lowest value to be mapped:', np.min(lum), np.max(lum))\n print(vmin,vmax)\n\n cf = ax.contourf(X,Y, lum, cmap=\"jet\", vmin=vmin, vmax=vmax, levels=30, lw=0, rstride=1, cstride=1,alpha=0.8)\n if getlabel(p.line) == '':\n if p.log: plt.colorbar(cf,label='log '+p.line)\n if not p.log: plt.colorbar(cf,label=p.line)\n else: \n plt.colorbar(cf,label=getlabel(p.line))\n \n # Show where grid points are, but only where lum > 0\n 
failed_models = lookup_table_cut['fail'].values\n ax.plot(x[failed_models == 0],y[failed_models == 0],'x',ms=5,mew=2,color='w')\n\n translate_labels = {'lognHs':'lnH','logNHs':'lNH','logFUVs':'lG0','logZs':'lZ','lognSFRs':'lSFR_density'}\n ax.set_xlabel(getlabel(translate_labels[x_index]))\n ax.set_ylabel('\\n\\n' + getlabel(translate_labels[y_index]))\n if p.ylim: ax.set_ylim(p.ylim)\n if p.xlim: ax.set_xlim(p.xlim)\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig(p.d_plot + 'look-up/cloudy_table%s_%s.png' % (p.grid_ext,p.line), format='png', dpi=300)", "def calories_protein(og, fg):\n\n return 0.994 * fg * real_extract(og, fg)" ]
[ "0.5527577", "0.48766977", "0.48666936", "0.48461375", "0.48401406", "0.48253617", "0.48167393", "0.47914568", "0.4777298", "0.46711516", "0.46498317", "0.46371827", "0.46279138", "0.46278507", "0.4626222", "0.46053305", "0.45921257", "0.45880622", "0.4578638", "0.45648557", "0.45647752", "0.45577258", "0.45500246", "0.454028", "0.4537981", "0.45366195", "0.45360622", "0.4533559", "0.4513368", "0.45079282" ]
0.63473904
0
Method creates ``self.param_dict`` regulating the strength of the correlation between sec_haloprop and galprop at each value of prim_galprop.
def _build_param_dict(self, **kwargs): if 'correlation_strength' in kwargs.keys(): correlation_strength = kwargs['correlation_strength'] if custom_len(correlation_strength) > 1: try: self.correlation_strength_abcissa = kwargs['correlation_strength_abcissa'] except KeyError: msg = ("If correlation_strength keyword is passed to the constructor, \n" + "you must also pass a correlation_strength_abcissa keyword argument " + "storing an array of the same length as correlation_strength.") raise(msg) else: self.correlation_strength_abcissa = [0] correlation_strength = [correlation_strength] self._param_dict_keys = ['correlation_param' + str(i+1) for i in range(len(correlation_strength))] self.param_dict = {key:value for key, value in zip(self._param_dict_keys, correlation_strength)} else: self.param_dict = {'correlation_param1': 1.0} self._set_correlation_strength()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_log: np.ndarray = 4 * np.random.rand(9) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(9)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_log: np.ndarray = 4 * np.random.rand(3) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(3)) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_kvhh', 'g_cav', 'g_kca', 'g_nap']\n gX_log: np.ndarray = 4 * np.random.rand(5) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(5)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_name: List[str] = list(itertools.compress(gX_name, list(self.channel_bool.values())[:9]))\n gX_log: np.ndarray = 4 * np.random.rand(len(gX_name)) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(len(gX_name))) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_name: List[str] = list(itertools.compress(gR_name, list(self.channel_bool.values())[9:12]))\n gR_log: np.ndarray = 4 * np.random.rand(len(gR_name)) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(len(gR_name))) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n\n if self.channel_bool['ca']:\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n param_dict.update(tCa_dict)\n\n return param_dict", "def _mc_galprop(self, seed=None, **kwargs):\n model_helpers.update_param_dict(self, **kwargs)\n self._set_correlation_strength()\n\n if ('galaxy_table' in kwargs.keys()) & ('halos' in kwargs.keys()):\n msg = (\"The mc_\"+self.galprop_key+\" method accepts either \" + \n \"a halos keyword argument, or a galaxy_table keyword argument\" + \n \" but never both.\")\n raise KeyError(msg)\n elif 'galaxy_table' in kwargs.keys():\n galaxy_table = kwargs['galaxy_table']\n operative_sec_haloprop_key = (\n model_defaults.host_haloprop_prefix + self.sec_haloprop_key)\n elif 'halos' in kwargs.keys():\n galaxy_table = kwargs['halos']\n operative_sec_haloprop_key = self.sec_haloprop_key\n else:\n msg = (\"The mc_\"+self.galprop_key+\" requires either \" + \n \"a halos keyword argument, or a galaxy_table keyword argument\")\n raise KeyError(msg)\n\n self.add_new_haloprops(galaxy_table)\n\n # All at once, draw all the randoms we will need\n np.random.seed(seed=seed)\n all_randoms = np.random.random(len(galaxy_table)*2)\n galprop_cumprob = all_randoms[0:len(galaxy_table)]\n galprop_scatter = all_randoms[len(galaxy_table):]\n\n # Initialize the output array\n output_galprop = 
np.zeros(len(galaxy_table))\n\n # Determine binning and loop range\n if 'galaxy_table_slice_array' not in kwargs.keys():\n binned_prim_galprop = np.digitize(\n galaxy_table[self.prim_galprop_key], \n self.prim_galprop_bins)\n prim_galprop_loop_range = set(binned_prim_galprop)\n else:\n prim_galprop_loop_range = range(len(self.one_point_lookup_table))\n\n for i in prim_galprop_loop_range:\n\n # Determine the slice corresponding to the i^th prim_galprop bin\n if 'galaxy_table_slice_array' not in kwargs.keys():\n idx_bini = np.where(binned_prim_galprop==i)[0]\n num_bini = len(idx_bini)\n else:\n idx_bini = kwargs['galaxy_table_slice_array'][i]\n num_bini = len(galaxy_table[idx_bini])\n\n if len(idx_bini) > 0:\n # Fetch the appropriate number of randoms\n # for the i^th prim_galprop bin\n galprop_cumprob_bini = galprop_cumprob[idx_bini]\n galprop_scatter_bini = galprop_scatter[idx_bini]\n\n # Fetch the halos in the i^th prim_galprop bin, \n # and determine how they are sorted\n haloprop_bini = galaxy_table[idx_bini][operative_sec_haloprop_key]\n idx_sorted_haloprop_bini = np.argsort(haloprop_bini)\n\n galprop_bini = self._condition_matched_galprop(\n haloprop_bini[idx_sorted_haloprop_bini], \n galprop_cumprob_bini, i, galprop_scatter_bini, self.tol)\n\n # Assign the final values to the \n # appropriately sorted subarray of output_galprop\n output_galprop[idx_bini[idx_sorted_haloprop_bini]] = galprop_bini\n\n return output_galprop", "def polarParams(pol, chord, cl_lin_method='leastsquare', DS_constants='OpenFAST', tau=None):\n # Return interpolant\n fPolar = pol.interpolant(variables=['cl','cd','cm','fs','cl_inv','cl_fs'], radians=True)\n\n p=dict()\n p['Polar'] = pol # backup\n p['fPolar'] = fPolar\n\n # Linear region\n linear_region = np.array([-5, 10])*np.pi/180\n Cl_slope, alpha_0 = pol.cl_linear_slope(window=linear_region, method=cl_lin_method, radians=True)\n #print('Cl_slope',Cl_slope, '[1/rad] - alpha_0', alpha_0*180/np.pi,'[deg]')\n\n p['alpha_0'] = alpha_0 # TODO HARMONIZATION WITH DS\n p['Cl_slope'] = Cl_slope # TODO HARMONIZATION WITH DS\n p['alpha_range'] = None\n p['alpha_range_lin'] = None\n\n # Dynamic stall\n p.update(dynstall_mhh_param_from_polar(pol, chord, constants=DS_constants))\n p.update(dynstall_oye_param_from_polar(pol, tau=tau)) # TODO\n return p", "def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.NUM_AVG_SAMPLES,\n r'ScansToAverage>([\\d]+)</ScansToAverage>',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Scans to Average\",\n description=\"Number of samples to average (must be even)\",\n range=INT16,\n startup_param=True,\n direct_access=False,\n default_value=4,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.MIN_COND_FREQ,\n r'MinimumCondFreq>([\\d]+)</MinimumCondFreq',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Minimum Conductivity Frequency\",\n range=INT16,\n description=\"Minimum conductivity frequency to enable pump turn-on.\",\n startup_param=True,\n direct_access=False,\n default_value=500,\n units=Units.HERTZ,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_DELAY,\n r'PumpDelay>([\\d]+)</PumpDelay',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Delay\",\n range=INT16,\n description=\"Time to wait after minimum conductivity frequency is reached before turning pump on.\",\n startup_param=True,\n 
direct_access=False,\n default_value=60,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.AUTO_RUN,\n r'AutoRun>(.*)</AutoRun',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Auto Run\",\n description=\"Enable automatic logging when power is applied: (true | false).\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.IGNORE_SWITCH,\n r'IgnoreSwitch>(.*)</IgnoreSwitch',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Ignore Switch\",\n description=\"Disable magnetic switch position for starting or stopping logging: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OPTODE,\n r'OPTODE>(.*)</OPTODE',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Optode Attached\",\n description=\"Enable optode: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.VOLT1,\n r'ExtVolt1>(.*)</ExtVolt1',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Volt 1\",\n description=\"Enable external voltage 1: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n\n self._build_ctd_specific_params()", "def initialize(self):\n params = {}\n for i in range(1, len(self.layer_dimensions)):\n params['b_' + str(i)] = np.ones((self.layer_dimensions[i], 1))\n if self.he_initialization:\n params['W_' + str(i)] = np.random.randn(self.layer_dimensions[i],\n self.layer_dimensions[i - 1]) * np.sqrt(\n 2 / self.layer_dimensions[i - 1])\n else:\n params['W_' + str(i)] = np.random.rand(self.layer_dimensions[i], self.layer_dimensions[i - 1]) - 0.5\n return params", "def _set_correlation_strength(self):\n\n if hasattr(self, 'correlation_strength_abcissa'):\n abcissa = self.correlation_strength_abcissa\n ordinates = [self.param_dict['correlation_param'+str(i+1)] for i in range(len(abcissa))]\n correlation_strength_spline = model_helpers.custom_spline(abcissa, ordinates, k=custom_len(abcissa)-1)\n self.correlation_strength = correlation_strength_spline(self.prim_galprop_bins)\n else:\n self.correlation_strength = np.repeat(self.param_dict['correlation_param1'], len(self.prim_galprop_bins))\n\n self.correlation_strength[self.correlation_strength > 1] = 1\n self.correlation_strength[self.correlation_strength <- 1] = -1\n\n self.correlation_strength = np.append(\n self.correlation_strength, self.correlation_strength[-1])", "def param_init(self, sig=0.01):\n self.rhos = np.ones(self.Ndim)\n self.a = np.random.rand(self.Ndim, self.Nhidden)\n self.c = np.random.rand(self.Nhidden)\n self.W = np.random.randn(self.Nhidden, self.Ndim) * sig\n self.alphas = np.zeros((self.Ndim, self.Ncomponents))\n self.mus = np.zeros((self.Ndim, self.Ncomponents))\n self.sigmas = np.zeros((self.Ndim, 
self.Ncomponents))\n self.optimize_params = [self.rhos, self.c, self.W]\n\n types = ['alpha', 'mu', 'sigma']\n self.bs = {}\n self.Vs = {}\n for t in types:\n self.bs[t] = np.random.randn(self.Ndim, self.Ncomponents) * sig\n self.Vs[t] = np.random.randn(self.Ndim, self.Nhidden,\n self.Ncomponents) * sig\n self.optimize_params.append(self.bs[t])\n self.optimize_params.append(self.Vs[t])", "def get_params(self, deep=False):\n sampling_params = {'n_dim': self.n_dim,\n 'simplex_sampling': self.simplex_sampling,\n 'within_simplex_sampling': self.within_simplex_sampling,\n 'gaussian_component': self.gaussian_component}\n return {'ss_params': sampling_params,\n **RandomStateMixin.get_params(self, deep)}", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def sample_parameters_given_hyper(self, gen_seed=0):\n if type(gen_seed) is not int:\n raise TypeError(\"gen_seed should be an int\")\n\n rng = random.Random(gen_seed)\n\n hypers = self.get_hypers()\n s = hypers[b's']\n r = hypers[b'r']\n nu = hypers[b'nu']\n m = hypers[b'mu']\n\n rho = rng.gammavariate(nu/2.0, s)\n mu = rng.normalvariate(m, (r/rho)**.5)\n\n assert(rho > 0)\n\n params = {'mu': mu, 'rho': rho}\n\n return params", "def getGPEParams(self):\n outKeysScaleDouble = ['R', 'gamma_C', 'gamma_R', 'g_C', 'g_R', 'k',\n 'Pth']\n outKeysScaleSingle = outKeysScaleDouble + ['gamma_nl']\n outKeysScale = outKeysScaleSingle if self.singleComp else\\\n outKeysScaleDouble\n outKeys = ['charL', 'charT']\n out = {key: self.__dict__[key + '_scaled'] for key in outKeysScale}\n for key in outKeys:\n out[key] = self.__dict__[key]\n return out", "def _set_init_param_dict(self):\n\n self.param_dict = {}\n\n try:\n suppress_warning = self._suppress_repeated_param_warning\n except AttributeError:\n suppress_warning = False\n msg = (\"\\n\\nThe param_dict key %s appears in more than one component model.\\n\"\n \"This is permissible, but if you are seeing this message you should be sure you \"\n \"understand it.\\nIn particular, double-check that this parameter does not have \"\n \"conflicting meanings across components.\\n\"\n \"\\nIf you do not wish to see this message every time you instantiate, \\n\"\n \"simply attach a _suppress_repeated_param_warning attribute \\n\"\n \"to any of your component models and set this variable to ``True``.\\n\")\n\n for component_model in self.model_dictionary.values():\n\n if not hasattr(component_model, 'param_dict'):\n component_model.param_dict = {}\n intersection = set(self.param_dict) & set(component_model.param_dict)\n if intersection != set():\n for key in intersection:\n if suppress_warning is False:\n warn(msg % key)\n\n for key, value in component_model.param_dict.iteritems():\n self.param_dict[key] = value\n\n self._init_param_dict = copy(self.param_dict)", "def get_hyperparams_dict(self, lr, gamma):\n hyperparams_dict = Storage.BASE_HYPERPARAMS_DICT\n hyperparams_dict[\"learning_rate\"] = lr\n hyperparams_dict[\"gamma\"] = gamma\n return hyperparams_dict", "def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n 
self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)", "def get_params(self) -> Dict:\n params: Dict = {}\n params['g_leak'] = self.leak.get_g()\n params['g_kvhh'] = self.kvhh.get_g()\n params['g_cav'] = self.cav.get_g()\n params['g_kca'] = self.kca.get_g()\n params['g_nap'] = self.nap.get_g()\n params['t_ca'] = self.tau_ca\n return params", "def add_to_dict(param_dict):\n ### Sample - Int\n sample_s = param_dict['ml_args'].sample_s\n ### Sample - Mr\n sample_Mr = param_dict['ml_args'].sample_Mr\n ## Sample volume\n # Units (Mpc/h)**3\n volume_sample = { '18': 37820 / 0.01396,\n '19': 6046016.60311 ,\n '20': 2.40481e7 ,\n '21': 8.79151e7 }\n vol_mr = volume_sample[sample_s]\n ##\n ## Choice of Centrals and Satellites\n cens = int(1)\n sats = int(0)\n ## Other constants\n # Speed of light - In km/s\n speed_c = ac.c.to(u.km/u.s).value\n ## Number of CPU's to use\n cpu_number = int(cpu_count() * param_dict['cpu_frac'])\n ##\n ## Plotting constants\n plot_dict = { 'size_label':23,\n 'size_title':25,\n 'color_ham' :'red',\n 'color_dyn' :'blue'}\n ##\n ## Catalogue Prefix string\n catl_str_fig = param_dict['ml_args'].catl_alg_comp_fig_str()\n ##\n ## Saving to `param_dict`\n param_dict['sample_s' ] = sample_s\n param_dict['sample_Mr' ] = sample_Mr\n param_dict['vol_mr' ] = vol_mr\n param_dict['cens' ] = cens\n param_dict['sats' ] = sats\n param_dict['speed_c' ] = speed_c\n param_dict['cpu_number' ] = cpu_number\n param_dict['plot_dict' ] = plot_dict\n param_dict['catl_str_fig'] = catl_str_fig\n\n return param_dict", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def evaluate_reco_param(self):\n evals = self.input_binning['true_energy'].weighted_centers.magnitude\n n_e = len(self.input_binning['true_energy'].weighted_centers.magnitude)\n n_cz = len(self.input_binning['true_coszen'].weighted_centers.magnitude)\n eval_dict = deepcopy(self.param_dict)\n for flavintgroup, dim_dict in eval_dict.items():\n for dim, dist_list in dim_dict.items():\n for dist_prop_dict in dist_list:\n for dist_prop in dist_prop_dict.keys():\n if dist_prop == 'dist':\n continue\n if callable(dist_prop_dict[dist_prop]):\n func = dist_prop_dict[dist_prop]\n vals = func(evals)\n dist_prop_dict[dist_prop] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n elif isinstance(dist_prop_dict[dist_prop], dict):\n assert dist_prop == 'kwargs'\n for kwarg in dist_prop_dict['kwargs'].keys():\n func = dist_prop_dict['kwargs'][kwarg]\n vals = func(evals)\n dist_prop_dict['kwargs'][kwarg] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n # Now check for consistency, to not have to loop over all dict\n # entries again at a later point in time\n self.check_reco_dist_consistency(dist_list)\n return eval_dict", "def init_params():\n p = {}\n \n # p['rootFolder'] = 'C:/Users/Umberto Gostoli/SPHSU/Social Care Model II'\n # p['rootFolder'] = 'N:/Social Care Model Paper III'\n \n p['noPolicySim'] = False\n p['multiprocessing'] = True\n p['numberProcessors'] = 9\n p['numRepeats'] = 3\n \n p['startYear'] = 1860\n p['endYear'] = 2040\n p['thePresent'] = 2012\n p['statsCollectFrom'] = 1990\n p['regressionCollectFrom'] = 1960 \n p['implementPoliciesFromYear'] = 2020\n p['yearOutcome'] = 2015\n \n p['favouriteSeed'] = 123\n p['loadFromFile'] = False\n p['verboseDebugging'] = False\n p['singleRunGraphs'] = False\n p['saveChecks'] = True\n p['getCheckVariablesAtYear'] = 2015\n # To change through command-line arguments\n\n p['numberPolicyParameters'] = 2\n p['valuesPerParam'] = 1\n p['numberScenarios'] = 3\n \n ############ Policy Parameters #######################\n p['incomeCareParam'] = 0.0005 #[0.00025 - 0.001]\n p['taxBreakRate'] = 0.0\n p['ageOfRetirement'] = 65\n p['socialSupportLevel'] = 5\n # p['educationCosts']\n #############################################################\n p['socialCareCreditShare'] = 0.0\n p['maxWtWChildAge'] = 5\n # The basics: starting population and year, etc.\n \n p['discountingFactor'] = 0.03\n \n \n p['initialPop'] = 600 \n \n p['minStartAge'] = 24\n p['maxStartAge'] = 45\n p['numberClasses'] = 5\n p['socialClasses'] = ['unskilled', 'skilled', 'lower', 'middle', 'upper']\n p['initialClassShares'] = [0.2, 0.25, 0.3, 0.2, 0.05]\n p['initialUnemployment'] = [0.25, 0.2, 0.15, 0.1, 0.1]\n p['unemploymentAgeBandParam'] = 0.3\n \n # doDeath function parameters\n p['mortalityBias'] = 0.85 # After 1950\n p['careNeedBias'] = 0.9\n p['unmetCareNeedBias'] = 0.5\n p['baseDieProb'] = 0.0001\n p['babyDieProb'] = 0.005\n p['maleAgeScaling'] = 14.0\n p['maleAgeDieProb'] = 0.00021\n p['femaleAgeScaling'] = 15.5\n p['femaleAgeDieProb'] = 0.00019\n \n p['orphansRelocationParam'] = 0.5\n \n # doBirths function parameters\n p['minPregnancyAge'] = 17\n p['maxPregnancyAge'] = 42\n p['growingPopBirthProb'] 
= 0.215\n p['fertilityCorrector'] = 1.0\n p['fertilityBias'] = 0.9\n \n # careTransitions function parameters\n p['zeroYearCare'] = 80.0\n p['childcareDecreaseRate'] = 0.25\n p['personCareProb'] = 0.0008\n p['maleAgeCareScaling'] = 18.0 # p['maleAgeCareProb'] = 0.0008\n p['femaleAgeCareScaling'] = 19.0 # p['femaleAgeCareProb'] = 0.0008\n p['baseCareProb'] = 0.0002\n p['careBias'] = 0.9\n p['careTransitionRate'] = 0.7\n\n p['unmetNeedExponent'] = 1.0 # 0.005 #[0.005 - 0.02]\n \n p['numCareLevels'] = 5\n p['careLevelNames'] = ['none','low','moderate','substantial','critical']\n p['careDemandInHours'] = [ 0.0, 8.0, 16.0, 32.0, 80.0 ]\n p['quantumCare'] = 4.0\n \n # careSupplies getCare and probSuppliers function parameters\n \n ######## Key parameter 1 ##############\n \n \n p['weeklyHours'] = 40.0\n \n \n p['priceChildCare'] = 0.76 # 6 \n p['schoolAge'] = 5\n p['maxFormalChildcareHours'] = 48\n p['schoolHours'] = 30\n p['freeChildcareHours'] = 15\n p['workingParentsFreeChildcareHours'] = 30\n p['minAgeStartChildCareSupport'] = 3\n p['minAgeStartChildCareSupportByIncome'] = 2\n p['maxHouseholdIncomeChildCareSupport'] = 40 # 320\n \n ######## Key parameter 2 ##############\n # 5: No public supply \n \n p['retiredHours'] = [48.0, 36.0, 20.0, 10.0] # 60.0\n p['studentHours'] = [24.0, 16.0, 8.0, 4.0]\n p['teenAgersHours'] = [16.0, 0.0, 0.0, 0.0]\n p['unemployedHours'] = [32.0, 24.0, 16.0, 8.0]\n p['employedHours'] = [28.0, 20.0, 12.0, 8.0]\n p['formalCareDiscountFactor'] = 0.5\n \n p['socialNetworkDistances'] = [0.0, 1.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0]\n p['networkDistanceParam'] = 2.0\n p['socialCareWeightBias'] = 1.0\n p['unmetCareNeedDiscountParam'] = 0.5\n p['shareUnmetNeedDiscountParam'] = 0.5\n # p['pastShareUnmetNeedWeight'] = 0.5\n \n \n \n p['networkSizeParam'] = 10.0 # 1.0\n \n p['careSupplyBias'] = 0.5\n p['careIncomeParam'] = 0.001\n \n # Hospitalization Costs\n p['qalyBeta'] = 0.18\n p['qalyAlpha'] = 1.5\n p['qalyDiscountRate'] = 0.035\n p['qalyIndexes'] = [1.0, 0.8, 0.6, 0.4, 0.2]\n p['unmetCareHealthParam'] = 0.1\n p['hospitalizationParam'] = 0.5\n p['needLevelParam'] = 2.0\n p['unmetSocialCareParam'] = 2.0\n p['costHospitalizationPerDay'] = 400\n \n # ageTransitions, enterWorkForce and marketWage functions parameters\n p['ageTeenagers'] = 12\n p['minWorkingAge'] = 16\n \n ######## Key parameter 3 ##############\n \n p['careBankingSchemeOn'] = False\n p['socialCareBankingAge'] = 65\n \n p['absoluteCreditQuantity'] = False\n p['quantityYearlyIncrease'] = 0.0\n p['socialCareCreditQuantity'] = 0\n p['kinshipNetworkCarePropension'] = 0.5\n p['volunteersCarePropensionCoefficient'] = 0.01\n p['pensionContributionRate'] = 0.05\n \n p['hillHealthLevelThreshold'] = 3\n p['seriouslyHillSupportRate'] = 0.5\n \n ### Prices ####\n p['pricePublicSocialCare'] = 20.0 # [2.55] # 20\n p['priceSocialCare'] = 17.0 # [2.29] # 18\n p['taxBrackets'] = [663, 228, 0] # [28.16, 110.23] # [221, 865]\n p['taxBandsNumber'] = 3\n p['bandsTaxationRates'] = [0.4, 0.2, 0.0] # [0.0, 0.2, 0.4]\n # Tax Break Policy\n\n \n p['pensionWage'] = [5.0, 7.0, 10.0, 13.0, 18.0] # [0.64, 0.89, 1.27, 1.66, 2.29] # \n p['incomeInitialLevels'] = [5.0, 7.0, 9.0, 11.0, 14.0] #[0.64, 0.89, 1.15, 1.40, 1.78] # \n p['incomeFinalLevels'] = [10.0, 15.0, 22.0, 33.0, 50.0] #[1.27, 1.91, 2.80, 4.21, 6.37] # \n p['educationCosts'] = [0.0, 100.0, 150.0, 200.0] #[0.0, 12.74, 19.12, 25.49] # \n \n # Priced growth #####\n p['wageGrowthRate'] = 1.0 # 1.01338 # \n\n p['incomeGrowthRate'] = [0.4, 0.35, 0.35, 0.3, 0.25]\n \n # SES inter-generational 
mobility parameters\n p['leaveHomeStudentsProb'] = 0.5\n \n p['eduWageSensitivity'] = 0.2 # 0.5\n p['eduRankSensitivity'] = 3.0 # 5.0\n p['costantIncomeParam'] = 80.0 # 20.0\n p['costantEduParam'] = 10.0 # 10.0\n p['careEducationParam'] = 0.005 # 0.04\n \n \n \n # p['incEduExp'] = 0.25\n p['educationLevels'] = ['GCSE', 'A-Level', 'HND', 'Degree', 'Higher Degree']\n p['workingAge'] = [16, 18, 20, 22, 24]\n \n # doDivorce function parameters\n p['basicDivorceRate'] = 0.06\n p['variableDivorce'] = 0.06\n p['divorceModifierByDecade'] = [ 0.0, 1.0, 0.9, 0.5, 0.4, 0.2, 0.1, 0.03, 0.01, 0.001, 0.001, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n p['divorceBias'] = 1.0\n \n # doMarriages function parameters\n p['deltageProb'] = [0.0, 0.1, 0.25, 0.4, 0.2, 0.05]\n p['incomeMarriageParam'] = 0.025\n p['studentFactorParam'] = 0.5\n ######## Key parameter 4 ##############\n p['betaGeoExp'] = 2.0 #[1.0 - 4.0]\n \n p['betaSocExp'] = 2.0\n p['rankGenderBias'] = 0.5\n p['basicMaleMarriageProb'] = 0.9\n p['maleMarriageModifierByDecade'] = [ 0.0, 0.16, 0.5, 1.0, 0.8, 0.7, 0.66, 0.5, 0.4, 0.2, 0.1, 0.05, 0.01, 0.0, 0.0, 0.0, 0.0 ]\n \n # jobMarket, updateWork and unemploymentRate functions parameters\n p['unemploymentClassBias'] = 0.75\n p['unemploymentAgeBias'] = [1.0, 0.55, 0.35, 0.25, 0.2, 0.2]\n p['numberAgeBands'] = 6\n p['jobMobilitySlope'] = 0.004\n p['jobMobilityIntercept'] = 0.05\n p['ageBiasParam'] = [7.0, 3.0, 1.0, 0.5, 0.35, 0.15]\n p['deltaIncomeExp'] = 0.05\n p['unemployedCareBurdernParam'] = 0.025\n # Potential key parameter\n p['relocationCareLossExp'] = 1.0 # 40.0 # \n p['incomeSocialCostRelativeWeight'] = 0.5\n \n p['firingParam'] = 0.2\n p['wageVar'] = 0.06\n p['workDiscountingTime'] = 0.75 # 0.8\n p['sizeWeightParam'] = 0.7\n p['minClassWeightParam'] = 1.0\n p['incomeDiscountingExponent'] = 4.0\n p['discountingMultiplier'] = 2.0\n #p['incomeDiscountingParam'] = 2.0\n \n # relocationPensioners function parameters\n p['agingParentsMoveInWithKids'] = 0.1\n p['variableMoveBack'] = 0.1\n p['retiredRelocationParam'] = 0.001 # 0.005\n \n # houseMap function parameters\n p['geoDistanceSensitivityParam'] = 2.0\n p['socDistanceSensitivityParam'] = 2.0\n p['classAffinityWeight'] = 4.0\n p['distanceSensitivityParam'] = 0.5\n \n # relocationProb function parameters\n p['baseRelocatingProb'] = 0.05\n p['relocationParameter'] = 1.0 \n p['apprenticesRelocationProb'] = 0.5\n #p['expReloc'] = 1.0\n \n # computeRelocationCost and relocation Propensity functions parameters\n p['yearsInTownSensitivityParam'] = 0.5\n \n ######## Key parameter 5 ##############\n p['relocationCostParam'] = 0.5 # 1.0 \n \n ######## Key parameter 6 ##############\n p['propensityRelocationParam'] = 2.0 # 2.0 \n p['denRelocationWeight'] = 0.5\n \n \n ## Description of the map, towns, and houses\n p['mapGridXDimension'] = 8\n p['mapGridYDimension'] = 12 \n p['townGridDimension'] = 70\n p['cdfHouseClasses'] = [ 0.6, 0.9, 5.0 ]\n p['ukMap'] = [[ 0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.1, 0.1, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 1.0, 0.5, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.4, 0.0, 0.2, 0.2, 0.4, 0.0, 0.0, 0.0 ],\n [ 0.6, 0.0, 0.0, 0.3, 0.8, 0.2, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.6, 0.8, 0.4, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 1.0, 0.8, 0.6, 0.1, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.2, 1.0, 0.6, 0.3, 0.4 ],\n [ 0.0, 0.0, 0.5, 0.7, 0.5, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 0.4, 0.6, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0 ]]\n p['ukClassBias'] = [[ 0.0, -0.05, -0.05, -0.05, 0.0, 0.0, 0.0, 
0.0 ],\n [ -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, 0.0, -0.05, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, 0.0, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]\n p['mapDensityModifier'] = 0.6\n # p['numHouseClasses'] = 3\n # p['houseClasses'] = ['small','medium','large']\n \n ## Graphical interface details\n p['interactiveGraphics'] = False #True\n p['delayTime'] = 0.0\n p['screenWidth'] = 1300\n p['screenHeight'] = 700\n p['bgColour'] = 'black'\n p['mainFont'] = 'Helvetica 18'\n p['fontColour'] = 'white'\n p['dateX'] = 70\n p['dateY'] = 20\n p['popX'] = 70\n p['popY'] = 50\n p['pixelsInPopPyramid'] = 2000\n p['num5YearAgeClasses'] = 28\n p['careLevelColour'] = ['blue','green','yellow','orange','red']\n p['houseSizeColour'] = ['brown','purple','yellow']\n p['pixelsPerTown'] = 56\n p['maxTextUpdateList'] = 22\n \n # p['eduEduSensitivity'] = 0.5\n # p['mortalityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['fertilityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['divorceBias'] = [2.0, 1.5, 1.0, 0.75, 0.5]\n\n ## Transitions to care statistics\n \n ## Availability of care statistics\n \n #p['childHours'] = 5.0\n # p['employedHours'] = 12.0\n #p['homeAdultHours'] = 30.0\n #p['workingAdultHours'] = 25.0\n #p['maxEmployedHours'] = 60.0\n \n #p['lowCareHandicap'] = 0.5\n #p['hourlyCostOfCare'] = 20.0\n \n ## Fertility statistics\n \n # p['steadyPopBirthProb'] = 0.13\n # p['transitionYear'] = 1965\n \n ## Class and employment statistics\n # p['numClasses'] = 5\n # p['occupationClasses'] = ['lower','intermediate','higher']\n # p['cdfOccupationClasses'] = [ 0.6, 0.9, 1.0 ]\n\n ## Age transition statistics\n # p['ageOfAdulthood'] = 17\n \n ## Marriage function parameters\n \n # p['basicFemaleMarriageProb'] = 0.25\n # p['femaleMarriageModifierByDecade'] = [ 0.0, 0.5, 1.0, 1.0, 1.0, 0.6, 0.5, 0.4, 0.1, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['femaleMarriageProb'] = [0.01, 0.15, 0.3, 0.2, 0.1, 0.1, 0.06, 0.05, 0.02, 0.01, 0.01, 0.005]\n # p['maleMarriageProb'] = [0.005, 0.08, 0.25, 0.25, 0.15, 0.1, 0.07, 0.05, 0.03, 0.02, 0.01, 0.005]\n \n ## Leaving home and moving around statistics\n # p['probApartWillMoveTogether'] = 0.3\n # p['coupleMovesToExistingHousehold'] = 0.3\n # p['basicProbAdultMoveOut'] = 0.22\n # p['probAdultMoveOutModifierByDecade'] = [ 0.0, 0.2, 1.0, 0.6, 0.3, 0.15, 0.03, 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbSingleMove'] = 0.05\n # p['probSingleMoveModifierByDecade'] = [ 0.0, 1.0, 1.0, 0.8, 0.4, 0.06, 0.04, 0.02, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbFamilyMove'] = 0.03\n # p['probFamilyMoveModifierByDecade'] = [ 0.0, 0.5, 0.8, 0.5, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]\n\n \n return p", "def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif 
params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj", "def _build_param_dict(self):\n # Add parameter handlers to parameter dict. \n self._param_dict.add(SBE37Parameter.OUTPUTSAL,\n r'(do not )?output salinity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.OUTPUTSV,\n r'(do not )?output sound velocity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.NAVG,\n r'number of samples to average = (\\d+)',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.SAMPLENUM,\n r'samplenumber = (\\d+), free = \\d+',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.INTERVAL,\n r'sample interval = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.STORETIME,\n r'(do not )?store time with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.TXREALTIME,\n r'(do not )?transmit real-time data',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCMODE,\n r'serial sync mode (enabled|disabled)',\n lambda match : False if (match.group(1)=='disabled') else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCWAIT,\n r'wait time after serial sync sampling = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.TCALDATE,\n r'temperature: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.TA0,\n r' +TA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA1,\n r' +TA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n 
self._param_dict.add(SBE37Parameter.TA2,\n r' +TA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA3,\n r' +TA3 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CCALDATE,\n r'conductivity: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.CG,\n r' +G = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CH,\n r' +H = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CI,\n r' +I = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CJ,\n r' +J = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.WBOTC,\n r' +WBOTC = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CTCOR,\n r' +CTCOR = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CPCOR,\n r' +CPCOR = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PCALDATE,\n r'pressure .+ ((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.PA0,\n r' +PA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA1,\n r' +PA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA2,\n r' +PA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA0,\n r' +PTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA1,\n r' +PTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA2,\n r' +PTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB0,\n r' +PTCSB0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB1,\n r' +PTCSB1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB2,\n r' +PTCSB2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.POFFSET,\n r' +POFFSET = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RCALDATE,\n r'rtc: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : 
self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.RTCA0,\n r' +RTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA1,\n r' +RTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA2,\n r' +RTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)", "def prior_param(self, param_dict={}): \n self.param_obj = Params(param_dict) # parameter object \n self.param_names = param_dict.keys() \n self.n_params = len(param_dict.keys()) # number of parameters in theta ", "def setParams(self, p = 2):\n self.p = p\n self.l = p - 1\n self.id_ntot = {}\n self.id_y = {}\n self.id_W = {}\n self.id_X = {}\n for i in self.uniids:\n tracker = (self.data['id'] == i)\n self.id_ntot.update({i: np.sum(tracker)})\n self.id_y.update({i:\n self.data['weight'][tracker].reshape(np.sum(tracker), 1)})\n self.id_W.update({i: self._designMatrix_(p, tracker)})\n self.id_X.update({i:\n self._designMatrix_(self.l+1,tracker,is_X=True)})\n self.id_Z = self.id_W.copy()", "def set_rand_params(self) -> Dict:\n new_params: Dict = self.gen_params()\n self.set_params(new_params)\n return new_params", "def get_prob_params():\n prob = Namespace()\n prob.study_name = STUDY_NAME\n if IS_DEBUG:\n prob.num_trials = 3\n prob.max_capital = 10\n else:\n prob.num_trials = NUM_TRIALS\n prob.max_capital = MAX_CAPITAL\n # Common\n prob.time_distro = TIME_DISTRO\n prob.num_workers = NUM_WORKERS\n _study_params = {\n 'branin': ('synthetic/branin/config_mf.json',\n branin_mf, cost_branin_mf, 0.1, 0, 1),\n 'hartmann3_2': ('synthetic/hartmann3_2/config_mf.json',\n hartmann3_2_mf, cost_hartmann3_2_mf, 0.1, 0, 1),\n 'hartmann6_4': ('synthetic/hartmann6_4/config_mf.json',\n hartmann6_4_mf, cost_hartmann6_4_mf, 0.1, 0, 1),\n 'borehole_6': ('synthetic/borehole_6/config_mf.json',\n borehole_6_mf, cost_borehole_6_mf, 1, 0, 1),\n 'park2_4': ('synthetic/park2_4/config_mf.json',\n park2_4_mf, cost_park2_4_mf, 0.3, 0, 1),\n 'park2_3': ('synthetic/park2_3/config_mf.json',\n park2_3_mf, cost_park2_3_mf, 0.1, 0, 1),\n 'park1_3': ('synthetic/park1_3/config_mf.json',\n park1_3_mf, cost_park1_3_mf, 0.5, 0, 1),\n }\n (domain_config_file_suffix, raw_func, raw_fidel_cost_func, _fc_noise_scale,\n _initial_pool_size, _) = _study_params[prob.study_name]\n domain_config_file = os.path.join(DRAGONFLY_EXPERIMENTS_DIR, domain_config_file_suffix)\n # noisy\n prob.noisy_evals = NOISY_EVALS\n if NOISY_EVALS:\n noise_type = 'gauss'\n noise_scale = _fc_noise_scale\n else:\n noise_type = 'no_noise'\n noise_scale = None\n # Create domain, function_caller and worker_manager\n config = load_config_file(domain_config_file)\n func_caller = get_multifunction_caller_from_config(raw_func, config,\n raw_fidel_cost_func=raw_fidel_cost_func, noise_type=noise_type,\n noise_scale=noise_scale)\n # Set max_capital\n if hasattr(func_caller, 'fidel_cost_func'):\n prob.max_capital = prob.max_capital * \\\n func_caller.fidel_cost_func(func_caller.fidel_to_opt)\n else:\n prob.max_capital = prob.max_capital\n # Store everything in prob\n prob.func_caller = func_caller\n prob.worker_manager = SyntheticWorkerManager(prob.num_workers,\n time_distro='caller_eval_cost')\n prob.save_file_prefix = prob.study_name + ('-debug' if IS_DEBUG else '')\n prob.methods = METHODS\n prob.save_results_dir = 
SAVE_RESULTS_DIR\n prob.reporter = get_reporter('default')\n # evaluation options\n prob.evaluation_options = Namespace(prev_eval_points='none',\n initial_pool_size=_initial_pool_size)\n return prob", "def get_param_dict(self, theta):\r\n # create dictionary setting parameters in the feature map to a value corresponding to the random samples\r\n vf_param = list(self.circuit.parameters)\r\n zip_it = zip(vf_param, theta)\r\n param_dict = dict(zip_it)\r\n\r\n return param_dict", "def create_param_grid ( param_grid: Dict ):\n \n return (\n dict ( zip ( param_grid.keys(), instance ) )\n for instance in product ( * param_grid.values() )\n ) # End create_param_grid", "def create_param_grid ( param_grid: Dict ):\n \n return (\n dict ( zip ( param_grid.keys(), instance ) )\n for instance in product ( * param_grid.values() )\n ) # End create_param_grid" ]
[ "0.6686416", "0.6469411", "0.64443755", "0.6407086", "0.5964358", "0.5906427", "0.5815626", "0.57940143", "0.5791521", "0.5775666", "0.5771342", "0.5738936", "0.57148474", "0.57035136", "0.56996524", "0.5679744", "0.56791604", "0.56763065", "0.56688666", "0.56636876", "0.56462115", "0.5642801", "0.56396246", "0.56326836", "0.562053", "0.55855", "0.5557907", "0.5498345", "0.5497285", "0.5497285" ]
0.70243114
0
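The create_param_grid snippet that closes the negatives list above expands a dict of candidate values into every parameter combination via itertools.product. A minimal runnable sketch of that pattern follows; the grid values in the usage lines are hypothetical, chosen only to show the shape of the output:

from itertools import product

def create_param_grid(param_grid):
    # Lazily yield one dict per combination: the keys stay fixed,
    # the values come from the Cartesian product of the candidate lists.
    return (dict(zip(param_grid.keys(), instance))
            for instance in product(*param_grid.values()))

# Hypothetical usage (grid values are illustrative, not taken from the rows above):
grid = {'learning_rate': [0.01, 0.1], 'max_depth': [3, 5]}
for combo in create_param_grid(grid):
    print(combo)  # {'learning_rate': 0.01, 'max_depth': 3}, ... four dicts in total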
Method uses the current values in the param_dict to update the strength of the correlation between sec_haloprop and galprop at each value of prim_galprop.
def _set_correlation_strength(self): if hasattr(self, 'correlation_strength_abcissa'): abcissa = self.correlation_strength_abcissa ordinates = [self.param_dict['correlation_param'+str(i+1)] for i in range(len(abcissa))] correlation_strength_spline = model_helpers.custom_spline(abcissa, ordinates, k=custom_len(abcissa)-1) self.correlation_strength = correlation_strength_spline(self.prim_galprop_bins) else: self.correlation_strength = np.repeat(self.param_dict['correlation_param1'], len(self.prim_galprop_bins)) self.correlation_strength[self.correlation_strength > 1] = 1 self.correlation_strength[self.correlation_strength <- 1] = -1 self.correlation_strength = np.append( self.correlation_strength, self.correlation_strength[-1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mc_galprop(self, seed=None, **kwargs):\n model_helpers.update_param_dict(self, **kwargs)\n self._set_correlation_strength()\n\n if ('galaxy_table' in kwargs.keys()) & ('halos' in kwargs.keys()):\n msg = (\"The mc_\"+self.galprop_key+\" method accepts either \" + \n \"a halos keyword argument, or a galaxy_table keyword argument\" + \n \" but never both.\")\n raise KeyError(msg)\n elif 'galaxy_table' in kwargs.keys():\n galaxy_table = kwargs['galaxy_table']\n operative_sec_haloprop_key = (\n model_defaults.host_haloprop_prefix + self.sec_haloprop_key)\n elif 'halos' in kwargs.keys():\n galaxy_table = kwargs['halos']\n operative_sec_haloprop_key = self.sec_haloprop_key\n else:\n msg = (\"The mc_\"+self.galprop_key+\" requires either \" + \n \"a halos keyword argument, or a galaxy_table keyword argument\")\n raise KeyError(msg)\n\n self.add_new_haloprops(galaxy_table)\n\n # All at once, draw all the randoms we will need\n np.random.seed(seed=seed)\n all_randoms = np.random.random(len(galaxy_table)*2)\n galprop_cumprob = all_randoms[0:len(galaxy_table)]\n galprop_scatter = all_randoms[len(galaxy_table):]\n\n # Initialize the output array\n output_galprop = np.zeros(len(galaxy_table))\n\n # Determine binning and loop range\n if 'galaxy_table_slice_array' not in kwargs.keys():\n binned_prim_galprop = np.digitize(\n galaxy_table[self.prim_galprop_key], \n self.prim_galprop_bins)\n prim_galprop_loop_range = set(binned_prim_galprop)\n else:\n prim_galprop_loop_range = range(len(self.one_point_lookup_table))\n\n for i in prim_galprop_loop_range:\n\n # Determine the slice corresponding to the i^th prim_galprop bin\n if 'galaxy_table_slice_array' not in kwargs.keys():\n idx_bini = np.where(binned_prim_galprop==i)[0]\n num_bini = len(idx_bini)\n else:\n idx_bini = kwargs['galaxy_table_slice_array'][i]\n num_bini = len(galaxy_table[idx_bini])\n\n if len(idx_bini) > 0:\n # Fetch the appropriate number of randoms\n # for the i^th prim_galprop bin\n galprop_cumprob_bini = galprop_cumprob[idx_bini]\n galprop_scatter_bini = galprop_scatter[idx_bini]\n\n # Fetch the halos in the i^th prim_galprop bin, \n # and determine how they are sorted\n haloprop_bini = galaxy_table[idx_bini][operative_sec_haloprop_key]\n idx_sorted_haloprop_bini = np.argsort(haloprop_bini)\n\n galprop_bini = self._condition_matched_galprop(\n haloprop_bini[idx_sorted_haloprop_bini], \n galprop_cumprob_bini, i, galprop_scatter_bini, self.tol)\n\n # Assign the final values to the \n # appropriately sorted subarray of output_galprop\n output_galprop[idx_bini[idx_sorted_haloprop_bini]] = galprop_bini\n\n return output_galprop", "def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself._W = self._W - learning_rate * self._grad_W_current\n\t\tself._b = self._b - learning_rate * self._grad_b_current\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n 
self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)", "def polarParams(pol, chord, cl_lin_method='leastsquare', DS_constants='OpenFAST', tau=None):\n # Return interpolant\n fPolar = pol.interpolant(variables=['cl','cd','cm','fs','cl_inv','cl_fs'], radians=True)\n\n p=dict()\n p['Polar'] = pol # backup\n p['fPolar'] = fPolar\n\n # Linear region\n linear_region = np.array([-5, 10])*np.pi/180\n Cl_slope, alpha_0 = pol.cl_linear_slope(window=linear_region, method=cl_lin_method, radians=True)\n #print('Cl_slope',Cl_slope, '[1/rad] - alpha_0', alpha_0*180/np.pi,'[deg]')\n\n p['alpha_0'] = alpha_0 # TODO HARMONIZATION WITH DS\n p['Cl_slope'] = Cl_slope # TODO HARMONIZATION WITH DS\n p['alpha_range'] = None\n p['alpha_range_lin'] = None\n\n # Dynamic stall\n p.update(dynstall_mhh_param_from_polar(pol, chord, constants=DS_constants))\n p.update(dynstall_oye_param_from_polar(pol, tau=tau)) # TODO\n return p", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def updateRNGParam(self, dictParam):\n for key in dictParam:\n if key == 'tolerance':\n self.RNGtolerance = dictParam['tolerance']\n elif key == 'initialGridDisc':\n self.RNGInitDisc = dictParam['initialGridDisc']\n self._distribution.updateRNGparameter(self.RNGtolerance,self.RNGInitDisc)", "def update_param(param, param_dict, alg=\"IID_LINEAR\", prefix=\"\"):\n default_len = len(param.defaults)\n if param.defaults:\n for index, value in enumerate(reversed(param.args)):\n if value not in [\"self\", \"W\", \"method\", \"causal_matrix\", \"topology_matrix\"]:\n if index < default_len:\n p_value = list(reversed(param.defaults))[index]\n else:\n p_value = None\n if value is \"sem_type\":\n p_value = sem_type_set(\"sem_type\", alg)[0]\n param_dict.update({prefix + value: p_value})", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def updateParams(self,gradients):\n for i in xrange(len(self.params)):\n self.params[i].set_value(self.params[i].get_value()-gradients[i]/(1/self.learning_rate+self.iterations))", "def _build_param_dict(self, **kwargs):\n \n if 'correlation_strength' in kwargs.keys():\n\n correlation_strength = kwargs['correlation_strength']\n if custom_len(correlation_strength) > 1:\n try:\n self.correlation_strength_abcissa = kwargs['correlation_strength_abcissa']\n except KeyError:\n msg = (\"If correlation_strength keyword is passed to the 
constructor, \\n\" + \n \"you must also pass a correlation_strength_abcissa keyword argument \" + \n \"storing an array of the same length as correlation_strength.\")\n raise(msg)\n else:\n self.correlation_strength_abcissa = [0]\n correlation_strength = [correlation_strength]\n\n self._param_dict_keys = ['correlation_param' + str(i+1) for i in range(len(correlation_strength))]\n self.param_dict = {key:value for key, value in zip(self._param_dict_keys, correlation_strength)}\n else:\n self.param_dict = {'correlation_param1': 1.0}\n self._set_correlation_strength()", "def update_parameters(parameters, grads, learning_rate):\n pass", "def _update_params(self, gradients: dict, learning_rate: float):\n L = len(self.activations)\n\n for l in range(L):\n self.params[\"W_\" + str(l + 1)] = self.params[\"W_\" + str(l + 1)] - learning_rate * gradients[\n \"dW\" + str(l + 1)]\n\n self.params[\"b_\" + str(l + 1)] = self.params[\"b_\" + str(l + 1)] - learning_rate * gradients[\n \"db\" + str(l + 1)]", "def evaluate_reco_param(self):\n evals = self.input_binning['true_energy'].weighted_centers.magnitude\n n_e = len(self.input_binning['true_energy'].weighted_centers.magnitude)\n n_cz = len(self.input_binning['true_coszen'].weighted_centers.magnitude)\n eval_dict = deepcopy(self.param_dict)\n for flavintgroup, dim_dict in eval_dict.items():\n for dim, dist_list in dim_dict.items():\n for dist_prop_dict in dist_list:\n for dist_prop in dist_prop_dict.keys():\n if dist_prop == 'dist':\n continue\n if callable(dist_prop_dict[dist_prop]):\n func = dist_prop_dict[dist_prop]\n vals = func(evals)\n dist_prop_dict[dist_prop] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n elif isinstance(dist_prop_dict[dist_prop], dict):\n assert dist_prop == 'kwargs'\n for kwarg in dist_prop_dict['kwargs'].keys():\n func = dist_prop_dict['kwargs'][kwarg]\n vals = func(evals)\n dist_prop_dict['kwargs'][kwarg] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n # Now check for consistency, to not have to loop over all dict\n # entries again at a later point in time\n self.check_reco_dist_consistency(dist_list)\n return eval_dict", "def glcmProps(P, prop='contrast'):\n\n (num_level, num_level2, num_dist, num_angle) = P.shape\n assert num_level == num_level2\n assert num_dist > 0\n assert num_angle > 0\n\n # create weights for specified property\n I, J = np.ogrid[0:num_level, 0:num_level]\n if prop == 'contrast':\n weights = (I - J) ** 2\n elif prop in ['ASM', 'energy', 'correlation']:\n pass\n elif prop == 'mean':\n weights, _ = np.mgrid[0:num_level, 0:num_level]\n elif prop == 'dissimilarity':\n weights = np.abs(I - J)\n elif prop == 'homogeneity':\n weights = 1. / (1. 
+ (I - J) ** 2)\n else:\n raise ValueError('%s is an invalid property' % (prop))\n\n # compute property for each GLCM\n if prop == 'energy':\n asm = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]\n results = np.sqrt(asm)\n elif prop == 'ASM':\n results = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]\n elif prop == 'correlation':\n results = np.zeros((num_dist, num_angle), dtype=np.float64)\n I = np.array(range(num_level)).reshape((num_level, 1, 1, 1))\n J = np.array(range(num_level)).reshape((1, num_level, 1, 1))\n diff_i = I - np.apply_over_axes(np.sum, (I * P), axes=(0, 1))[0, 0]\n diff_j = J - np.apply_over_axes(np.sum, (J * P), axes=(0, 1))[0, 0]\n\n std_i = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_i) ** 2),\n axes=(0, 1))[0, 0])\n std_j = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_j) ** 2),\n axes=(0, 1))[0, 0])\n cov = np.apply_over_axes(np.sum, (P * (diff_i * diff_j)),\n axes=(0, 1))[0, 0]\n\n # handle the special case of standard deviations near zero\n mask_0 = std_i < 1e-15\n mask_0[std_j < 1e-15] = True\n results[mask_0] = 1\n\n # handle the standard case\n mask_1 = mask_0 == False\n results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1])\n elif prop in ['contrast', 'dissimilarity', 'homogeneity', 'mean']:\n weights = weights.reshape((num_level, num_level, 1, 1))\n results = np.apply_over_axes(np.sum, (P * weights), axes=(0, 1))[0, 0]\n\n return results", "def add_to_dict(param_dict):\n ### Sample - Int\n sample_s = param_dict['ml_args'].sample_s\n ### Sample - Mr\n sample_Mr = param_dict['ml_args'].sample_Mr\n ## Sample volume\n # Units (Mpc/h)**3\n volume_sample = { '18': 37820 / 0.01396,\n '19': 6046016.60311 ,\n '20': 2.40481e7 ,\n '21': 8.79151e7 }\n vol_mr = volume_sample[sample_s]\n ##\n ## Choice of Centrals and Satellites\n cens = int(1)\n sats = int(0)\n ## Other constants\n # Speed of light - In km/s\n speed_c = ac.c.to(u.km/u.s).value\n ## Number of CPU's to use\n cpu_number = int(cpu_count() * param_dict['cpu_frac'])\n ##\n ## Plotting constants\n plot_dict = { 'size_label':23,\n 'size_title':25,\n 'color_ham' :'red',\n 'color_dyn' :'blue'}\n ##\n ## Catalogue Prefix string\n catl_str_fig = param_dict['ml_args'].catl_alg_comp_fig_str()\n ##\n ## Saving to `param_dict`\n param_dict['sample_s' ] = sample_s\n param_dict['sample_Mr' ] = sample_Mr\n param_dict['vol_mr' ] = vol_mr\n param_dict['cens' ] = cens\n param_dict['sats' ] = sats\n param_dict['speed_c' ] = speed_c\n param_dict['cpu_number' ] = cpu_number\n param_dict['plot_dict' ] = plot_dict\n param_dict['catl_str_fig'] = catl_str_fig\n\n return param_dict", "def set_params(self, params: Dict) -> None:\n self.leak.set_g(params[\"g_leak\"])\n self.kvhh.set_g(params[\"g_kvhh\"])\n self.cav.set_g(params[\"g_cav\"])\n self.kca.set_g(params[\"g_kca\"])\n self.nap.set_g(params[\"g_nap\"])\n self.tau_ca = params[\"t_ca\"]", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def update_parameters(parameters, grads, learning_rate=0.01):\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n ### END CODE HERE ###\n\n # Retrieve each gradient from the dictionary \"grads\"\n ### START 
CODE HERE ### (≈ 4 lines of code)\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n dW3 = grads[\"dW3\"]\n db3 = grads[\"db3\"]\n ## END CODE HERE ###\n\n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = W1 - (learning_rate * dW1)\n b1 = b1 - (learning_rate * db1)\n W2 = W2 - (learning_rate * dW2)\n b2 = b2 - (learning_rate * db2)\n W3 = W3 - (learning_rate * dW3)\n b3 = b3 - (learning_rate * db3)\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n\n return parameters", "def update_distribution(self, gp_conn=None):\n results = self.results_from_db(gp_conn)\n\n for i in self.active_arms:\n try:\n self.arms_dict_params[i]['success'].append(results[i]['success'])\n self.arms_dict_params[i]['trials'].append(results[i]['trials'])\n self.arms_dict_params[i]['current_alpha'] += self.arms_dict_params[i]['success'][-1]\n self.arms_dict_params[i]['current_beta'] += self.arms_dict_params[i]['trials'][-1] - \\\n self.arms_dict_params[i]['success'][-1]\n except:\n # вот тут надо рэйзить ошибку\n self.arms_dict_params[i]['success'].append(0)\n self.arms_dict_params[i]['trials'].append(0)\n\n return self.arms_dict_params", "def update_param(self, lr):\n\n\n self.W=self.W-lr*self.W_grad\n self.b = self.b - lr*self.b_grad", "def updateParameters(self, paramDict):\n\n params = ['taux', 'mu', 'G', 'alpha_0', 'delta', 'p', 'I0', 'kparam']\n\n # Now set the parameters\n for k in paramDict.keys():\n mycode = 'self.' + k + \"=paramDict[\\'\" + k + \"\\']\"\n exec(mycode)", "def HC_update(self, GS_HC, hc_ro):\n\n # Backward inference using GS - HC connectivity (generative model).\n # Weighted mean of GS - HC connectivity, with HC estimate as weights.\n hc_fb = np.average(GS_HC, 0, hc_ro)\n hc_fb = hc_fb / hc_fb.sum()\n\n self.P = hc_fb * self.P\n self.pass_through_lateral_conn()", "def _update_parameters(self, curr_state, reward, next_state):\n phi = self._features.vector(curr_state)\n phi_dash = self._features.vector(next_state)\n\n self._A += np.outer(phi, (phi - self._gamma * phi_dash))\n self._b += reward * phi", "def update_parameters(parameters, grads, learning_rate):\n L = len(parameters) // 2\n\n for i in range(L):\n parameters[\"W\"+str(i+1)] = parameters[\"W\"+str(i+1)] - learning_rate * grads[\"dW\"+str(i+1)]\n parameters[\"b\"+str(i+1)] = parameters[\"b\"+str(i+1)] - learning_rate * grads[\"db\"+str(i+1)]\n\n return parameters", "def update(self, newparams):\n for k, v in list(newparams.items()):\n if k in self.basis_params:\n # Make sure parameter is in dict, and check if it changed\n if k not in self.params:\n self.basis_dirty = True\n self.params[k] = v\n if np.any(v != self.params.get(k)):\n self.basis_dirty = True\n else:\n try:\n # here the sps.params.dirtiness should increase to 2 if\n # there was a change\n self.ssp.params[k] = v[0]\n except KeyError:\n pass\n # now update params\n self.params[k] = np.copy(np.atleast_1d(v))\n # if we changed only csp_params but are relying on COMPSP, make\n # sure we remake the basis\n if self.safe and (self.ssp.params.dirtiness == 1):\n self.basis_dirty = True\n # if we changed only csp_params propagate them through but don't\n # force basis remake (unless basis_dirty)\n if self.ssp.params.dirtiness == 1:\n self.ssp._update_params()\n\n if self.basis_dirty | (self.ssp.params.dirtiness == 2):\n self.build_basis()", "def update_parameters(parameters, grads, learning_rate = 1.2):\n\t# Retrieve each parameter 
from the dictionary \"parameters\"\n\tW1 = parameters['W1']\n\tb1 = parameters['b1']\n\tW2 = parameters['W2']\n\tb2 = parameters['b2']\n\n\t# Retrieve each gradient from the dictionary \"grads\"\n\tdW1 = grads['dW1']\n\tdb1 = grads['db1']\n\tdW2 = grads['dW2']\n\tdb2 = grads['db2']\n\n\t# Update rule for each parameter\n\tW1 = W1 - learning_rate*dW1\n\tb1 = b1 - learning_rate*db1\n\tW2 = W2 - learning_rate*dW2\n\tb2 = b2 - learning_rate*db2\n\n\tparameters = {\"W1\": W1,\n\t\t\t\t\t\"b1\": b1,\n\t\t\t\t\t\"W2\": W2,\n\t\t\t\t\t\"b2\": b2}\n\n\treturn parameters", "def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)]-learning_rate* grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)]-learning_rate* grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters", "def update_arm_parameters(self, arm_intuition, arm_selection, success):\n if success:\n self.alpha_params[arm_intuition, arm_selection] += 1\n else:\n self.beta_params[arm_intuition, arm_selection] += 1", "def set_params(self, params: Dict) -> None:\n self.leak.set_g(params['g_leak'])\n self.nav.set_g(params['g_nav'])\n self.kvhh.set_g(params['g_kvhh'])\n self.kva.set_g(params['g_kva'])\n self.kvsi.set_g(params['g_kvsi'])\n self.cav.set_g(params['g_cav'])\n self.kca.set_g(params['g_kca'])\n self.nap.set_g(params['g_nap'])\n self.kir.set_g(params['g_kir'])\n self.ampar.set_g(params['g_ampar'])\n self.nmdar.set_g(params['g_nmdar'])\n self.gabar.set_g(params['g_gabar'])\n self.tau_ca = params['t_ca']" ]
[ "0.6255577", "0.62177485", "0.6214475", "0.5984097", "0.5967375", "0.59337133", "0.59327203", "0.59210086", "0.58783424", "0.5867557", "0.57922786", "0.57610255", "0.57574403", "0.57097393", "0.5705617", "0.5699275", "0.56962854", "0.5681587", "0.5657348", "0.56380326", "0.5610231", "0.56098706", "0.5598722", "0.5591783", "0.5584359", "0.5582361", "0.554142", "0.553775", "0.5519301", "0.5495096" ]
0.63643956
0
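The _set_correlation_strength document in the row above interpolates per-bin correlation parameters with a spline and clips the result to [-1, 1]. A minimal sketch of that pattern, assuming model_helpers.custom_spline behaves like a standard low-order spline (scipy's InterpolatedUnivariateSpline stands in for it here, and the parameter values in the usage lines are hypothetical):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

def correlation_strength(param_dict, abscissa, prim_galprop_bins):
    # One ordinate per abscissa point, read from correlation_param1, correlation_param2, ...
    ordinates = [param_dict['correlation_param' + str(i + 1)]
                 for i in range(len(abscissa))]
    # Stand-in for model_helpers.custom_spline; k is capped to scipy's valid 1..5 range.
    spline = InterpolatedUnivariateSpline(abscissa, ordinates,
                                          k=min(len(abscissa) - 1, 3))
    # Evaluate at the prim_galprop bins and clip to the valid correlation range.
    strength = np.clip(spline(prim_galprop_bins), -1, 1)
    # Repeat the last value so there is one entry per bin edge, as in the original method.
    return np.append(strength, strength[-1])

# Hypothetical usage:
params = {'correlation_param1': 0.9, 'correlation_param2': 0.5}
print(correlation_strength(params, abscissa=[9.0, 12.0],
                           prim_galprop_bins=np.linspace(9.0, 12.0, 4)))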
Method calls ``new_haloprop_func_dict`` to create new halo properties as columns to the mock catalog, if applicable.
def add_new_haloprops(self, galaxy_table): if hasattr(self, 'new_haloprop_func_dict'): d = self.new_haloprop_func_dict for key, func in d.iteritems(): if key not in galaxy_table.keys(): galaxy_table[key] = func(galaxy_table=galaxy_table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_make_hmp(self):\n table_factory = DataTableFactory(PACKET_DIR)\n table_factory.hmp()", "def _parse_constructor_kwargs(self, **kwargs):\n\n try:\n halo_id = np.array(kwargs['halo_id'])\n assert type(halo_id) is np.ndarray\n Nhalos = custom_len(halo_id)\n except KeyError:\n msg = (\"\\nThe UserSuppliedHaloCatalog requires a ``halo_id`` keyword argument.\")\n raise HalotoolsError(msg)\n\n halo_table_dict = (\n {key: np.array(kwargs[key]) for key in kwargs\n if ((type(kwargs[key]) is np.ndarray) | (type(kwargs[key]) is Column)) and\n (custom_len(kwargs[key]) == Nhalos) and (key[:5] == 'halo_')})\n self._test_halo_table_dict(halo_table_dict)\n\n metadata_dict = (\n {key: kwargs[key] for key in kwargs\n if (key not in halo_table_dict) and (key != 'ptcl_table')}\n )\n\n return halo_table_dict, metadata_dict", "def _add_hybrid_cols(self):\n for new_col_name, method in HYBRID_METHODS.items():\n out = method(self)\n if out is not None:\n try:\n self._hybrid_meta[new_col_name] = out\n except ValueError as e:\n msg = (\"Unable to add {!r} column to hybrid meta. The \"\n \"following exception was raised when adding \"\n \"the data output by '{}': {!r}.\")\n w = msg.format(new_col_name, method.__name__, e)\n logger.warning(w)\n warn(w, OutputWarning)", "def _make_haloupdate(self, f, fixed, halos, **kwargs):\n return", "def create():\n df = prepare_dataframe(io[\"harmonization_df_output_path\"], index_col=\"label\")\n assumption_map = create_assumption_map(columns, df)\n assumption_map.to_csv(io[\"harmonization_output_assumption_path\"], index=False)\n\n # Heat Rate regression Map, Valid only for the Coal\n regression_map = create_regression_map(df)\n\n res = other_regression(df[df[\"fuel_type\"] == \"coal\"], [\"heat_rate\"], \"delta_heatrate\")\n regression_map[\"intersect_err\"] = res.bse[0]\n regression_map[\"slope_err\"] = res.bse[1]\n print(regression_map)\n regression_map.to_csv(io[\"harmonization_output_regression_path\"], index=False)", "def _generate_hcs_meta(self):\n self.hcs_meta = {'plate': self.plate_meta}\n\n well_metas = []\n for well in self.wells:\n meta = self.store[well].attrs.get('well')\n well_metas.append(meta)\n\n self.hcs_meta['well'] = well_metas", "def _mc_galprop(self, seed=None, **kwargs):\n model_helpers.update_param_dict(self, **kwargs)\n self._set_correlation_strength()\n\n if ('galaxy_table' in kwargs.keys()) & ('halos' in kwargs.keys()):\n msg = (\"The mc_\"+self.galprop_key+\" method accepts either \" + \n \"a halos keyword argument, or a galaxy_table keyword argument\" + \n \" but never both.\")\n raise KeyError(msg)\n elif 'galaxy_table' in kwargs.keys():\n galaxy_table = kwargs['galaxy_table']\n operative_sec_haloprop_key = (\n model_defaults.host_haloprop_prefix + self.sec_haloprop_key)\n elif 'halos' in kwargs.keys():\n galaxy_table = kwargs['halos']\n operative_sec_haloprop_key = self.sec_haloprop_key\n else:\n msg = (\"The mc_\"+self.galprop_key+\" requires either \" + \n \"a halos keyword argument, or a galaxy_table keyword argument\")\n raise KeyError(msg)\n\n self.add_new_haloprops(galaxy_table)\n\n # All at once, draw all the randoms we will need\n np.random.seed(seed=seed)\n all_randoms = np.random.random(len(galaxy_table)*2)\n galprop_cumprob = all_randoms[0:len(galaxy_table)]\n galprop_scatter = all_randoms[len(galaxy_table):]\n\n # Initialize the output array\n output_galprop = np.zeros(len(galaxy_table))\n\n # Determine binning and loop range\n if 'galaxy_table_slice_array' not in kwargs.keys():\n binned_prim_galprop = np.digitize(\n 
galaxy_table[self.prim_galprop_key], \n self.prim_galprop_bins)\n prim_galprop_loop_range = set(binned_prim_galprop)\n else:\n prim_galprop_loop_range = range(len(self.one_point_lookup_table))\n\n for i in prim_galprop_loop_range:\n\n # Determine the slice corresponding to the i^th prim_galprop bin\n if 'galaxy_table_slice_array' not in kwargs.keys():\n idx_bini = np.where(binned_prim_galprop==i)[0]\n num_bini = len(idx_bini)\n else:\n idx_bini = kwargs['galaxy_table_slice_array'][i]\n num_bini = len(galaxy_table[idx_bini])\n\n if len(idx_bini) > 0:\n # Fetch the appropriate number of randoms\n # for the i^th prim_galprop bin\n galprop_cumprob_bini = galprop_cumprob[idx_bini]\n galprop_scatter_bini = galprop_scatter[idx_bini]\n\n # Fetch the halos in the i^th prim_galprop bin, \n # and determine how they are sorted\n haloprop_bini = galaxy_table[idx_bini][operative_sec_haloprop_key]\n idx_sorted_haloprop_bini = np.argsort(haloprop_bini)\n\n galprop_bini = self._condition_matched_galprop(\n haloprop_bini[idx_sorted_haloprop_bini], \n galprop_cumprob_bini, i, galprop_scatter_bini, self.tol)\n\n # Assign the final values to the \n # appropriately sorted subarray of output_galprop\n output_galprop[idx_bini[idx_sorted_haloprop_bini]] = galprop_bini\n\n return output_galprop", "def add_properties_to_df(df):\n\n df['N_rot'] = df.apply(Nrot, axis=1)\n df['HAC'] = df.apply(heavy_atoms, axis=1)\n df['cLogP'] = df.apply(clogP, axis =1)\n df['TSPA'] = df.apply(TPSA, axis=1)\n df['NDon'] = df.apply(NDon, axis=1)\n df['NAcc'] = df.apply(NAcc, axis=1)\n df['Fsp3'] = df.apply(Fsp3, axis=1)\n\n return df", "def test_attributes(self):\n result = self.plugin_instance.create_probability_cube(\n self.percentiles_cube, self.orography_cube)\n self.assertEqual(result.units, \"1\")\n self.assertEqual(result.name(), self.new_name)\n self.assertEqual(result.attributes['relative_to_threshold'], 'below')\n self.assertEqual(result.attributes['thresholded_using'],\n 'surface_altitude')", "def test_health_ng(self, mock):\n mock.configure_mock(**(self.config_payload(1, 0)))\n d = lf.lambda_handler(**(self.lambdaparam))\n self.assertEqual(d, 1)\n mock.client.return_value.update_thing_shadow.assert_called_once_with(\n thingName=self.thingname,\n payload=lf.payload_put(lf.shadow_update_data))", "def addfunctions2new(abunch, key):\n snames = [\n \"BuildingSurface:Detailed\",\n \"Wall:Detailed\",\n \"RoofCeiling:Detailed\",\n \"Floor:Detailed\",\n \"FenestrationSurface:Detailed\",\n \"Shading:Site:Detailed\",\n \"Shading:Building:Detailed\",\n \"Shading:Zone:Detailed\",\n ]\n snames = [sname.upper() for sname in snames]\n if key in snames:\n func_dict = {\n \"area\": fh.area,\n \"height\": fh.height, # not working correctly\n \"width\": fh.width, # not working correctly\n \"azimuth\": fh.azimuth,\n \"tilt\": fh.tilt,\n \"coords\": fh.getcoords, # needed for debugging\n }\n try:\n abunch.__functions.update(func_dict)\n except KeyError as e:\n abunch.__functions = func_dict\n return abunch", "def test_metadata_filter_hmp(self):\n table_factory = DataTableFactory(PACKET_DIR)\n hmp1 = table_factory.hmp()\n\n metadata = pd.DataFrame({'foo': {'haib18CEM5332_HMGTJCCXY_SL342402': 1}})\n table_factory.set_metadata(metadata)\n hmp2 = table_factory.hmp()\n\n self.assertEqual(hmp1.shape[0] // 2, hmp2.shape[0])", "def set_hod(self, hod_dict):\n self.__init__(hod_dict)", "def make(self, halo_spots):\n calls = {}\n generated = OrderedDict()\n for hs in halo_spots:\n # 1) Callables/Calls for send/recv\n begin_exchange = []\n for f, v in 
hs.fmapper.items():\n # Sanity check\n assert f.is_Function\n assert f.grid is not None\n\n # Note: to construct the halo exchange Callables, use the generic `df`,\n # instead of `f`, so that we don't need to regenerate code for Functions\n # that are symbolically identical to `f` except for the name\n df = f.__class__.__base__(name='a', grid=f.grid, shape=f.shape_global,\n dimensions=f.dimensions)\n # `gather`, `scatter`, `sendrecv` and `haloupdate` are generic by\n # construction -- they only need to be generated once for each unique\n # pair (`ndim`, `halos`)\n if (f.ndim, v) not in generated:\n key = len(generated)\n haloupdate = self._make_haloupdate(df, v.loc_indices, v.halos, key)\n sendrecv = self._make_sendrecv(df, v.loc_indices)\n gather = self._make_copy(df, v.loc_indices)\n scatter = self._make_copy(df, v.loc_indices, swap=True)\n # Arrange the newly constructed Callables in a suitable data\n # structure to capture the call tree. This may be useful to\n # the HaloExchangeBuilder user\n haloupdate = EFuncNode(haloupdate)\n sendrecv = EFuncNode(sendrecv, haloupdate)\n gather = EFuncNode(gather, sendrecv)\n scatter = EFuncNode(scatter, sendrecv)\n\n generated[(f.ndim, v)] = haloupdate\n\n # `haloupdate` Call construction\n comm = f.grid.distributor._obj_comm\n nb = f.grid.distributor._obj_neighborhood\n loc_indices = list(v.loc_indices.values())\n args = [f, comm, nb] + loc_indices\n begin_exchange.append(Call(generated[(f.ndim, v)].name, args))\n\n # 2) Callables/Calls for wait (no-op in case of synchronous halo exchange)\n wait_exchange = []\n for f, v in hs.fmapper.items():\n # TODO\n pass\n\n # 3) Callables/Calls for remainder computation (no-op in case of\n # synchronous halo exchange)\n remainder = []\n\n calls[hs] = List(body=begin_exchange + [hs.body] + wait_exchange + remainder)\n\n return flatten(generated.values()), calls", "def _apply_harmonized_metadata_to_sample(sample: Sample, harmonized_metadata: dict):\n for key, value in harmonized_metadata.items():\n setattr(sample, key, value)", "def setUp(self):\n problem = setup_house_L(size=(40, 40))\n\n env = MetroLayoutEnv()\n\n costfn = objectives.ConstraintsHeur(problem,\n wmap={'AspectConstraint':0.1,\n 'AreaConstraint': 2\n },\n default=1.)\n\n model = algo.MetropolisHastings(env, costfn)\n\n self.exp = SimpleMH(\n env,\n problem,\n model=model,\n cost_fn=costfn,\n num_iter=1000,\n initializer=PointsInBound(problem, env, size=3, seed=69)\n )", "def test_property_cols():\n image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'\n cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'\n psf_file = os.path.join('output','test_property_cols.piff')\n hsm_file = os.path.join('output','test_property_cols_hsm.fits')\n\n nstars = 25\n scale = 0.26\n size = 15\n order = 1\n stamp_size = 25\n\n config = {\n 'input' : {\n 'nstars': nstars,\n 'image_file_name' : image_file,\n 'image_hdu' : 1,\n 'weight_hdu' : 3,\n 'badpix_hdu' : 2,\n 'cat_file_name' : cat_file,\n 'x_col' : 'XWIN_IMAGE',\n 'y_col' : 'YWIN_IMAGE',\n 'sky_col' : 'BACKGROUND',\n 'stamp_size' : stamp_size,\n 'ra' : 'TELRA',\n 'dec' : 'TELDEC',\n 'gain' : 'GAINA',\n 'satur' : 'SATURATA',\n 'chipnum': 1,\n # Select ones with a variety of dtypes.\n 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'],\n },\n 'select' : {\n 'type': 'Properties',\n 'where': 'np.abs(SPREAD_MODEL) < 3.e-4',\n\n 'reserve_frac' : 0.2,\n 'seed' : 1234,\n },\n 'psf' : {\n 'model' : {\n 'type' : 'PixelGrid',\n 'scale' : scale,\n 'size' : size,\n 'interp' : 
'Lanczos(5)',\n },\n 'interp' : {\n 'type' : 'BasisPolynomial',\n 'order' : [1, 1, 1],\n 'keys': ['u', 'v', 'GI_COLOR'],\n },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats': [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n },\n ],\n },\n }\n\n piff.piffify(config)\n hsm = fitsio.read(hsm_file)\n cat = fitsio.read(cat_file)\n\n print('hsm dtype = ',hsm.dtype)\n print('cat dtype = ',cat.dtype)\n\n for key in hsm.dtype.names:\n print(key)\n if key in cat.dtype.names:\n assert hsm[key].dtype.type == cat[key].dtype.type\n elif key == 'reserve':\n assert hsm[key].dtype.type == np.dtype(bool).type\n elif key.startswith('flag'):\n assert hsm[key].dtype.type == np.dtype(int).type\n elif key == 'sky':\n # This one is read from the input catalog, but renamed\n assert hsm[key].dtype.type == np.float32\n else:\n assert hsm[key].dtype.type == np.dtype(float).type\n\n # Check that drawing the image works without specifying chipnum.\n # This is ok so long as the input is really only a single chip.\n # cf. Issue #140\n psf = piff.read(psf_file)\n im1 = psf.draw(35, 40, center=True, GI_COLOR=1)\n\n # If the input field didn't include chipnum, then it makes no difference for a single chip.\n del config['input']['chipnum']\n piff.piffify(config)\n psf = piff.read(psf_file)\n im2 = psf.draw(35, 40, center=True, GI_COLOR=1)\n assert im1 == im2", "def columns_setup(self):\n self.required = None\n self.addition = None\n self.deletion = None\n self.retention = None\n self.rename = None", "def _setup_hpos():\n hpo_dao = HPODao()\n hpo_dao.insert(\n HPO(hpoId=UNSET_HPO_ID, name=\"UNSET\", displayName=\"Unset\", organizationType=OrganizationType.UNSET)\n )\n hpo_dao.insert(\n HPO(hpoId=PITT_HPO_ID, name=\"PITT\", displayName=\"Pittsburgh\", organizationType=OrganizationType.HPO)\n )\n hpo_dao.insert(\n HPO(hpoId=AZ_HPO_ID, name=\"AZ_TUCSON\", displayName=\"Arizona\", organizationType=OrganizationType.HPO)\n )\n\n org_dao = OrganizationDao()\n org_dao.insert(\n Organization(\n organizationId=AZ_ORG_ID,\n externalId=\"AZ_TUCSON_BANNER_HEALTH\",\n displayName=\"Banner Health\",\n hpoId=AZ_HPO_ID,\n )\n )\n org_dao.insert(\n Organization(\n organizationId=PITT_ORG_ID,\n externalId=\"PITT_BANNER_HEALTH\",\n displayName=\"PITT display Banner Health\",\n hpoId=PITT_HPO_ID,\n )\n )\n\n site_dao = SiteDao()\n site_dao.insert(\n Site(\n siteName=\"Monroeville Urgent Care Center\",\n googleGroup=\"hpo-site-monroeville\",\n mayolinkClientNumber=7035769,\n organizationId=PITT_ORG_ID,\n hpoId=PITT_HPO_ID,\n )\n )\n site_dao.insert(\n Site(\n siteName=\"Phoenix Urgent Care Center\",\n googleGroup=\"hpo-site-bannerphoenix\",\n mayolinkClientNumber=7035770,\n organizationId=PITT_ORG_ID,\n hpoId=PITT_HPO_ID,\n )\n )\n site_dao.insert(\n Site(\n siteName=\"Phoenix clinic\",\n googleGroup=\"hpo-site-clinic-phoenix\",\n mayolinkClientNumber=7035770,\n organizationId=AZ_ORG_ID,\n hpoId=AZ_HPO_ID,\n )\n )", "def generate_test_floors(self):\n def generate_floors_for_location(floor_names, location_name):\n item_dict = {}\n for name in floor_names:\n number = int(''.join([n for n in name if n.isdigit()]))\n item_dict['{}_{}'.format(name, location_name)] = {\n 'number': number,\n 'location': location_name\n }\n return item_dict\n\n self.fs_l1_o1_dict = \\\n generate_floors_for_location(\n ['f0', 'f1', 'f2', 'f3_del', 'f4_del'],\n 'l1_o1')\n\n self.fs_l1_sub1_o1_dict = \\\n generate_floors_for_location(\n ['f0', 'f1', 'f2', 'f3', 'f4'],\n 'l1_sub1_o1')\n\n self.fs_l1_o2_dict = \\\n generate_floors_for_location(\n ['f0', 'f1', 
'f2'],\n 'l1_o2')\n\n self.fs_l1_sub1_o2_dict = \\\n generate_floors_for_location(\n ['f0', 'f1', 'f2'],\n 'l1_sub1_o2')\n\n self.fs_dict = {\n **self.fs_l1_o1_dict,\n **self.fs_l1_sub1_o1_dict,\n **self.fs_l1_o2_dict,\n **self.fs_l1_sub1_o2_dict,\n }\n\n self.floors = self.create_floors_from_data(\n self.fs_dict, self.locations)", "def __init__(self, request, **kwargs):\n super(PSIHDReport, self).__init__(request, **kwargs)\n calculate_fn = lambda key, _: key[len(self.place_types) + 1]\n self.columns['demo_type'] = Column(\"Worker Type\", calculate_fn=calculate_fn)\n self.columns['demo_type'].view = FunctionView(calculate_fn=calculate_fn)\n self.function_views['demo_type'] = self.columns['demo_type'].view", "def _setup_metadata(self):\n # loom_metadata is what we use to pass all the information about\n # the loom (max_depth, which typeshapes are supported, and the signatures of\n # the LoomOps) to scheduler.cc\n loom_metadata = loom_pb2.LoomMetadata()\n loom_metadata.max_depth = self._max_depth\n for ts, tensor_names in zip(\n self._type_shapes, self._ts_idx_to_tensor_names):\n type_shape_metadata = loom_metadata.type_shape_metadata.add()\n type_shape_metadata.dtype = ts.dtype_enum\n type_shape_metadata.shape.extend(ts.shape)\n type_shape_metadata.tag = ts.tag\n type_shape_metadata.name = str(ts) # Debug string.\n type_shape_metadata.tensor_names.extend(tensor_names)\n type_shape_metadata.is_batch_input = (\n (ts in self._batch_inputs) or self._direct_feed_dict)\n\n for op_name, op in zip(self._loom_op_names, self._loom_ops):\n op_metadata = loom_metadata.op_metadata.add()\n op_metadata.name = op_name\n op_metadata.input_ts_idx.extend(\n self._type_shape_to_idx[ts] for ts in op.input_type_shapes)\n op_metadata.output_ts_idx.extend(\n self._type_shape_to_idx[ts] for ts in op.output_type_shapes)\n\n self._loom_metadata_str = (\n loom_metadata.SerializeToString())", "def build_metrics_columns(manager):\r\n manager.gen_labordollar_perhour_column(with_formulas=False)\r\n manager.gen_laborhours_unitarea(with_formulas=False)\r\n manager.color_column(\"Labor $/Hr\")\r\n manager.color_column(\"Labor Hours/Unit Area\")", "def __init__(self,cosmology, mass_function, halo_physics, kh_vector, mass_bins, volume, kh_min=0, pt_type = 'EFT', pade_resum = True, smooth_density = True, IR_resum = True, npoints = 1000, verb=False):\n\n # Write attributes, if they're of the correct type\n if isinstance(cosmology, Cosmology):\n self.cosmology = cosmology\n else:\n raise TypeError('cosmology input must be an instance of the Cosmology class!')\n if isinstance(mass_function, MassFunction):\n self.mass_function = mass_function\n else:\n raise TypeError('mass_function input must be an instance of the MassFunction class!')\n if isinstance(halo_physics, HaloPhysics):\n self.halo_physics = halo_physics\n else:\n raise TypeError('halo_physics input must be an instance of the HaloPhysics class!')\n\n # Write useful attributes\n self.kh_vector = kh_vector\n self.kh_min = kh_min\n self.mass_bins = mass_bins\n self.N_bins = len(mass_bins)-1\n self.N_k = len(self.kh_vector)\n self.volume = volume\n self.verb = verb\n self.pt_type = pt_type\n self.pade_resum = pade_resum\n self.smooth_density = smooth_density\n self.IR_resum = IR_resum\n self.npoints = npoints\n\n # Generate a power spectrum class with this k-vector\n self.halo_model = HaloModel(cosmology, mass_function, halo_physics, kh_vector, kh_min,verb=self.verb)\n\n # Copy in the MassIntegrals class\n self.mass_integrals = self.halo_model.mass_integrals\n\n if 
self.cosmology.use_neutrinos:\n if self.verb:\n print(\"Note: massive neutrinos are not implemented in full, so we assume CDM+baryon power spectra here.\")\n print(\"(This will creates only a (subdominant) percent-level error for typical neutrino masses.)\")\n\n # Run some checks\n assert self.mass_bins[0]>=np.power(10.,self.mass_integrals.min_logM_h), 'Minimum bin must be above MassIntegral limit!'\n assert self.mass_bins[-1]<=np.power(10.,self.mass_integrals.max_logM_h), 'Maximum bin must be below MassIntegral limit!'\n\n # Compute linear power for the k-vector\n self.linear_power = self.cosmology.compute_linear_power(self.kh_vector,self.kh_min).copy()", "def init_columns(self, project, columns):\n self.projects.update_entry(pk=project, project={\"columns\": []}).result()\n cols = []\n\n for path, unit in columns.items():\n col = {\"path\": f\"data.{path}\"}\n if unit is not None:\n col[\"unit\"] = unit\n\n cols.append(col)\n\n return self.projects.update_entry(\n pk=project, project={\"columns\": cols}\n ).result()", "def _add_vars_metadata(nemo_hr):\n nemo_hr.atmpres.attrs[\"level\"] = \"mean sea level\"\n nemo_hr.atmpres.attrs[\"long_name\"] = \"Pressure Reduced to MSL\"\n nemo_hr.atmpres.attrs[\"standard_name\"] = \"air_pressure_at_sea_level\"\n nemo_hr.atmpres.attrs[\"units\"] = \"Pa\"\n\n # nemo_hr.LHTFL_surface.attrs[\"level\"] = \"surface\"\n # nemo_hr.LHTFL_surface.attrs[\"long_name\"] = \"\"\n # nemo_hr.LHTFL_surface.attrs[\"standard_name\"] = \"\"\n # nemo_hr.LHTFL_surface.attrs[\"units\"] = \"\"\n # nemo_hr.LHTFL_surface.attrs[\"ioos_category\"] = \"\"\n # nemo_hr.LHTFL_surface.attrs[\"comment\"] = \"how calculated\"\n\n nemo_hr.percentcloud.attrs[\"long_name\"] = \"Cloud Fraction\"\n nemo_hr.percentcloud.attrs[\"standard_name\"] = \"cloud_area_fraction\"\n nemo_hr.percentcloud.attrs[\"units\"] = \"percent\"\n\n nemo_hr.PRATE_surface.attrs[\"level\"] = \"surface\"\n nemo_hr.PRATE_surface.attrs[\"long_name\"] = \"Precipitation Rate\"\n nemo_hr.PRATE_surface.attrs[\"standard_name\"] = \"precipitation_flux\"\n nemo_hr.PRATE_surface.attrs[\"units\"] = \"kg/m^2/s\"\n\n nemo_hr.nav_lat.attrs[\"ioos_category\"] = \"location\"\n\n nemo_hr.nav_lon.attrs[\"ioos_category\"] = \"location\"\n\n nemo_hr.precip.attrs[\"level\"] = \"surface\"\n nemo_hr.precip.attrs[\"long_name\"] = \"Total Precipitation\"\n nemo_hr.precip.attrs[\"standard_name\"] = \"precipitation_flux\"\n nemo_hr.precip.attrs[\"units\"] = \"kg/m^2/s\"\n\n nemo_hr.qair.attrs[\"level\"] = \"2 m above surface\"\n nemo_hr.qair.attrs[\"long_name\"] = \"Specific Humidity\"\n nemo_hr.qair.attrs[\"standard_name\"] = \"specific_humidity_2maboveground\"\n nemo_hr.qair.attrs[\"units\"] = \"kg/kg\"\n nemo_hr.qair.attrs[\n \"comment\"\n ] = \"calculated from sea level air pressure and dewpoint temperature via WMO 2012 ocean best practices\"\n\n nemo_hr.RH_2maboveground.attrs[\"level\"] = \"2 m above surface\"\n nemo_hr.RH_2maboveground.attrs[\"long_name\"] = \"Relative Humidity\"\n nemo_hr.RH_2maboveground.attrs[\"standard_name\"] = \"relative_humidity_2maboveground\"\n nemo_hr.RH_2maboveground.attrs[\"units\"] = \"percent\"\n nemo_hr.RH_2maboveground.attrs[\n \"comment\"\n ] = \"calculated from air temperature and dewpoint temperature via WMO 2012 ocean best practices\"\n\n nemo_hr.solar.attrs[\"level\"] = \"surface\"\n nemo_hr.solar.attrs[\"long_name\"] = \"Downward Short-Wave Radiation Flux\"\n nemo_hr.solar.attrs[\"standard_name\"] = \"net_downward_shortwave_flux_in_air\"\n nemo_hr.solar.attrs[\"units\"] = \"W/m^2\"\n\n 
nemo_hr.tair.attrs[\"level\"] = \"2 m above surface\"\n nemo_hr.tair.attrs[\"long_name\"] = \"Air Temperature\"\n nemo_hr.tair.attrs[\"standard_name\"] = \"air_temperature_2maboveground\"\n nemo_hr.tair.attrs[\"units\"] = \"K\"\n\n nemo_hr.therm_rad.attrs[\"level\"] = \"surface\"\n nemo_hr.therm_rad.attrs[\"long_name\"] = \"Downward Long-Wave Radiation Flux\"\n nemo_hr.therm_rad.attrs[\"standard_name\"] = \"net_downward_longwave_flux_in_air\"\n nemo_hr.therm_rad.attrs[\"units\"] = \"W/m^2\"\n nemo_hr.therm_rad.attrs[\"comment\"] = (\n \"calculated from saturation water vapour pressure, air temperature, and cloud fraction \"\n \"via Dilly-Unsworth correlation\"\n )\n\n nemo_hr.u_wind.attrs[\"level\"] = \"10 m above surface\"\n nemo_hr.u_wind.attrs[\"long_name\"] = \"U-Component of Wind\"\n nemo_hr.u_wind.attrs[\"standard_name\"] = \"x_wind\"\n nemo_hr.u_wind.attrs[\"units\"] = \"m/s\"\n nemo_hr.u_wind.attrs[\"ioos_category\"] = \"wind speed and direction\"\n\n nemo_hr.v_wind.attrs[\"level\"] = \"10 m above surface\"\n nemo_hr.v_wind.attrs[\"long_name\"] = \"V-Component of Wind\"\n nemo_hr.v_wind.attrs[\"standard_name\"] = \"y_wind\"\n nemo_hr.v_wind.attrs[\"units\"] = \"m/s\"\n nemo_hr.v_wind.attrs[\"ioos_category\"] = \"wind speed and direction\"\n\n nemo_hr.attrs[\n \"history\"\n ] += f\"\\n{arrow.now().format('ddd MMM DD HH:mm:ss YYYY')}: Add data variables metadata\"", "def setUp(self):\r\n\r\n self.otu_table_values = array([[0, 0, 9, 5, 3, 1],\r\n [1, 5, 4, 0, 3, 2],\r\n [2, 3, 1, 1, 2, 5]])\r\n {(0, 2): 9.0, (0, 3): 5.0, (0, 4): 3.0, (0, 5): 1.0,\r\n (1, 0): 1.0, (1, 1): 5.0, (1, 2): 4.0, (1, 4): 3.0, (1, 5): 2.0,\r\n (2, 0): 2.0, (2, 1): 3.0, (2, 2): 1.0, (2, 3): 1.0, (2, 4): 2.0, (2, 5): 5.0}\r\n self.otu_table = table_factory(self.otu_table_values,\r\n ['Sample1', 'Sample2', 'Sample3',\r\n 'Sample4', 'Sample5', 'Sample6'],\r\n ['OTU1', 'OTU2', 'OTU3'],\r\n [None, None, None, None, None, None],\r\n [{\"taxonomy\": ['Bacteria']},\r\n {\"taxonomy\": ['Archaea']},\r\n {\"taxonomy\": ['Streptococcus']}])\r\n self.otu_table_f = table_factory(self.otu_table_values,\r\n ['Sample1', 'Sample2', 'Sample3',\r\n 'Sample4', 'Sample5', 'Sample6'],\r\n ['OTU1', 'OTU2', 'OTU3'],\r\n [None, None, None, None, None, None],\r\n [{\"taxonomy\": ['1A', '1B', '1C', 'Bacteria']},\r\n {\"taxonomy\":\r\n ['2A', '2B', '2C', 'Archaea']},\r\n {\"taxonomy\": ['3A', '3B', '3C', 'Streptococcus']}])\r\n\r\n self.full_lineages = [['1A', '1B', '1C', 'Bacteria'],\r\n ['2A', '2B', '2C', 'Archaea'],\r\n ['3A', '3B', '3C', 'Streptococcus']]\r\n self.metadata = [[['Sample1', 'NA', 'A'],\r\n ['Sample2', 'NA', 'B'],\r\n ['Sample3', 'NA', 'A'],\r\n ['Sample4', 'NA', 'B'],\r\n ['Sample5', 'NA', 'A'],\r\n ['Sample6', 'NA', 'B']],\r\n ['SampleID', 'CAT1', 'CAT2'], []]\r\n self.tree_text = [\"('OTU3',('OTU1','OTU2'))\"]\r\n fh, self.tmp_heatmap_fpath = mkstemp(prefix='test_heatmap_',\r\n suffix='.pdf')\r\n close(fh)", "def test_load_from_function(self):\n self.test_table.load_from_function(create_small_test_odml)\n self.assertEqual(self.test_table._odmldict, self.expected_odmldict)", "def test_create_from_dicts(self):\n cols = list(zip(*self.dtypes))[0]\n dicts = [dict([(cols[i], d[i]) for i in xrange(len(d))])\n for d in self.idata]\n\n tbl = Table.create(\n ':memory:', \"Bar\", dicts, verbose=True,\n primary_key='id', autoincrement=True)\n\n self.check_index(self.idata, tbl.select())\n for idx, col in enumerate(cols):\n if col == 'id':\n continue\n self.check_data(self.idata[:, [0, idx]], tbl[col])", "def 
_get_mock_phyps(self):\n mock_lpar_4A = mock.Mock()\n mock_lpar_4A.configure_mock(id=4, name='A')\n mock_lpar_4A.processor = mock.MagicMock(\n util_cap_proc_cycles=5005045000,\n util_uncap_proc_cycles=5005045000,\n idle_proc_cycles=10000)\n mock_lpar_4A_prev = mock.Mock()\n mock_lpar_4A_prev.configure_mock(id=4, name='A')\n mock_lpar_4A_prev.processor = mock.MagicMock(\n util_cap_proc_cycles=40000,\n util_uncap_proc_cycles=40000,\n idle_proc_cycles=0)\n mock_phyp = mock.MagicMock(sample=mock.MagicMock(lpars=[mock_lpar_4A]))\n mock_prev_phyp = mock.MagicMock(\n sample=mock.MagicMock(lpars=[mock_lpar_4A_prev]))\n return mock_phyp, mock_prev_phyp" ]
[ "0.53807044", "0.52395046", "0.50270265", "0.49795195", "0.49258766", "0.49224955", "0.4920414", "0.49198395", "0.49087453", "0.49079537", "0.48617426", "0.48479292", "0.4806662", "0.48012027", "0.47516495", "0.47495013", "0.47483745", "0.46836653", "0.46500537", "0.46343023", "0.46319044", "0.4618239", "0.4617335", "0.46090215", "0.45768186", "0.45753703", "0.45643967", "0.45495805", "0.45277986", "0.45172945" ]
0.639631
0
Currently not implemented First print returns date of modifications to the video file Second print prints date of Creation of the video file, literally time when it was written to folder
def creation_date_video(path_to_file): print("Last modified: %s" % time.ctime(os.path.getmtime(path_to_file))) print("Created: %s" % time.ctime(os.path.getctime(path_to_file))) # return os.path.getctime(path_to_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_timestamps(dir_video):\n print(\"Adding creation dates to file names\")\n os.chdir(dir_video)\n # get only top level dir info\n dir_data_video_files = next(os.walk(dir_video))\n list_video_files = dir_data_video_files[2] # get file list\n for f_name in list_video_files:\n if GOPRO_PATTERN.search(f_name):\n f_time = time.strftime(r\"%Y-%m-%d_%H-%M\", time.localtime(os.path.getctime(f_name)))\n os.rename(f_name, f\"{f_time}_{f_name}\")", "def get_file_modification_date() -> str:\n file_modification_date = datetime.now().strftime(\"%d.%m.%Y\")\n print(file_modification_date)\n return file_modification_date", "def creation_date(path_to_file):\n if platform.system() == 'Windows':\n print(\"last modified: %s\" % time.ctime(os.path.getmtime(path_to_file)))\n modtime = time.ctime(os.path.getmtime(path_to_file))\n \n print(\"created: %s\" % time.ctime(os.path.getctime(path_to_file)))\n modtime = datetime.datetime.strptime(modtime, \"%a %b %d %H:%M:%S %Y\")\n modtime = datetime.datetime.strftime(modtime, \"%Y-%m-%d\")\n return modtime", "def printVersionInfo():\n #pass\n pathname = sys.argv[0]\n myMtime = os.stat(pathname)[ST_MTIME]\n modDate = CONFIG['utils'].mktime(myMtime)\n logIt(\"Python Script: \" + pathname + \"\\n\")\n logIt(\"Version Date: \" + modDate + \"\\n\")", "def getTimeToFileName(self):\n return self.sNow.strftime(\"%d-%m-%Y_%H-%M-%S\")", "def media_file_info(self):\n\n if self.observationId and self.playerType == VLC:\n\n media = self.mediaplayer.get_media()\n\n logging.info(\"State: {}\".format(self.mediaplayer.get_state()))\n logging.info(\"Media (get_mrl): {}\".format(bytes_to_str(media.get_mrl())))\n logging.info(\"media.get_meta(0): {}\".format(media.get_meta(0)))\n logging.info(\n \"Track: {}/{}\".format(self.mediaplayer.video_get_track(), self.mediaplayer.video_get_track_count()))\n logging.info(\"number of media in media list: {}\".format(self.media_list.count()))\n logging.info(\"get time: {} duration: {}\".format(self.mediaplayer.get_time(), media.get_duration()))\n logging.info(\"Position: {} %\".format(self.mediaplayer.get_position()))\n logging.info(\"FPS: {}\".format(self.mediaplayer.get_fps()))\n logging.info(\"Rate: {}\".format(self.mediaplayer.get_rate()))\n logging.info(\"Video size: {}\".format(self.mediaplayer.video_get_size(0)))\n logging.info(\"Scale: {}\".format(self.mediaplayer.video_get_scale()))\n logging.info(\"Aspect ratio: {}\".format(self.mediaplayer.video_get_aspect_ratio()))\n logging.info(\"is seekable? {0}\".format(self.mediaplayer.is_seekable()))\n logging.info(\"has_vout? {0}\".format(self.mediaplayer.has_vout()))\n\n vlc_output = (\"State: {}<br>\"\n \"Media Resource Location: {}<br>\"\n \"File name: {}<br>\"\n \"Track: {}/{}<br>\"\n \"Number of media in media list: {}<br>\"\n \"get time: {}<br>\"\n \"duration: {}<br>\"\n \"Position: {} %<br>\"\n \"FPS: {}<br>\"\n \"Rate: {}<br>\"\n \"Video size: {}<br>\"\n \"Scale: {}<br>\"\n \"Aspect ratio: {}<br>\"\n \"is seekable? {}<br>\"\n \"has_vout? 
{}<br>\").format(self.mediaplayer.get_state(),\n bytes_to_str(media.get_mrl()),\n media.get_meta(0),\n self.mediaplayer.video_get_track(),\n self.mediaplayer.video_get_track_count(),\n self.media_list.count(),\n self.mediaplayer.get_time(),\n self.convertTime(media.get_duration() / 1000),\n self.mediaplayer.get_position(),\n self.mediaplayer.get_fps(),\n self.mediaplayer.get_rate(),\n self.mediaplayer.video_get_size(0),\n self.mediaplayer.video_get_scale(),\n self.mediaplayer.video_get_aspect_ratio(),\n \"Yes\" if self.mediaplayer.is_seekable() else \"No\",\n \"Yes\" if self.mediaplayer.has_vout() else \"No\"\n )\n\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Media file information\")\n self.results.ptText.setReadOnly(True)\n\n self.results.ptText.appendHtml(\"<b>VLC analysis</b><hr>\" + vlc_output)\n\n # FFmpeg analysis\n self.results.ptText.appendHtml(\"<br><b>FFmpeg analysis</b><hr>\")\n for nplayer in self.pj[OBSERVATIONS][self.observationId][FILE]:\n for filePath in self.pj[OBSERVATIONS][self.observationId][FILE][nplayer]:\n media_full_path = project_functions.media_full_path(filePath, self.projectFileName)\n # nframes, duration_ms, duration, fps, hasVideo, hasAudio = accurate_media_analysis(self.ffmpeg_bin, media_full_path)\n\n r = utilities.accurate_media_analysis2(self.ffmpeg_bin, media_full_path)\n nframes = r[\"frames_number\"]\n\n if \"error\" in r:\n self.results.ptText.appendHtml(\n \"File path: {filePath}<br><br>{error}<br><br>\".format(filePath=media_full_path,\n error=r[\"error\"]))\n else:\n self.results.ptText.appendHtml(\n \"File path: {}<br>Duration: {}<br>Bitrate: {}k<br>FPS: {}<br>Has video: {}<br>Has audio: {}<br><br>\".\n format(media_full_path, self.convertTime(r[\"duration\"]), r[\"bitrate\"], r[\"fps\"],\n r[\"has_video\"], r[\"has_audio\"]))\n\n self.results.ptText.appendHtml(\"Total duration: {} (hh:mm:ss.sss)\".\n format(self.convertTime(sum(self.duration) / 1000)))\n\n self.results.show()\n\n else:\n\n fn = QFileDialog(self).getOpenFileName(self, \"Select a media file\", \"\", \"Media files (*)\")\n filePath = fn[0] if type(fn) is tuple else fn\n\n if filePath:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Media file information\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(\"<br><b>FFmpeg analysis</b><hr>\")\n # nframes, duration_ms, duration, fps, hasVideo, hasAudio = accurate_media_analysis(self.ffmpeg_bin, filePath)\n r = utilities.accurate_media_analysis2(self.ffmpeg_bin, filePath)\n if \"error\" in r:\n self.results.ptText.appendHtml(\n \"File path: {filePath}<br><br>{error}<br><br>\".format(filePath=filePath, error=r[\"error\"]))\n else:\n self.results.ptText.appendHtml(\n \"File path: {}<br>Duration: {}<br>Bitrate: {}k<br>FPS: {}<br>Has video: {}<br>Has audio: {}<br><br>\".\n format(filePath, self.convertTime(r[\"duration\"]), r[\"bitrate\"], r[\"fps\"], r[\"has_video\"],\n r[\"has_audio\"]))\n\n self.results.show()", "def last_modified():\n return \"Last modified: %s\" % time.ctime(os.path.getmtime(FILE_NAME))", "def check_video_timestamps(movie_file, desired_format='.mp4', desired_framerate=30):\n\n check_video_format(movie_file, desired_format='.mp4', original_format='.avi')\n\n new_movie_file = movie_file+'_tt'+desired_format\n if not os.path.isfile(new_movie_file):\n #Convert file to 30 fps\n cmd = ['ffmpeg', '-i', movie_file+desired_format]\n cmd += ['-r', str(desired_framerate)]\n cmd += ['-y', movie_file+'_t'+desired_format]\n 
cmd_string = ''.join([\"%s \" % el for el in cmd]) \n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()\n\n #Add timecode text to video\n cmd = 'ffmpeg -i '+movie_file+'_t'+desired_format+' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: timecode=\\'00\\:00\\:00\\:00\\':rate=30: [email protected]: x=7: y=460\\\" -an -y '+movie_file+'_tt'+desired_format\n args = shlex.split(cmd)\n #print args\n p = subprocess.Popen(args, shell=False)\n p.wait()\n\n os.remove(movie_file+'_t'+desired_format)\n\n return new_movie_file", "def get_creation_time(ts):\n path_to_embed_file = os.path.join(DATA_DIR, STUDY, \"experiment_files\", \"experiment_\"+ ts, \"triplet_training_validation_embeddings.h5\")\n\n if os.path.exists(path_to_embed_file):\n stat = os.stat(path_to_embed_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime\n else:\n print (\"here, path is: \", path_to_embed_file)\n return None", "def video_times():\n p = parse_cmdline(get_parser=get_parser_times)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.DEBUG)\n vis.show_video_abi_glm_times(\n start_date=p.start_time,\n end_date=p.end_time,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir,\n sector=p.sector,\n area=p.area)\n print(\"Files written to:\", p.outdir)", "def record_button_action(self):\r\n #on button click Stop/Start recording\r\n if self.RECORD_FLAG == True: #if recording\r\n self.RECORD_FLAG = False #stop recording\r\n return #and quit\r\n \r\n #frame height width\r\n size = (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),\r\n int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\r\n \r\n # ** filename - 'date_time.avi'\r\n #get time date\r\n dt = datetime.datetime.now()\r\n # %b month - %Y year - day %d - %H Hour - %M minute - %S second\r\n # *Zero padded\r\n str_dt = (str(dt.strftime('%b')) \r\n + str(dt.strftime('%Y')) \r\n + str(dt.strftime('%d'))\r\n + \"_\"\r\n + str(dt.strftime('%H'))\r\n + str(dt.strftime('%M'))\r\n + str(dt.strftime('%S'))\r\n )\r\n #print(str_dt)\r\n\r\n self.video_writer = cv2.VideoWriter(\r\n f\"{PARENT_PATH}//{VIDEO_RECORD_DIR}//rec{str_dt}.avi\",\r\n cv2.VideoWriter_fourcc('I', '4', '2', '0'),\r\n FPS, size)\r\n\r\n self.RECORD_FLAG = True #start recording\r\n return", "def get_mod_time(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n raw_date_time = file_meta_plist['$objects'][1]['LastModified']\n converted_time = datetime.datetime.fromtimestamp(raw_date_time)\n converted_time = converted_time.timetuple()\n return converted_time\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['modified'].timetuple()", "def process_file(self, file_info, sort_option):\n video_orig_writeback = open(\n str(file_info.current_folder + '/' + file_info.filename), 'rb')\n\n video_file_orig = video_orig_writeback.read()\n video_orig_writeback.close()\n\n time = os.path.getmtime(\n file_info.current_folder + \"\\\\\" + file_info.filename)\n date = datetime.datetime.fromtimestamp(\n time).strftime('%Y-%m-%d %H:%M:%S')\n file_name = helper.create_filename_for_file(\n sort_option, MP4[1:], date)\n helper.write_file(file_name, file_info.destination + \"\\\\\", video_file_orig)\n self.incr_writes()", "def 
getLastModifiedTime(self): #$NON-NLS-1$\r", "def mtime(name):", "def last_video(self) -> str:\n return max(glob.glob(VIDEOS_DIR), key=os.path.getmtime)", "def get_upload_date(self, video_ID):\n self.cur.execute(\"SELECT upload_date FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]", "def mtime(self):\r\n return self.info().mtime", "def watch_movie():\r\n if os.path.isfile('files/final_movie.mp4'): # if the file exists\r\n with open('files/final_movie.mp4', 'rb') as f:\r\n video_data = f.read()\r\n st.video(video_data)\r\n else: # if the file doesn't exist, let the user know\r\n st.header(\"You haven't created a movie yet!\")", "def timestamp():\n print(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\") + \" \" + __file__)", "def creation_month(path_to_file):\n if platform.system() == 'Windows':\n print(\"last modified: %s\" % time.ctime(os.path.getmtime(path_to_file)))\n modtime = time.ctime(os.path.getmtime(path_to_file))\n \n print(\"created: %s\" % time.ctime(os.path.getctime(path_to_file)))\n modtime = datetime.datetime.strptime(modtime, \"%a %b %d %H:%M:%S %Y\")\n modtime = datetime.datetime.strftime(modtime, \"%B\")\n return modtime", "def get_info(frame_or_sketch_or_vid_path):\n if \".mp4\" not in frame_or_sketch_or_vid_path:\n # invalid file path ()\n # TODO: allow other video extensions\n return None\n\n ret_dict = {}\n ret_dict[\"path\"] = frame_or_sketch_or_vid_path\n ret_dict[\"file_name\"] = utils.get_file_name(frame_or_sketch_or_vid_path)\n ret_dict[\"file_ext\"] = utils.get_file_ext(frame_or_sketch_or_vid_path)\n\n # find video file name = video_id\n file_dir_last = utils.get_nth_parentdir(frame_or_sketch_or_vid_path)\n\n # file_dir_full = utils.get_file_path(frame_or_sketch_or_vid_path)\n # file_name = utils.get_full_file_name(frame_or_sketch_or_vid_path)\n\n video_id = f\"{file_dir_last.split('.mp4_')[0]}.mp4\"\n start_end_time = file_dir_last.split(\".mp4_\")[1]\n start_end_time_parts = start_end_time.split(\"_\")\n\n # OLD\n # tmp = frame_or_sketch_or_vid_path.rsplit(\"video_\")[1].replace(\".mp4\", \"\")\n # tmp_parts = tmp.split(\"/\")[0].split(\"_\") # remove frame part if existent\n # ret_dict[\"video_id\"] = tmp_parts[0]\n # ret_dict[\"start_time\"] = float(tmp_parts[1])\n # ret_dict[\"end_time\"] = ret_dict[\"start_time\"]\n\n ret_dict[\"video_id\"] = video_id\n ret_dict[\"start_time\"] = float(start_end_time_parts[0])\n if len(start_end_time_parts) > 1:\n ret_dict[\"end_time\"] = float(start_end_time_parts[1])\n\n if ret_dict[\"file_ext\"] == \".jpg\":\n ret_dict[\"frame\"] = int(ret_dict[\"file_name\"].split(\"_\")[1])\n elif ret_dict[\"file_ext\"] == \".json\":\n ret_dict[\"frame\"] = get_sketch_frame(ret_dict[\"path\"])\n else:\n ret_dict[\"fps\"] = get_fps(ret_dict[\"path\"])\n ret_dict[\"start_frame\"] = time_to_frame(ret_dict[\"start_time\"], ret_dict[\"fps\"])\n ret_dict[\"end_frame\"] = time_to_frame(ret_dict[\"end_time\"], ret_dict[\"fps\"])\n return ret_dict", "def time(self):\r\n return conf.lib.clang_getFileTime(self)", "def last_videos_recorded(self) -> list:\n return sorted(glob.glob(VIDEOS_DIR), key=os.path.getmtime)[-20:]", "def GetModTime(self):\n return self.file.ModTime", "def writeMetadata(path,filename,filetype,ObjectList,VideoRecorder = None):\n\tprint('writing metadata, for saving to {}'.format(path+filename+'.pickle'))\n\tnow = datetime.datetime.now() # current date and time\n\tmetadata = OrderedDict()\n\tmetadata['Path'] = path\n\tmetadata['Filename'] = filename\n\tmetadata['Format'] = 
filetype\n\tmetadata['datetime'] = now\n\tv = cv2.VideoCapture(path+filename+filetype)\n\tmetadata['Frames'] = v.get(cv2.CAP_PROP_FRAME_COUNT)\n\n\tif VideoRecorder is not None:\n\t\tfps = VideoRecorder.FPStracker.fps() # if you have a more accurate measure\n\telse:\n\t\ttry:\n\t\t\tfps = loadData(path,filename)[0]['FPS']\n\t\texcept:\n\t\t\tfps = None\n\t\tif fps is not None:\n\t\t\tpass\n\t\telse:\n\t\t\tfps = v.get(cv2.CAP_PROP_FPS) # trusting camera FPS\n\tmetadata['FPS'] = fps\n\tmetadata['Length'] = metadata['Frames']/metadata['FPS']\n\tmetadata['Resolution'] = [v.get(3),v.get(4)]\n\tv.release()\n\t# Save the object description (not the x,y,theta data: no processing yet)\n\t# and tracker coordinates for every object\n\tmetadata['Num Objects'] = len(ObjectList)\n\tfor i,object in enumerate(ObjectList):\n\t\tkey = \"object{}\".format(i)\n\t\tt1 = object.Tracker1\n\t\tt2 = object.Tracker2\n\t\tcoord1 = [t1.x,t1.y,t1.w,t1.h,t1.ang]\n\t\tcoord2 = [t2.x,t2.y,t2.w,t2.h,t2.ang]\n\t\tmetadata[key+'_ID'] = object.ID\n\t\tmetadata[key+'_profile'] = object.IDprofile\n\t\tmetadata[key+'_Tracker1_Coords'] = coord1\n\t\tmetadata[key+'_Tracker1_BGR_range'] = t1.bgrRange\n\t\tmetadata[key+'_Tracker2_Coords'] = coord2\n\t\tmetadata[key+'_Tracker2_BGR_range'] = t2.bgrRange\n\treturn metadata", "def _GetUpdateTime(filename):\n stat_info = os.stat(filename)\n return (stat_info.st_atime, stat_info.st_mtime)", "def creation_year(path_to_file):\n if platform.system() == 'Windows':\n print(\"last modified: %s\" % time.ctime(os.path.getmtime(path_to_file)))\n modtime = time.ctime(os.path.getmtime(path_to_file))\n \n print(\"created: %s\" % time.ctime(os.path.getctime(path_to_file)))\n modtime = datetime.datetime.strptime(modtime, \"%a %b %d %H:%M:%S %Y\")\n modtime = datetime.datetime.strftime(modtime, \"%Y\")\n return modtime", "def get_source_ctime(self):\n return self.source_file_ctime", "def _get_video_filename(self):\n fnd = self._get_session_dir()\n self.video_number += 1\n fn = os.path.join(fnd, 'V%4.4d.avi' % self.video_number)\n return fn" ]
[ "0.6664668", "0.6570803", "0.6459046", "0.6410988", "0.63897413", "0.6233249", "0.6155345", "0.6155294", "0.6054676", "0.6041707", "0.6028616", "0.59397346", "0.5918884", "0.5896831", "0.58840746", "0.5882793", "0.58803463", "0.587276", "0.5860853", "0.5849358", "0.58296496", "0.58121204", "0.5811267", "0.58063513", "0.5797488", "0.5792633", "0.5781213", "0.57787424", "0.57633865", "0.5763021" ]
0.8283047
0
Actions for Diffrn objects.
def action_diffrn(obj: Diffrn, thread: QtCore.QThread): w_actions = [] f_setup = obj.is_attribute("setup") f_diffrn_radiation = obj.is_attribute("diffrn_radiation") f_diffrn_orient_matrix = obj.is_attribute("diffrn_orient_matrix") f_diffrn_refln = obj.is_attribute("diffrn_refln") f_phase = obj.is_attribute("phase") if not(f_setup & f_diffrn_radiation & f_diffrn_orient_matrix & f_diffrn_refln & f_phase): if not(f_setup): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add setup") qtb_1.clicked.connect(lambda: add_items(obj, [Setup()], thread)) w_actions.append(qtb_1) if not(f_diffrn_radiation): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add diffrn_radiation") qtb_1.clicked.connect(lambda: add_items( obj, [DiffrnRadiation()], thread)) w_actions.append(qtb_1) if not(f_diffrn_orient_matrix): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add diffrn_orient_matrix") qtb_1.clicked.connect(lambda: add_items(obj, [DiffrnOrientMatrix( ub_11=1., ub_12=0., ub_13=0., ub_21=0., ub_22=1., ub_23=0., ub_31=0., ub_32=0., ub_33=1.,)], thread)) w_actions.append(qtb_1) if not(f_diffrn_refln): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add diffrn_refln") qtb_1.clicked.connect(lambda: add_items( obj, [DiffrnReflnL()], thread)) w_actions.append(qtb_1) if not(f_phase): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add phase") qtb_1.clicked.connect(lambda: add_items(obj, [ Phase(label="phase")], thread)) w_actions.append(qtb_1) if f_diffrn_refln: diffrn_refln = obj.diffrn_refln w_actions.extend(action_diffrn_refln_l(diffrn_refln, thread)) if f_diffrn_orient_matrix: diffrn_orient_matrix = obj.diffrn_orient_matrix w_actions.extend(action_diffrn_orient_matrix( diffrn_orient_matrix, thread)) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_diffs(history):\n\n # First get all possible representations\n mgr = plugins_get_mgr() \n keys = mgr.search('representation')['representation']\n representations = [mgr.get_by_key('representation', k) for k in keys]\n\n for i in range(len(history)):\n if i+1 > len(history) - 1:\n continue\n\n prev = history[i]\n curr = history[i+1]\n\n #print(prev['subject'], \"==>\", curr['subject'])\n #print(curr['changes'])\n for c in curr['changes']:\n \n path = c['path']\n\n # Skip the metadata file\n if c['path'].endswith('datapackage.json'): \n continue \n\n # Find a handler for this kind of file...\n handler = None \n for r in representations: \n if r.can_process(path): \n handler = r \n break \n \n if handler is None: \n continue \n\n # print(path, \"being handled by\", handler)\n\n v1_hex = prev['commit']\n v2_hex = curr['commit']\n\n temp1 = tempfile.mkdtemp(prefix=\"dgit-diff-\") \n \n try: \n for h in [v1_hex, v2_hex]: \n filename = '{}/{}/checkout.tar'.format(temp1, h)\n try:\n os.makedirs(os.path.dirname(filename))\n except:\n pass \n extractcmd = ['git', 'archive', '-o', filename, h, path]\n output = run(extractcmd)\n if 'fatal' in output: \n raise Exception(\"File not present in commit\") \n with cd(os.path.dirname(filename)): \n cmd = ['tar', 'xvf', 'checkout.tar']\n output = run(cmd) \n if 'fatal' in output: \n print(\"Cleaning up - fatal 1\", temp1)\n shutil.rmtree(temp1)\n continue \n\n # Check to make sure that \n path1 = os.path.join(temp1, v1_hex, path) \n path2 = os.path.join(temp1, v2_hex, path) \n if not os.path.exists(path1) or not os.path.exists(path2): \n # print(\"One of the two output files is missing\") \n shutil.rmtree(temp1)\n continue \n\n #print(path1, path2) \n\n # Now call the handler\n diff = handler.get_diff(path1, path2)\n\n # print(\"Inserting diff\", diff)\n c['diff'] = diff\n\n except Exception as e: \n #traceback.print_exc() \n #print(\"Cleaning up - Exception \", temp1)\n shutil.rmtree(temp1)", "def __actions__(self, obj):\n primary_fields = self.__provider__.get_primary_fields(self.__entity__)\n pklist = '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n #if has_permission('manage'):############\n \n historial = DBSession.query(Item.nrohistorial).filter_by(id=pklist).first()\n idlineabase = DBSession.query(Item.idLineaBase).filter_by(nrohistorial=historial, ultimaversion=1).first()\n lineabase = DBSession.query(LineaBase).filter_by(id=idlineabase).first()\n \n value = '<div></div>'\n \n if lineabase != None:\n if str(lineabase.estado).__eq__('abierta'):\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n else:\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n \n return value", "def PostProcessDiff(self, diff):\r\n return diff", "def run(self, obj, diff):\n for a in AlertRule.objects.all():\n a = json.loads(a.alert_rule)\n\n if a[\"object\"] == obj.__class__.__name__:\n if a[\"attribute\"] in diff:\n if diff[a[\"attribute\"]] == a[\"changed_to\"]:\n # Criteria Satisfied. 
Run Alert Action\n\n subject = Template(a[\"action\"][\"subject\"]).render(c)\n msg = Template(a[\"action\"][\"message\"]).render(c)\n\n if \"type\" == \"email\":\n # Fill out subject/message Template\n c = Context({\n \"object\": obj,\n \"diff\": diff\n })\n\n if a[\"action\"][\"type\"] == \"email\":\n send_mail(\n subject,\n msg,\n settings.DEFAULT_FROM_EMAIL,\n [a[\"action\"][\"to\"]],\n fail_silently=False,\n )\n\n # TODO Add More Alert Types (phone, text, im)", "def actions(self):\n raise NotImplementedError", "def _diff(self, param, diff):\n pass", "def svn_diff_diff(*args):\n return _diff.svn_diff_diff(*args)", "def diff(request):\n if request.patch.no_base_file:\n # Can't show side-by-side diff since we don't have the base file. Show the\n # unified diff instead.\n return patch_helper(request, 'diff')\n\n patchset = request.patchset\n patch = request.patch\n\n patchsets = list(request.issue.patchsets)\n\n context = _get_context_for_user(request)\n column_width = _get_column_width_for_user(request)\n if patch.is_binary:\n rows = None\n else:\n try:\n rows = _get_diff_table_rows(request, patch, context, column_width)\n except FetchError as err:\n return HttpTextResponse(str(err), status=404)\n\n _add_next_prev(patchset, patch)\n return respond(request, 'diff.html',\n {'issue': request.issue,\n 'patchset': patchset,\n 'patch': patch,\n 'view_style': 'diff',\n 'rows': rows,\n 'context': context,\n 'context_values': models.CONTEXT_CHOICES,\n 'column_width': column_width,\n 'patchsets': patchsets,\n })", "def getChanges():", "def cmd_get_diff(base, target):\n return ['git', 'diff', base, target]", "def __actions__(self, obj):\n bool_ultimo = obj.bool_ultimo \n primary_fields = self.__provider__.get_primary_fields(self.__entity__)\n pklist = '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n\n if bool_ultimo == 1:\n cod_item = obj.cod_item\n value = '<div>'\n if has_permission('editar_item'):\n value = value + '<div><a class=\"edit_link\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">edit</a></div>'\n if has_permission('eliminar_relacion'):\n value = value + '<div><form method=\"POST\" action=\"'+pklist+'\" class=\"button-to\"><input type=\"hidden\" name=\"_method\" value=\"DELETE\" /><input class=\"delete-button\" onclick=\"return confirm(\\'Are you sure?\\');\" value=\"delete\" type=\"submit\" style=\"background-color: transparent; float:left; border:0; color: #286571; display: inline; margin: 0; padding: 0;\"/></form></div>'\n value = value + '<div><a class=\"relacion_link\" href=\"../relacions/?iid='+pklist+'\">Relaciones </a><br/><a class=\"versiones_link\" href=\"./?codi='+cod_item+'\">Revertir</a></div></div>'\n \n else:\n id_item_rev = DBSession.query(Item).filter_by(cod_item = obj.cod_item, bool_ultimo = 1).one().id_item\n ids = str(pklist) + \"-\" + str(id_item_rev)\n href = \"./revertir/?ids=\" + ids\n value = '<div><div><a class=\"edit_link\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">edit</a>'\\\n '</div><div>'\\\n '<form method=\"POST\" action=\"'+pklist+'\" class=\"button-to\">'\\\n '<input type=\"hidden\" name=\"_method\" value=\"DELETE\" />'\\\n '<input class=\"delete-button\" onclick=\"return confirm(\\'Are you sure?\\');\" value=\"delete\" type=\"submit\" '\\\n 'style=\"background-color: transparent; float:left; border:0; color: #286571; display: inline; margin: 0; padding: 0;\"/>'\\\n '</form>'\\\n '<a class=\"relacion_link\" href=\"../relacions/?iid='+pklist+'\">Relaciones </a>'\\\n '<a class=\"volver_link\" href=\"'+href+'\">Volver 
a</a>'\\\n '</div></div>'\n\n return value", "def transact(self):", "def transact(self):", "def objects(self):", "def _populateModel(self):\n\n self.repoPath = self.argv[1]\n self.rev = self.argv[2]\n self.model.rev = self.rev\n self.model.repo = os.path.split(self.repoPath)[-1]\n self.prefix = (self.addRepoPrefix() and ('/' + self.model.repo)) or ''\n\n # First, get the user and log message\n lines = self._svnlook('info')\n self.model.user = lines[0][:-1]\n self.model.log = ''.join(lines[3:]).strip()\n\n # Now build an initial tree of file and tree changes\n for line in self._svnlook('changed'):\n action = self.actions[line[0]]\n target = '/' + line[4:-1]\n\n if target.endswith('/'):\n directory = self.model.directory(self.prefix + target)\n directory.action = action\n else:\n parts = target.split('/')\n name = parts[-1]\n directoryPath = '/' + '/'.join(parts[0:-1]) + '/'\n\n file = File(name, self.model.directory(self.prefix + directoryPath), action)\n\n # Markers to tell us when we hit a new diff\n markers = ['Modified', 'Added', 'Copied', 'Deleted', 'Property changes on']\n\n # Recontruct each diff by parsing through the output of svnlook line by line\n diffs = []\n partialDiff = None\n\n #A marker word after a \"____\" line is a change in a property and shouldn't be added as a change\n #in a file. InProperty keeps track of this. If it's 0 this is a normal line, any larger \n #and it's a property line.\n inProperty = 1\n for line in self.getDiffLines():\n inProperty = max(0, inProperty-1)\n if line == \"___________________________________________________________________\\n\":\n inProperty = 2\n\n # Look for Modified:, Added:, etc.\n if line[0:line.find(':')] in markers and not inProperty > 0:\n # Handle starting a new diff\n partialDiff = [line]\n diffs.append(partialDiff)\n elif partialDiff:\n partialDiff.append(line)\n\n if len(diffs) == 0:\n for file in self.model.files():\n file.delta = '<Unavailable>'\n file.diff = ''\n\n # And finally parse through the diffs and save them into our tree of changes\n for diff in diffs:\n # Use [:-1] to leave of the trailing \\n\n start = diff[0].find(': ') + 2\n stop = diff[0].find('(') - 1 # -1 ignores the space before the paren\n if stop == -2: stop = len(diff[0])\n\n filePath = '/' + diff[0][:-1][start:stop]\n\n # This could be a file or a directory - going ahead with the .file()\n # call for most directories is fine as it will just return null.\n #\n # Howeever, root / will exception out as an invalid file path so\n # just special case it\n if filePath == '/':\n file = None\n else:\n file = self.model.file(self.prefix + filePath)\n\n # Maybe its a directory\n if file:\n isFile = True\n else:\n file = self.model.directory(self.prefix + filePath + '/')\n isFile = False\n\n if not diff[0].startswith('Property changes on:'):\n file.delta, file.diff = self._parse_diff(diff)\n else:\n if file.diff:\n # Only files will already have a diff set\n file.diff = file.diff + '\\n\\n' + ''.join(diff)\n else:\n # If the 'Property changes on' line is here without a\n # file.diff, that file.diff will never come because it would\n # have been printed before us\n if isFile:\n sep = '===================================================================\\n\\n'\n file.diff = ''.join([sep] + diff)\n file.delta = '+0 -0'\n else:\n file.diff = ''.join(diff)", "def diff(self):\n return self.client.api.diff(self.id)", "def list_operations():", "def GenerateDiff(self, args):\r\n raise NotImplementedError(\r\n \"abstract method -- subclass %s must override\" % 
self.__class__)", "def __actions__(self, obj):\n\t\t\tprimary_fields \t= self.__provider__.get_primary_fields(self.__entity__)\n\t\t\tpklist \t\t= '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n\n\t\t\tvalue \t\t= '<div>'\n\t\t\tif has_permission('editar_LB'):\n\t\t\t\tvalue = value + '<div><a class=\"edit_link\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">edit</a></div>'\n\t\t\tif has_permission('eliminar_LB'):\n\t\t\t\tvalue = value + '<div><form method=\"POST\" action=\"'+pklist+'\" class=\"button-to\"><input type=\"hidden\" name=\"_method\" value=\"DELETE\" /><input class=\"delete-button\" onclick=\"return confirm(\\'Est&aacute; seguro que desea eliminar?\\');\" value=\"delete\" type=\"submit\" style=\"background-color: transparent; float:left; border:0; color: #286571; display: inline; margin: 0; padding: 0;\"/></form></div>'\n\t\t\tvalue = value + '</div>'\n\t\t\treturn value", "def diff(self):\n if self.event == 'Create':\n old = ''\n else:\n # Get the Change just ahead of _this_ change because that has the\n # state of the Resource before this Change occurred.\n # TODO(nickpegg): Get rid of this if we change the behavior of\n # Change to store the previous version of the object\n old_change = Change.objects.filter(\n change_at__lt=self.change_at,\n resource_id=self.resource_id,\n resource_name=self.resource_name\n ).order_by(\n '-change_at'\n ).first()\n old = json.dumps(old_change._resource, indent=2, sort_keys=True)\n\n if self.event == 'Delete':\n current = ''\n else:\n resource = apps.get_model(self._meta.app_label, self.resource_name)\n obj = resource.objects.get(pk=self.resource_id)\n\n serializer_class = self.get_serializer_for_resource(\n self.resource_name)\n serializer = serializer_class(obj)\n current = json.dumps(serializer.data, indent=2, sort_keys=True)\n\n diff = \"\\n\".join(difflib.ndiff(\n old.splitlines(),\n current.splitlines()\n ))\n\n return diff", "def do_manipulations(self, *args, **kwargs):\n pass", "def _git_diff_files(ref=\"master\"):\n result = []\n command = [\"git\", \"diff\", \"--name-status\", \"%s\" % (ref)]\n exit_code, output = _execute(command)\n if exit_code != 0:\n print(\"Failed to diff files.\")\n sys.exit(1)\n\n for line in output.decode(\"utf-8\").splitlines():\n parts = line.split(\"\\t\")\n action = parts[0]\n name = parts[-1]\n action = action.lower()\n result.append((action, name))\n\n return result", "def _buildDiff(self):\n outputList = []\n for tag, alo, ahi, blo, bhi in self.cruncher.get_opcodes():\n if tag == 'replace':\n # Text replaced = deletion + insertion\n outputList.append(self.delTag % u\" \".join(self.source[alo:ahi]))\n outputList.append(self.insTag % u\" \".join(self.target[blo:bhi]))\n self.replaceCount += 1\n elif tag == 'delete':\n # Text deleted\n outputList.append(self.delTag % u\" \".join(self.source[alo:ahi]))\n self.deleteCount += 1\n elif tag == 'insert':\n # Text inserted\n outputList.append(self.insTag % u\" \".join(self.target[blo:bhi]))\n self.insertCount += 1\n diffText = u\" \".join(outputList)\n #diffText = \" \".join(diffText.split())\n self.diffText = diffText.replace(self.nl, u\"\\n\")", "def actions():\n pass", "def diff(self, rev=None):\r\n args = []\r\n if rev is not None:\r\n args.append(\"-r %d\" % rev)\r\n out = self._authsvn('diff', args)\r\n return out", "def diff(ctx, files, metrics, all, detail, revision, wrap):\n config = ctx.obj[\"CONFIG\"]\n\n if not exists(config):\n handle_no_cache(ctx)\n\n if not metrics:\n metrics = get_default_metrics(config)\n 
logger.info(f\"Using default metrics {metrics}\")\n else:\n metrics = metrics.split(\",\")\n logger.info(f\"Using specified metrics {metrics}\")\n\n from wily.commands.diff import diff\n\n logger.debug(f\"Running diff on {files} for metric {metrics}\")\n diff(\n config=config,\n files=files,\n metrics=metrics,\n changes_only=not all,\n detail=detail,\n revision=revision,\n wrap=wrap,\n )", "def svn_diff_file_diff(*args):\n return _diff.svn_diff_file_diff(*args)", "def actions(self, state):\n\t\traise NotImplementedError", "def post_revert(self):", "def GenerateDiff(self, args):\n raise NotImplementedError(\n \"abstract method -- subclass %s must override\" % self.__class__)" ]
[ "0.5404981", "0.53203255", "0.52869326", "0.5260477", "0.52597624", "0.5186789", "0.518508", "0.5163919", "0.514824", "0.50897974", "0.50714517", "0.50684255", "0.50684255", "0.5065854", "0.5063287", "0.50580245", "0.5037629", "0.50206566", "0.50169116", "0.5014264", "0.5000727", "0.4991861", "0.49900994", "0.4989343", "0.49879155", "0.49770758", "0.4972681", "0.49633923", "0.4942804", "0.49408433" ]
0.5864252
0
Method to scan product. Adds the product order to the list of orders.
def scan(self, product_code): self.order.add_product(product_code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, product):\n pass", "def orderWatch(self, order):\r\n\t\tself.orders.append(order)", "def add_product(self):\n self.owner.new_product(self.barcode, self.description, self.price, self._add_product_callback)", "def orderWatch(self, order):\r\n\t\tself.pair.orders.append(order)", "def process(self, order):\r\n self._elements.append(order)", "def ProcessOrder(product_id):\n product = Product.query.filter_by(product_id = product_id)\n \n if (product):\n product.qty = product \n db.session.commit()", "def product(self, product):\n self._product = product", "def insert(self, product):\n pass", "def payload_add_products(self, payload: dict, order: Order, language: str):\n order_lines: [OrderLine] = OrderLine.objects.filter(order=order.id)\n items: [dict] = []\n\n area = resolve_area(order)\n\n # Additional product orders doesn't have berth product\n if hasattr(order, \"product\") and order.product:\n product = order.product\n int_tax = int(order.tax_percentage)\n assert (\n int_tax == product.tax_percentage\n ) # make sure the tax is a whole number\n with override(language):\n lease = order.lease\n place = (\n lease.berth\n if hasattr(lease, \"berth\")\n else lease.place\n if hasattr(lease, \"place\") and lease.place\n else lease.section\n if hasattr(lease, \"section\") and lease.section\n else area\n )\n product_name = f\"{product.name}: {place}\"\n items.append(\n {\n \"id\": get_talpa_product_id(product.id, area),\n \"title\": product_name,\n \"price\": price_as_fractional_int(order.price),\n \"pretax_price\": price_as_fractional_int(order.pretax_price),\n \"tax\": int_tax,\n \"count\": 1,\n \"type\": 1,\n }\n )\n\n for order_line in order_lines:\n product: AdditionalProduct = order_line.product\n int_tax = int(product.tax_percentage)\n assert (\n int_tax == product.tax_percentage\n ) # make sure the tax is a whole number\n with override(language):\n product_name = product.name\n items.append(\n {\n \"id\": get_talpa_product_id(\n product.id,\n area,\n is_storage_on_ice=product.service\n == ProductServiceType.STORAGE_ON_ICE,\n ),\n \"title\": product_name,\n \"price\": price_as_fractional_int(order_line.price),\n \"pretax_price\": price_as_fractional_int(order_line.pretax_price),\n \"tax\": int_tax,\n \"count\": order_line.quantity,\n \"type\": 1,\n }\n )\n payload[\"amount\"] = price_as_fractional_int(order.total_price)\n payload[\"products\"] = items", "def agregar_producto(self, producto):\n\n self.productos.append(producto)", "def product(self, product):\n\n self._product = product", "def product(self, product):\n\n self._product = product", "def add_products():\n result = order_obj.add_products(request.forms) \n return result", "def btn_create_order_pro(self):\n\t\tprint()\n\t\tprint('treatment - btn_create_order_pro')\n\n\t\t# Search Partner\n\t\tpartner = tre_funcs.get_partner(self, self.patient.name)\n\n\t\t# Search pricelist\n\t\tpricelist = tre_funcs.get_pricelist(self)\n\n\t\t# Search product\n\t\t# Create Product tuple\n\t\tproduct_tup = []\n\t\t#for service in self.service_all_ids:\n\t\tfor service in self.service_ids:\n\t\t\t#print()\n\t\t\t#print('* Create Product tuple')\n\t\t\t#print(service)\n\t\t\t#print(service.service)\n\t\t\t#print(service.service.name)\n\t\t\t#print(service.qty)\n\t\t\t#print(service.service.list_price)\n\t\t\t\n\t\t\t# Init\n\t\t\tproduct_template = service.service\n\t\t\tname = service.service.name\n\t\t\tqty = service.qty\n\t\t\tprice = service.service.list_price\n\t\t\t\n\t\t\t# Check Exceptions\n\t\t\ttry:\n\t\t\t\tprice_list = 
'2019'\n\t\t\t\tproduct = tre_funcs.get_product_product(self, name, price_list)\n\t\t\t\tproduct_tup.append((product, qty, price))\n\n\t\t\texcept Exception:\n\t\t\t\tprint('ERROR - Treatment - Product not in 2019 price_list !')\n\t\t\t\tprint('Search in other price_lists')\n\n\t\t\t\ttry:\n\t\t\t\t\tprice_list = False\n\t\t\t\t\tproduct = tre_funcs.get_product(self, name, price_list)\n\t\t\t\t\tprint(product)\n\t\t\t\t\tproduct_tup.append((product, qty, price))\n\n\t\t\t\texcept Exception:\n\t\t\t\t\tprint('ERROR - Treatment - Product Not Available at all !!!!!')\n\n\t\t\t#else:\n\t\t\t#\tprint('jx - Else !')\n\t\t\t\t#pass\n\n\n\t\t\t# Check \n\t\t\ttre_funcs.check_product(self, '2019', product, product_template)\n\t\t\n\t\t# Create order \n\t\torder = pl_creates.create_order(self, partner.id, pricelist.id, product_tup)\n\t\tprint(order)\n\n\t\t# Open Order\n\t\treturn action_funcs.open_order(order)", "def test_add_product_to_cart(self, driver):\n logging.info(\"Start test case: Continue Shop\")\n data = self.test_data[\"Continue Shop\"][\"Products\"][0]\n logging.info(f\"Test data: [{data}]\")\n product_name = data[\"Product Name\"]\n\n select_product(driver, data[\"Page\"], product_name)\n add_product_to_cart(driver, data[\"Size\"], data[\"Color\"], data[\"Quantity\"])\n assert is_product_in_cart(driver, product_name)\n continue_shopping_from_order_summary(driver)\n assert verify_current_page_is_home(driver)", "def place_order(self, **kwargs):\r\n create_options = self._generate_create_dict(**kwargs)\r\n return self.client['Product_Order'].placeOrder(create_options)", "def on_scan(self, product):\n self.new_product = product\n if self.active:\n self.sm.on_state_event(self.events.SCAN)", "def process_order(self, order_event : event.EventOrder) :\n pass", "def products(self, products):\n\n self._products = products", "def products(self, products):\n\n self._products = products", "async def on_order_updated(self, order: MetatraderOrder):\n for i in range(len(self._orders)):\n if self._orders[i]['id'] == order['id']:\n self._orders[i] = order\n break\n else:\n self._orders.append(order)", "def addProduct(self, product):\n self._checkDeleted()\n product._checkDeleted()\n\n productPath = self.productSearch.productClient.product_path(\n project=self.productSearch.projectId, location=self.productSearch.location, product=product.productId)\n\n self.productSearch.productClient.add_product_to_product_set(name=self.productSetPath, product=productPath)", "def _serialize_order_and_product_data(order_data:dict):\n\n placed_orders = []\n ordered_products = []\n\n for order in order_data:\n if order[\"financial_status\"] not in COMPLETE_ORDER_STATUSES:\n continue\n \n items = []\n products = []\n for item in order[\"line_items\"]:\n items.append(\n {\n \"ProductID\": item[\"id\"],\n \"SKU\": item[\"sku\"],\n \"ProductName\": item[\"title\"],\n \"Quantity\": item[\"quantity\"],\n \"ItemPrice\": item[\"name\"]\n }\n )\n\n products.append(\n {\n \"token\": PUBLIC_KEY,\n \"event\": \"Ordered Product\",\n \"customer_properties\": {\n \"$email\": order[\"customer\"][\"email\"],\n \"$first_name\": order[\"customer\"][\"first_name\"],\n \"$last_name\": order[\"customer\"][\"last_name\"]\n },\n \"properties\": {\n \"$event_id\": item[\"id\"],\n \"$value\": item[\"price\"],\n \"ProductID\": item[\"product_id\"],\n \"SKU\": item[\"sku\"],\n \"ProductName\": item[\"title\"],\n \"Quantity\": item[\"quantity\"]\n }\n }\n )\n \n ordered_products.append({\"order_id\":order[\"id\"], \"body\": products})\n\n 
placed_orders.append(\n {\n \"token\": PUBLIC_KEY,\n \"event\": \"Placed Order\",\n \"customer_properties\": {\n \"$email\": order[\"customer\"][\"email\"],\n \"$first_name\": order[\"customer\"][\"first_name\"],\n \"$last_name\": order[\"customer\"][\"last_name\"],\n \"$phone_number\": order[\"customer\"][\"phone\"],\n \"$address1\": order[\"customer\"][\"default_address\"][\"address1\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$address2\": order[\"customer\"][\"default_address\"][\"address2\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$city\": order[\"customer\"][\"default_address\"][\"city\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$zip\": order[\"customer\"][\"default_address\"][\"zip\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$region\": order[\"customer\"][\"default_address\"][\"province_code\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$country\": order[\"customer\"][\"default_address\"][\"country_name\"] if \"default_address\" in order[\"customer\"].keys() else None,\n },\n \"properties\": {\n \"$event_id\": order[\"id\"],\n \"$value\": order[\"total_price\"],\n \"ItemNames\": [item[\"name\"] for item in order[\"line_items\"]],\n \"DiscountCode\": order[\"discount_codes\"],\n \"DiscountValue\": order[\"total_discounts\"],\n \"Items\": items,\n \"BillingAddress\": None if \"billing_address\" not in order.keys() else\n {\n \"FirstName\": order[\"billing_address\"][\"first_name\"],\n \"LastName\": order[\"billing_address\"][\"last_name\"],\n \"Company\": order[\"billing_address\"][\"company\"],\n \"Addaress1\": order[\"billing_address\"][\"address1\"],\n \"Address2\": order[\"billing_address\"][\"address2\"],\n \"City\": order[\"billing_address\"][\"city\"],\n \"Region\": order[\"billing_address\"][\"province\"],\n \"RegionCode\": order[\"billing_address\"][\"province_code\"],\n \"Country\": order[\"billing_address\"][\"country\"],\n \"CountryCode\": order[\"billing_address\"][\"country_code\"],\n \"Zip\": order[\"billing_address\"][\"zip\"],\n \"Phone\": order[\"billing_address\"][\"phone\"]\n },\n \"ShippingAddress\": None if \"shipping_address\" not in order.keys() else\n {\n \"FirstName\": order[\"shipping_address\"][\"first_name\"],\n \"LastName\": order[\"shipping_address\"][\"last_name\"],\n \"Company\": order[\"shipping_address\"][\"company\"],\n \"Addaress1\": order[\"shipping_address\"][\"address1\"],\n \"Address2\": order[\"shipping_address\"][\"address2\"],\n \"City\": order[\"shipping_address\"][\"city\"],\n \"Region\": order[\"shipping_address\"][\"province\"],\n \"RegionCode\": order[\"shipping_address\"][\"province_code\"],\n \"Country\": order[\"shipping_address\"][\"country\"],\n \"CountryCode\": order[\"shipping_address\"][\"country_code\"],\n \"Zip\": order[\"shipping_address\"][\"zip\"],\n \"Phone\": order[\"shipping_address\"][\"phone\"]\n }\n },\n \"time\": int(time.time())\n }\n )\n \n return placed_orders, ordered_products", "def on_order(self, order: OrderData):\n pass", "def on_order(self, order: OrderData):\n pass", "def on_order(self, order: OrderData):\n pass", "def test_get_order_by_product(self):\n test_order = self._create_orders(1)[0]\n resp = self.app.get('/orders/products/{}'.format(test_order.product_id),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()[0]\n self.assertEqual(data['uuid'], test_order.uuid)", "def order(self, typ, price, volume):\r\n 
self.count_submitted += 1\r\n self.client.send_order_add(typ, price, volume)", "def on_order(self, order: OrderData):\n # print(\"on_order\")\n # print(order)\n pass", "def add_product(self, product):\n return self._make_post_request(self._urls['products'],\n data=dict(name=product))" ]
[ "0.6195393", "0.6153183", "0.6137681", "0.60344553", "0.60218424", "0.5938747", "0.582828", "0.57936937", "0.576499", "0.5742286", "0.57390934", "0.57390934", "0.56984586", "0.56835234", "0.5683081", "0.567723", "0.5611014", "0.55724466", "0.5555511", "0.5555511", "0.5548241", "0.55362535", "0.5534266", "0.55217916", "0.55217916", "0.55217916", "0.5518086", "0.5509723", "0.54920816", "0.5484648" ]
0.7857374
0
Attribute which calculates the total amount on the order after deducting discounts.
def total(self): total_price = self.get_total_amount() discounts = self.get_total_discount() return total_price - discounts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discount_amount(self):\r\n customer = self.records.find_customers(str(self.__customer).strip())\r\n order_value = self.order_value\r\n discount = customer.get_discount(order_value)\r\n return discount", "def total_amount(self):\n full_price = sum(item.price for item in self._products) if self._products else 0.0\n return full_price - self._get_discount()", "def basket_total_before_discounts_excl_tax(self):\n result = self.lines.aggregate(total=Sum(\"line_price_before_discounts_excl_tax\"))\n return result[\"total\"]", "def total_discount_incl_tax(self):\n discount = D(\"0.00\")\n for line in self.lines.all():\n discount += line.discount_incl_tax\n return discount", "def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price", "def discount_amount(self):\n return self._discount_amount", "def amount(self):\n return self.subtotal + self.tax_subtotal + self.shipping", "def get_debt(self):\n sum_import = self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n ).aggregate(Sum(\"amount\"))\n return sum_import.get(\"amount__sum\", None)", "def total_donated(self):\n if not hasattr(self, 'dynamic_total'):\n agg = self.donations.aggregate(Sum('amount'))\n self.dynamic_total = agg['amount__sum']\n return self.current + (self.dynamic_total or 0)", "def get_total_cost(self):\n total_cost = sum([item.quantity * item.product.price for item in self.orderitem_set.all()])\n return total_cost - total_cost * (self.discount / Decimal('100'))", "def update_on_delete(sender, instance, **kwargs):\n instance.order.update_grand_total()", "def basket_total_before_discounts_incl_tax(self):\n result = self.lines.aggregate(total=Sum(\"line_price_before_discounts_incl_tax\"))\n return result[\"total\"]", "def amount_due(self):\n queryset = self.supplyorderitem_set.filter(delivery_date__isnull=False).aggregate(\n amount_due=Sum(F('unit_price')*F('quantity_ordered'))\n )\n return queryset['amount_due'] or 0", "def _total_d(self):\n debit = 0.0\n for l in self.data:\n debit += l['debit']\n self.t_credit += l['credit']\n self.t_balance += l['balance']\n return debit", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if(line.is_discount_allow and line.price_subtotal > 100):\n line.price_subtotal = line.price_subtotal - 100", "def get_total_discount(self):\n total_discount = 0.00\n\n for promotion in self.pricing_rules:\n discount = promotion.get_discount(self.order)\n total_discount += discount\n\n return total_discount", "def bulk_item(order: Order) -> Decimal:\n discount = Decimal(0)\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * Decimal('0.1')\n return discount", "def amount(self):\n return(self.order_master.amount)", "def action_update_total(self):\n for order in self:\n amount_untaxed = 0.0\n for line in order.order_line_ids:\n amount_untaxed += line.price_subtotal\n order.price_subtotal = amount_untaxed", "def 
discount(self, cart):", "def delete_on_save(sender, instance, **kwargs):\n instance.order.update_total()", "def discount_tax_compensation_amount(self):\n return self._discount_tax_compensation_amount", "def basket_total_excl_tax(self):\n return self.total_excl_tax - self.shipping_excl_tax - self.surcharge_excl_tax", "def total_donation(self):\n return self._total_donation", "def _amount_all(self):\n for order in self:\n amount_untaxed = amount_tax = 0.0\n order_amount_total = 0.0\n for line in order.order_line:\n amount_untaxed += line.price_subtotal\n amount_tax += line.price_tax\n self_amount_total = amount_untaxed + amount_tax\n if not order.discount_fixed_percent:\n order_amount_total = self_amount_total\n if order.discount_fixed_percent == 'Percent':\n order_amount_total = self_amount_total * (1 - (order.discount or 0.0) / 100.0)\n if order.discount_fixed_percent == 'Fixed':\n order_amount_total = self_amount_total - order.discount_value\n order.update({\n 'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),\n 'amount_tax': order.pricelist_id.currency_id.round(amount_tax),\n 'amount_before_disc': amount_untaxed + amount_tax,\n 'amount_total': order_amount_total,\n })", "def _compute_amount_qty_delivered(self):\n for line in self:\n # if line.product_id.invoice_policy == 'delivery':\n # qty = line.qty_delivered\n # else:\n # qty = line.product_uom_qty\n # line.price_total_without_discount = qty * line.price_unit\n # line.price_discount = (line.price_total_without_discount * line.discount) / 100\n line.update({\n # 'price_discount': line.price_discount,\n # 'price_total_without_discount': line.price_total_without_discount,\n 'sea_price_total_qty_delivered': line.untaxed_amount_to_invoice + line.untaxed_amount_invoiced,\n })", "def get_quote_discount(self):\n return self.quoteitem_set.all().annotate(\n total_quote_price=F('price') * F('quantity')).annotate(\n calculate_discount=(F('total_quote_price') * F('discount') / 100)).aggregate(\n Sum('calculate_discount'))['calculate_discount__sum']", "def _amount_all(self):\n for order in self:\n amount_untaxed = 0.0\n for line in order.order_items_ids:\n amount_untaxed += line.price_subtotal\n order.update({\n 'amount_untaxed': amount_untaxed,\n })", "def discharge(self):\n return self._discharge", "def _compute_amount(self):\n for line in self:\n price = line.price_unit\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty,\n product=line.product_id, partner=line.order_id.partner_shipping_id)\n self_price_subtotal = taxes['total_excluded']\n if not line.discount_fixed_percent:\n self_price_subtotal = self_price_subtotal\n if line.discount_fixed_percent == 'Percent':\n self_price_subtotal = self_price_subtotal * (1 - (line.discount or 0.0) / 100.0)\n if line.discount_fixed_percent == 'Fixed':\n self_price_subtotal = self_price_subtotal - line.discount_value\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': self_price_subtotal,\n })" ]
[ "0.67122465", "0.6551784", "0.6532095", "0.6519561", "0.651557", "0.6478766", "0.6468313", "0.6403922", "0.63672656", "0.63636285", "0.63349086", "0.63340664", "0.6284223", "0.62303615", "0.62296844", "0.6176185", "0.61685467", "0.61363816", "0.61200655", "0.61124754", "0.60852194", "0.60741585", "0.6065453", "0.6024393", "0.6001514", "0.599198", "0.59908557", "0.59903145", "0.5985894", "0.59717315" ]
0.66989225
1
Calculates total discount applicable on this order.
def get_total_discount(self): total_discount = 0.00 for promotion in self.pricing_rules: discount = promotion.get_discount(self.order) total_discount += discount return total_discount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discount_amount(self):\r\n customer = self.records.find_customers(str(self.__customer).strip())\r\n order_value = self.order_value\r\n discount = customer.get_discount(order_value)\r\n return discount", "def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price", "def total_discount_incl_tax(self):\n discount = D(\"0.00\")\n for line in self.lines.all():\n discount += line.discount_incl_tax\n return discount", "def get_total_cost(self):\n total_cost = sum([item.quantity * item.product.price for item in self.orderitem_set.all()])\n return total_cost - total_cost * (self.discount / Decimal('100'))", "def total(self):\n total_price = self.get_total_amount()\n discounts = self.get_total_discount()\n\n return total_price - discounts", "def discount_amount(self):\n return self._discount_amount", "def total_amount(self):\n full_price = sum(item.price for item in self._products) if self._products else 0.0\n return full_price - self._get_discount()", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if(line.is_discount_allow and line.price_subtotal > 100):\n line.price_subtotal = line.price_subtotal - 100", "def compute_amount_discounted(promotion, amount):\n if promotion.promo_type == '1': # % off\n amount_discounted = promotion.promo_amount * amount / Decimal(100)\n amount_discounted = Decimal(str(round(amount_discounted, 2)))\n elif promotion.promo_type == '2': # $ off\n if promotion.promo_amount < amount:\n amount_discounted = promotion.promo_amount\n else:\n amount_discounted = amount\n elif promotion.promo_type == '3': # fixed $ cost\n if promotion.promo_amount < amount:\n amount_discounted = amount - promotion.promo_amount\n else:\n # If you have a fixed cost promo of $20, but your items \n # only cost $10, you don't save.\n amount_discounted = 0\n LOG.debug('compute discount: amount_discounted = %s' % amount_discounted)\n return amount_discounted", "def _get_discount(self):\n\n # For every 2 PENS, one free discount\n number_of_pens = len([x for x in self._products if x.code == 'PEN'])\n discount = 5.0 * int(number_of_pens / 2)\n\n # If there are more than 3 T-Shirts in the basket, 5 EUR of discount in every of them (25%)\n number_of_tshirts = len([x for x in self._products if x.code == 'TSHIRT'])\n if number_of_tshirts >= 3:\n discount += 5.0 * number_of_tshirts\n\n return discount", "def bulk_item(order: Order) -> Decimal:\n discount = Decimal(0)\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * Decimal('0.1')\n return discount", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def discount(self, period):\n\t\treturn 1.0/compound(period)", "def _compute_amount(self):\n 
for line in self:\n price = line.price_unit\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty,\n product=line.product_id, partner=line.order_id.partner_shipping_id)\n self_price_subtotal = taxes['total_excluded']\n if not line.discount_fixed_percent:\n self_price_subtotal = self_price_subtotal\n if line.discount_fixed_percent == 'Percent':\n self_price_subtotal = self_price_subtotal * (1 - (line.discount or 0.0) / 100.0)\n if line.discount_fixed_percent == 'Fixed':\n self_price_subtotal = self_price_subtotal - line.discount_value\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': self_price_subtotal,\n })", "def discount(self, cart):", "def discounted(self):\n return self._discounted", "def bulk_item_promo(order: Order):\n discount = 0\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * 0.1\n return discount", "def get_quote_discount(self):\n return self.quoteitem_set.all().annotate(\n total_quote_price=F('price') * F('quantity')).annotate(\n calculate_discount=(F('total_quote_price') * F('discount') / 100)).aggregate(\n Sum('calculate_discount'))['calculate_discount__sum']", "def get_discount(self, price):\r\n pass", "def apply_discount(self, product):\n pass", "def implied_discount_factor(p1: Instrument, c1: Instrument, p2: Instrument, c2: Instrument) -> float:\n return (c1.price - p1.price - c2.price + p2.price)/ (c2.strike - c1.strike)", "def discounted_reward(self, discount):\n\n tl = len(self)\n return (1 - discount) * np.sum(discount ** np.arange(tl) * self.rewards)", "def base_discount_amount(self):\n return self._base_discount_amount", "def discount_tax_compensation_amount(self):\n return self._discount_tax_compensation_amount", "def bogof_discount(self):\n bogof_discount = 0\n for item in self.cart.items:\n if item.quantity > 1:\n bogof_discount += (math.floor(item.quantity / 2) * item.product.price)\n\n self.cart._total -= bogof_discount", "def bulk_item(order):\n discount = 0\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * .1\n return discount", "def calculate_price(self):\n\n cargo_weight = self.cargo.weight\n tax_rate = Decimal(0.18)\n\n untaxed_total = Decimal(cargo_weight) * Decimal(self.price_per_unit_weight)\n\n total_price = (untaxed_total * tax_rate) + untaxed_total\n\n return total_price", "def total_cost(self):\n return (self.food_amount + self.local_transport_amount + self.other_expenses +\n self.travel_amount + self.accomodation_amount)", "def total_price(self) -> Decimal:\n total_price: Decimal = ZERO_AMOUNT\n\n # Calculate the total price\n order_item: OrderItem\n for order_item in self.orderitem_set.all():\n total_price += order_item.total_price\n\n return total_price", "def loyalty_discount(self):\n if self.cart.user.is_loyal:\n self.cart._total *= 0.98" ]
[ "0.76241744", "0.73756224", "0.7202873", "0.70788705", "0.6990974", "0.6952235", "0.6787426", "0.67570555", "0.6640429", "0.6552841", "0.652841", "0.64726514", "0.64331007", "0.64018595", "0.6394077", "0.6384539", "0.6307665", "0.62925327", "0.6282091", "0.6279652", "0.627704", "0.6252722", "0.6191731", "0.6179777", "0.6114019", "0.6084302", "0.6071334", "0.59924626", "0.59892535", "0.59868103" ]
0.8006258
0
Return total but in a pretty format with Euro sign.
def get_total_display(self): total = self.total return '%.2f\N{euro sign}' % total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_elle(self):\r\n \r\n return str(round(self._total_elle, 2))", "def get_total(self):\r\n \r\n return str(round(self._total, 2))", "def amount_ui(self) -> str:\n return \"{:,.2f}\".format(self.amount)", "def display_price(self):\n return '$ '+str(self.price)", "def currency(self, commas=True):\n sign, digits, exp = self.quantize(Decimal('0.01')).as_tuple()\n digits = list(map(unicode, digits))\n result = []\n for i in range(2):\n result.append(digits.pop() if digits else u'0')\n result.append(u'.')\n if not digits:\n result.append(u'0')\n count = 0\n while digits:\n result.append(digits.pop())\n count += 1\n if count == 3 and digits and commas:\n count = 0\n result.append(u',')\n result.append(u'-' if sign else u'')\n return u''.join(reversed(result))", "def eur(value):\n float(value)\n return f\"€{value:,.2f}\"", "def format_usd(my_price):\n return f\"${my_price:,.2f}\"", "def money_format(ammount):\n\td = Decimal(ammount) / Decimal(\"100\")\n\treturn u'£%s' % d.quantize(Decimal(\"0.01\"))", "def format_as_usd(value):\n return f\"${value:,.2f}\"", "def text_transform(val):\n if CURRENCY == \"USD\":\n return \"$%d\" % val\n if CURRENCY == \"EUR\":\n return \"‎€%d\" % val\n if CURRENCY == \"GBP\":\n return \"£%d\" % val\n return \"%d\" % val", "def format_currency(amount):\n pretty_amount = str(amount)\n\n if amount < 0:\n pretty_amount = pretty_amount[:1] + \"$\" + pretty_amount[1:]\n else:\n pretty_amount = \"$%s\" % pretty_amount\n\n return pretty_amount", "def __str__(self) -> str:\n return f'{self.amount}{self.currency}'", "def func(pct, allvals):\n return str(format(round(pct/100.*np.sum(allvals), 2),\".2f\")) + \"€\"", "def convert_to_euros(self):\n return 'Currency is', self.currency_type", "def display(self):\n\n return str(self.total)", "def to_usd(my_price):\n return f\"${my_price:,.2f}\"", "def format_amount(self) -> str:\n if self.amount_debit != '':\n return self.amount_debit.replace('-', '')\n return self.amount_credit.replace('-', '')", "def __str__(self):\n return str(self.currentTotal)", "def format_tuition(self, data):\n d = u'$%.2f' % data\n return d.replace('.00','')", "def to_usd(price):\n return \"${0:,.2f}\".format(price)", "def calculate_tax(subtotal):\n return \"TAX: \"+format_usd(0.0875*subtotal)", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def usd(value):\r\n return f\"${Decimal(value):,.2f}\"", "def to_usd(my_price):\n return \"${0:,.2f}\".format(my_price)", "def pretty_print(self, value, add_unit=False):\n s = \"%.1f\" % self.internal_to_friendly(value)\n if add_unit: s += \" \" + self.friendly_units\n return s", "def dollar():\r\n price = give_price_website_2(\"https://www.tgju.org/%D9%82%DB%8C%D9%85%D8%AA-%D8%AF%D9%84%D8%A7%D8%B1\")\r\n\r\n if users_language[update.effective_chat.id] == \"english\":\r\n return \"dollar : \" + format(price/10000, '.2f') + \" kTomans\"\r\n elif users_language[update.effective_chat.id] == \"persian\":\r\n return \" هزارتومان\" + format(price/10000000, '.3f') + \"دلار : \"" ]
[ "0.6930728", "0.6926814", "0.6910748", "0.69088656", "0.65733093", "0.6563159", "0.65392554", "0.6340894", "0.62750125", "0.62716407", "0.62608546", "0.6224019", "0.6152539", "0.61229", "0.6107103", "0.60937685", "0.60425156", "0.6040899", "0.60386825", "0.6008453", "0.59992117", "0.597815", "0.597815", "0.597815", "0.597815", "0.597815", "0.5967632", "0.5877372", "0.58641386", "0.5839082" ]
0.81358236
0
MessagingCampaign a model defined in Swagger
def __init__(self): self.swagger_types = { 'id': 'str', 'name': 'str', 'date_created': 'datetime', 'date_modified': 'datetime', 'version': 'int', 'division': 'DomainEntityRef', 'campaign_status': 'str', 'callable_time_set': 'DomainEntityRef', 'contact_list': 'DomainEntityRef', 'dnc_lists': 'list[DomainEntityRef]', 'always_running': 'bool', 'contact_sorts': 'list[ContactSort]', 'messages_per_minute': 'int', 'errors': 'list[RestErrorDetail]', 'sms_config': 'SmsConfig', 'self_uri': 'str' } self.attribute_map = { 'id': 'id', 'name': 'name', 'date_created': 'dateCreated', 'date_modified': 'dateModified', 'version': 'version', 'division': 'division', 'campaign_status': 'campaignStatus', 'callable_time_set': 'callableTimeSet', 'contact_list': 'contactList', 'dnc_lists': 'dncLists', 'always_running': 'alwaysRunning', 'contact_sorts': 'contactSorts', 'messages_per_minute': 'messagesPerMinute', 'errors': 'errors', 'sms_config': 'smsConfig', 'self_uri': 'selfUri' } self._id = None self._name = None self._date_created = None self._date_modified = None self._version = None self._division = None self._campaign_status = None self._callable_time_set = None self._contact_list = None self._dnc_lists = None self._always_running = None self._contact_sorts = None self._messages_per_minute = None self._errors = None self._sms_config = None self._self_uri = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self):\n json_data = request.get_json()\n json_data[\"sender_id\"] = current_user.id\n try:\n new_campaign = self.schema.load(json_data)\n except ValidationError as err:\n return {\"message\": err.messages}, HTTPStatus.BAD_REQUEST\n if Campaign.query.filter_by(mailchimp_id=new_campaign.mailchimp_id).first() is not None:\n return {\"message\": \"Campaign already exists.\"}, HTTPStatus.CONFLICT\n db.session.add(new_campaign)\n db.session.commit()\n return self.schema.dump(new_campaign), HTTPStatus.CREATED", "def test_create_campaign(self):\n campaign = self.campaign\n\n self.assertTrue(isinstance(campaign, Campaign))\n self.assertEqual(campaign.name, \"Test Campaign\")", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'start_date': 'datetime',\n 'length_minutes': 'int',\n 'activities': 'list[BuAgentScheduleActivity]',\n 'manually_edited': 'bool',\n 'schedule': 'BuScheduleReference'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'start_date': 'startDate',\n 'length_minutes': 'lengthMinutes',\n 'activities': 'activities',\n 'manually_edited': 'manuallyEdited',\n 'schedule': 'schedule'\n }\n\n self._id = None\n self._start_date = None\n self._length_minutes = None\n self._activities = None\n self._manually_edited = None\n self._schedule = None", "def create_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)", "def __init__(self, campaign, campaign_email, *args, **kwargs):\n super(TrackedEmailMessage, self).__init__(*args, **kwargs)\n\n self._set_campaign(campaign)\n self._set_campaign_email(campaign_email)", "def __init__(self):\n self.swagger_types = {\n 'id_template_notificacao': 'int',\n 'destinatarios': 'list[str]',\n 'anexos': 'list[AnexoNotificacaoEmailRequest]',\n 'parametros_conteudo': 'dict(str, object)'\n }\n\n self.attribute_map = {\n 'id_template_notificacao': 'idTemplateNotificacao',\n 'destinatarios': 'destinatarios',\n 'anexos': 'anexos',\n 'parametros_conteudo': 'parametrosConteudo'\n }\n\n self._id_template_notificacao = None\n self._destinatarios = None\n self._anexos = None\n self._parametros_conteudo = None", "def test_admin_sms_campaign_view_list(self):\n response = self.client.get('/admin/sms_module/smscampaign/')\n self.failUnlessEqual(response.status_code, 200)", "def __init__(self, email: str=None, is_bot: bool=None, avatar_url: str=None, avatar_version: int=None, full_name: str=None, is_admin: bool=None, is_owner: bool=None, is_billing_admin: bool=None, role: int=None, bot_type: int=None, user_id: int=None, bot_owner_id: int=None, is_active: bool=None, is_guest: bool=None, timezone: str=None, date_joined: str=None, delivery_email: str=None, profile_data: Dict[str, object]=None):\n self.openapi_types = {\n 'email': str,\n 'is_bot': bool,\n 'avatar_url': str,\n 'avatar_version': int,\n 'full_name': str,\n 'is_admin': bool,\n 'is_owner': bool,\n 'is_billing_admin': bool,\n 'role': int,\n 'bot_type': int,\n 'user_id': int,\n 'bot_owner_id': int,\n 'is_active': bool,\n 'is_guest': bool,\n 'timezone': str,\n 'date_joined': str,\n 'delivery_email': str,\n 'profile_data': Dict[str, object]\n }\n\n self.attribute_map = {\n 'email': 'email',\n 'is_bot': 'is_bot',\n 'avatar_url': 'avatar_url',\n 'avatar_version': 'avatar_version',\n 'full_name': 'full_name',\n 'is_admin': 'is_admin',\n 'is_owner': 'is_owner',\n 'is_billing_admin': 'is_billing_admin',\n 'role': 'role',\n 'bot_type': 'bot_type',\n 'user_id': 'user_id',\n 'bot_owner_id': 'bot_owner_id',\n 'is_active': 'is_active',\n 
'is_guest': 'is_guest',\n 'timezone': 'timezone',\n 'date_joined': 'date_joined',\n 'delivery_email': 'delivery_email',\n 'profile_data': 'profile_data'\n }\n\n self._email = email\n self._is_bot = is_bot\n self._avatar_url = avatar_url\n self._avatar_version = avatar_version\n self._full_name = full_name\n self._is_admin = is_admin\n self._is_owner = is_owner\n self._is_billing_admin = is_billing_admin\n self._role = role\n self._bot_type = bot_type\n self._user_id = user_id\n self._bot_owner_id = bot_owner_id\n self._is_active = is_active\n self._is_guest = is_guest\n self._timezone = timezone\n self._date_joined = date_joined\n self._delivery_email = delivery_email\n self._profile_data = profile_data", "def get_campaign(self, uuid):\n return Campaign.deserialize(self._get_single('campaigns', {'uuid': uuid}))", "def test_admin_sms_campaign_view_add(self):\n response = self.client.get('/admin/sms_module/smscampaign/add/')\n self.failUnlessEqual(response.status_code, 200)", "def __init__(self, request_url, client, options):\n super(ConversationRequest, self).__init__(request_url, client, options)", "def list_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def campaign(self, campaign):\n\n self._campaign = campaign", "def add_embedded_campaign(self, id, collection, campaign, confidence,\n analyst, date, description):\n if type(id) is not ObjectId:\n id = ObjectId(id)\n # TODO: Make sure the object does not already have the campaign\n # Return if it does. Add it if it doesn't\n obj = getattr(self.db, collection)\n result = obj.find({'_id': id, 'campaign.name': campaign})\n if result.count() > 0:\n return\n else:\n log.debug('Adding campaign to set: {}'.format(campaign))\n campaign_obj = {\n 'analyst': analyst,\n 'confidence': confidence,\n 'date': date,\n 'description': description,\n 'name': campaign\n }\n result = obj.update(\n {'_id': id},\n {'$push': {'campaign': campaign_obj}}\n )\n return result", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'ticket_id': 'str',\n 'type': 'str',\n 'from_number': 'str',\n 'from_name': 'str',\n 'to_number': 'str',\n 'to_name': 'str',\n 'via_number': 'str',\n 'date_created': 'datetime',\n 'date_answered': 'datetime',\n 'date_finished': 'datetime'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'ticket_id': 'ticketId',\n 'type': 'type',\n 'from_number': 'fromNumber',\n 'from_name': 'fromName',\n 'to_number': 'toNumber',\n 'to_name': 'toName',\n 'via_number': 'viaNumber',\n 'date_created': 'dateCreated',\n 'date_answered': 'dateAnswered',\n 'date_finished': 'dateFinished'\n }\n\n self._id = None\n self._ticket_id = None\n self._type = None\n self._from_number = None\n self._from_name = None\n self._to_number = None\n self._to_name = None\n self._via_number = None\n self._date_created = None\n self._date_answered = None\n self._date_finished = None", "def get_campaign_command(client: Client, campaign_id: str) -> CommandResults | str:\n try:\n raw_response = client.get_campaign(campaign_id)\n except ValueError:\n return 'Campaign Id not found'\n\n campaign_general_fields = ['id', 'name', 'description', 'startDate', 'notable']\n campaign_fields = ['families', 'techniques', 'actors', 'brands', 'malware']\n\n outputs = {}\n outputs['campaignMembers'] = dict_safe_get(raw_response, ['campaignMembers'])\n outputs['info'] = {key: value for key, value in raw_response.items() if key in campaign_general_fields}\n outputs.update({key: value for key, value in raw_response.items() if key in 
campaign_fields})\n fields_readable_output = \"\"\n for field in campaign_fields:\n fields_readable_output += \"\\n\" + tableToMarkdown(field.capitalize(),\n dict_safe_get(outputs, [field]), headers=['id', 'name'],\n headerTransform=pascalToSpace\n )\n\n campaign_info_output = tableToMarkdown('Campaign Information',\n outputs['info'],\n headers=['id', 'name', 'description', 'startDate', 'notable'],\n headerTransform=pascalToSpace\n )\n campaign_members_output = tableToMarkdown('Campaign Members',\n outputs['campaignMembers'],\n headers=['id', 'threat', 'type'],\n headerTransform=pascalToSpace\n )\n\n readable_output = campaign_info_output + \"\\n\" + campaign_members_output + fields_readable_output\n\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix='Proofpoint.Campaign',\n outputs=outputs,\n outputs_key_field='id',\n raw_response=raw_response\n )", "def __init__(self):\n self.swagger_types = {\n 'source_contact': 'AddressableEntityRef',\n 'target_contact': 'AddressableEntityRef',\n 'resulting_contact': 'AddressableEntityRef'\n }\n\n self.attribute_map = {\n 'source_contact': 'sourceContact',\n 'target_contact': 'targetContact',\n 'resulting_contact': 'resultingContact'\n }\n\n self._source_contact = None\n self._target_contact = None\n self._resulting_contact = None", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_produto': 'int',\n 'id_pessoa': 'int',\n 'id_parentesco': 'int',\n 'tipo_portador': 'str',\n 'nome_impresso': 'str',\n 'id_tipo_cartao': 'int',\n 'flag_ativo': 'int',\n 'data_cadastro_portador': 'str',\n 'data_cancelamento_portador': 'str'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_produto': 'idProduto',\n 'id_pessoa': 'idPessoa',\n 'id_parentesco': 'idParentesco',\n 'tipo_portador': 'tipoPortador',\n 'nome_impresso': 'nomeImpresso',\n 'id_tipo_cartao': 'idTipoCartao',\n 'flag_ativo': 'flagAtivo',\n 'data_cadastro_portador': 'dataCadastroPortador',\n 'data_cancelamento_portador': 'dataCancelamentoPortador'\n }\n\n self._id_conta = None\n self._id_produto = None\n self._id_pessoa = None\n self._id_parentesco = None\n self._tipo_portador = None\n self._nome_impresso = None\n self._id_tipo_cartao = None\n self._flag_ativo = None\n self._data_cadastro_portador = None\n self._data_cancelamento_portador = None", "def testGetCampaign(self):\n if self.__class__.campaign1 is None:\n self.testSaveCampaign()\n self.assert_(isinstance(self.__class__.service.GetCampaign(\n self.__class__.campaign1['id']), tuple))", "def create(self, request, *args, **kwargs):\n self.serializer_class = ConversationDetailSerializer\n return super(ConversationViewSet, self).create(request, *args, **kwargs)", "def get_campaign(self, campaign_id: str) -> dict:\n return self.http_request(\"GET\", f'/campaign/{campaign_id}')", "def get(self):\n query = Campaign.query\n return paginate(Campaign.__tablename__, query, self.schema), HTTPStatus.OK", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'domain': 'str',\n 'custom_domain': 'str',\n 'customer_email': 'str',\n 'customer_name': 'str',\n 'company': 'str',\n 'date_created': 'datetime',\n 'date_validity': 'datetime',\n 'status': 'str',\n 'account_id': 'str',\n 'cluster_id': 'str',\n 'task_id': 'str',\n 'version': 'str',\n 'is_latest': 'bool',\n 'product_id': 'str',\n 'variation_id': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'domain': 'domain',\n 'custom_domain': 'custom_domain',\n 'customer_email': 'customer_email',\n 'customer_name': 'customer_name',\n 'company': 
'company',\n 'date_created': 'date_created',\n 'date_validity': 'date_validity',\n 'status': 'status',\n 'account_id': 'account_id',\n 'cluster_id': 'cluster_id',\n 'task_id': 'task_id',\n 'version': 'version',\n 'is_latest': 'is_latest',\n 'product_id': 'product_id',\n 'variation_id': 'variation_id'\n }\n\n self._id = None\n self._domain = None\n self._custom_domain = None\n self._customer_email = None\n self._customer_name = None\n self._company = None\n self._date_created = None\n self._date_validity = None\n self._status = None\n self._account_id = None\n self._cluster_id = None\n self._task_id = None\n self._version = None\n self._is_latest = None\n self._product_id = None\n self._variation_id = None", "def __init__(self):\n self.swagger_types = {\n 'enabled': 'bool',\n 'auto_review': 'bool',\n 'allow_direct_trades': 'bool',\n 'min_hours_in_future': 'int',\n 'unequal_paid': 'str',\n 'one_sided': 'str',\n 'weekly_min_paid_violations': 'str',\n 'weekly_max_paid_violations': 'str',\n 'requires_matching_queues': 'bool',\n 'requires_matching_languages': 'bool',\n 'requires_matching_skills': 'bool',\n 'requires_matching_planning_groups': 'bool',\n 'activity_category_rules': 'list[ShiftTradeActivityRule]'\n }\n\n self.attribute_map = {\n 'enabled': 'enabled',\n 'auto_review': 'autoReview',\n 'allow_direct_trades': 'allowDirectTrades',\n 'min_hours_in_future': 'minHoursInFuture',\n 'unequal_paid': 'unequalPaid',\n 'one_sided': 'oneSided',\n 'weekly_min_paid_violations': 'weeklyMinPaidViolations',\n 'weekly_max_paid_violations': 'weeklyMaxPaidViolations',\n 'requires_matching_queues': 'requiresMatchingQueues',\n 'requires_matching_languages': 'requiresMatchingLanguages',\n 'requires_matching_skills': 'requiresMatchingSkills',\n 'requires_matching_planning_groups': 'requiresMatchingPlanningGroups',\n 'activity_category_rules': 'activityCategoryRules'\n }\n\n self._enabled = None\n self._auto_review = None\n self._allow_direct_trades = None\n self._min_hours_in_future = None\n self._unequal_paid = None\n self._one_sided = None\n self._weekly_min_paid_violations = None\n self._weekly_max_paid_violations = None\n self._requires_matching_queues = None\n self._requires_matching_languages = None\n self._requires_matching_skills = None\n self._requires_matching_planning_groups = None\n self._activity_category_rules = None", "def _createConferenceObject(self, request):\n # preload necessary data items\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = getUserId(user)\n\n if not request.name:\n raise endpoints.BadRequestException(\"Conference 'name' field required\")\n\n # copy ConferenceForm/ProtoRPC Message into dict\n data = {field.name: getattr(request, field.name) for field in request.all_fields()}\n del data['websafeKey']\n del data['organizerDisplayName']\n\n # add default values for those missing (both data model & outbound Message)\n for df in DEFAULTS:\n if data[df] in (None, []):\n data[df] = DEFAULTS[df]\n setattr(request, df, DEFAULTS[df])\n\n # convert dates from strings to Date objects; set month based on start_date\n if data['startDate']:\n data['startDate'] = datetime.strptime(data['startDate'][:10], \"%Y-%m-%d\").date()\n data['month'] = data['startDate'].month\n else:\n data['month'] = 0\n if data['endDate']:\n data['endDate'] = datetime.strptime(data['endDate'][:10], \"%Y-%m-%d\").date()\n\n # set seatsAvailable to be same as maxAttendees on creation\n if data[\"maxAttendees\"] > 0:\n 
data[\"seatsAvailable\"] = data[\"maxAttendees\"]\n # generate Profile Key based on user ID and Conference\n # ID based on Profile key get Conference key from ID\n p_key = ndb.Key(Profile, user_id)\n c_id = Conference.allocate_ids(size=1, parent=p_key)[0]\n c_key = ndb.Key(Conference, c_id, parent=p_key)\n data['key'] = c_key\n data['organizerUserId'] = request.organizerUserId = user_id\n\n # create Conference, send email to organizer confirming\n # creation of Conference & return (modified) ConferenceForm\n Conference(**data).put()\n taskqueue.add(params={'email': user.email(),\n 'conferenceInfo': repr(request)},\n url='/tasks/send_confirmation_email'\n )\n return request", "def create(self, request, *args, **kwargs):\n self.serializer_class = ConversationDetailSerializer\n return super(PublicChatViewSet, self).create(request, *args, **kwargs)", "def __init__(self):\n self.swagger_types = {\n 'ids': 'list[str]',\n 'consumer': 'str',\n 'entity_type': 'str',\n 'start_date': 'datetime',\n 'end_date': 'datetime',\n 'created_date': 'datetime',\n 'updated_date': 'datetime',\n 'scope': 'str',\n 'disabled': 'bool',\n 'id': 'str'\n }\n\n self.attribute_map = {\n 'ids': 'ids',\n 'consumer': 'consumer',\n 'entity_type': 'entityType',\n 'start_date': 'startDate',\n 'end_date': 'endDate',\n 'created_date': 'createdDate',\n 'updated_date': 'updatedDate',\n 'scope': 'scope',\n 'disabled': 'disabled',\n 'id': 'id'\n }\n\n self._ids = None\n self._consumer = None\n self._entity_type = None\n self._start_date = None\n self._end_date = None\n self._created_date = None\n self._updated_date = None\n self._scope = None\n self._disabled = None\n self._id = None", "def write_campaign(campaign_data):\n\n campaign = Campaign(**campaign_data)\n campaign.save()\n authorization.make_campaign_public(campaign)\n\n return campaign.id", "def campaign_id(request):\n\n user = None\n response = ApiJsonResponse()\n try:\n user = MyUser.objects.get(pk=request.user.pk)\n except ObjectDoesNotExist:\n return Response({\n \"msg\": _('MSG_USER_NOT_EXIST'),\n \"status\": 404\n }, status=404)\n try:\n company = Company.objects.get(owner=user)\n except:\n return Response({\n \"msg\": _('MSG_COMPANY_NOT_EXIST'),\n \"status\": 404\n }, status=404)\n try:\n promotions = Promotion.objects.filter(company=company)\n except ObjectDoesNotExist:\n response.set_data(\"[]\")\n response.set_result_code(200)\n response.set_result_msg(\"MSG_PROMOTIONS_NOT_FOUNDED\")\n return JsonResponse(response.get_dict())\n list_of_promotions = []\n for promotion in promotions:\n list_of_promotions.append({'name': promotion.campaign_name, 'id': promotion.pk})\n return Response({\n \"msg\": _('MSG_PROMOTION_FOUNDED'),\n \"list_of_promotions\": list_of_promotions,\n \"status\": 200\n }, status=200)", "def _createConferenceObject(self, request):\n # Preload necessary data items\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = user.email()\n if not request.name:\n raise endpoints.BadRequestException(\n \"Conference 'name' field required\")\n # Copy ConferenceForm/ProtoRPC Message into dict\n data = {\n field.name: getattr(request, field.name) for field in\n request.all_fields()\n }\n del data['websafeKey']\n del data['organizerDisplayName']\n # Add default values for those missing (both data model and\n # outbound Message)\n for df in CONF_DEFAULTS:\n if data[df] in (None, []):\n data[df] = CONF_DEFAULTS[df]\n setattr(request, df, CONF_DEFAULTS[df])\n # Convert dates from strings to 
Date objects; set month based\n # on start_date\n if data['startDate']:\n data['startDate'] = datetime.strptime(\n data['startDate'][:10], \"%Y-%m-%d\").date()\n data['month'] = data['startDate'].month\n else:\n data['month'] = 0\n if data['endDate']:\n data['endDate'] = datetime.strptime(\n data['endDate'][:10], \"%Y-%m-%d\").date()\n # Set seatsAvailable to be same as maxAttendees on creation\n if data[\"maxAttendees\"] > 0:\n data[\"seatsAvailable\"] = data[\"maxAttendees\"]\n # Get the user profile key, then set the conference's parent\n # to that value.\n # NOTE: The original code made a call to allocate_ids in order to\n # generate an ID for the conference. Since the profiles utilize\n # strings (email addresses) for their IDs, resulting in no risk\n # of colliding with NDB's auto-generated numeric IDs, I decided\n # to let NDB generate the conference ID automatically.\n # https://cloud.google.com/appengine/docs/python/ndb/entities?hl=en#numeric_keys\n p_key = ndb.Key(Profile, user_id)\n data['parent'] = p_key\n data['organizerUserId'] = request.organizerUserId = user_id\n # Create Conference, send email to organizer confirming\n # creation of Conference and return (modified) ConferenceForm\n Conference(**data).put()\n taskqueue.add(params={'email': user.email(),\n 'conferenceInfo': repr(request)},\n url='/tasks/send_confirmation_email'\n )\n return request" ]
[ "0.5676597", "0.56284505", "0.528872", "0.5195491", "0.5169349", "0.5091118", "0.50815505", "0.5049281", "0.50430477", "0.5014071", "0.4998264", "0.4988041", "0.49813396", "0.49767828", "0.49386248", "0.49370554", "0.49350393", "0.49330032", "0.49310178", "0.49302855", "0.4923266", "0.4905545", "0.49011555", "0.48553076", "0.48452422", "0.48412606", "0.4836652", "0.48364946", "0.48327047", "0.48281068" ]
0.5792911
0
Sets the date_created of this MessagingCampaign.
def date_created(self, date_created): self._date_created = date_created
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_created(self, date_created: datetime):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n self._date_created = date_created", "def date_created(self, date_created):\n self._date_created = date_created", "def datecreated(self, datecreated):\n\n self._datecreated = datecreated", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n self._created_date = created_date", "def SetDateCreated(self, date):\n self.datecreated = str(date)", "def created_date(self, created_date):\n if created_date is None:\n raise ValueError(\"Invalid value for `created_date`, must not be `None`\") # noqa: E501\n\n self._created_date = created_date", "def set_account_created_date(self, account_created_date):\n self.account_created_date = account_created_date", "def created(self, created):\n if created is None:\n raise ValueError(\"Invalid value for `created`, must not be `None`\")\n\n self._created = created", "def create_date(self, create_date):\n\n self._create_date = create_date", "def create_date(self, create_date):\n\n self._create_date = create_date", "def create_date(self, create_date):\n\n self._create_date = create_date", "def set_created(self, dt):\n self.created = dt_to_iso(dt)", "def creation_date(self, creation_date):\n\n self._creation_date = creation_date", "def creation_date(self, creation_date):\n\n self._creation_date = creation_date", "def created_timestamp(self, created_timestamp):\n self._created_timestamp = created_timestamp", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created" ]
[ "0.80591285", "0.79904276", "0.79904276", "0.79904276", "0.7909916", "0.7909916", "0.7818758", "0.7676886", "0.7676886", "0.7676886", "0.7676886", "0.7676886", "0.7632258", "0.759261", "0.7258268", "0.69510114", "0.6808352", "0.67060393", "0.67060393", "0.67060393", "0.664208", "0.6581187", "0.6581187", "0.65647835", "0.65502775", "0.65502775", "0.65502775", "0.65502775", "0.65502775", "0.65502775" ]
0.80568826
1
Gets the date_modified of this MessagingCampaign.
def date_modified(self): return self._date_modified
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetLastModifiedDate', self.handle)", "def modified(self) -> datetime.datetime:\n timestamp = os.path.getmtime(self._manifest_path)\n\n return datetime.datetime.fromtimestamp(timestamp)", "def get_inbound_statement_details_last_modified_date(self):\n return self.get_text_from_element(self.inbound_statements_details_last_modified_date_locator, False)", "def modified_timestamp(self) -> str:\n return pulumi.get(self, \"modified_timestamp\")", "def modified_timestamp(self) -> str:\n return pulumi.get(self, \"modified_timestamp\")", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetLastModifiedDate', self.handle)", "def last_modified(self):\n return os.path.getmtime(self.filename)", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")", "def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")", "def time_modified(self) -> str:\n return pulumi.get(self, \"time_modified\")", "def time_modified(self) -> str:\n return pulumi.get(self, \"time_modified\")", "def last_modified(self) -> str:\n\t\tif not self._closed:\n\t\t\ttimestamp = self.ds.last_modified()\n\t\t\treturn timestamp\n\t\treturn None", "def last_modified_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified_time\")", "def last_modified_at(self):\n return self.viztrail.last_modified_at", "def last_modified_date_time(self):\n if \"lastModifiedDateTime\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"lastModifiedDateTime\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None", "def time_last_modified(self):\n return self.properties.get(\"TimeLastModified\", None)", "def last_modified(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified\")", "def modification_time(self) -> str:\n return pulumi.get(self, \"modification_time\")", "def getModifiedDate(self, *args):\n return _libsbml.ModelHistory_getModifiedDate(self, *args)", "def last_modified_at(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_modified_at\")", "def updated_date(self):\n return self._updated_date", "def updated_date(self):\n return self._updated_date" ]
[ "0.72402114", "0.72402114", "0.71875286", "0.7092818", "0.70100117", "0.6977944", "0.6977944", "0.69701886", "0.69357944", "0.69300026", "0.69300026", "0.69300026", "0.69300026", "0.69300026", "0.69300026", "0.6924368", "0.6924368", "0.6866872", "0.6866872", "0.68586713", "0.6826732", "0.6785116", "0.6763307", "0.6762151", "0.6754415", "0.6688579", "0.66802037", "0.66784215", "0.6675155", "0.6675155" ]
0.81347257
0
Sets the date_modified of this MessagingCampaign.
def date_modified(self, date_modified): self._date_modified = date_modified
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def modified_date(self, modified_date):\n\n self._modified_date = modified_date", "def modified_date(self, modified_date):\n\n self._modified_date = modified_date", "def modified_at(self, modified_at):\n\n self._modified_at = modified_at", "def modified_at(self, modified_at):\n\n self._modified_at = modified_at", "def setModifiedDate(self, *args):\n return _libsbml.ModelHistory_setModifiedDate(self, *args)", "def set_modified(self, dt):\n self.modified = dt_to_iso(dt)", "def set_modified(self, dt):\n self.modified = dt_to_iso(dt)", "def modified(self, modified):\n\n self._modified = modified", "def last_modified(self, last_modified):\n\n self._last_modified = last_modified", "def set_modification_date(self, modification_date):\n\t\t\n\t\tif (modification_date.__class__ != str or modification_date ==\"\") and (modification_date.__class__ != time.struct_time or len(modification_date) != 9 ):\n\t\t\traise InvalidParameterError(\"modification_date\", \"modification_date is not in a proper format\")\n\t\ttry:\n\t\t\tif modification_date.__class__ == str:\n\t\t\t\ttmp_md = time.strptime(modification_date, '%S %M %H %d %m %Y')\n\t\t\telif modification_date.__class__ == time.struct_time:\n\t\t\t\ttmp_md = modification_date\n\t\t\tself.__modification_date = datetime(tmp_md[0], tmp_md[1], tmp_md[2], tmp_md[3], tmp_md[4], tmp_md[5])\t\n\t\texcept:\n\t\t\traise InvalidDate, \"date is not valid modification_date is not in a proper format\"", "def last_modified_on(self, last_modified_on):\n\n self._last_modified_on = last_modified_on", "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def last_modified_dts(self, last_modified_dts):\n\n self._last_modified_dts = last_modified_dts", "def date_modified(self):\n return self._date_modified", "def updated_date(self, updated_date):\n self._updated_date = updated_date", "def set_modified_since(self, data):\n self.add_payload('modifiedSince', data)", "def last_modification(self, last_modification):\n\n self._last_modification = last_modification", "def set_modified(obj, *args):\n dt = datetime.datetime(*args, tzinfo=pytz.utc)\n zope.dublincore.interfaces.IZopeDublinCore(obj).modified = dt\n return dt", "def set_datetime(self, date):\n self.date = date", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def isSetModifiedDate(self):\n return _libsbml.ModelHistory_isSetModifiedDate(self)" ]
[ "0.7839285", "0.7839285", "0.7839285", "0.7839285", "0.7839285", "0.7839285", "0.7839285", "0.75868607", "0.75868607", "0.64068264", "0.64068264", "0.63872856", "0.6339021", "0.6339021", "0.6242144", "0.61531746", "0.6107971", "0.6012136", "0.5762818", "0.5762818", "0.575947", "0.574503", "0.5740442", "0.5457705", "0.5421706", "0.54139584", "0.53219634", "0.5305048", "0.5305048", "0.52821285" ]
0.7893535
0
Sets the version of this MessagingCampaign. Required for updates, must match the version number of the most recent update
def version(self, version): self._version = version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version(self, version):\n self._version = version", "def version(self, version):\n self._version = version", "def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version: str):\n\n self._version = version", "def version(self, version: str):\n\n self._version = version", "def version(self, version: int):\n\n self._version = version", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def version(self, version):\n self._version = utils.VersionParser().parse(version)" ]
[ "0.72409934", "0.72409934", "0.71715456", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.7151149", "0.7151149", "0.7107088", "0.679211", "0.679211", "0.679211", "0.6780163" ]
0.7293403
0
Gets the division of this MessagingCampaign. The division this entity belongs to.
def division(self): return self._division
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subdivision(self) -> Optional[str]:\n return pulumi.get(self, \"subdivision\")", "def subdivision(self) -> Optional[str]:\n return pulumi.get(self, \"subdivision\")", "def get_group(self):\n return self._group", "def getGroup(self):\n\t\treturn self.Group", "def get_domain(self):\n return self._domain", "def getDivider(self):\n return _libsbml.CompModelPlugin_getDivider(self)", "def format_division(self, data):\n return data", "def group(self):\n return self.properties.get('Group', None)", "def get_domain(self):\n return self.domain", "def boundary(self):\n return self._boundary", "def district(self) -> str:\n return pulumi.get(self, \"district\")", "def get_divide(self, ):\n return self.get_parameter('divide')", "def comm_group(self):\n return self._gcomm", "def get(self):\n self._group = self._client.get(\n url=self._client.get_full_url(\n self.get_path(\n 'single', realm=self._realm_name, group_id=self._group_id\n )\n )\n )\n self._group_id = self._group[\"id\"]\n return self._group", "def group(self):\n return self._group", "def group(self):\n return self._group", "def group(self):\n return self._group", "def get_divergence_hor(self):\n for focus_mode in self.focus_modes:\n if focus_mode['modeName'] == self.active_focus_mode:\n return focus_mode['diverg'][0]", "def get_partition(self):\n return self._partition", "def folder(self):\n return self._folder", "def getDomain(self):\n return self.domain", "def get_group(self) -> Optional[str]:\n return self.group", "def get_division(self, name: str):\n genome = self.genomes[safe(name)]\n division = str(genome[\"division\"]).lower().replace(\"ensembl\", \"\")\n if division == \"bacteria\":\n raise NotImplementedError(\"Bacteria from Ensembl not supported.\")\n\n is_vertebrate = division == \"vertebrates\"\n return division, is_vertebrate", "def message_group_id(self) -> Optional[str]:\n return pulumi.get(self, \"message_group_id\")", "def chunk(self):\n # easy enough\n return self.dcpl.getChunk(rank=len(self.shape))", "def domain(self):\n\n return self._domain", "def get_dimension(self):\n return self._dimension", "def domain(self):\n return self._domain", "def domain(self):\n return self._domain", "def domain(self):\n return self._domain" ]
[ "0.5810358", "0.5810358", "0.51984763", "0.5110629", "0.50223154", "0.5020974", "0.50027514", "0.49567127", "0.49472067", "0.49352798", "0.49319625", "0.49162114", "0.4908078", "0.4903234", "0.488874", "0.488874", "0.488874", "0.4876585", "0.4860735", "0.48574632", "0.48448205", "0.48382932", "0.48056766", "0.47636947", "0.4753338", "0.47446758", "0.47381356", "0.4734144", "0.4734144", "0.4734144" ]
0.6648795
0
Sets the division of this MessagingCampaign. The division this entity belongs to.
def division(self, division): self._division = division
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def division(self, division):\n\n self._division = division", "def set_divide(self, a_divide):\n self.set_parameter('divide', a_divide)\n return self", "def SetBoundaryCriterion(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivideContinuity_SetBoundaryCriterion(self, *args)", "def validate_division(self, div_field):\n if not div_field.data or div_field.data == '':\n raise ValidationError('All users must belong to a division')\n return True", "def setSplit(self,split):\n self.split=split", "def set_part(self, connection_part):\n self.part = connection_part", "def campaign(self, campaign):\n\n self._campaign = campaign", "def _set_campaign(self, campaign):\n if isinstance(campaign, str):\n campaign = TrackedCampaign.objects.create(name=campaign)\n\n campaign.save()\n\n self.campaign = campaign", "def dividend(self, dividend):\n\n self._dividend = dividend", "def district(self, district):\n\n self._district = district", "def district(self, district):\n\n self._district = district", "def set_divisions(self, nx=1, ny=1):\n\n self.nx = nx\n self.ny = ny", "def division(self):\n return self._division", "def create_division(self, division_title):\n request = post(url=self.base_url + 'api/services/etender/division/CreateDivision',\n headers=self.headers,\n data=json.dumps({\"title\": division_title}))\n self.division = json.loads(request.content).get('result')\n print('Created division:', self.division)\n return self.division", "def set_dimension(self, dimension):\n assert self.dimension == dimension, \"dimensions do not match\"\n self._dimension = dimension", "def setDomainRange(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()\n return", "def set_space_guid(self, space_guid):\n res = self._cc.spaces(space_guid).get()\n self._space = res.resource\n\n res = self._cc.request(self._space.organization_url).get()\n self._org = res.resource\n return self", "def set_group(self, group):\n self._group = group", "def set_group(self, group: str) -> None:\n self.group = group", "def set_split(self,split='train'):\r\n \r\n self._target_data = self.processed_data[split]\r\n self.split_ = split", "def group(self, group):\n self._group = group", "def set_conversation(self, conversation):\r\n self.conversation = conversation", "def add_subdivision(self, parent, condition, client_id=None):\n\n biddable_ad_group_criterion=set_elements_to_none(campaign_service.factory.create('BiddableAdGroupCriterion'))\n product_partition=set_elements_to_none(campaign_service.factory.create('ProductPartition'))\n # If the root node is a unit, it would not have a parent\n product_partition.ParentCriterionId=parent.ad_group_criterion.Id if parent is not None and parent.ad_group_criterion is not None else None\n product_partition.Condition=condition\n product_partition.PartitionType='Subdivision'\n biddable_ad_group_criterion.Criterion=product_partition\n biddable_ad_group_criterion.CriterionBid=None\n biddable_ad_group_criterion.AdGroupId=self._ad_group_id\n biddable_ad_group_criterion.Status=None\n if hasattr(biddable_ad_group_criterion, 'EditorialStatus'):\n biddable_ad_group_criterion.EditorialStatus=None\n biddable_ad_group_criterion.Id=self._reference_id\n self._reference_id=self._reference_id\n self._reference_id-=1\n\n partition_action=BulkAdGroupProductPartition()\n partition_action.client_id=client_id\n partition_action.ad_group_criterion=biddable_ad_group_criterion\n self._partition_actions.append(partition_action)\n\n return partition_action", "def 
_setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()", "def SetProtobufMessageField(self, group_message, field, field_value):\n if field.label == field.LABEL_REPEATED:\n self.SetProtoRepeatedField(group_message, field, field_value)\n elif field.type == field.TYPE_MESSAGE:\n self.SetProtoMessageField(group_message, field, field_value)\n elif not self.SetProtoField(group_message, field, field_value):\n raise Exception('Unknown field type %s' % field.type)", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def department(self, department):\n\n self._department = department", "def department(self, department):\n\n self._department = department" ]
[ "0.6715935", "0.48573586", "0.48263213", "0.45991567", "0.45864677", "0.458617", "0.45811203", "0.44704387", "0.44482273", "0.4443386", "0.4443386", "0.44264278", "0.44212875", "0.439743", "0.43385282", "0.43234468", "0.43153226", "0.4298492", "0.4283395", "0.42175615", "0.41753483", "0.4148679", "0.41163713", "0.41144437", "0.41112566", "0.4102334", "0.4102334", "0.4102334", "0.40902355", "0.40902355" ]
0.66989917
1
Gets the campaign_status of this MessagingCampaign. The current status of the messaging campaign. A messaging campaign may be turned 'on' or 'off'.
def campaign_status(self): return self._campaign_status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "def get_status(self):\n return self._status", "def get_status(self):\n statuses = dict(ACTIVITY_STATUS_CHOICES)\n return statuses.get(self.status, \"N/A\")", "def campaign_status(self, campaign_status):\n allowed_values = [\"on\", \"stopping\", \"off\", \"complete\", \"invalid\"]\n if campaign_status.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for campaign_status -> \" + campaign_status)\n self._campaign_status = \"outdated_sdk_version\"\n else:\n self._campaign_status = campaign_status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def getStatus(self):\n return self._status", "def getStatus(self):\n return self.__status", "def getstatus(self):\n return self.__status", "def status(self):\n return status_dict[self._get_property_(self.STATUS).upper()]", "def Status(self):\n return self._get_attribute('status')", "def status(self):\n return self.get(self._names[\"status\"])", "def status(self):\n return STATUS[self.fields['status']]", "def status(self):\n return self._query_status()['status']", "def status(self):\n return self.m.status", "def get_status(self):\n return dict(CAMPAIGN_STATUS_CHOICES).get(self.status, \"N/A\")", "def status(self):\n return self._data['status']", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def status(self):\n return STATUSES.get(self._mower_status, {}).get('message', self._mower_status)", "def get_status(self):\n status = self._status.get_message()\n \n if status == \"N\":\n return \"offline\"\n \n elif status == \"Y\":\n return \"online\"\n \n elif status == \"A\":\n return \"away\"\n \n elif status == \"B\":\n return \"busy\"", "def Status(self):\r\n\t\treturn self._get_attribute('status')", "def status(self):\n return self._dbattr('status')", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status" ]
[ "0.6404847", "0.6360354", "0.62657183", "0.62341815", "0.6217242", "0.6217242", "0.6217242", "0.62143916", "0.61250436", "0.6114817", "0.6114813", "0.60491306", "0.6047289", "0.6042757", "0.6023667", "0.6022818", "0.5983734", "0.59719783", "0.5946572", "0.5946572", "0.5946572", "0.5871262", "0.5832075", "0.58224785", "0.58119065", "0.57928276", "0.57928276", "0.57928276", "0.57928276", "0.57928276" ]
0.8441347
0
Sets the campaign_status of this MessagingCampaign. The current status of the messaging campaign. A messaging campaign may be turned 'on' or 'off'.
def campaign_status(self, campaign_status):
    allowed_values = ["on", "stopping", "off", "complete", "invalid"]
    if campaign_status.lower() not in map(str.lower, allowed_values):
        # print("Invalid value for campaign_status -> " + campaign_status)
        self._campaign_status = "outdated_sdk_version"
    else:
        self._campaign_status = campaign_status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def campaign_status(self):\n return self._campaign_status", "def set_activity(self, status):\n self._activity = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status: str) -> None:\n\n try:\n self.status = Buddy.status_map[status.lower()]\n except KeyError:\n self.status = status", "def set_message_status(self, message_id, status):\n\t\tself.c.execute(\"UPDATE messages SET status = ? WHERE message_id = ?\", (status, message_id))\n\t\tself.save()", "def setStatus(self, status):\n self.__status = status", "def setstatus(self, status):\n with self.lock:\n self.status = status", "def status(self, status):\n if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status", "def job_status(self, job_status):\n\n self._job_status = job_status", "def status(self, status):\n allowed_values = [1, 2, 3] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\".format(status, allowed_values) # noqa: E501\n )\n\n self._status = status", "def status(self, status):\n if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n status is not None and len(status) < 1):\n raise ValueError(\"Invalid value for `status`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._status = status", "def SetStatus(self, status):\r\n self.status = status", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n allowed_values = [\"draft\", \"sent\", \"archive\", \"queued\", \"suspended\", \"in_process\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def status(self, status):\n allowed_values = [\"ENABLED\", \"DISABLED\"] # noqa: E501\n if (self._configuration.client_side_validation and\n status not in allowed_values):\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def _set_status(self, status):\n with self.status_lock:\n if (status in _ENDING_STATUSES) or (not self.status in _ENDING_STATUSES):\n self.status = status", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def status(self, status: int):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status", "def set_connection_status(self, connection_status: Literal[ConnectionState]) -> None:\n self.connection_status = connection_status\n self.publish(self.key_gen(\"connection_status\"), connection_status)", "def set_status(self, status: HTTPProxyStatus) -> None:\n self._status = status\n self.update_actor_details(status=self._status)", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n if status is not None and len(status) < 1:\n raise 
ValueError(\"Invalid value for `status`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status" ]
[ "0.6368695", "0.61262167", "0.5973353", "0.5973353", "0.5973353", "0.5827563", "0.57827497", "0.5776234", "0.5757022", "0.5722353", "0.5680996", "0.56777996", "0.5648092", "0.5639821", "0.5627372", "0.55989665", "0.5592994", "0.5582759", "0.5582326", "0.55720466", "0.55364394", "0.5512199", "0.5450819", "0.5450819", "0.5450819", "0.5450819", "0.5450819", "0.5450819", "0.5450819", "0.5450819" ]
0.7548788
0
Gets the callable_time_set of this MessagingCampaign. The callable time set for this messaging campaign.
def callable_time_set(self): return self._callable_time_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def callable_time_set(self, callable_time_set):\n \n self._callable_time_set = callable_time_set", "def schedule_times(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"schedule_times\")", "def getScheduleOnset(self):\n return DPxGetDinSchedOnset()", "def get_schedules(self):\n return self.__schedules", "def schedule_times(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"schedule_times\")", "def get_time_points(self):\n return self._time", "def scheduledTimes(self, runnable):\n events = self.store.query(\n TimedEvent, TimedEvent.runnable == runnable)\n return (event.time for event in events if not event.running)", "def schedule(self):\n return self._schedule", "def schedule(self):\n return self._schedule", "def schedule(self):\n return self._schedule", "def queue_times(self):\r\n queue_times = []\r\n for task in self.__tasks.values():\r\n if task.complete():\r\n queue_times.append(task.queued_time())\r\n return queue_times", "def time_scoping(self):\n return self._time_scoping", "def get_times(self):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def queue_times(self):\r\n return [task.scheduler_launch_time - self.__arrival_time\r\n for task in self.__tasks.values() if task.complete()]", "def get_last_set(self):\n return self.set", "def getSchedules(self) :\n return self.schedules", "def get_time(option_set):\n return option_set & TIME_MASK", "def get_timed_events(self):\n return self.dispatcher.timed_events", "def timers(self):\n return self['timers']", "def sets(self):\n return self._loaded_and_cached(gdxcc.GMS_DT_SET)", "def timings(self):\n if self._C_timings is None:\n raise RuntimeError(\"Cannot extract timings with non-finalized Profiler.\")\n return {field: max(getattr(self._C_timings, field), 10**-6)\n for field, _ in self._C_timings._fields_}" ]
[ "0.6893189", "0.55124503", "0.54107213", "0.5211071", "0.5081652", "0.5053929", "0.4999273", "0.4996595", "0.4996595", "0.4996595", "0.49755397", "0.4962994", "0.49513596", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4880842", "0.48680344", "0.48603994", "0.47949654", "0.4780974", "0.47346133", "0.47227257", "0.4699778" ]
0.8133848
0
Sets the callable_time_set of this MessagingCampaign. The callable time set for this messaging campaign.
def callable_time_set(self, callable_time_set): self._callable_time_set = callable_time_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def callable_time_set(self):\n return self._callable_time_set", "def set_time(self, set_time):\n\n self._set_time = set_time", "def setScheduleOnset(self, onset):\n DPxSetDinSchedOnset(onset)", "def setShowCallables(self, show_callables):\n logger.debug(\"setShowCallables: {}\".format(show_callables))\n self._show_callables = show_callables\n self.invalidateFilter()", "def collection_time(self, collection_time):\n\n self._collection_time = collection_time", "def set_enqueue_time(self, time):\n self.enqueue_time = time\n for task in self.tasks:\n task.enqueue_time = time", "def set_exec_time(self, time):\n for task in self.tasks:\n task.exec_time = time", "def scheduled_at(self, scheduled_at):\n\n self._scheduled_at = scheduled_at", "def set_set_later(self, value):\r\n self.set_later = value", "def time_utc(self, time_utc):\n\n self._time_utc = time_utc", "def scheduled_reset_at(self, scheduled_reset_at):\n\n self._scheduled_reset_at = scheduled_reset_at", "def setSubmitTime(t):", "def set(self):\n now = time.time()\n remove = None\n for ident, event in self.events.items():\n if not event[0].isSet():\n # if this client's event is not set, then set it\n # also update the last set timestamp to now\n event[0].set()\n event[1] = now\n else:\n # if the client's event is already set, it means the client\n # did not process a previous frame\n # if the event stays set for more than 5 seconds, then assume\n # the client is gone and remove it\n if now - event[1] > 5:\n remove = ident\n if remove:\n del self.events[remove]", "def valkkafsmanager_set_time_cb(self, t):\n self.signals.set_time.emit(t)", "def set_speaker_time(self, datetime):\n params = [\n ('year', datetime.year),\n ('month', datetime.month),\n ('day', datetime.day),\n ('hour', datetime.hour),\n ('min', datetime.minute),\n ('sec', datetime.second),\n ]\n\n self.get(COMMAND_UIC, 'SetSpeakerTime', params)", "def set_response_time(self, time):\n for task in self.tasks:\n task.response_time = time", "def _callback_local_setpoint(self, local_setpoint):\n # type: (PositionTarget) -> None\n self.local_setpoint = local_setpoint\n return", "def submit_time(self, submit_time: datetime):\n\n self._submit_time = submit_time", "def svn_info_t_schedule_set(svn_info_t_self, svn_wc_schedule_t_schedule): # real signature unknown; restored from __doc__\n pass", "def set_analysis_time(self, t):\n for z in self.zones:\n z.set_demand_rate_per_t(t)", "def setup_channel_set(channel_set):\n\n #amfast.logger = log.logger\n\n # Map service targets to controller methods\n cont_obj = app.controller.Controller()\n service = Service('DAService')\n service.mapTarget(CallableTarget(cont_obj.get_player_info, 'get_player_info'))\n service.mapTarget(CallableTarget(cont_obj.do_move, 'do_move'))\n service.mapTarget(CallableTarget(cont_obj.do_attack, 'do_attack'))\n service.mapTarget(CallableTarget(cont_obj.get_news, 'get_news'))\n service.mapTarget(CallableTarget(cont_obj.get_floor, 'get_floor'))\n service.mapTarget(CallableTarget(cont_obj.get_monster, 'get_monster'))\n service.mapTarget(CallableTarget(cont_obj.raiseException, 'raiseException'))\n channel_set.service_mapper.mapService(service)", "def set_last_submission_time(self):\r\n self.last_submission_time = datetime.datetime.now(UTC())", "def setTimepoint(self, tp):\n\t\tpass", "def setConcurrentTasks(self, config):\n self.concurrentTasks = [{'func': self.logDBCleanUp, 'duration': config.logDBCleanDuration}]", "def permission_sets(self, permission_sets):\n\n self._permission_sets = permission_sets", "def 
set_time(self, time_fn):\n self.time_fn = time_fn\n self.socket.send_string(f'T {time_fn()}')\n return self.socket.recv_string()", "def set_schedule(self, time: str, handler: Callable, **kwargs) -> None:\n if time in (\"sunrise\", \"sunset\"):\n method = getattr(self, \"run_at_{0}\".format(time))\n method(handler, **kwargs, constrain_enabled=True)\n else:\n self.run_daily(\n handler, self.parse_time(time), **kwargs, constrain_enabled=True\n )", "def scheduled_plan(self, scheduled_plan):\n\n self._scheduled_plan = scheduled_plan", "async def set_chat_sticker_set(self, chat_id: typing.Union[base.Integer, base.String],\n sticker_set_name: base.String) -> base.Boolean:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.SET_CHAT_STICKER_SET, payload)\n\n return result", "def time_settime(currenttime):\r\n\r\n time_query_times.append((getruntime(), currenttime))" ]
[ "0.65908307", "0.6178317", "0.5953841", "0.5100885", "0.49795374", "0.49558243", "0.483162", "0.48207587", "0.48039177", "0.477478", "0.47390914", "0.47065333", "0.47013178", "0.4697793", "0.4662012", "0.46349522", "0.45914975", "0.45656434", "0.45478013", "0.45220882", "0.45187205", "0.45013884", "0.44955614", "0.44649416", "0.44500446", "0.4425367", "0.4389864", "0.43658504", "0.4364397", "0.43598932" ]
0.8699226
0
Gets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
def contact_list(self): return self._contact_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_contacts(self):\n return self.contacts", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = self.client.GetContacts(uri=next.href)\n\t\t\tfeeds.append(feed)\n\t\t\tnext = feed.GetNextLink()\n\t\t\n\t\tcontacts = []\n\t\tfor feed in feeds:\n\t\t\tif not feed.entry:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor i, entry in enumerate(feed.entry):\n\t\t\t\t\tcontacts.append(entry)\n\t\treturn contacts", "def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]", "def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def contacts(self):\r\n return contacts.Contacts(self)", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def contacts(self):\n return ContactCollection(self.request)", "def _get_receivers_list(self):\n\n # TODO: document what this plugin expects to be in Dockerfile/where it gets info from\n global_component = self._get_component_label()\n # this relies on bump_release plugin configuring source.git_commit to actually be\n # branch name, not a commit\n if not isinstance(self.workflow.source, GitSource):\n raise PluginFailedException('Source is not of type \"GitSource\", panic!')\n git_branch = self.workflow.source.git_commit\n try:\n r = requests.get(urljoin(self.pdc_url, 'rest_api/v1/release-component-contacts/'),\n headers={'Authorization': 'Token %s' % self._get_pdc_token()},\n params={'global_component': global_component,\n 'dist_git_branch': git_branch,\n 'role': self.pdc_contact_role},\n verify=self.pdc_verify_cert)\n except requests.RequestException as e:\n self.log.error('failed to connect to PDC: %s', str(e))\n raise RuntimeError(e)\n\n if r.status_code != 200:\n self.log.error('PDC returned status code %s, full response: %s',\n r.status_code, r.text)\n raise RuntimeError('PDC returned non-200 status code (%s), see referenced build log' %\n r.status_code)\n\n contacts = r.json()\n\n if contacts['count'] == 0:\n self.log.error('no %s role for the component', self.pdc_contact_role)\n raise RuntimeError('no %s role for the component' % self.pdc_contact_role)\n\n send_to = []\n for contact in contacts['results']:\n send_to.append(contact['contact']['email'])\n\n return send_to", "def get_recipients(self) -> List[Client]:\n\n index_list = [i for i in range(len(self.int_var_list)) if self.int_var_list[i].get() == 1]\n return [self.client_list[i] for i in index_list]", "def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names", "def get_active_contact(self):\n list_contact = Contact.objects.filter(phonebook__campaign=self.id,\n status=CONTACT_STATUS.ACTIVE).all()\n if not list_contact:\n return False\n return list_contact", "def contact(self):\n return self._contact", "def contact(self):\n return self._contact", "def get_contacts(self, count=-1, excluded_guid=None):\n current_len = 
len(self._contacts)\n if current_len == 0 or count == 0:\n return []\n\n if count < 0:\n count = current_len\n else:\n count = min(count, current_len)\n\n if excluded_guid is None:\n # Get the last `count` contacts.\n contact_list = self._contacts[-count:]\n else:\n contact_list = []\n for contact in reversed(self._contacts):\n if contact.guid == excluded_guid:\n continue\n contact_list.append(contact)\n if len(contact_list) >= count:\n break\n return contact_list", "async def get_contacts(self, **kwargs) -> List[CertificateContact]:\n contacts = await self._client.get_certificate_contacts(\n vault_base_url=self._vault_url, **kwargs\n )\n return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]", "def get_cached_contacts(self):\n return list(self._replacement_cache)", "def get_all_contacts(self):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} ORDER BY id;\".format(\", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query)\n\n return [Contact(*item) for item in data]", "def get_queryset(self):\n contact_data = Contact.objects.filter(contact_groups__in=Member.objects.filter(\n user=self.request.user).values('group_id').distinct())\n\n return contact_data", "def Contact(self):\n return self.__contact", "def support_contacts(self):\n return self._support_contacts", "def contacts(self):\n from hubspot3.contacts import ContactsClient\n\n return ContactsClient(**self.auth, **self.options)", "def get_queryset(self):\n return self.request.user.contacts.all()", "def receiveContactList(self, contactList):", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def list_contact(self, key, value):\n self.db.list_contact(\n key,\n value,\n )", "def get_contact(self, username, password):\n\t\tdn, username = self.auth(username, password)\n\t\tif self.is_blacklisted(username):\n\t\t\traise ServiceForbidden()\n\n\t\tuser = self.get_udm_user(username=username)\n\t\tif not self.send_plugins:\n\t\t\traise ServiceForbidden()\n\n\t\treturn [{\n\t\t\t\"id\": p.send_method(),\n\t\t\t\"label\": p.send_method_label(),\n\t\t\t\"value\": user[p.udm_property]\n\t\t} for p in self.send_plugins.values() if p.udm_property in user]", "def contacts(request):\n User = get_user_model()\n ids = set(request.user.chatmessage_set.all().values_list(\"recipients\", flat=True))\n context = {\n 'contacts': User.objects.filter(pk__in=ids)\n }\n return render(request, \"chat/contacts.html\", context)", "def contact_info(self):\n return self._contact_info" ]
[ "0.72631705", "0.71663344", "0.7121245", "0.6652589", "0.65349084", "0.64175344", "0.63972867", "0.6386485", "0.63770306", "0.63157517", "0.61904186", "0.61765337", "0.6132532", "0.6121634", "0.6121634", "0.602559", "0.6003591", "0.59747976", "0.5964907", "0.5926964", "0.5926448", "0.5915741", "0.59078896", "0.5905416", "0.589102", "0.5790044", "0.5783242", "0.57187986", "0.5704222", "0.5675407" ]
0.8081953
0
Sets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
def contact_list(self, contact_list): self._contact_list = contact_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receiveContactList(self, contactList):", "def set_contacts(self, contacts):\n\n\t\tif contacts is not None and not isinstance(contacts, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: contacts EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__contacts = contacts\n\t\tself.__key_modified['Contacts'] = 1", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contact_list(self):\n return self._contact_list", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def contact(self, contact):\n\n self.logger.debug(\"In 'contact' setter.\")\n\n self._contact = contact", "async def set_contacts(self, contacts: List[CertificateContact], **kwargs) -> List[CertificateContact]:\n new_contacts = await self._client.set_certificate_contacts(\n vault_base_url=self.vault_url,\n contacts=self._models.Contacts(contact_list=[c._to_certificate_contacts_item() for c in contacts]),\n **kwargs\n )\n return [\n CertificateContact._from_certificate_contacts_item(contact_item=item) for item in new_contacts.contact_list\n ]", "def contact_points(self, contact_points: object):\n\n self._contact_points = contact_points", "def contact(self, contact):\n\n self._contact = contact", "def contact(self, contact):\n\n self._contact = contact", "def support_contacts(self, support_contacts):\n self._support_contacts = support_contacts", "def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)", "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')", "def send_mass_messages(self, recipient_list, sender, message=\"\", subject=\"\"):\n try:\n for s in recipient_list:\n self.send_message(to=s, sender=sender, message=message, subject=subject)\n except TypeError:\n return -1\n return 1", "def list(self, list):\n if list is None:\n raise ValueError(\"Invalid value for `list`, must not be `None`\") # noqa: E501\n\n self._list = list", "def do_send_list( self, a_list ):\r\n # --- this 
needs to be moved to task some set up here then on there\r\n self.logger.info( \"turn on sendList\" )\r\n self.send_list_ix = 0\r\n\r\n #self.send_list = [ 180, 920, 160, 1740, 160, 780, 160, 2840, 160, 1320, 160, 1340, 160, ] # 1180, 160, 2700, 160, 12780, 200, 920, \\\r\n #160, 2680, 160, 780, 160, 800, 160, 780, 160, 920, 160, 800, 140, 800, \\\r\n # 160 ]\r\n self.send_list = a_list\r\n self.com_driver.send( \"z\\n\" )\r\n self.list_send = True # if we were mult-threaded this would have to be here\r\n\r\n return", "def contact_point(self, contact_point: object):\n\n self._contact_point = contact_point", "def contact_reference(self, contact_reference):\n\n self._contact_reference = contact_reference", "def list_contact(self, key, value):\n self.db.list_contact(\n key,\n value,\n )", "def target_contact(self, target_contact):\n \n self._target_contact = target_contact", "def contactListClicked(self):\n \n contacts = self.userList.getSelectedItems()\n self.mergeButton.setEnabled(contacts != None and len(contacts) > 1)\n \n if contacts != None and len(contacts) == 1:\n self.messageList.filterByContact(contacts[0])\n else:\n self.messageList.removeFilter()", "def _create_mailing_list(cls):\n cls.mailing_list_1 = cls.env['mailing.list'].with_context(cls._test_context).create({\n 'name': 'List1',\n 'contact_ids': [\n (0, 0, {'name': 'Déboulonneur', 'email': '[email protected]'}),\n (0, 0, {'name': 'Gorramts', 'email': '[email protected]'}),\n (0, 0, {'name': 'Ybrant', 'email': '[email protected]'}),\n ]\n })\n cls.mailing_list_2 = cls.env['mailing.list'].with_context(cls._test_context).create({\n 'name': 'List2',\n 'contact_ids': [\n (0, 0, {'name': 'Gilberte', 'email': '[email protected]'}),\n (0, 0, {'name': 'Gilberte En Mieux', 'email': '[email protected]'}),\n (0, 0, {'name': 'Norbert', 'email': '[email protected]'}),\n (0, 0, {'name': 'Ybrant', 'email': '[email protected]'}),\n ]\n })", "def member_list(self, member_list):\n\n self._member_list = member_list", "def contact_info(self, contact_info):\n\n self._contact_info = contact_info", "def SetDomainsList(self, domainsList) :\n\t\t...", "def source_contact(self, source_contact):\n \n self._source_contact = source_contact", "def remove_from_contact_list(self, contacts_to_remove_list):\n if self.contact_list is None:\n return\n for id in contacts_to_remove_list:\n if id in range(0, len(self.contact_list) + 1):\n self.contact_list[id - 1] = None\n self.contact_list = [contact for contact in self.contact_list if contact is not None]", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email" ]
[ "0.6209503", "0.6202445", "0.588594", "0.588594", "0.5883504", "0.5845821", "0.5795876", "0.5775629", "0.55796677", "0.5562263", "0.5562263", "0.5559013", "0.5479643", "0.5460591", "0.53779536", "0.5320598", "0.53073066", "0.5305575", "0.52854943", "0.5274175", "0.5192591", "0.5121968", "0.5113486", "0.50693697", "0.5029035", "0.5010891", "0.50096", "0.5007492", "0.49756786", "0.49653065" ]
0.83265656
0
Gets the dnc_lists of this MessagingCampaign. The dnc lists to check before sending a message for this messaging campaign.
def dnc_lists(self): return self._dnc_lists
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dmarc_messages(self):\n messages = []\n try:\n if self.opt_use_ssl:\n self.server = poplib.POP3_SSL(self.opt_pop3_server)\n self.server.user(self.opt_global_account[\"username\"])\n self.server.pass_(self.opt_global_account[\"password\"])\n else:\n self.server = poplib.POP3(self.opt_pop3_server)\n self.server.user(self.opt_global_account[\"username\"])\n self.server.pass_(self.opt_global_account[\"password\"])\n except Exception as e:\n raise Exception(\n \"Error connecting to %s with exception %s\" %\n (self.opt_pop3_server, str(e)))\n else:\n self.helper.log_debug(\n 'get_dmarc_messages: successfully connected to %s' %\n self.opt_pop3_server)\n messages = self.byte2str(self.server.uidl()[1])\n self.helper.log_info(\n 'get_dmarc_messages: %d messages' %\n len(messages))\n return messages", "def contact_list(self):\n return self._contact_list", "def get_drip_campaigns(self):\n return list(DripCampaign.objects(user_id=self.user_id))", "def get_message_list(self):\n \n result = requests.get(\n url = root_url + '/{}'.format(\"message\"),\n headers = { 'Authorization': api_key },\n )\n\n message_list = result.json()\n\n self.message_list = message_list", "def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def get_recipients(self) -> List[Client]:\n\n index_list = [i for i in range(len(self.int_var_list)) if self.int_var_list[i].get() == 1]\n return [self.client_list[i] for i in index_list]", "def get_campaign_name_list(self):\n campaigns = self.find('campaigns', {})\n campaign_names = []\n for campaign in campaigns:\n if 'name' in campaign:\n campaign_names.append(campaign['name'])\n return campaign_names", "def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)", "def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])", "def get_dns_list(self):\n return self.get_ipv4_dns_list()", "def dnc_lists(self, dnc_lists):\n \n self._dnc_lists = dnc_lists", "def get_message_list(self):\n count = 0\n for msg in self.mbox:\n if msg['From'].find(self.config['tgt_email']) > -1:\n dtime = arrow.get(msg['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ')\n message = dict({'from': msg['From'],\n 'date': dtime,\n 'subject': msg['Subject']})\n # boundary = msg.get_boundary()\n # if boundary is not None:\n # bounds = [m.start() for m\n # in re.finditer(boundary, str(msg))]\n # else:\n # bounds = list()\n # if len(bounds) > 2:\n # message['text'] = str(msg)[bounds[1]:bounds[2]]\n # else:\n # message['text'] = None\n pl = None\n if msg['Subject'].find(\":\") == -1:\n finished = False\n pl = msg.get_payload()\n while finished is False:\n if isinstance(pl, str):\n finished = True\n elif isinstance(pl, list):\n pl = pl[0].get_payload()\n else:\n raise ValueError(\"Non-list, non-str payload?\")\n break\n message['text'] = self.clean_text(str(pl))\n\n if message['text'] is not None:\n self.messages.append(message)\n count += 1\n # print count\n self.messages.sort(key=lambda item: item['date'])", "def GetListDoctors(self):\n\t\treturn self.ClientsMap.values()", "def get_mailchimp_lists(mc_api_key, server):\n try:\n client = MailchimpMarketing.Client()\n client.set_config({\n \"api_key\": mc_api_key,\n \"server\": server\n })\n\n response = client.lists.get_all_lists()\n 
print(response)\n except ApiClientError as error:\n print(\"Error: {}\".format(error.text))", "def _get_receivers_list(self):\n\n # TODO: document what this plugin expects to be in Dockerfile/where it gets info from\n global_component = self._get_component_label()\n # this relies on bump_release plugin configuring source.git_commit to actually be\n # branch name, not a commit\n if not isinstance(self.workflow.source, GitSource):\n raise PluginFailedException('Source is not of type \"GitSource\", panic!')\n git_branch = self.workflow.source.git_commit\n try:\n r = requests.get(urljoin(self.pdc_url, 'rest_api/v1/release-component-contacts/'),\n headers={'Authorization': 'Token %s' % self._get_pdc_token()},\n params={'global_component': global_component,\n 'dist_git_branch': git_branch,\n 'role': self.pdc_contact_role},\n verify=self.pdc_verify_cert)\n except requests.RequestException as e:\n self.log.error('failed to connect to PDC: %s', str(e))\n raise RuntimeError(e)\n\n if r.status_code != 200:\n self.log.error('PDC returned status code %s, full response: %s',\n r.status_code, r.text)\n raise RuntimeError('PDC returned non-200 status code (%s), see referenced build log' %\n r.status_code)\n\n contacts = r.json()\n\n if contacts['count'] == 0:\n self.log.error('no %s role for the component', self.pdc_contact_role)\n raise RuntimeError('no %s role for the component' % self.pdc_contact_role)\n\n send_to = []\n for contact in contacts['results']:\n send_to.append(contact['contact']['email'])\n\n return send_to", "def tracking_domain_list(self):\r\n params = base.get_params(None, locals())\r\n return self._get('tracking_domain_list', params)", "def get_dms_list(self, start_index, list_count):\n response = self.get(COMMAND_UIC, 'GetDmsList', [\n ('liststartindex', int(start_index)),\n ('listcount', int(list_count)),\n ])\n\n if not int(response['listcount']):\n return []\n\n return response_list(response['dmslist']['dms'])", "async def fetch_dm_channels(self):\n data = await self.http.get_dm_channels()\n channels = []\n for dm_channel_data in data.get('channels', data):\n dm_channel = self.http.create_channel(data=dm_channel_data)\n channels.append(dm_channel)\n\n return channels", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = self.client.GetContacts(uri=next.href)\n\t\t\tfeeds.append(feed)\n\t\t\tnext = feed.GetNextLink()\n\t\t\n\t\tcontacts = []\n\t\tfor feed in feeds:\n\t\t\tif not feed.entry:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor i, entry in enumerate(feed.entry):\n\t\t\t\t\tcontacts.append(entry)\n\t\treturn contacts", "def get_contacts(self, count=-1, excluded_guid=None):\n current_len = len(self._contacts)\n if current_len == 0 or count == 0:\n return []\n\n if count < 0:\n count = current_len\n else:\n count = min(count, current_len)\n\n if excluded_guid is None:\n # Get the last `count` contacts.\n contact_list = self._contacts[-count:]\n else:\n contact_list = []\n for contact in reversed(self._contacts):\n if contact.guid == excluded_guid:\n continue\n contact_list.append(contact)\n if len(contact_list) >= count:\n break\n return contact_list", "def retrievelist(self,listofsharesofmessages):\n \n messageslist = []\n for shareslist in listofsharesofmessages :\n message = self.retrieve(shareslist)\n messageslist.append(message)\n return messageslist", "def get_sent_messages(self):\n return self.sent_messages", "def _messages_list(self, queue):\n\n return 
queue.messages()", "def sent_messages(self):\n return self._get_messages_from_folder_name('SentItems')", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def detectionlists(self):\n return self._sdk_dependencies.detection_lists_module", "def get_conversations(self):\n\t\treturn self.conversations", "def getIpv4DnsList(self):\n with self.status._dhcp_status_mutex:\n if self.status.ipv4_lease_valid is None:\n return [None]\n else:\n return self.status.ipv4_dnslist", "def AllowedDomains(self)->list:\n return self._allowedDomains", "def email_ml_list(self):\n return self._request('email/ml/list', inspect_args_func(inspect.currentframe()), method='get')" ]
[ "0.5988404", "0.58297056", "0.56870914", "0.55929554", "0.5453402", "0.5412666", "0.5395899", "0.5304845", "0.5249858", "0.5206307", "0.5206069", "0.5170666", "0.5170228", "0.51614743", "0.5143177", "0.51181716", "0.5111123", "0.50970674", "0.50523174", "0.504655", "0.5045429", "0.5012835", "0.4995541", "0.49917287", "0.4958403", "0.4955061", "0.49537343", "0.49376455", "0.49335316", "0.4913977" ]
0.70566237
0
Sets the dnc_lists of this MessagingCampaign. The dnc lists to check before sending a message for this messaging campaign.
def dnc_lists(self, dnc_lists): self._dnc_lists = dnc_lists
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetDomainsList(self, domainsList) :\n\t\t...", "def checklists(self, checklists):\n\n self._checklists = checklists", "def fdsid_list(self, fdsid_list):\n\n self._fdsid_list = fdsid_list", "def contact_list(self, contact_list):\n \n self._contact_list = contact_list", "def dnc_lists(self):\n return self._dnc_lists", "def do_send_list( self, a_list ):\r\n # --- this needs to be moved to task some set up here then on there\r\n self.logger.info( \"turn on sendList\" )\r\n self.send_list_ix = 0\r\n\r\n #self.send_list = [ 180, 920, 160, 1740, 160, 780, 160, 2840, 160, 1320, 160, 1340, 160, ] # 1180, 160, 2700, 160, 12780, 200, 920, \\\r\n #160, 2680, 160, 780, 160, 800, 160, 780, 160, 920, 160, 800, 140, 800, \\\r\n # 160 ]\r\n self.send_list = a_list\r\n self.com_driver.send( \"z\\n\" )\r\n self.list_send = True # if we were mult-threaded this would have to be here\r\n\r\n return", "def _set_scrolls(self, listOfScrolls):\n self._scrolls = listOfScrolls", "def forward_messages(self, message_list):\n\n def find_ports(destination):\n \"\"\"\n Return a list of the ports that according to the forwarding table\n lead to 'destination'.\n\n Arguments:\n destination: an instance of class NetworkDevice or an iterable\n of NetworkDevice instances.\n\n Returns:\n A set of the ports that lead to the devices in 'destination'.\n\n \"\"\"\n output_ports = set()\n if isinstance(destination, collections.Iterable):\n for device in destination:\n # ports leading to device\n ports_towards_device = self.forwarding_table.get(\n device, self.ports)\n output_ports.update(ports_towards_device)\n else:\n output_ports.update(\n self.forwarding_table.get(destination, self.ports))\n return output_ports\n\n for message in message_list:\n destinations = message.destination\n output_ports = find_ports(destinations)\n for port in output_ports:\n new_message = Message.from_message(message)\n self.env.process(\n self.instruct_transmission(new_message, port))", "def set_hosts(self, host_list: t.List[str]) -> None:\n if isinstance(host_list, str):\n host_list = [host_list.strip()]\n if not isinstance(host_list, list):\n raise TypeError(\"host_list argument must be a list of strings\")\n if not all(isinstance(host, str) for host in host_list):\n raise TypeError(\"host_list argument must be list of strings\")\n # TODO check length\n if self.batch:\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n self.batch_settings.set_hostlist(host_list)\n\n if self.launcher == \"lsf\":\n for db in self.dbnodes:\n db.set_hosts(host_list)\n else:\n for host, db in zip(host_list, self.dbnodes):\n if isinstance(db.run_settings, AprunSettings):\n if not self.batch:\n db.run_settings.set_hostlist([host])\n else:\n db.run_settings.set_hostlist([host])\n\n if db.is_mpmd and hasattr(db.run_settings, \"mpmd\"):\n for i, mpmd_runsettings in enumerate(db.run_settings.mpmd):\n mpmd_runsettings.set_hostlist(host_list[i + 1])", "def set(self, varbindlist):\n comm = self.sessiondata.get_community(RW)\n if not comm:\n raise SNMPBadCommunity(\"No community!\")\n mo = CommunityBasedMessage(comm, SetRequestPDU() , self.sessiondata.version )\n for vb in varbindlist:\n mo.add_varbind(vb)\n resp = self._send_and_receive(mo)\n if resp.pdu.error_status:\n raise EXCEPTION_MAP[resp.pdu.error_status](resp.pdu.error_index)\n else:\n return resp.pdu.varbinds", "def send_message_list(message_lst: list, reciever: str, receiver_data: dict,\n users_to_remove: list) -> None:\n new_prev_mes: list = []\n final_message = ''\n for message in message_lst:\n if 
len(new_prev_mes) == 5:\n break\n if message not in receiver_data['usr_prevs_mes']:\n receiver_data['mes_limit'] -= 1\n final_message += f'\\n{message}'\n new_prev_mes.append(message)\n receiver_data['usr_prevs_mes'] = new_prev_mes\n final_message += '\\nReply stop to stop these notifications.'\n if len(new_prev_mes) != 0:\n send_message(reciever,\n 'New Vaccine Locations Detected!',\n final_message,\n receiver_data['carrier'])\n if receiver_data['mes_limit'] <= 0:\n users_to_remove.append(reciever)", "def setup_ncfile_list(self):\n self.ncfilelist = []\n for file in os.listdir(self.dirpath_netcdf):\n if file.endswith('.nc'):\n self.ncfilelist.append(osp.join(self.dirpath_netcdf, file))", "def set_dns_servers(self, hDnsServersList):\n\t\tcall_sdk_function('PrlVmDevNet_SetDnsServers', self.handle, conv_handle_arg(hDnsServersList))", "def set_blists(self, blists):\n self.blists = blists[:]", "def _create_mailing_list(cls):\n cls.mailing_list_1 = cls.env['mailing.list'].with_context(cls._test_context).create({\n 'name': 'List1',\n 'contact_ids': [\n (0, 0, {'name': 'Déboulonneur', 'email': '[email protected]'}),\n (0, 0, {'name': 'Gorramts', 'email': '[email protected]'}),\n (0, 0, {'name': 'Ybrant', 'email': '[email protected]'}),\n ]\n })\n cls.mailing_list_2 = cls.env['mailing.list'].with_context(cls._test_context).create({\n 'name': 'List2',\n 'contact_ids': [\n (0, 0, {'name': 'Gilberte', 'email': '[email protected]'}),\n (0, 0, {'name': 'Gilberte En Mieux', 'email': '[email protected]'}),\n (0, 0, {'name': 'Norbert', 'email': '[email protected]'}),\n (0, 0, {'name': 'Ybrant', 'email': '[email protected]'}),\n ]\n })", "def update_emails_with_dlcs(dlcs, liaison=None):\n for dlc in dlcs:\n EmailMessage.objects.filter(\n record__author__dlc=dlc,\n date_sent__isnull=True).update(_liaison=liaison)", "def setDetectLists(self, value):\n return self._set(detectLists=value)", "def set_target_stocks_list(self, list_of_stocks):\n self.target_stocks = list_of_stocks", "def DistanceMatrices(self, dms):\r\n if not isinstance(dms, ListType):\r\n raise TypeError(\"The item passed in as the new list was not a \"\r\n \"list data type.\")\r\n if self._num_dms >= 0 and len(dms) != self._num_dms:\r\n raise ValueError(\"Cannot set %d distance matrices. 
Must provide \"\r\n \"exactly %d distance matrices.\" % (len(dms),\r\n self._num_dms))\r\n for dm in dms:\r\n if not isinstance(dm, DistanceMatrix):\r\n raise TypeError(\r\n 'Invalid type (%s); expected DistanceMatrix' %\r\n dm.__class__.__name__)\r\n if self._min_dm_size >= 0 and dm.shape[0] < self._min_dm_size:\r\n raise ValueError(\"Distance matrix of size %dx%d is smaller \"\r\n \"than the minimum allowable distance matrix \"\r\n \"size of %dx%d for this analysis.\" %\r\n (dm.shape[0], dm.shape[0], self._min_dm_size,\r\n self._min_dm_size))\r\n self._dms = dms", "def _setsenders_correspondent_53D(self, val):\n self.swift_obj.SendersCorrespondent_D = val\n self.swift_obj.SendersCorrespondent_D.swiftTag = \"53D\"", "def security_list_ids(self, security_list_ids):\n self._security_list_ids = security_list_ids", "def domains(self, domains):\n\n self._domains = domains", "def sync_all_lists(self):\r\n print(\"Started syncing influencer master lists with DB\")\r\n screen_names_on_lists = []\r\n self._add_or_update(screen_names_on_lists)\r\n print(\"Removing entries which are no longer on any list\")\r\n self._delete_entries_not_in_list(screen_names_on_lists) # remove entries from DB if they are on no list\r\n print(\"Sync complete\")", "def set_search_domains(self, hSearchDomainsList):\n\t\tcall_sdk_function('PrlVmDevNet_SetSearchDomains', self.handle, conv_handle_arg(hSearchDomainsList))", "def setListDoc(self, list):\n if list is None: list__o = None\n else: list__o = list._o\n libxml2mod.xmlSetListDoc(list__o, self._o)", "def test_set_multiple_ca_list(self):\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n sedesc = secert.get_subject()\n cldesc = clcert.get_subject()\n\n def multiple_ca(ctx):\n L = [sedesc, cldesc]\n ctx.set_client_ca_list(L)\n return L\n\n self._check_client_ca_list(multiple_ca)", "def fields_in_list(self, fields_in_list):\n\n self._fields_in_list = fields_in_list", "def fields_in_list(self, fields_in_list):\n\n self._fields_in_list = fields_in_list", "def set_search_domains(self, hSearchDomainsList):\n\t\tcall_sdk_function('PrlVmCfg_SetSearchDomains', self.handle, conv_handle_arg(hSearchDomainsList))", "def cc_emails(self, cc_emails):\n\n self._cc_emails = cc_emails" ]
[ "0.5641915", "0.55063945", "0.52784514", "0.5267238", "0.51484704", "0.5096262", "0.48523757", "0.48513708", "0.48345873", "0.48015624", "0.47823006", "0.47243136", "0.47188637", "0.47175246", "0.4708469", "0.47014678", "0.46647304", "0.466445", "0.46205124", "0.45986927", "0.45935085", "0.4573834", "0.45719153", "0.45717672", "0.455192", "0.45004776", "0.44919387", "0.44919387", "0.44664395", "0.4448866" ]
0.75755775
0
Gets the always_running of this MessagingCampaign. Whether this messaging campaign is always running.
def always_running(self): return self._always_running
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsRunning(self):\n return self.running", "def running(self):\n return self.scheduler.running", "def is_running(self):\n return self._running", "def is_running(self):\n return self._running", "def is_running(self):\n return self._running.is_set()", "def running(self):\n\n return self._running", "def is_running(self):\n return self._is_running", "def is_running(self):\n return self._is_running", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_running(self) -> bool:\n return self._running.is_set()", "def isScheduleRunning(self):\n if DPxIsDinSchedRunning() == 0:\n schedule_running = False\n else:\n schedule_running = True\n return schedule_running", "def is_running(self):\n\t\treturn self._running", "def always_running(self, always_running):\n \n self._always_running = always_running", "def Running(self):\n return self.Timer.IsRunning()", "def running(self):\n info = self.info()\n return info['running']", "def running(self): # type: () -> bool\n return self.state['Running']", "def is_running(self):\n data = self._poll()\n return data.get('building', False)", "def is_running(self):\n return self.type_id == STATE_RUNNING", "def is_running(self) -> bool:\n return self._is_running", "async def is_running(self, **kwargs: Any) -> bool:\n return self._enabled", "def running(self):\n return self._state == RUNNING_STATE", "def running(self) -> bool:\n return self._running", "def _is_running(self):\n return self._run_state.is_running()", "def is_running(self) -> Awaitable[bool]:\n return self.instance.is_running()", "def is_running(self) -> bool:\r\n return self.__running", "def is_running(self):\n return self.current_state == self.States.RUNNING", "def is_running(self):\n self.__condition.acquire()\n result = self.__is_running\n self.__condition.release()\n return result", "def is_running(self):\n\n return self._state == \"RUNNING\"" ]
[ "0.684747", "0.68215805", "0.6736479", "0.6736479", "0.67310226", "0.67029005", "0.6684114", "0.6684114", "0.6658457", "0.6658457", "0.6658457", "0.6624625", "0.6622321", "0.6609278", "0.6596", "0.6578254", "0.65689", "0.65341485", "0.6479026", "0.6433435", "0.6432685", "0.6391873", "0.63847125", "0.63835776", "0.63794404", "0.6378868", "0.637607", "0.63648105", "0.63579166", "0.6352973" ]
0.8078336
0
Sets the always_running of this MessagingCampaign. Whether this messaging campaign is always running.
def always_running(self, always_running): self._always_running = always_running
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def always_running(self):\n return self._always_running", "def set_as_running(self):\n with self._running_condition:\n assert self._state == PENDING_STATE\n self._state = RUNNING_STATE\n self._running_condition.notify()", "def set_running(self):\n with self._done_condition:\n if self._state == PENDING:\n self._state = RUNNING\n return True\n return False", "def running(self, running):\n\n self._running = running", "def running(self, running):\n\n self._running = running", "def running(self, running):\n\n self._running = running", "def is_always(self, is_always):\n\n self._is_always = is_always", "def isScheduleRunning(self):\n if DPxIsDinSchedRunning() == 0:\n schedule_running = False\n else:\n schedule_running = True\n return schedule_running", "def is_running(self) -> bool:\n return self._running.is_set()", "async def is_running(self, **kwargs: Any) -> bool:\n return self._enabled", "def mark_running(self):\r\n self.status = RUNNING", "def is_running(self):\n return self._running.is_set()", "def is_running(self):\n return self._is_running", "def is_running(self):\n return self._is_running", "def is_running(self):\n return self.type_id == STATE_RUNNING", "def is_running(self) -> bool:\n return self._is_running", "def is_running(self):\n\n return self._state == \"RUNNING\"", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_running(self):\n return self._running", "def is_running(self):\n return self._running", "def IsRunning(self):\n return self.running", "def is_running(self) -> bool:\r\n return self.__running", "def is_running(self):\n return self.current_state == self.States.RUNNING", "def is_running(self):\n\t\treturn self._running", "def set_running_behavior(self, behavior: Behavior) -> None:", "def running(self) -> bool:\n return self._running", "def is_always_active(self) -> bool:\n if len(self.active_periods) == 0:\n return True\n\n if len(self.active_periods) == 1:\n period = self.active_periods[0]\n if period.lower == 0 and period.upper == 24000:\n return True\n\n return False", "def whenRunning(self):\n whenRunning = self.options.get(RunnerOptions.whenRunning)\n if whenRunning is not None:\n whenRunning(self.options)" ]
[ "0.70217943", "0.6201809", "0.61069536", "0.5851402", "0.5851402", "0.5851402", "0.568576", "0.5624877", "0.5585618", "0.5526143", "0.5486297", "0.54765916", "0.5464183", "0.5464183", "0.5460435", "0.5454445", "0.5423037", "0.5418796", "0.5418796", "0.5418796", "0.53875816", "0.53875816", "0.53815275", "0.53335404", "0.5295837", "0.5268198", "0.52670527", "0.52579874", "0.525458", "0.5237627" ]
0.8169366
0
Gets the contact_sorts of this MessagingCampaign. The order in which to sort contacts for dialing, based on up to four columns.
def contact_sorts(self): return self._contact_sorts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSorted(self):\n return sorted(self.contacts)", "def contact_sorts(self, contact_sorts):\n \n self._contact_sorts = contact_sorts", "def get_sort_columns(self):\n col_sort_orders = self.gridpreference_sort.all().values_list('column__id', flat=True)\n return GridColumn.objects.select_related().all().filter(id__in=col_sort_orders)", "def sort_list(self, key_):\n options = {\n 'index': 0,\n 'name' : 1,\n 'surname': 2,\n 'email': 3,\n 'phone': 4,\n }\n if key_ in options.keys():\n key_ = options.get(key_)\n\n return(sorted(self.contacts, key = lambda x: x[key_]))", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def get_sort_columns_raw(self):\n col_sort_orders = self.gridpreference_sort.all().select_related()\n \n return [x.sort_display for x in col_sort_orders]", "def sort_contacts(contacts):\n \n key_list = list(contacts.keys()) #get keys\n key_list.sort() #sort key_list\n sorted_list = [] #initialize sorted list\n for key in key_list:\n contact = (key, contacts[key][0], contacts[key][1]) #create tuple\n sorted_list += [contact] #add tuple to list\n \n return(sorted_list)", "def get_sorted_fields(cls):\n return sorted(\n cls.get_fields(), key=lambda x: (x._primary and 1 or 2, x._order))", "def get_sorted_activities(self):\n return helpers.get_sorted_activities(self)", "def sort_contacts(self, method, order):\n \n method_l = method.lower()\n order_l = order.lower()\n \n if method_l == 'name' and order_l == 'asc':\n name_sort = sorted(self.contacts, key=lambda x: x[0])\n for x in name_sort:\n print(x)\n return name_sort\n elif method_l == 'name' and order_l == 'desc':\n name_sort = sorted(self.contacts, key=lambda x: x[0], reverse=True)\n for x in name_sort:\n print(x)\n return name_sort \n \n elif method_l == 'zipcode' and order_l == 'asc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3])\n for x in zip_sort:\n print(x)\n return zip_sort\n elif method_l == 'zipcode' and order_l == 'desc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3],reverse=True)\n for x in zip_sort:\n print(x)\n return zip_sort", "def get_all_contacts(self):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} ORDER BY id;\".format(\", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query)\n\n return [Contact(*item) for item in data]", "def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def contact_list(self):\n return self._contact_list", "def cols_sorted(self, cmp=None, key=None, reverse=False):\n return self.select(*sorted(self.names, cmp, key, reverse))", "def _get_order_bys(self, record_class, sorts, convert_key_names_func):\n result = list()\n for sort in sorts:\n attr_name = convert_key_names_func(sort.attr)\n if attr_name is not None and hasattr(record_class, attr_name):\n if sort.direction == \"ASC\":\n result.append(getattr(record_class, attr_name).asc())\n else:\n result.append(getattr(record_class, attr_name).desc())\n else:\n raise AttributeError(\"Invalid attribute.\")\n return result", "def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))", "def get_order_columns(self):\n return self.order_columns", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = 
self.client.GetContacts(uri=next.href)\n\t\t\tfeeds.append(feed)\n\t\t\tnext = feed.GetNextLink()\n\t\t\n\t\tcontacts = []\n\t\tfor feed in feeds:\n\t\t\tif not feed.entry:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor i, entry in enumerate(feed.entry):\n\t\t\t\t\tcontacts.append(entry)\n\t\treturn contacts", "def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()", "def sort_orders(self) -> Dict[int, SortOrder]:\n return {sort_order.order_id: sort_order for sort_order in self.metadata.sort_orders}", "def list_contacts(self):\n return self.contacts", "def get_all_comments_ascending(self):\n try:\n return self.comments.order_by('commented_date')\n except(ValueError, IntegrityError, OperationalError):\n return []", "def get_sort_by(self):\n\n\t\treturn self.__sort_by", "def _sort_data(self, sort_data_by='position'):\n all_mutants = iter(self)\n if sort_data_by=='position':\n sorted_data = sorted(all_mutants, key = lambda m: (m.position, m.IB))\n # x.position here is an Insertion_position object and has a sensible cmp function\n # TODO do unaligned/multi-aligned/unknown positions sort sensibly here?\n elif sort_data_by=='read_count':\n if self.multi_dataset: \n raise MutantError(\"Sorting by readcount in print_data not implemented for multi-datasets!\")\n sorted_data = sorted(all_mutants, key = lambda m: (m.total_read_count, m.perfect_read_count, m.position, m.IB), \n reverse=True)\n else:\n raise MutantError(\"Can't sort mutants by %s - only position or readcount are implemented!\"%sort_data_by)\n return sorted_data", "def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()", "def sortedFields(cls):\n return [\n i[0] for i in sorted(cls._nameToValue.items(), key=lambda item: item[1])\n ]", "def sort(self):\n self.cards.sort()", "def sort(self):\n self.cards.sort()", "def find_top_five_most_contacted_listings_per_month(self):\n available_dates = self.contacts.quer_distinct_components(\"contact_date\", \"Contacts\")\n available_listing_ids = self.contacts.quer_distinct_components(\"listing_id\", \"Contacts\")\n \n self.ordered = {}\n for date in available_dates:\n listing = []\n month = self.contacts.quer_component_using_column(\"listing_id\", \"contact_date\", \"Contacts\", date)\n new = [month[0] for month in month]\n for listing_id in available_listing_ids:\n count = new.count(listing_id)\n listing.append([count, listing_id])\n listing = sorted(listing, key=lambda x: x[0], reverse=True)\n\n self.ordered[date] = listing[:5]" ]
[ "0.6996267", "0.6311992", "0.60650474", "0.60044813", "0.5959529", "0.5943762", "0.5862135", "0.57969254", "0.5660018", "0.5624674", "0.5591094", "0.55369407", "0.52994704", "0.52872974", "0.52799004", "0.5243885", "0.5241674", "0.5228304", "0.5202736", "0.51900417", "0.5163623", "0.5146807", "0.5140924", "0.51158285", "0.50962734", "0.5085119", "0.50788325", "0.5068385", "0.5068385", "0.50534034" ]
0.8388801
0
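
Each row also carries per-negative negative_scores, a document_score, and a document_rank, as in the block just above. Below is a small sketch of how those fields could be used to keep only "hard" negatives; the helper name, the threshold, and the assumption that higher scores mean higher similarity are illustrative assumptions, not part of the dataset.

def pick_hard_negatives(negatives, negative_scores, document_score, floor=0.3, k=5):
    """Keep up to k negatives scoring below the positive document but above a floor."""
    ranked = sorted(zip(negative_scores, negatives), key=lambda pair: pair[0], reverse=True)
    hard = [text for score, text in ranked if floor <= score < document_score]
    return hard[:k]

# Shapes mirror the fields above (negative texts shortened, scores copied from the row):
hard = pick_hard_negatives(
    negatives=["def getSorted(self): ...", "def contact_sorts(self, contact_sorts): ..."],
    negative_scores=[0.6996267, 0.6311992],
    document_score=0.8388801,
)
print(len(hard))  # 2 -- both toy negatives fall below the positive's score
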
Sets the contact_sorts of this MessagingCampaign. The order in which to sort contacts for dialing, based on up to four columns.
def contact_sorts(self, contact_sorts):
    self._contact_sorts = contact_sorts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact_sorts(self):\n return self._contact_sorts", "def set_sorts(self, sorts: List[DataGridSort]):\n self.sorts = sorts", "def set_contacts(self, contacts):\n\n\t\tif contacts is not None and not isinstance(contacts, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: contacts EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__contacts = contacts\n\t\tself.__key_modified['Contacts'] = 1", "def sort(self, sort):\n\n self._sort = sort", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contacts(self, contacts):\n\n self._contacts = contacts", "def getSorted(self):\n return sorted(self.contacts)", "def sort_order(self, sort_order: int):\n\n self._sort_order = sort_order", "def sort_order(self, sort_order):\n\n self._sort_order = sort_order", "def _sort_columns(self, order):\n unknown = set(self._columns) - set(order)\n if unknown:\n names = \", \".join(str(name) for name in unknown)\n raise ValueError(f\"Unknown columns: {names}\")\n\n cols = [self.column_location(column) for column in order]\n\n self._columns = [self._columns[col] for col in cols]\n self._data = [[row[col] for col in cols] for row in self._data]", "def sort_contacts(self, method, order):\n \n method_l = method.lower()\n order_l = order.lower()\n \n if method_l == 'name' and order_l == 'asc':\n name_sort = sorted(self.contacts, key=lambda x: x[0])\n for x in name_sort:\n print(x)\n return name_sort\n elif method_l == 'name' and order_l == 'desc':\n name_sort = sorted(self.contacts, key=lambda x: x[0], reverse=True)\n for x in name_sort:\n print(x)\n return name_sort \n \n elif method_l == 'zipcode' and order_l == 'asc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3])\n for x in zip_sort:\n print(x)\n return zip_sort\n elif method_l == 'zipcode' and order_l == 'desc':\n zip_sort = sorted(self.contacts, key=lambda y: y[3],reverse=True)\n for x in zip_sort:\n print(x)\n return zip_sort", "def sort_list(self, key_):\n options = {\n 'index': 0,\n 'name' : 1,\n 'surname': 2,\n 'email': 3,\n 'phone': 4,\n }\n if key_ in options.keys():\n key_ = options.get(key_)\n\n return(sorted(self.contacts, key = lambda x: x[key_]))", "def reorder( self ):\n self.sorted.sort(self.compareFunction)", "def sort_contacts(contacts):\n \n key_list = list(contacts.keys()) #get keys\n key_list.sort() #sort key_list\n sorted_list = [] #initialize sorted list\n for key in key_list:\n contact = (key, contacts[key][0], contacts[key][1]) #create tuple\n sorted_list += [contact] #add tuple to list\n \n return(sorted_list)", "def sort(self):\n self.cards.sort()", "def sort(self):\n self.cards.sort()", "def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()", "def sortChoices(self):\n self.formatList.sort()", "def sort(self):\n sort_key = self.data.chromosome.apply(sorter_chrom)\n self.data = (\n self.data.assign(_sort_key_=sort_key)\n .sort_values(by=[\"_sort_key_\", \"start\", \"end\"], kind=\"mergesort\")\n .drop(\"_sort_key_\", axis=1)\n .reset_index(drop=True)\n )", "def set_sort_by(self, sort_by):\n\n\t\tif sort_by is not None and not isinstance(sort_by, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sort_by EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__sort_by = sort_by\n\t\tself.__key_modified['sort_by'] = 1", "def contact_list(self, contact_list):\n \n self._contact_list = contact_list", "def set_sort_order(self, sort_order):\n\n\t\tif sort_order is not None and not isinstance(sort_order, str):\n\t\t\traise 
SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sort_order EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__sort_order = sort_order\n\t\tself.__key_modified['sort_order'] = 1", "def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()", "async def set_contacts(self, contacts: List[CertificateContact], **kwargs) -> List[CertificateContact]:\n new_contacts = await self._client.set_certificate_contacts(\n vault_base_url=self.vault_url,\n contacts=self._models.Contacts(contact_list=[c._to_certificate_contacts_item() for c in contacts]),\n **kwargs\n )\n return [\n CertificateContact._from_certificate_contacts_item(contact_item=item) for item in new_contacts.contact_list\n ]", "def sort(self):\n tmp = list(zip(self.user_points, self.user_ids));\n tmp = sorted(tmp, reverse=True);\n self.user_points, self.user_ids = list(zip(*tmp));\n \n self.user_points = list(self.user_points);\n self.user_ids = list(self.user_ids);", "def set_as_sort1(self):\n if self.is_sort1:\n #if self.analysis_code == 1:\n #self.nonlinear_factor = np.nan\n #print(self.data_code)\n #print(self._times, type(self._times))\n #aaa\n return\n #print(f'{self.class_name}-{self.table_name}')\n self.table_name = SORT2_TABLE_NAME_MAP[self.table_name]\n self.sort_bits[1] = 0 # sort1\n self.sort_method = 1\n assert self.is_sort1 is True, self.is_sort1\n self._update_time_word()", "def sort(self, cmp=None, key=None, reverse=False):\n o = order(list(self), cmp, key, reverse)\n # Modify the table in place, more than one variable may be referencing it:\n r=list(self._table); [self._table.__setitem__(i2, r[i1]) for i2, i1 in enumerate(o)]", "def sort_by_default(self):\n self.data.sort()", "def sort(self):\n self.chain_list.sort()\n for chain in self.chain_list:\n chain.sort()", "def sort_columns(self):\n extra_cols = []\n for col in self.data.columns:\n if col not in self._required_columns:\n extra_cols.append(col)\n sorted_colnames = list(self._required_columns) + sorted(extra_cols)\n assert len(sorted_colnames) == len(self.data.columns)\n self.data = self.data.reindex(columns=sorted_colnames)" ]
[ "0.68852335", "0.62442034", "0.55268556", "0.55110604", "0.5504529", "0.5504529", "0.5466908", "0.5404668", "0.53766954", "0.5268589", "0.5230543", "0.51540035", "0.5129339", "0.51096964", "0.5105754", "0.5105754", "0.50918037", "0.5086296", "0.5057816", "0.50474155", "0.5042188", "0.5041556", "0.5036751", "0.5035697", "0.5002974", "0.50019294", "0.49803436", "0.49789461", "0.49717766", "0.49578923" ]
0.8539102
0
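
The objective metadata attached to each row pairs the query with its positive document against the listed negatives. A minimal sketch follows (the dict-shaped record and the helper name expand_triplets are assumptions made for illustration) of expanding one such row into (anchor, positive, negative) training triplets.

def expand_triplets(record):
    """Yield (anchor, positive, negative) tuples per the row's 'triplet' objective."""
    objective = record["metadata"]["objective"]
    for anchor_key, positive_key, negatives_key in objective.get("triplet", []):
        anchor = record[anchor_key]
        positive = record[positive_key]
        for negative in record[negatives_key]:
            yield anchor, positive, negative

# Toy record shaped like the row above (values shortened):
row = {
    "query": "Sets the contact_sorts of this MessagingCampaign.",
    "document": "def contact_sorts(self, contact_sorts):\n    self._contact_sorts = contact_sorts",
    "negatives": ["def contact_sorts(self):\n    return self._contact_sorts"],
    "metadata": {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}},
}

for anchor, positive, negative in expand_triplets(row):
    print(anchor[:30], "|", positive[:30], "|", negative[:30])
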
Gets the messages_per_minute of this MessagingCampaign. How many messages this messaging campaign will send per minute.
def messages_per_minute(self):
    return self._messages_per_minute
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def messages_per_minute(self, messages_per_minute):\n \n self._messages_per_minute = messages_per_minute", "def get_words_per_minute(self):\n return self.words_per_minute", "def query_plans_per_minute(self) -> int:\n return pulumi.get(self, \"query_plans_per_minute\")", "def getNumOfMsgSend_interval(self):\n return self.MsgSendCount_interval", "def message_count(self):\n return self._message_count", "def message_count(self):\n return len(self.messages)", "def get_limit_per_second(self):\n pass", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def unit_ms(self):\n return (self.time_base / 1000.0) / 60.0", "def minutes(self):\n return int(int(self) / 60)", "def length_minutes(self):\n return self._length_minutes", "def calculate_fetch_size(minutes: int):\n return round(minutes / CONF.interval) if minutes >= CONF.interval else 1", "def freq_minutes(self):\n return 5", "def _get_milleseconds(self):\n return int(round(time.time() * 1000))", "def fan_timer_duration(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"fan_timer_duration\"))\r\n return self._fan_timer_duration.seconds / 60", "def ticks_per_second(self):\n return self._ticks_per_second", "def kills_per_min(self):\n return self._kills_per_min", "def getDurationMs(self):\n return self.durationMs", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def _unit_ms(self):\n return (self.time_base / 1000.0) / 60.0", "def last_seen_minutes(self):\n return (self.last_seen.seconds % 3600) / 60", "def mileage(self):\n return str(self._delivery_service.total_mileage())", "def message_length(self):\n return self._message_length", "def total_minutes(td):\n return total_seconds(td) / 60", "def get_minutes(self, datetime):\n return datetime.hour*60.0+datetime.minute+datetime.second/60", "def minute(self) -> int:\r\n return self._minute", "def getMessageCount(self):\n return 9", "def poll_interval_in_milliseconds(self):\n\n return self._poll_interval_in_milliseconds", "def calculate_wpm(self, delta_seconds: int):\n minutes = delta_seconds / 60\n return self.total_estimated_words() / minutes", "def shared_runners_minutes_limit(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"shared_runners_minutes_limit\")" ]
[ "0.686777", "0.6715453", "0.5897515", "0.57897204", "0.5508112", "0.5492951", "0.5468848", "0.5431342", "0.5427984", "0.54239345", "0.5376645", "0.5357198", "0.52036357", "0.5193324", "0.5188115", "0.51773685", "0.51770973", "0.51570386", "0.51300323", "0.512672", "0.51112086", "0.51089203", "0.51057315", "0.510142", "0.5084566", "0.50815916", "0.4983262", "0.49818733", "0.49705178", "0.49692985" ]
0.8563438
0
Sets the messages_per_minute of this MessagingCampaign. How many messages this messaging campaign will send per minute.
def messages_per_minute(self, messages_per_minute):
    self._messages_per_minute = messages_per_minute
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def messages_per_minute(self):\n return self._messages_per_minute", "def set_words_per_minute(self, words_per_minute):\n is_valid_wpm = 5.0 <= words_per_minute <= 60.0\n if is_valid_wpm:\n self.words_per_minute = words_per_minute\n self.dot_time_in_msec = 1200.0 / self.words_per_minute\n # Synthesizes sample data for the current dot length.\n self._cache_dot_dash_sample_data()\n self._cache_silence_sample_data()\n return is_valid_wpm", "def set_limit_per_second(self, rate_limit_per_second):\n pass", "def kills_per_min(self, kills_per_min):\n\n self._kills_per_min = kills_per_min", "def set_fan_timer_duration(self, minutes: int = 5):\r\n self._fan_timer_duration = timedelta(minutes=minutes)\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"fan_timer_duration\", minutes))", "def drive_time_minutes(self, drive_time_minutes):\n\n self._drive_time_minutes = drive_time_minutes", "def message_count_limit(self, message_count_limit: ConfigNodePropertyInteger):\n\n self._message_count_limit = message_count_limit", "def cooldown_minutes(self, cooldown_minutes):\n\n self._cooldown_minutes = cooldown_minutes", "def report_minute_distribution(self):\n self.histogram_granularities.add(histogram_granularity.MINUTE)\n return self", "def run_for_mins(bot, nr_mins):\n for i in range(1, nr_mins+1):\n time.sleep(60)\n bot.send_msg('It has been {} minutes.'.format(i))", "def query_plans_per_minute(self) -> int:\n return pulumi.get(self, \"query_plans_per_minute\")", "def length_minutes(self, length_minutes):\n \n self._length_minutes = length_minutes", "def get_words_per_minute(self):\n return self.words_per_minute", "def set_minute(self, minute):\n if minute not in range(60):\n raise ValueError(\"Second value must be in range [0..59] but is {}\".format(minute))\n\n # First we separate the tens and the digit\n tens, digit = divmod(int(minute), 10)\n\n # Then we add them in a single int\n reg_value = (tens << 4) | digit\n\n # The we add it to the register\n self.__write_register(_REGISTER_MINUTE, reg_value)", "def set_custom_speed(self, bytes_per_second):\n self._custom_speed = bytes_per_second", "def time_remaining_minutes(self, time_remaining_minutes):\n\n self._time_remaining_minutes = time_remaining_minutes", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def message_box_size_limit(self, message_box_size_limit: ConfigNodePropertyInteger):\n\n self._message_box_size_limit = message_box_size_limit", "def set_mem_per_proc(self, mem_mb):\n QueueAdapter.set_mem_per_proc(self, mem_mb)\n #self.qparams[\"mem\"] = self.mem_per_proc", "def set_message_rate(self, msg_type, rate):\n pass", "def per_page(self, per_page):\n\n self._per_page = per_page", "def set_max_message_size(self, size: int = 1_073_741_824) -> None:\n self.set_db_conf(\"proto-max-bulk-len\", str(size))", "def rate_per_unit(self, rate_per_unit):\n\n self._rate_per_unit = rate_per_unit", "def TimeMinutes(self):\n return '%2.2d:%2.2d' % (self._hour, self._minute)", "def freq_minutes(self):\n return 5", "def every_minute(self, time, function, args=None, kwargs=None, name=None):\n if args is None:\n args = list()\n if kwargs is None:\n kwargs = dict()\n if name is None:\n name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')\n self.config[name] = {'mode':'every_minute', 'time':int(time), 'function':function, 'args':args, \n 'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),\n 'time_init':datetime.datetime.now()}\n 
self.params.tracker_dict[name] = dict()", "def cooldown_grace_period_minutes(self, cooldown_grace_period_minutes):\n\n self._cooldown_grace_period_minutes = cooldown_grace_period_minutes", "def set_Minute(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Minute', value)", "def hero_damage_per_min(self, hero_damage_per_min):\n\n self._hero_damage_per_min = hero_damage_per_min", "def setMinute(self, *args):\n return _libsbml.Date_setMinute(self, *args)" ]
[ "0.66696775", "0.58786", "0.5668301", "0.5376296", "0.5365551", "0.52839804", "0.51178545", "0.5013213", "0.49938592", "0.47783017", "0.4773923", "0.4772288", "0.4751765", "0.47004074", "0.46891505", "0.46256608", "0.460433", "0.45917523", "0.45578098", "0.45366237", "0.45331538", "0.4500748", "0.44628927", "0.44518393", "0.4441813", "0.4435244", "0.44014743", "0.43995833", "0.43952876", "0.4350517" ]
0.87528765
0
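
The two rows above capture the getter and setter halves of messages_per_minute. As a usage illustration only (the surrounding class body is an assumption; generated API clients commonly wire such pairs up with property()), the pair could be exposed as a plain attribute like this:

class MessagingCampaign(object):
    def __init__(self):
        self._messages_per_minute = None

    def _get_messages_per_minute(self):
        return self._messages_per_minute

    def _set_messages_per_minute(self, messages_per_minute):
        self._messages_per_minute = messages_per_minute

    messages_per_minute = property(_get_messages_per_minute, _set_messages_per_minute)

campaign = MessagingCampaign()
campaign.messages_per_minute = 60    # goes through the setter
print(campaign.messages_per_minute)  # 60, via the getter
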
Sets the errors of this MessagingCampaign. A list of current error conditions associated with this messaging campaign.
def errors(self, errors):
    self._errors = errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors(self, errors):\n\n self._errors = errors", "def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors", "def add_errors(self, errors):\n self.errors = merge_errors(self.errors, errors)", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def errors(self):\n return self._errors", "def errors(self):\n return self.__errors", "def Errors(self):\n return self._get_attribute('errors')", "def errors(self):\r\n if not hasattr(self, '_errors_cache'):\r\n self._errors_cache = self.form.get_field_errors(self)\r\n return self._errors_cache", "def errors(self):\n return self._properties.get(\"errors\")", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def getErrors(self):\n return self.errors", "def getErrorsList(self):\n return self.__errors", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def validation_errors(self):\n return self._validation_errors", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors", "def error(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.ERROR))", "def errors(self) -> List[Error]:\n return self._errors_files + list(self._errors.values())", "def errors(self):\n raise NotImplementedError", "def set_limit(self, errors):\n self.limit = errors", "def errors(self) -> str:\n return self.job_errors() + self.analysis_errors()", "def on_errors(self, errors):\n log.error(\"Received errors: %s\", errors)", "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def get_errors(self, request):\n\n value = request._get_parameter_value(self)\n return value.errors", "def error_message(self, error_message):\n if (self.local_vars_configuration.client_side_validation and\n error_message is not None and len(error_message) > 500):\n raise ValueError(\"Invalid value for `error_message`, length must be less than or equal to `500`\") # noqa: E501\n\n self._error_message = error_message", "def retrieve_error_messages(self):\n return self.errors_seen[:]", "def report_transaction_error_messages(self):\n response = self.__get_transaction_response()\n\n # get response data from response object\n response_data = response.json()\n\n # get error messages\n response_error = response_data['Error']\n response_error_messages = response_error['messages']\n\n # add all error messages to the report\n error_messages_to_report = []\n for response_error_message in response_error_messages:\n error_description = response_error_message['description']\n error_messages_to_report.append(error_description)\n\n return error_messages_to_report", "def errors(self) -> List[Error]:", "def analysis_errors(self) -> str:\n errors = []\n\n # Get any callback errors\n for cid, callback in self._analysis_callbacks.items():\n if callback.status == AnalysisStatus.ERROR:\n errors.append(f\"\\n[Analysis Callback ID: {cid}]: {callback.error_msg}\")\n\n return \"\".join(errors)" ]
[ "0.7063868", "0.6316049", "0.62133807", "0.5995449", "0.5995449", "0.5993789", "0.5955976", "0.58611673", "0.5738866", "0.57359105", "0.56898946", "0.56828547", "0.56606567", "0.56536406", "0.5637317", "0.5577304", "0.5540206", "0.5453656", "0.54534554", "0.54445463", "0.5427907", "0.5297211", "0.5279484", "0.525997", "0.5244819", "0.5221872", "0.5194199", "0.51878995", "0.5151027", "0.51397586" ]
0.7172415
0
Gets the sms_config of this MessagingCampaign. Configuration for this messaging campaign to send SMS messages.
def sms_config(self):
    return self._sms_config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sms_config(self, sms_config):\n \n self._sms_config = sms_config", "def config(self):\n if self.__config is None:\n self.__config = self._get_config(self.bot)\n return self.__config", "def sms(self):\r\n return sms.SMS(self)", "def get_configuration(self) -> dict:\n return self.config", "def get_config(self):\n return self.config", "def getConfig(self):\n \n return self.config", "def config(self):\n return self[CONFIG_KEY]", "def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})", "def config(self):\n if not hasattr(self,\"_config\") or self._config is None:\n self._config = {}\n \n return self._config", "def configuration(self):\n if self.integration is None:\n return None\n return self.integration.configuration", "def config(self) -> ServerConfig:\n return self._config", "def get_config(self) -> Dict[str, Any]:\n if self.config is None:\n self.config = self.load_config()\n\n return self.config", "def get_config(self) -> Dict[str, Any]:\n if self.config is None:\n self.config = self.load_config()\n\n return self.config", "def config(self):\n return self.__config", "def config(self):\n return self.__config", "def get_config(self):\n\n # make sure that the config reflects the state of the underlying logic\n self.logic_to_config()\n # and then return the config struct.\n return self._config", "def config(self) -> dict:\n return self._config", "def config(self):\n return self._config", "def config(self):\n return self._config", "def config(self):\n return self._config", "def CONFIG(self) -> misc_.Config:\n\t\treturn self._CONFIG", "def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)", "def config(self) -> ClientConfig:\n return self._config", "def configuration(self):\n return self._config", "def get_config_connection(self):\n return self.m_connection.config", "def sms_disabled(self):\n return self._sms_disabled", "def config(self):\n return self._cfg", "def config(self) -> Any:\n return self._config", "def getConfiguration(self):\n return self._configuration", "def destination_config(self):\n return self._destination_config" ]
[ "0.66070634", "0.592993", "0.56343", "0.54566157", "0.5446236", "0.5412347", "0.53807044", "0.53407514", "0.5317883", "0.52791315", "0.52556473", "0.52439207", "0.52439207", "0.52175206", "0.52175206", "0.52135134", "0.5207754", "0.5194879", "0.5194879", "0.5194879", "0.51929045", "0.51494735", "0.5143155", "0.5127543", "0.5107325", "0.50860894", "0.50726235", "0.5026649", "0.5018908", "0.5013706" ]
0.8423905
0
Sets the sms_config of this MessagingCampaign. Configuration for this messaging campaign to send SMS messages.
def sms_config(self, sms_config):
    self._sms_config = sms_config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sms_config(self):\n return self._sms_config", "def sms_enabled(self, sms_enabled):\n\n self._sms_enabled = sms_enabled", "def send_sms(self, sms):\n pass", "def sms_phone_number(self, sms_phone_number):\n\n self._sms_phone_number = sms_phone_number", "def sms_disabled(self, sms_disabled):\n\n self._sms_disabled = sms_disabled", "def sms(self, phone_address, message):\n self.server.sendmail(self.username, phone_address, message)", "def send_sms(self, body):\n message = self.twilio_client.sms.messages.create(to=self.to_num, from_=self.from_num, body=body)", "def set_config(self, config):\n\n self.config = config\n\n return self", "def send_sms(self, message, to=CONTACT_NUMBER):\n try:\n pbx_alarm = PBXAlert()\n pbx_alarm.send_sms(self.tinfo['message'])\n if self.verbose:\n print(\"{} Successfully sent SMS!\".format(Timer.OK))\n return True\n except Exception as e:\n print(\"{} Caught exception in send_sms: {}\".format(Timer.FAIL, e))\n return False", "def send_sms(domain, contact, phone_number, text, **kwargs):\n if phone_number is None:\n return False\n if isinstance(phone_number, int) or isinstance(phone_number, long):\n phone_number = str(phone_number)\n phone_number = clean_phone_number(phone_number)\n\n msg = SMSLog(\n domain=domain,\n phone_number=phone_number,\n direction=OUTGOING,\n date = datetime.utcnow(),\n backend_id=None,\n text = text\n )\n if contact:\n msg.couch_recipient = contact._id\n msg.couch_recipient_doc_type = contact.doc_type\n add_msg_tags(msg, **kwargs)\n \n def onerror():\n logging.exception(\"Problem sending SMS to %s\" % phone_number)\n return queue_outgoing_sms(msg, onerror=onerror)", "def set_sms_telephone_number(self, telephone_number, email):\n ngo_user_profile = NGOUserProfile.objects.get(user__email=email)\n org_setting = OrganizationSetting.objects.get(organization__org_id=ngo_user_profile.org_id)\n smsc = SMSC(vumi_username=\"smsc\")\n smsc.save()\n outgoing_number = OutgoingNumberSetting(phone_number=telephone_number, smsc=smsc)\n outgoing_number.save()\n org_setting.sms_tel_number = telephone_number\n org_setting.outgoing_number = outgoing_number\n org_setting.save()", "def sms(self):\r\n return sms.SMS(self)", "def delete_sms(self, sms_id: int) -> SetResponseType:\n return self._connection.post_set('sms/delete-sms', {'Index': sms_id})", "def can_enable_for_sms(self, can_enable_for_sms):\n\n self._can_enable_for_sms = can_enable_for_sms", "def __init__(self, config):\n self.smsapi_username = config['smsapi_username']\n self.smsapi_password = config['smsapi_password']\n self.smsapi_recipient = config['smsapi_recipient']\n\n super(SmsApiNotifier, self).__init__(config)", "def send_sms(self,msg,to=None,long=True):\n if long:\n return self.send_msg(msg,to,\"SendCatSMS\")\n else:\n return self.send_msg(msg,to,\"SendSMS\")", "def send_service_config(self, honeypotids, config):\n req = {\"type\": \"set_settings\", \n \"from\": self.network.mc_id,\n \"to\": honeypotids,\n \"settings\": config}\n expect_dict = {\"type\": \"hp_settings\"}\n msg_list = self.send_receive(req, honeypotids, expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = msg[\"settings\"]\n return answer", "def set_config(self, config):\r\n self._config = config\r\n self._config.dump_to_sdb(\"botoConfigs\", self.id)", "def send_transactional_sms(self, phone_number, message):\n if not settings.CAN_SEND_SMS: # So that we do not send SMS while development\n return\n if not phone_number:\n logger.warning('No phone number received for meaasge: {0}'.format(message))\n 
raise MissingPhoneNumberException('No phone number received to send the SMS to')\n request_data = self._get_request()\n request_data[\"sms\"] = [{\"message\": message, \"to\": [phone_number]}]\n logger.info('Sending SMS to {0}. SMS content {1}'.format(phone_number, request_data))\n sms_response = self._get_response_from_msg91(self.request_method, self.MSG91_SMS_URL, request_data)\n logger.info(\"sms_response {0}\".format(sms_response))\n if sms_response.get('type') != \"success\":\n raise MessageSendingFailed('The service provider failed to send the SMS')", "def config(self, config):\n self._config = config", "def tls_config(self, tls_config):\n\n self._tls_config = tls_config", "def send_sms(message):\n client.messages.create(\n body=message,\n from_=os.environ['TWILIO_NUMBER_FROM'],\n to=['TWILIO_NUMBER_TO']\n )", "def send_sms_via_api(self, mobile, text=\"\"):\n\n mobile = self.sanitise_phone(mobile)\n\n try:\n self.sms_api_post_config[self.sms_api.message_variable] = text\n self.sms_api_post_config[self.sms_api.to_variable] = str(mobile)\n query = urllib.urlencode(self.sms_api_post_config)\n request = urllib.urlopen(self.sms_api.url, query)\n output = request.read()\n return True\n except:\n return False", "def send_transactional_sms(self, phone_number, message):\n if not settings.CAN_SEND_SMS: # So that we do not send SMS while development\n return\n if not phone_number:\n logger.warning('No phone number received for meaasge: {0}'.format(message))\n raise MissingPhoneNumberException('No phone number received to send the SMS to')\n request_data = {\n 'From': self.exo_phone,\n \"To\": phone_number,\n 'Body': message\n }\n logger.info('Sending SMS to {0}. SMS content {1}'.format(phone_number, message))\n sms_response = requests.post(self.EXOTEL_SMS_URL.format(self.sid, self.token), data=request_data).json()\n logger.info(sms_response)\n if sms_response.get('RestException'):\n logger.warn('SMS sending failed. 
Rsponse from exotel - {0}'.format(sms_response))\n elif sms_response.get('SMSMessage') and sms_response['SMSMessage']['Status'] \\\n not in self.EXOTEL_SUCCESS_STATUS_LIST:\n raise MessageSendingFailed('The service provider failed to send the SMS')", "def set_config(self, config):\n self.adversarial = config.adversarial\n self.eps = config.eps\n self.probability = config.probability\n self.use_dynamics = config.use_dynamics\n self.random = config.random\n self.observable_noise = config.observable_noise\n self.use_max_norm = config.use_max_norm", "def send_sms(self, message, phone=None, client=None, send_before=None):\n if not phone:\n phone = str(client.phone)\n if not phone:\n return False\n return self.sender.send(message, phone, client, send_before)", "def send_config(self, configdict):\n self.config = configdict\n self.send_to_backend('config', configdict=configdict)", "def send_sms(message_text: str, receiver_sms=\"+27749911999\"):\n user = \"AC6925977501b11f3f5ea71105df8a4ea7\"\n twilio_client = Client(user, keyring.get_password(\"twilio\", user))\n twilio_client.messages.create(to=receiver_sms,\n from_=\"+19149964656\",\n body=message_text)", "def send_sms(self, from_number, to_number, msg, tag=None):\n # allow sending to a group in one call\n if not isinstance(to_number, list):\n to_number = [to_number]\n\n return self.sms_client.send_message(\n from_=self._as_e164(from_number),\n to=[self._as_e164(number) for number in to_number],\n text=msg,\n tag=tag,\n applicationId=settings.BW_APP_ID\n )", "def send_sms(msg, phone_number, logger=None):\n\n if logger:\n logger.debug(f\"msg: '{msg}'\")\n logger.debug(f\"phone_number: '{phone_number}'\")\n sns = boto3.client('sns')\n try:\n sns.publish(PhoneNumber=phone_number, Message=msg)\n except BaseException as e:\n if logger:\n logger.error(e)\n raise e\n if logger:\n logger.info(f'SMS with available dates was sent to {phone_number}.')" ]
[ "0.6710091", "0.6376342", "0.61567897", "0.5817117", "0.5528274", "0.5234292", "0.5205838", "0.515643", "0.5122757", "0.5109166", "0.51015836", "0.50929946", "0.50358784", "0.5014341", "0.49991947", "0.49899116", "0.49732202", "0.49195576", "0.4918217", "0.4918136", "0.49015227", "0.4887474", "0.4870044", "0.48350385", "0.482204", "0.47904205", "0.47790444", "0.47695473", "0.47661555", "0.4762994" ]
0.84385973
0
seaborn time series, with errorbands
def sns_time_series(x_tuple,y_tuple,outputname,errors=0,two=False, *args,**kwargs):
    if (type(outputname)==str)|(type(x_tuple)==tuple)|(type(y_tuple)==tuple):
        pass
    else:
        raise TypeError()

    import matplotlib
    matplotlib.use("pdf")
    import matplotlib.pyplot as plt
    import numpy as np
    import seaborn as sns; sns.set_style('darkgrid')
    import seaborn.timeseries

    x, x_label = x_tuple
    y, y_label = y_tuple
    if two==True:
        x2,x_label2 = x_tuple2
        y2,y_label2 = y_tuple2

    def _plot_std_bars(std=None, central_data=None, ci=None, data=None,*args, **kwargs):
        std = errors
        ci = np.asarray((central_data - std, central_data + std))
        kwargs.update({"central_data": central_data, "ci": ci, "data": data})
        seaborn.timeseries._plot_ci_band(*args, **kwargs)

    seaborn.timeseries._plot_std_bars = _plot_std_bars

    plt.figure()
    sns.tsplot(xip,r,err_style='std_bars')
    sns.tsplot(xim,r,err_style='std_bars',color='r')
    plt.xlabel(r'$\theta$ (arcmin)')
    plt.ylabel(r'$\xi$')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend([r'$\xi_+$',r'$\xi_-$'],bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0.)
    plt.savefig(outputname+'.pdf')
    plt.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize_time_series(fig_ax, data, inp_color, missing_data, lag_color, first_date,\n x_label=\"Number of Days\", y_label=\"Log of Aluminium Price\", title=\"Prices over time\"):\n fig, ax = fig_ax\n ((x_train_raw, y_train_raw), y_pred_list) = data\n\n missing_x, missing_y = missing_data\n is_missing = len(missing_x) != 0\n\n first_date = datetime.strptime(first_date, '%Y-%m-%d')\n\n convert_date = lambda x: [\n np.datetime64((first_date + timedelta(days=d)).strftime('%Y-%m-%d'))\n for d in x\n ]\n convert_price = lambda x: x[\"Output\"].to_list()\n\n x_train = convert_date(x_train_raw[\"Date\"].to_list())\n y_train = convert_price(y_train_raw)\n \n cut_point = x_train[-1]\n ax.plot(x_train, y_train, color=color[inp_color])\n\n for i, y_pred in enumerate(y_pred_list):\n data, plot_name, color_code, is_bridge = y_pred\n mean_pred, x_test_raw = data[\"mean\"], data[\"x\"]\n x_test = convert_date(x_test_raw)\n\n if i == 0 and is_missing:\n missing_x = convert_date(missing_x)\n ax.axvline(x_test[0], color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n ax.plot([missing_x[-1], x_test[0]], [missing_y[-1], mean_pred[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvspan(cut_point, x_test[0], color=color[lag_color], alpha=0.1)\n\n plot_bound(ax, data, x_test, color[color_code], plot_name)\n\n if is_bridge and (not is_missing): \n ax.plot([x_train[-1], x_test[0]], [y_train[-1], mean_pred[0]], color[color_code], linewidth=1.5)\n\n if is_missing:\n ax.plot(missing_x, missing_y, color=color[lag_color], linestyle=\"dashed\")\n ax.plot([x_train[-1], missing_x[0]], [y_train[-1], missing_y[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvline(cut_point, color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n else:\n ax.axvline(cut_point, color=color[\"k\"], linestyle='--')\n\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.legend()\n\n # ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n\n # ax.set_xlim(left=cut_point-np.timedelta64(1, 'm'))\n plot_axis_date(ax, x_train + missing_x + x_test)\n ax.grid()\n return fig, ax", "def plot_landings_quantiles(df):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_position(default_timeseries_position) \n\n Fn = df['CatchMT'].groupby([df.Year, df.Reg, df.Sreg]).mean()\n grp = df['CatchMT'].groupby([df.Year, df.Reg, df.Sreg])\n\n qmean = grp.mean().loc[:, 'All', 'All'] \n q90 = grp.quantile(0.90).loc[:, 'All', 'All'] \n q75 = grp.quantile(0.75).loc[:, 'All', 'All'] \n q50 = grp.quantile(0.50).loc[:, 'All', 'All'] \n q25 = grp.quantile(0.25).loc[:, 'All', 'All'] \n q10 = grp.quantile(0.10).loc[:, 'All', 'All'] \n\n # Don't plot the first year. Also, the data is shifted by one year.\n # For some reason, restricting the year range above results in a series\n # that still have a multi-index. 
This seems like the cleanest way to do\n # that.\n qmean = qmean.iloc[2:]\n q90 = q90.iloc[2:]\n q75 = q75.iloc[2:]\n q50 = q50.iloc[2:]\n q25 = q25.iloc[2:]\n q10 = q10.iloc[2:]\n qmean.index = qmean.index - 1\n q90.index = q90.index - 1\n q75.index = q75.index - 1\n q50.index = q50.index - 1\n q25.index = q25.index - 1\n q10.index = q10.index - 1\n \n colors = seaborn.color_palette(n_colors=3);\n\n q90.plot(ax=ax, color=colors[0], linestyle='--', label='90%') \n q75.plot(ax=ax, color=colors[1], linestyle='--', label='75%') \n qmean.plot(ax=ax, color='black', label='Mean') \n q50.plot(ax=ax, color=colors[2], linestyle='--', label='50%') \n q25.plot(ax=ax, color=colors[1], linestyle='--', label='25%') \n q10.plot(ax=ax, color=colors[0], linestyle='--', label='10%') \n\n ax.legend(loc='best')\n\n content = io.BytesIO()\n plt.savefig(content, format='png')\n content.seek(0)\n image_cache['landings']['quantiles'] = content\n\n plt.close()", "def clt_plot(error: pd.Series, n_max=800):\n plt.figure()\n n_values = [n for n in range(20, n_max, 20)]\n error_mean, error_var = do_many_bootstraps(error, n_values, bootstrap_number=50)\n plt.plot(n_values, np.sqrt(error_var), label='Bootstrap estimate')\n plt.plot(n_values, np.divide(np.std(error, ddof=1), np.sqrt(n_values)), label='CLT estimate')\n plt.xlabel('number of samples')\n plt.ylabel('variance of mean square error')\n plt.legend()\n plt.show()", "def _plot_experiment(df, axes, metric_name, isTrain):\n # colors: https://stackoverflow.com/questions/42086276/get-default-line-colour-cycle\n ldf = metric_short_to_long(df)\n plotted = \"Train\" if isTrain else \"Val\"\n m = ldf.query(\"stat == 'mse' and metric == @metric_name\")[[\"trial\",\"state\",\"value\"]].rename({\"value\":\"mse\"},axis=1)\n # aggregated\n ax = sns.barplot(x=\"trial\", y=\"mse\", data=m, palette=[u'#1f77b4'], ci=\"sd\", ax=axes[0])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"Aggregated State Errors ({plotted})\")\n ax.set_xlabel(\"Trial Number\")\n\n # individual state plots\n ax = sns.barplot(x=\"trial\", y=\"mse\", hue=\"state\",data=m, ci=\"sd\", ax=axes[1])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"State Error by Trial ({plotted})\")\n ax.set_xlabel(\"Trial Number\")", "def meanRegion(thk_s,thk_p,thk_diff):\n meanp = np.nanmean(np.nanmean(thk_p,axis=1),axis=1)\n means = np.nanmean(np.nanmean(thk_s,axis=1),axis=1)\n \n print '\\n --- [[%s to %s N, %s to %s E]] ---' % (latmin,latmax,lonmin,lonmax)\n print 'Average Thickness (Satellite) == %s meters' % np.nanmean(means)\n print 'Average Thickness (PIOMAS) == %s meters' % np.nanmean(meanp)\n print 'Average Difference == %s meters' % (np.nanmean(means)-np.nanmean(meanp))\n \n yearmin = 2004\n yearmax = 2015\n years = np.arange(yearmin,yearmax+1,1)\n years = np.setdiff1d(years,[2010]) ### no satellite data in 2010\n \n fig = plt.figure()\n ax = plt.subplot(111)\n \n ### Call parameters\n plt.rcParams['text.usetex']=True\n plt.rcParams['font.family'] = 'sans-serif'\n plt.rcParams['font.sans-serif'] = 'Avant Garde'\n \n plt.plot(meanp,color='darkred',linewidth=2,linestyle='-',\n label=r'PIOMAS')\n plt.plot(means,color='forestgreen',linewidth=2,linestyle='-',\n label=r'Satellite')\n plt.axvline(6,color='k',linewidth=3,linestyle='-')\n \n labelsy = map(str,np.arange(0,6,1))\n labelsx = map(str,years)\n plt.xticks(np.arange(len(years)),labelsx)\n plt.yticks(np.arange(0,6,1),labelsy)\n plt.ylabel(r'\\textbf{Thickness (meters)}',fontsize=13)\n \n ### Adjust axes in time 
series plots \n def adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 10))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n \n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n \n ### Adjust axes spines\n adjust_spines(ax, ['left', 'bottom'])\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n plt.grid(color='b',zorder=1,alpha=0.3)\n \n plt.legend(shadow=False,fontsize=11,loc='upper right',\n fancybox=True)\n \n plt.text(2,-0.8,r'\\textbf{ICESat}',fontsize=13)\n plt.text(7.3,-0.8,r'\\textbf{PIOMAS}',fontsize=13)\n \n fig.suptitle(r'\\textbf{SIT Difference [Satellite - PIOMAS]}',fontsize=16)\n plt.savefig(directoryfigure + 'test5_difftseries.png',dpi=300)", "def point_plot(MSDs, time_interval, histogram = True):\n\n from seaborn import pointplot as pointplot\n import seaborn as sns\n from matplotlib.colors import LinearSegmentedColormap\n import matplotlib.patches as mpatches\n import matplotlib as mpl\n mpl.rcParams.update({'font.size': 22})\n \n\n #plotting parameters\n fig, ax = plt.subplots(figsize=(17,10), dpi = 300)\n mpl.rcParams['lines.markersize'] = 10\n ax.set(xscale=\"log\", yscale = \"log\")\n ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)\n\n\n\n for columnName,columnData in MSDs.iteritems():\n \n msds = columnData\n msds.index = np.arange(msds.shape[0])*time_interval\n ax.plot(msds.index[1:], msds.values[1:], linewidth = 0.75,markersize = 10, marker = 'o', mec='k',zorder = 0, alpha = 0.4)\n\n #more plot parameters\n ax.set_xticks([3,6,9,12,15,18,21,24,27,30,45,60])\n ax.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())\n ax.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())\n\n ax.set_title('MSD')\n ax.set_ylabel('MSD (\\u03BC'+'m)\\u00b2')\n ax.set_xlabel('Lag time (min)')\n \n ax.set_axisbelow(True)\n ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)\n\n\n #average MSD\n avg_msd = MSDs.mean(axis = 1)\n avg_msd.index = np.arange(avg_msd.shape[0])*time_interval\n ax.plot(avg_msd.index[1:], avg_msd[1:], linewidth = 0.75, c = 'k', markersize = 15, marker = 'D', mec='k',zorder = 1)\n \n plt.show()\n\n return avg_msd", "def scatter_error_plot(y_true, y_predict, datelist,\n xlab='Dates', ylab='Units sold', title='Error analysis',\n ticks_separation='weeks'):\n plt.style.use('seaborn')\n\n #create plot\n fig=plt.figure(figsize=(15,10))\n \n #plot things\n plt.plot(datelist,y_true, label=r'True Values' ,\n linestyle='--', linewidth=2)\n plt.plot(datelist,y_predict, label=r'Predicted Values',\n linestyle='--', linewidth=2)\n plt.scatter(datelist,y_true)\n plt.scatter(datelist,y_predict)\n \n #labels\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(title)\n \n #set ticks every week\n if ticks_separation == 'days':\n plt.gca().xaxis.set_major_locator(matplotlib.dates.DayLocator())\n \n elif ticks_separation == 'weeks':\n plt.gca().xaxis.set_major_locator(matplotlib.dates.WeekdayLocator())\n \n elif ticks_separation == 'months':\n plt.gca().xaxis.set_major_locator(matplotlib.dates.MonthLocator())\n \n elif ticks_separation == 'days':\n plt.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())\n\n \n #set week format\n plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))\n \n \n plt.legend(loc='best')\n \n #increase all text\n ax=plt.gca()\n for item in ([ax.title, ax.xaxis.label, 
ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels() + ax.legend().get_texts()):\n item.set_fontsize(18)\n \n \n return fig", "def plot_errors(self):\n\n plt.title(\"Prediction Error\")\n plt.plot(self.errors)\n plt.ylabel(\"MSE (Mean Squared Error)\")\n plt.xlabel(\"Iteration\")\n plt.show()", "def errBarPlot(\n dataFrame,\n meanKey=\"mean\",\n sDevKey=\"sDev\",\n xKey=\"nBinSize\",\n rowKey=\"observable\",\n colKey=\"nX\",\n colorKey=\"nSamples\",\n errBarKwargs=None,\n shareY=False,\n):\n # Check whether frame contains all columns\n for key in [rowKey, colKey, xKey, meanKey, sDevKey, colorKey]:\n if not key in dataFrame.columns:\n raise KeyError(\"Key %s not found in input frame\" % key)\n\n # Set up the error bat plot\n errBarStyle = {\n \"linestyle\":\"None\",\n \"marker\":\".\",\n \"ms\":3,\n \"lw\":1,\n \"elinewidth\":0.5,\n \"capthick\":0.5,\n \"capsize\":0.5,\n }\n # Adjust by input keys\n if errBarKwargs:\n for key, val in errBarKwargs.items():\n errBarStyle[key] = val\n\n # Compute how much one has to shift plots for visualization\n ## Number of shifts\n colorEntries = dataFrame[colorKey].unique()\n nColors = len(colorEntries)\n\n ## Compute minimal independent variable distance\n xRange = dataFrame[xKey].unique()\n\n ## Loop through distances to get the minimal one\n dXmin = max(abs(xRange[-1] - xRange[0]), 0.1)\n for nx1, x1 in enumerate(xRange[:-1]):\n for x2 in xRange[nx1+1:]:\n if abs(x2-x1) < dXmin:\n dXmin = abs(x2-x1)\n dXmin /= 3\n\n ## Allocate shift of distances\n dX = {}\n for nEntry, entry in enumerate(colorEntries):\n dX[entry] = dXmin*(2*nEntry-nColors+1)*1./nColors\n\n ## Modify x cols\n df = dataFrame.copy()\n df[xKey] += df.apply(lambda col: dX[col[colorKey]], axis=1)\n\n # Create the facet grid for the mapping\n graph = sns.FacetGrid(\n data=df,\n row=rowKey,\n col=colKey,\n hue=colorKey,\n palette=\"Blues\",\n sharex=True,\n sharey=\"row\" if shareY else False,\n )\n ## and map the error bar plot\n graph.map(plt.errorbar, xKey, meanKey, sDevKey, **errBarStyle)\n\n # Change figure size\n graph.fig.set(\n dpi=500,\n figheight=2,\n figwidth=len(dataFrame[colKey].unique())*1./2\n )\n\n # Style individual plots\n for nax, ax in enumerate(graph.axes.flat):\n if not shareY:\n ax.set_yticks([])\n ## At most three ticks\n ax.set_xticks(np.linspace(\n dataFrame[xKey].min(), dataFrame[xKey].max(), 3, dtype=int\n ))\n ## Set the range\n ax.set_xlim(dataFrame[xKey].min()-1, dataFrame[xKey].max()+1)\n ## Set the ticks\n ax.tick_params(\n axis=\"both\",\n direction='inout',\n width=0.5,\n length=2.5,\n )\n\n # Remove axis and ticks\n for pos in [\"left\", \"top\", \"right\"]:\n ax.spines[pos].set_linewidth(0)\n if shareY and nax % len(graph.axes[0]) == 0:\n ax.spines[\"left\"].set_linewidth(0.5)\n else:\n ax.tick_params(\n axis=\"y\",\n direction='inout',\n width=0.0,\n length=0.0,\n )\n ax.spines[\"bottom\"].set_linewidth(0.5)\n\n # Adjust the margin titles and plot the mean of the means\n graph.set_titles(\"\")\n means = dataFrame.groupby([rowKey, colKey])[meanKey].mean()\n for nCorr, (corrName, axRow) in enumerate(\n zip(dataFrame[rowKey].unique(), graph.axes)\n ):\n for nt, ax in zip(dataFrame[colKey].unique(), axRow):\n if nCorr == 0:\n ax.set_title(\"{colKey}$ = {nt}$\".format(nt=nt, colKey=colKey))\n ax.axhline(means[corrName, nt], color=\"black\", ls=\"--\", lw=0.5)\n\n # Set the labels\n graph.set_ylabels(meanKey)\n\n # Adjust the remaining margin titles\n for corrName, ax in zip(dataFrame[rowKey].unique(), graph.axes[:, -1]):\n 
ax.yaxis.set_label_position(\"right\")\n ax.set_ylabel(corrName)\n\n graph.set_xlabels(xKey)\n graph.add_legend()\n\n # Adjust the intermediate plot spacing\n plt.subplots_adjust(wspace=0.1, hspace=0.05)\n\n return graph", "def get_avg_trend(y, filter='ewm', a=0.015, verbose =1, resample_interval='60s', fill_missing=False, title= '' , note= ''):\n\n # Two-way EWMA averaging\n ts_mean1, ts_std1 = smoothing(y, filter=filter, a=a)\n\n reversed_y = y.iloc[::-1]\n ts_mean2, ts_std2 = smoothing(reversed_y, filter=filter,a=a)\n ts_mean2 = ts_mean2.iloc[::-1]\n ts_std2 = ts_std2.iloc[::-1]\n\n ts_mean = (ts_mean1 + ts_mean2)/2\n ts_std = (ts_std1 + ts_std2)/2\n\n\n # Resample the daily trend by calculating the median of a resampling slice. mean can also be used.\n trend = ts_mean.resample(resample_interval).mean()\n ts_std = ts_std.resample(resample_interval).mean()\n\n # Fill up the missing trend samples if exist, by propagating the last valid\n if fill_missing: #rolling filter introduce Nan at the head or tail..\n trend.fillna(method='ffill', inplace=True, limit=2) #fill the end\n trend.fillna(method='bfill', inplace=True, limit=2) #fill the start\n\n\n\n if verbose>=1:\n t = title if title is not None else 'Average Trend'\n\n fig = plt.gcf()\n\n plt.plot(y[::1+y.shape[0]// 2000], alpha=.5)\n ax = trend.plot()\n ax.fill_between(trend.index, trend - 2 * ts_std, trend + 2 * ts_std,\n alpha=.25)\n ax.legend(['Orignal', 'Trend', 'std'])\n plt.text(ax.get_xlim()[0], ax.get_ylim()[0] + 50, note)\n plt.title(t)\n plt.show()\n\n import matplotlib.dates as mdates\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))\n plt.tight_layout()\n\n fig.savefig('./output/trends/'+t + '.pdf')\n plt.close(fig)\n\n return trend", "def plot_error(class_incorreto):\n epochs = np.arange(1, num_iter + 1)\n plt.plot(epochs, class_incorreto)\n plt.xlabel('Iterações')\n plt.ylabel('Classificados incorretamente')\n plt.show()", "def plot_errors(dat, title='Data', avg='mean', err='sem'):\n\n n_groups = len(dat)\n\n fig = plt.figure(figsize=[4, 5])\n ax = plt.gca()\n\n if avg == 'mean': avg_func = np.nanmean\n if avg == 'median': avg_func = np.nanmedian\n\n if err == 'sem': err_func = sem\n\n plt.errorbar(np.arange(1, n_groups+1), avg_func(dat, 1), yerr=err_func(dat, 1), xerr=None, fmt='.',\n markersize=22, capsize=10, elinewidth=2, capthick=2)\n\n ax.set_xlim([0.5, n_groups+0.5])\n\n # Titles & Labels\n ax.set_title(title, fontsize=16)\n ax.set_xlabel('Noise Levels')\n ax.set_ylabel('Error')\n\n # Set the top and right side frame & ticks off\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n # Set linewidth of remaining spines\n ax.spines['left'].set_linewidth(2)\n ax.spines['bottom'].set_linewidth(2)", "def residual_plots(test_data, mods, station=None, squared=False):\r\n import config\r\n learning_var = config.learning_var\r\n data = test_data.get_miniOD()\r\n mods.models[0].mod.load()\r\n pred = mods.models[0].mod.predict(test_data)\r\n test_data.miniOD = None\r\n if squared:\r\n data[test_data.get_stations_col(2015)] = (data[test_data.get_stations_col(2015)] - pred) ** 2 # /pred\r\n else:\r\n data[test_data.get_stations_col(2015)] = data[test_data.get_stations_col(2015)] - pred\r\n ind = data[data['Annee'] == 0].index\r\n data.drop(ind, inplace=True)\r\n print(data.columns.values)\r\n i = 0\r\n if station is None:\r\n ch_an = test_data.get_stations_col(2015)\r\n else:\r\n ch_an = 'End date ' + 
str(station)\r\n for ch in learning_var:\r\n if not (ch[0] == 'h') and not (ch in ['LV', 'MMJ', 'SD', 'poudrerie', 'verglas']):\r\n # data.boxplot(ch_an, by=ch)\r\n # if ch != 'Heure':\r\n plt.figure(i // 9)\r\n # plt.title('squared error / expectation')\r\n fig = plt.subplot(3, 3, (i % 9) + 1)\r\n i += 1\r\n # fig = plt.figure().add_subplot(111)\r\n fig.set_xlabel(ch)\r\n if squared:\r\n fig.set_ylabel('error²')\r\n else:\r\n fig.set_ylabel('error')\r\n l = []\r\n xaxis = np.unique(data[ch])\r\n print(ch, xaxis.shape)\r\n if xaxis.shape[0] < 20 or ch == 'Heure':\r\n for u in xaxis:\r\n l.append(data[ch_an][data[ch] == u])\r\n else:\r\n m = np.min(data[ch])\r\n M = np.max(data[ch])\r\n step = (M - m) / 20\r\n xaxis = np.arange(m, M, step)\r\n for u in xaxis:\r\n l.append(data[ch_an][(data[ch] >= u) * (data[ch] < u + step)])\r\n xaxis = xaxis.astype(int)\r\n # fig = plt.boxplot(ch_an, by=ch)\r\n # g = data.groupby(ch).mean()[ch_an]\r\n # v = data.groupby(ch).std()[ch_an]\r\n plt.boxplot(l, labels=xaxis)\r\n if squared:\r\n plt.ylim((0, 12))\r\n else:\r\n plt.ylim((-5, 5))\r\n # plt.plot(g, '-r')\r\n # plt.plot(g + v, ':r')\r\n # plt.plot(g - v, ':r')\r\n plt.show()", "def error():\n\n # Make data set using errors\n dataset_a = DataSet(oscillating,error_y=oscillating_error,plot='error_bar',label='Data and error')\n dataset_a.set_error(interval=5,width=1,cap=2)\n dataset_b = DataSet(oscillating,plot='error_shade',error_y=oscillating_error,order=0,colour='lightgrey',label='Error')\n dataset_c = DataSet(oscillating,plot='line',order=1,colour='firebrick',label='Data')\n\n # Make line graph with error bars\n plot_bar = Plot()\n plot_bar.set_legend(legend=True)\n plot_bar.add_dataset(dataset_a)\n plot_bar.plot()\n plot_bar.save(name='./figures/2d_error_bar',fmt='png')\n plot_bar.display()\n\n # Make line graph with shaded errors\n plot_shade = Plot()\n plot_shade.set_legend(legend=True,location='upper left')\n plot_shade.add_dataset(dataset_b)\n plot_shade.add_dataset(dataset_c)\n plot_shade.plot()\n plot_shade.save(name='./figures/2d_error_shade',fmt='png')\n plot_shade.display()", "def plot_ts(da, key):\n p = sns.lineplot(data=da.to_pandas(), linewidth=2)\n p.set_xlabel('time')\n p.set_ylabel(key)", "def hogg_errorbar(x, y, yerr, **kwargs):\n for xx, yy, yyerr in zip(x, y, yerr):\n plt.plot([xx, xx], [yy - yyerr, yy + yyerr], 'k-', **kwargs)\n return None", "def plot_Hubble():\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n z = SN_data[\"SNZ\"]\n mu = SN_data['SNMU']\n mu_err = SN_data['SNMU_ERR']\n z_array = np.linspace(0.0, 1.5 + 0.01, 1001)\n mu_cosm = 5 * np.log10((1 + z_array) * Convergence.comoving(z_array, OM=0.25, OL=0.75, h=0.7) * 1000) + 25\n mu_diff = SN_data['mu_diff']\n ax = plt.subplot2grid((2, 1), (0, 0))\n ax2 = plt.subplot2grid((2, 1), (1, 0))\n ax.set_ylabel(\"$\\mu$\")\n ax2.set_xlabel(\"$z$\")\n ax2.set_ylabel(\"$\\Delta\\mu$\")\n plt.subplots_adjust(wspace=0, hspace=0)\n ax.set_xticklabels([])\n ax.tick_params(labelsize=12)\n ax.errorbar(z[::2], mu[::2], mu_err[::2], linestyle='', linewidth=0.8, marker='o',\n markersize=2, capsize=2, color='C3', zorder=0, alpha=0.6, elinewidth=0.7)\n ax.plot(z[::2], mu[::2], linestyle='', marker='o', markersize=2, color='C3', alpha=0.4, markerfacecolor='C3')\n\n ax.set_ylim([38.5, 46])\n ax.set_xlim([0, 1.5])\n ax.plot(z_array, mu_cosm, linestyle='--', linewidth=0.8, color='C0', zorder=10)\n ax2.errorbar(z[::2], mu_diff[::2], mu_err[::2], linestyle='', linewidth=1, marker='o',\n markersize=2, 
capsize=2, color='C3', zorder=0, alpha=0.6, elinewidth=0.7)\n ax2.plot(z[::2], mu_diff[::2], linestyle='', marker='o', markersize=2, color='C3', alpha=0.4, markerfacecolor='C3')\n ax2.plot(z_array, np.zeros(len(z_array)), zorder=10, color='C0', linewidth=0.8, linestyle='--')\n ax2.set_ylim(-1.0, 1.0)\n ax2.set_xlim([0, 1.5])\n ax2.tick_params(labelsize=12)\n\n plt.show()", "def summaryPlot(df):\n import datetime as dt\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import numpy as np\n import pandas as pd\n from numpy import array\n import matplotlib.patches as mpatches\n import seaborn as sns\n from matplotlib.pyplot import figure\n\n class color:\n # Allows for bolded and underlined text\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n END = \"\\033[0m\"\n\n # Reads df and fills empty values\n df.index = pd.to_datetime(df.date)\n df = df.drop(\"date\", axis=1)\n df_all = df.resample(\"1D\")\n df_all = df_all.fillna(method=\"ffill\")\n\n dataPoints = [\"pm25\", \"co\", \"so2\", \"pm10\", \"o3\", \"no2\", \"nox\", \"wd\", \"ws\"]\n\n i = 0\n sub = 1\n while i < 9:\n # Plots line and histogram plots for ecery polutant\n # in the correct location based on subplot\n plt.figure(1, figsize=(50, 50))\n plt.subplot(9, 2, sub)\n sub = sub + 1\n a = df_all[dataPoints[i]].plot.line(color=\"gold\")\n a.axes.get_xaxis().set_visible(False)\n a.yaxis.set_label_position(\"left\")\n plt.ylabel(dataPoints[i], fontsize=75, bbox=dict(facecolor=\"whitesmoke\"))\n # print(df['pm25'].max())\n\n plt.subplot(9, 2, sub)\n sub = sub + 1\n plt.hist(df_all[dataPoints[i]], bins=50, color=\"green\")\n i = i + 1\n i = 0\n while i < 9:\n # Calculates statistics\n nDf = df[dataPoints[i]]\n missing = nDf.isna().sum() + sum(n < 0 for n in nDf)\n minVal = nDf.min()\n maxVal = nDf.max()\n meanVal = nDf.mean()\n medianVal = nDf.median()\n percentile = nDf.quantile(0.95)\n print(\"---------------\")\n print(color.BOLD + color.UNDERLINE + dataPoints[i] + color.END)\n print(\"min = \" + str(0))\n print(\"max = \" + str(maxVal))\n print(\"missing = \" + str(missing))\n print(\"mean = \" + str(meanVal))\n print(\"median = \" + str(medianVal))\n print(\"95th percentile = \" + str(percentile))\n i = i + 1", "def plot_ridgeline(windfield_errors):\n\n models = windfield_errors.columns # Extract model names\n n_points = 30 # Number of bins for the ridge plot\n\n # Get mean and confidence intervals for each model,\n # Also create histogram for each models\n model_data = pd.DataFrame()\n for model in models:\n data = {}\n error = windfield_errors[model].dropna().values\n data[\"name\"] = model\n data[\"mse\"] = np.mean(error)\n data[\"mse_ci\"] = 2 * np.std(error) / len(error) ** 0.5\n data[\"hist\"] = Histogram(error, n_points)\n model_data = model_data.append(data, ignore_index=True)\n\n # Sort values by mean square error to make the plot more visually appealing\n model_data = model_data.sort_values(by=[\"mse\"], axis=0).reset_index(drop=True)\n\n y = 0 # First histogram vertical position\n ys = [] # List of vertical histogram positions\n error_grid = np.linspace(0, 2, n_points)\n\n # Create colors for the histograms\n cmap = cm.get_cmap(\"bwr\", len(models))\n colors = [cmap(i) for i in range(len(models))]\n\n plt.figure(figsize=(6, 4))\n ax_L = plt.gca()\n for i, data in model_data.iterrows():\n ys.append(y)\n e_mean = data[\"mse\"]\n e_error = data[\"mse_ci\"]\n hist = data[\"hist\"]\n\n freqs = hist.freqs\n bins = hist.edges\n\n freqs = np.array([0] + [a for a in freqs for _ in range(2)] + [0])\n width = bins[1] - 
bins[0]\n bins = np.array([bins[0]] + [a + da for a in bins for da in [0, width]] + [bins[-1]+width])\n\n ax_L.fill_between(bins, y * np.ones(2*n_points+2), y + freqs, alpha=0.5, color=colors[i])\n ax_L.plot(bins, freqs + y, color='k', linewidth=1)\n\n def get_y(x):\n return freqs[2 * int((x - min(bins)) / (max(bins) - min(bins)) * n_points) + 2]\n\n freq_mean = get_y(e_mean)\n freq_lower = get_y(e_mean - e_error)\n freq_upper = get_y(e_mean + e_error)\n\n ax_L.plot([e_mean, e_mean], [y, freq_mean + y], color='k')\n ax_L.plot([e_mean-e_error, e_mean-e_error], [y, freq_lower + y], \"k--\", linewidth=.5)\n ax_L.plot([e_mean+e_error, e_mean+e_error], [y, freq_upper + y], \"k--\", linewidth=.5)\n\n y += 1.1*max(freqs)\n\n ax_L.plot(error_grid, np.zeros(n_points), color='k', linewidth=2)\n ax_L.set_yticks(ys)\n ax_L.set_ylim([0, y])\n ax_L.set_yticklabels([])\n\n ax_L.set_yticklabels(model_data[\"name\"])\n\n ax_L.set_xlabel(\"Fraction of unexplained variance $\\widetilde{\\mathcal{E}}$\")\n ax_L.set_xlim([0, 1.2])\n ax_L.grid(axis=\"y\")\n plt.tight_layout()\n return None", "def plotErr(self):\n if self.xp and self.wp:\n # plot the spectra\n w=self.ws.value(np.array(self.xp))\n self.errcurve,=self.erraxes.plot(self.xp,self.wp-w,linewidth=0.5,linestyle='',marker='o',color='b')\n if self.dxp and self.dwp:\n # plot the spectra\n dw=self.ws.value(np.array(self.dxp))\n self.delerrcurve,=self.erraxes.plot(self.dxp,self.dwp-dw,linewidth=0.5,linestyle='',marker='x',color='b')", "def plot_mean_std(data,ax,label=None,show_error=True):\n x = np.arange(1,100)\n mean = np.array([np.mean(data_n) for data_n in data])\n if show_error: std = np.array([np.std(data_n) for data_n in data])\n ax.plot(x,mean,label=label)\n if show_error: ax.fill_between(x,mean-std,mean+std,alpha=0.3)", "def plot_hists_wratio_errorband( hists, histErros, name, **kw):\n\n #\n # Calc bands\n #\n varUp = []\n varDown = []\n\n for sysHist in histErros:\n thisUpVar, thisDownVar = calcBinByBinDiffs(hists[1],sysHist)\n\n if varUp:\n varUp = addInQuad(thisUpVar, varUp)\n else:\n varUp = thisUpVar\n\n\n if varDown:\n varDown = addInQuad(thisDownVar, varDown)\n else:\n varDown = thisDownVar\n\n #\n # Build Band\n #\n xAxis = hists[0].GetXaxis()\n nBins = xAxis.GetNbins()\n var_band = ROOT.TGraphAsymmErrors(nBins)\n var_band.SetFillColor(ROOT.kRed)\n for i in range(nBins):\n var_band.SetPoint(i,xAxis.GetBinCenter(i+1),1.0)\n \n up = varUp [i]\n down = varDown[i]\n nom = hists[1].GetBinContent(i+1)\n \n if nom:\n errUp = float(up)/nom\n errDown = float(down)/nom\n else:\n errUp = 0\n errDown = 0\n\n var_band.SetPointError(i,\n xAxis.GetBinCenter(i+1)-xAxis.GetBinLowEdge(i+1),xAxis.GetBinUpEdge(i+1)-xAxis.GetBinCenter(i+1),\n errUp,errDown)\n\n\n #\n # Make ratio\n #\n kw[\"sys_band\"] = var_band\n res = plot_hists_wratio(hists, name, **kw)\n \n return res", "def plot_sum(self):\n fig, ax = plt.subplots()\n ax.set_title(\"Unpolarized intensity: I_up + I_down\")\n ax.set_xlabel(\"Time (microseconds)\")\n ax.set_ylabel('Intensity')\n\n if (self.is_attribute(\"time\") & self.is_attribute(\"intensity_up\") & \n self.is_attribute(\"intensity_up_sigma\") &\n self.is_attribute(\"intensity_down\") & \n self.is_attribute(\"intensity_down_sigma\") &\n self.is_attribute(\"intensity_up_total\") &\n self.is_attribute(\"intensity_down_total\")):\n np_excl = numpy.array(self.excluded, dtype=bool)\n np_notexcl = numpy.logical_not(np_excl)\n np_time = numpy.array(self.time, dtype=float)\n np_up = numpy.array(self.intensity_up, dtype=float)\n np_sup = 
numpy.array(self.intensity_up_sigma, dtype=float)\n np_up_mod = numpy.array(self.intensity_up_total, dtype=float)\n np_down = numpy.array(self.intensity_down, dtype=float)\n np_sdown = numpy.array(self.intensity_down_sigma, dtype=float)\n np_down_mod = numpy.array(self.intensity_down_total, dtype=float)\n np_sum = np_up + np_down\n np_sum_mod = np_up_mod + np_down_mod\n np_ssum = numpy.sqrt(numpy.square(np_sup)+numpy.square(np_sdown))\n ax.plot(np_time, np_sum_mod, \"k-\", label=\"model\")\n ax.errorbar(np_time[np_notexcl], np_sum[np_notexcl], yerr=np_ssum[np_notexcl], fmt=\"ko\", alpha=0.2, label=\"experiment\")\n ax.errorbar(np_time[np_excl], np_sum[np_excl], yerr=np_ssum[np_excl], fmt=\"rs\", alpha=0.2, label=\"excluded\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_sum - np_sum_mod).max()\n coeff = np_notexcl.astype(int)\n\n ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, coeff*(np_sum - np_sum_mod)+param, \"r-\", alpha=0.7,\n label=\"difference\")\n elif (self.is_attribute(\"time\") & self.is_attribute(\"intensity\") & \n self.is_attribute(\"intensity_total\") &\n self.is_attribute(\"intensity_sigma\")):\n np_excl = numpy.array(self.excluded, dtype=bool)\n np_notexcl = numpy.logical_not(np_excl)\n np_time = numpy.array(self.time, dtype=float)\n np_sum = numpy.array(self.intensity, dtype=float)\n np_sum_mod = numpy.array(self.intensity_total, dtype=float)\n np_ssum = numpy.array(self.intensity_sigma, dtype=float)\n ax.plot(np_time, np_sum_mod, \"k-\", label=\"model\")\n ax.errorbar(np_time[np_notexcl], np_sum[np_notexcl], yerr=np_ssum[np_notexcl], fmt=\"ko\", alpha=0.2, label=\"experiment\")\n ax.errorbar(np_time[np_excl], np_sum[np_excl], yerr=np_ssum[np_excl], fmt=\"rs\", alpha=0.2, label=\"excluded\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_sum - np_sum_mod).max()\n coeff = np_notexcl.astype(int)\n\n ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, coeff*(np_sum - np_sum_mod)+param, \"r-\", alpha=0.7,\n label=\"difference\")\n ax.legend(loc='upper right')\n fig.tight_layout()\n return (fig, ax)", "def _plot_ts(self, data, labels, ax,\n show_ylabels=True,\n offset=0.0,\n special_idx=[],\n errors_list=[],\n fontsize=FiguresConfig.LARGE_FONT_SIZE):\n if data.ndim == 1:\n data = data[np.newaxis, :]\n offset = int(offset)\n # apply offset setting onto the data\n data = data[:, offset:]\n\n # get shape of data to be plotted\n nsamples, ntimes = data.shape\n\n nTS = 1\n def_alpha = 1.0\n # generate ylabels for the plot\n labels = generate_region_labels(nsamples, labels)\n\n # set plotting parameters: alpha_ratio, colors, alphas\n alpha_ratio = 1.0 / nsamples\n colors = np.array(['k'] * nTS)\n alphas = np.maximum(np.array(\n [def_alpha] *\n nTS) *\n alpha_ratio,\n 1.0)\n colors[special_idx] = 'r'\n alphas[special_idx] = np.maximum(alpha_ratio, 0.1)\n\n # apply normalization for each trace\n for i in range(nsamples):\n data[i, :] = data[i, :] / np.nanmax(data[i, :])\n\n # plot each trace\n x = np.arange(ntimes)\n for itrace in range(nTS):\n for i in range(nsamples):\n y = data[i, :] + np.r_[i]\n ax.plot(x, y,\n color=colors[itrace],\n label=labels[itrace],\n alpha=alphas[itrace])\n\n # plot errors bars\n if errors_list:\n error = errors_list[error]\n ax.fill_between(x, y - error, y + error,\n color=colors[itrace],\n alphas=alphas[itrace])\n\n if show_ylabels:\n # print(\"Labels are : \", labels)\n y_ticks = np.arange(len(labels))\n ax.set_yticks(y_ticks)\n ax.set_yticklabels(labels, 
fontsize=fontsize / 1.5)\n\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize / 1.5)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize / 1.5)\n\n return ax", "def plot_XDR_PDR_Gauss():\n\n fig,axes = plt.subplots(nrows=2, ncols=2, squeeze=True, sharex='col', sharey='row', figsize=(6,6))\n fig.subplots_adjust(hspace=0, wspace=0) #, top=0.80, bottom=0.04, left=0.04, right=0.93)\n\n # get data\n sscs = [SSC['no'] for SSC in SSCs]\n colors = [plt.cm.inferno(i/(len(SSCs)+1)) for i in SSCs['no']]\n HCO_HCN, HNC_HCN, HNC_HCO = [],[],[]\n HCO_HCN_err, HNC_HCN_err, HNC_HCO_err = [],[],[]\n for SSC in SSCs:\n try:\n hco_hcn = ratios_Gauss['HCO+/HCN'][SSC['num']]['bestfit']\n hco_hcn_err = ratios_Gauss['HCO+/HCN'][SSC['num']]['error']\n HCO_HCN.append( np.log10(hco_hcn) )\n HCO_HCN_err.append( 0.434*hco_hcn_err/hco_hcn )\n except:\n HCO_HCN.append( np.nan )\n HCO_HCN_err.append( np.nan )\n try:\n hnc_hcn = ratios_Gauss['HNC/HCN'][SSC['num']]['bestfit']\n hnc_hcn_err = ratios_Gauss['HNC/HCN'][SSC['num']]['error']\n HNC_HCN.append( np.log10(hnc_hcn) )\n HNC_HCN_err.append( 0.434*hnc_hcn_err/hnc_hcn )\n except:\n HNC_HCN.append( np.nan )\n HNC_HCN_err.append( np.nan )\n try:\n hnc_hco = ratios_Gauss['H15NC/HCO+'][SSC['num']]['bestfit']*ratios_Gauss['14N/15N'][SSC['num']]['bestfit']\n hnc_hco_err = np.sqrt( (ratios_Gauss['H15NC/HCO+'][SSC['num']]['error']/ratios_Gauss['H15NC/HCO+'][SSC['num']]['bestfit'])**2 +(ratios_Gauss['14N/15N'][SSC['num']]['error']/ratios_Gauss['14N/15N'][SSC['num']]['bestfit'])**2 )\n HNC_HCO.append( np.log10(hnc_hco) )\n HNC_HCO_err.append( 0.434*hnc_hco_err/hnc_hco )\n except:\n HNC_HCO.append( np.nan )\n HNC_HCO_err.append( np.nan )\n\n # comparison from Baan+08\n B_hcn = [318.2, 14]\n B_hnc = [234.0, 7]\n B_hco = [276.1, 14]\n B_hco_hcn = [B_hco[0]/B_hcn[0], B_hco[0]/B_hcn[0]*np.sqrt((B_hco[1]/B_hco[0])**2+(B_hcn[1]/B_hcn[0])**2)]\n B_hnc_hcn = [B_hnc[0]/B_hcn[0], B_hnc[0]/B_hcn[0]*np.sqrt((B_hnc[1]/B_hnc[0])**2+(B_hcn[1]/B_hcn[0])**2)]\n B_hnc_hco = [B_hnc[0]/B_hco[0], B_hnc[0]/B_hco[0]*np.sqrt((B_hnc[1]/B_hnc[0])**2+(B_hco[1]/B_hco[0])**2)]\n B_HCO_HCN = [np.log10(B_hco_hcn[0]), 0.434*B_hco_hcn[1]/B_hco_hcn[0]]\n B_HNC_HCN = [np.log10(B_hnc_hcn[0]), 0.434*B_hnc_hcn[1]/B_hnc_hcn[0]]\n B_HNC_HCO = [np.log10(B_hnc_hco[0]), 0.434*B_hnc_hco[1]/B_hnc_hco[0]]\n\n def format_panel(ax):\n ax.xaxis.set_major_locator(MultipleLocator(0.5))\n ax.xaxis.set_minor_locator(MultipleLocator(0.25))\n ax.yaxis.set_major_locator(MultipleLocator(0.5))\n ax.yaxis.set_minor_locator(MultipleLocator(0.25))\n ax.set_axisbelow(True)\n ax.grid(axis='both', which='both')\n\n def label_regions(ax):\n ax.text(0.95, 0.9, 'XDR', color='k', transform=ax.transAxes, ha='right', va='top', weight='bold', fontsize=16)\n ax.text(0.05, 0.1, 'PDR', color='k', transform=ax.transAxes, ha='left', va='bottom', weight='bold', fontsize=16)\n\n # panel 1: HCO+/HCN over HNC/HCO+\n ax = axes[0][0]\n ax.plot([-10,10],[10,-10], ls='-', lw=1, c='grey', zorder=2)\n ax.fill_between([-10,10],[10,-10],[10,10], color='lightgrey', alpha=0.5, zorder=1)\n label_regions(ax)\n for a,b,a_err,b_err,c,s in zip(HNC_HCO, HCO_HCN, HNC_HCO_err, HCO_HCN_err, colors, SSCs):\n if np.isfinite(a) and np.isfinite(b):\n ax.errorbar(a,b, xerr=a_err, yerr=b_err, marker='o', ms=5, lw=0, color=c, elinewidth=1, ecolor=c, label='SSC '+str(s['no']), zorder=3)\n ax.errorbar(B_HCO_HCN[0],B_HNC_HCO[0], xerr=B_HCO_HCN[1], yerr=B_HNC_HCO[1], marker='o', ms=5, lw=0, color='lime', elinewidth=1, ecolor='lime', 
label=r'NGC 253 (Baan +08)', zorder=4)\n ax.set_xlim(-1.15,0.45)\n ax.set_ylim(-0.80,0.80)\n format_panel(ax)\n ax.set_ylabel(r'log I(HCO$^+$) / I(HCN)', fontsize=12)\n\n # panel 2: HNC/HCN over HCO/HCN\n ax = axes[0][1]\n ax.plot([0,0],[-10,10], ls='-', lw=1, c='grey', zorder=2)\n ax.fill_between([0,10],[-10,-10],[10,10], color='lightgrey', alpha=0.5, zorder=1)\n label_regions(ax)\n for a,b,a_err,b_err,c in zip(HNC_HCN, HCO_HCN, HNC_HCN_err, HCO_HCN_err, colors):\n if np.isfinite(a) and np.isfinite(b):\n ax.errorbar(a,b, xerr=a_err, yerr=b_err, marker='o', ms=5, lw=0, color=c, elinewidth=1, ecolor=c, zorder=3)\n ax.errorbar(B_HNC_HCN[0],B_HCO_HCN[0], xerr=B_HCO_HCN[1], yerr=B_HNC_HCO[1], marker='o', ms=5, lw=0, color='lime', elinewidth=1, ecolor='lime', zorder=4)\n ax.set_xlim(-1.15,0.45)\n ax.set_ylim(-0.80,0.80)\n ax.xaxis.set_tick_params(which='both', labelbottom=True)\n format_panel(ax)\n ax.set_xlabel(r'log I(HNC) / I(HCN)', fontsize=12)\n\n # panel 3: HNC/HCO over HNC/HCN\n ax = axes[1][0]\n ax.plot([-10,10],[0,0], ls='-', lw=1, c='grey', zorder=2)\n ax.fill_between([-10,10],[0,0],[10,10], color='lightgrey', alpha=0.5, zorder=1)\n label_regions(ax)\n for a,b,a_err,b_err,c in zip(HNC_HCO, HNC_HCN, HNC_HCO_err, HNC_HCN_err, colors):\n if np.isfinite(a) and np.isfinite(b):\n ax.errorbar(a,b, xerr=a_err, yerr=b_err, marker='o', ms=5, lw=0, color=c, elinewidth=1, ecolor=c, zorder=3)\n ax.errorbar(B_HNC_HCO[0],B_HNC_HCN[0], xerr=B_HCO_HCN[1], yerr=B_HNC_HCO[1], marker='o', ms=5, lw=0, color='lime', elinewidth=1, ecolor='lime', zorder=4)\n ax.set_xlim(-1.15,0.45)\n ax.set_ylim(-1.00,0.60)\n format_panel(ax)\n ax.set_xlabel(r'log I(HNC$^{**}$) / I(HCO$^+$)', fontsize=12)\n ax.set_ylabel(r'log I(HNC) / I(HCN)', fontsize=12)\n\n # panel 4: legend\n ax = axes[1][1]\n ax.set_axis_off()\n fig.legend(loc=3, bbox_to_anchor=(0.55,0.05,0.14,0.3), ncol=1, mode=\"expand\", borderaxespad=0., fontsize=12, frameon=False)\n\n savepath = escape_fname(os.path.join(plotdir, '10.results', 'XDR-PDR_line_ratio.pdf'))\n os.system('mkdir -p '+os.path.dirname(savepath))\n fig.savefig(savepath, dpi=300, bbox_inches='tight')", "def plot_XDR_PDR_XCLASS():\n\n fig,axes = plt.subplots(nrows=2, ncols=2, squeeze=True, sharex='col', sharey='row', figsize=(6,6))\n fig.subplots_adjust(hspace=0, wspace=0) #, top=0.80, bottom=0.04, left=0.04, right=0.93)\n\n # get data\n sscs = [SSC['no'] for SSC in SSCs]\n colors = [plt.cm.inferno(i/(len(SSCs)+1)) for i in SSCs['no']]\n HCO_HCN, HNC_HCN, HNC_HCO = [],[],[]\n HCO_HCN_err, HNC_HCN_err, HNC_HCO_err = [],[],[]\n for SSC in SSCs:\n try:\n hco_hcn_med = ratios_XCLASS['HCO+/HCN'][SSC['num']]['median']\n hco_hcn_p16 = ratios_XCLASS['HCO+/HCN'][SSC['num']]['16th']\n hco_hcn_p84 = ratios_XCLASS['HCO+/HCN'][SSC['num']]['84th']\n hco_hcn_low = hco_hcn_med-hco_hcn_p16\n hco_hcn_hig = hco_hcn_p84-hco_hcn_med\n HCO_HCN.append( np.log10(hco_hcn_med) )\n HCO_HCN_err.append( [0.434*hco_hcn_low/hco_hcn_med,0.434*hco_hcn_hig/hco_hcn_med] )\n except:\n HCO_HCN.append( np.nan )\n HCO_HCN_err.append( [np.nan,np.nan] )\n try:\n hnc_hcn_med = ratios_XCLASS['HNC/HCN'][SSC['num']]['median']\n hnc_hcn_p16 = ratios_XCLASS['HNC/HCN'][SSC['num']]['16th']\n hnc_hcn_p84 = ratios_XCLASS['HNC/HCN'][SSC['num']]['84th']\n hnc_hcn_low = hnc_hcn_med-hnc_hcn_p16\n hnc_hcn_hig = hnc_hcn_p84-hnc_hcn_med\n HNC_HCN.append( np.log10(hnc_hcn_med) )\n HNC_HCN_err.append( [0.434*hnc_hcn_low/hco_hcn_med,0.434*hnc_hcn_hig/hco_hcn_med] )\n except:\n HCO_HCN.append( np.nan )\n HCO_HCN_err.append( [np.nan,np.nan] )\n try:\n 
hnc_hco_med = ratios_XCLASS['H15NC/HCO+'][SSC['num']]['median']*ratios_XCLASS['14N/15N'][SSC['num']]['median']\n hnc_hco_p16 = ratios_XCLASS['H15NC/HCO+'][SSC['num']]['16th']*ratios_XCLASS['14N/15N'][SSC['num']]['median']\n hnc_hco_p84 = ratios_XCLASS['H15NC/HCO+'][SSC['num']]['84th']*ratios_XCLASS['14N/15N'][SSC['num']]['median']\n hnc_hco_low = hnc_hco_med-hnc_hco_p16\n hnc_hco_hig = hnc_hco_p84=hnc_hco_med\n HNC_HCO.append( np.log10(hnc_hco_med) )\n HNC_HCO_err.append( [0.434*hnc_hco_low/hnc_hco_med,0.434*hnc_hco_hig/hnc_hco_med] )\n except:\n HCO_HCN.append( np.nan )\n HCO_HCN_err.append( [np.nan,np.nan] )\n\n # comparison from Baan+08\n B_hcn = [318.2, 14]\n B_hnc = [234.0, 7]\n B_hco = [276.1, 14]\n B_hco_hcn = [B_hco[0]/B_hcn[0], B_hco[0]/B_hcn[0]*np.sqrt((B_hco[1]/B_hco[0])**2+(B_hcn[1]/B_hcn[0])**2)]\n B_hnc_hcn = [B_hnc[0]/B_hcn[0], B_hnc[0]/B_hcn[0]*np.sqrt((B_hnc[1]/B_hnc[0])**2+(B_hcn[1]/B_hcn[0])**2)]\n B_hnc_hco = [B_hnc[0]/B_hco[0], B_hnc[0]/B_hco[0]*np.sqrt((B_hnc[1]/B_hnc[0])**2+(B_hco[1]/B_hco[0])**2)]\n B_HCO_HCN = [np.log10(B_hco_hcn[0]), 0.434*B_hco_hcn[1]/B_hco_hcn[0]]\n B_HNC_HCN = [np.log10(B_hnc_hcn[0]), 0.434*B_hnc_hcn[1]/B_hnc_hcn[0]]\n B_HNC_HCO = [np.log10(B_hnc_hco[0]), 0.434*B_hnc_hco[1]/B_hnc_hco[0]]\n\n def format_panel(ax):\n ax.xaxis.set_major_locator(MultipleLocator(0.5))\n ax.xaxis.set_minor_locator(MultipleLocator(0.25))\n ax.yaxis.set_major_locator(MultipleLocator(0.5))\n ax.yaxis.set_minor_locator(MultipleLocator(0.25))\n ax.set_axisbelow(True)\n ax.grid(axis='both', which='both')\n\n def label_regions(ax):\n ax.text(0.95, 0.9, 'XDR', color='k', transform=ax.transAxes, ha='right', va='top', weight='bold', fontsize=16)\n ax.text(0.05, 0.1, 'PDR', color='k', transform=ax.transAxes, ha='left', va='bottom', weight='bold', fontsize=16)\n\n # panel 1: HCO+/HCN over HNC/HCO+\n ax = axes[0][0]\n ax.plot([-10,10],[10,-10], ls='-', lw=1, c='grey', zorder=2)\n ax.fill_between([-10,10],[10,-10],[10,10], color='lightgrey', alpha=0.5, zorder=1)\n label_regions(ax)\n for a,b,a_err,b_err,c,s in zip(HNC_HCO, HCO_HCN, HNC_HCO_err, HCO_HCN_err, colors, SSCs):\n if np.isfinite(a) and np.isfinite(b):\n ax.errorbar(a,b, xerr=[[a_err[0]],[a_err[1]]], yerr=[[b_err[0]],[b_err[1]]], marker='o', ms=5, lw=0, color=c, elinewidth=1, ecolor=c, label='SSC '+str(s['no']), zorder=3)\n ax.errorbar(B_HCO_HCN[0],B_HNC_HCO[0], xerr=B_HCO_HCN[1], yerr=B_HNC_HCO[1], marker='o', ms=5, lw=0, color='lime', elinewidth=1, ecolor='lime', label=r'NGC 253 (Baan +08)', zorder=4)\n ax.set_xlim(-0.75,0.75)\n ax.set_ylim(-0.85,0.65)\n format_panel(ax)\n ax.set_ylabel(r'log N(HCO$^+$) / N(HCN)', fontsize=12)\n\n # panel 2: HNC/HCN over HCO/HCN\n ax = axes[0][1]\n ax.plot([0,0],[-10,10], ls='-', lw=1, c='grey', zorder=2)\n ax.fill_between([0,10],[-10,-10],[10,10], color='lightgrey', alpha=0.5, zorder=1)\n label_regions(ax)\n for a,b,a_err,b_err,c in zip(HNC_HCN, HCO_HCN, HNC_HCN_err, HCO_HCN_err, colors):\n if np.isfinite(a) and np.isfinite(b):\n ax.errorbar(a,b, xerr=[[a_err[0]],[a_err[1]]], yerr=[[b_err[0]],[b_err[1]]], marker='o', ms=5, lw=0, color=c, elinewidth=1, ecolor=c, zorder=3)\n ax.errorbar(B_HNC_HCN[0],B_HCO_HCN[0], xerr=B_HCO_HCN[1], yerr=B_HNC_HCO[1], marker='o', ms=5, lw=0, color='lime', elinewidth=1, ecolor='lime', zorder=4)\n ax.set_xlim(-0.95,0.55)\n ax.set_ylim(-0.85,0.65)\n format_panel(ax)\n ax.tick_params(labelbottom=True)\n ax.set_xlabel(r'log N(HNC) / N(HCN)', fontsize=12)\n\n # panel 3: HNC/HCO over HNC/HCN\n ax = axes[1][0]\n ax.plot([-10,10],[0,0], ls='-', lw=1, c='grey', 
zorder=2)\n ax.fill_between([-10,10],[0,0],[10,10], color='lightgrey', alpha=0.5, zorder=1)\n label_regions(ax)\n for a,b,a_err,b_err,c in zip(HNC_HCO, HNC_HCN, HNC_HCO_err, HNC_HCN_err, colors):\n if np.isfinite(a) and np.isfinite(b):\n ax.errorbar(a,b, xerr=[[a_err[0]],[a_err[1]]], yerr=[[b_err[0]],[b_err[1]]], marker='o', ms=5, lw=0, color=c, elinewidth=1, ecolor=c, zorder=3)\n ax.errorbar(B_HNC_HCO[0],B_HNC_HCN[0], xerr=B_HCO_HCN[1], yerr=B_HNC_HCO[1], marker='o', ms=5, lw=0, color='lime', elinewidth=1, ecolor='lime', zorder=4)\n ax.set_xlim(-0.75,0.75)\n ax.set_ylim(-1.05,0.45)\n format_panel(ax)\n ax.set_xlabel(r'log N(HNC$^{**}$) / N(HCO$^+$)', fontsize=12)\n ax.set_ylabel(r'log N(HNC) / N(HCN)', fontsize=12)\n\n # panel 4: legend\n ax = axes[1][1]\n ax.set_axis_off()\n fig.legend(loc=3, bbox_to_anchor=(0.55,0.05,0.14,0.3), ncol=1, mode=\"expand\", borderaxespad=0., fontsize=12, frameon=False)\n\n savepath = escape_fname(os.path.join(plotdir, '10.results', 'XDR-PDR_column_density.pdf'))\n os.system('mkdir -p '+os.path.dirname(savepath))\n fig.savefig(savepath, dpi=300, bbox_inches='tight')", "def plot_NODE_err(err, tseries, soln_names, var_string,**kwargs):\n\n ky1 = soln_names[0]; ky2 = soln_names[1]; ky3 = soln_names[2]\n if 'unit' in kwargs:\n t_unit = kwargs['unit']\n else:\n t_unit = 'seconds'\n\n freq = tseries.size//20\n\n fig = plt.figure(figsize=(16,4))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(tseries[:], err[ky1][:], 'r-s', markersize=8,\n label='$\\mathbf{%s}$'%(var_string[ky1]),lw=2, markevery=freq)\n ymax_ax1 = err[ky1][:].max()\n ax1.set_xlabel('Time (%s)'%t_unit);lg=plt.legend(ncol=2, fancybox=True,)\n\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(tseries[:], err[ky2][:], 'b-o', markersize=8,\n label='$\\mathbf{%s}$'%(var_string[ky2]), lw=2, markevery=freq)\n ax2.plot(tseries[:], err[ky3][:], 'g-^', markersize=8,\n label='$\\mathbf{%s}$'%(var_string[ky3]), lw=2, markevery=freq-10)\n ymax_ax2 = np.maximum(err[ky2][:].max(), err[ky3][:].max())\n ax2.set_xlabel('Time (%s)'%t_unit);lg=plt.legend(ncol=2, fancybox=True,)\n\n if 'mark' in kwargs:\n tr_mark = kwargs['mark']\n ax1.vlines(tseries[tr_mark], 0, ymax_ax1, colors ='k', linestyles='dashdot')\n ax2.vlines(tseries[tr_mark],0,ymax_ax2, colors = 'k', linestyles ='dashdot')\n\n if 'metric' in kwargs:\n if kwargs['metric'] == 'rel':\n fig.suptitle('Relative errors of PODNODE NIROM solutions', fontsize=18)\n elif kwargs['metric'] == 'rms':\n fig.suptitle('Spatial RMS errors of PODNODE NIROM solutions', fontsize=18)\n else:\n fig.suptitle('Spatial RMS errors of PODNODE NIROM solutions', fontsize=18)", "def plot_lastreviews_means_and_errors_scaled(H_in_HH_mean, H_in_HH_error, M_in_MM_mean, M_in_MM_error, L_in_LL_mean, L_in_LL_error,\n H_in_HL_mean, H_in_HL_error, L_in_HL_mean, L_in_HL_error, H_in_HM_mean, H_in_HM_error,\n M_in_HM_mean, M_in_HM_error, M_in_ML_mean, M_in_ML_error, L_in_ML_mean, L_in_ML_error):\n plt.figure(figsize=(12, 9)) \n\n # create the fig. 
and axes.\n ax = plt.subplot(111)\n ax.spines[\"top\"].set_visible(False) \n ax.spines[\"right\"].set_visible(False)\n\n # define the color to use\n light_green = (152, 223, 138)\n strong_green = (44, 160, 44)\n light_red = (255, 152, 150)\n orange = (255, 187, 120)\n strong_red = (214, 39, 40)\n\n strong_green = rgb_to_matplot_lib(strong_green)\n light_green = rgb_to_matplot_lib(light_green)\n strong_red = rgb_to_matplot_lib(strong_red)\n light_red = rgb_to_matplot_lib(light_red)\n orange = rgb_to_matplot_lib(orange)\n\n # axis \n ax.set_ylabel('Rating', fontsize = 14)\n ax.tick_params(axis='both', labelsize=14)\n\n # plot small dash lines to follow the grading \n for y in np.arange(3.5, 4.6, 0.1): \n ax.plot(range(0, 45), [y] * len(range(0, 45)), \"--\", lw=0.5, color=\"black\", alpha=0.3)\n\n\n # set titles\n ax.set_title('10+ reviews average rating for each case in each group', fontsize = 14)\n\n plt.errorbar(1, H_in_HH_mean, H_in_HH_error, lineStyle= None, capsize=5, marker=\"^\", color=strong_green)\n plt.errorbar(2, M_in_MM_mean, M_in_MM_error, lineStyle= None, capsize=5, marker=\"^\", color=orange)\n plt.errorbar(3, L_in_LL_mean, L_in_LL_error, lineStyle= None, capsize=5, marker=\"^\", color=strong_red)\n plt.errorbar(4, H_in_HL_mean, H_in_HL_error, lineStyle= None, capsize=5, marker=\"^\", color=light_green)\n plt.errorbar(5, L_in_HL_mean, L_in_HL_error, lineStyle= None, capsize=5, marker=\"^\", color=light_red)\n plt.errorbar(6, H_in_HM_mean, H_in_HM_error, lineStyle= None, capsize=5, marker=\"^\", color=light_green)\n plt.errorbar(7, M_in_HM_mean, M_in_HM_error, lineStyle= None, capsize=5, marker=\"^\", color=orange)\n plt.errorbar(8, M_in_ML_mean, M_in_ML_error, lineStyle= None, capsize=5, marker=\"^\", color=orange)\n plt.errorbar(9, L_in_ML_mean, L_in_ML_error, lineStyle= None, capsize=5, marker=\"^\", color=light_red) \n\n plt.text(0.7, 3.41, \"({:04.3f})\".format(H_in_HH_mean), fontsize=14, color=strong_green)\n plt.text(1.7, 3.41, \"({:04.3f})\".format(M_in_MM_mean), fontsize=14, color=orange)\n plt.text(2.7, 3.41, \"({:04.3f})\".format(L_in_LL_mean), fontsize=14, color=strong_red)\n plt.text(3.7, 3.41, \"({:04.3f})\".format(H_in_HL_mean), fontsize=14, color=light_green)\n plt.text(4.7, 3.41, \"({:04.3f})\".format(L_in_HL_mean), fontsize=14, color=light_red)\n plt.text(5.7, 3.41, \"({:04.3f})\".format(H_in_HM_mean), fontsize=14, color=light_green)\n plt.text(6.7, 3.41, \"({:04.3f})\".format(M_in_HM_mean), fontsize=14, color=orange)\n plt.text(7.7, 3.41, \"({:04.3f})\".format(M_in_ML_mean), fontsize=14, color=orange)\n plt.text(8.7, 3.41, \"({:04.3f})\".format(L_in_ML_mean), fontsize=14, color=light_red)\n\n # set ticks label\n ax.set_xticks(range(1,10))\n ax.set_xticklabels(('H in HH', 'M in MM', 'L in LL', 'H in HL', 'L in HL', 'H in HM', 'M in HM', 'M in ML', 'L in ML'))\n\n #set ticks color\n colors = [strong_green, orange, strong_red, light_green, light_red, light_green, orange, orange, light_red]\n for xtick, color in zip(ax.get_xticklabels(), colors):\n xtick.set_color(color)\n\n plt.ylim([3.4,4.6])\n plt.xlim([0.5,9.5])\n plt.show()", "def plot_error(k_vals, error):\n\n plt.plot(k_vals,error)\n plt.xlabel('k-value')\n plt.ylabel('Cost')\n plt.show()", "def plot_series(self, dates, mean_series, year_series, std_series, savename):\n year = dates[0].year\n year_label = 'Discharge for '+str(year)\n dates2 = np.concatenate([dates,dates[::-1]])\n std2 = np.concatenate([std_series+mean_series,\\\n (mean_series-std_series)[::-1]])\n fig = plt.figure(figsize=(10,5))\n ax = 
fig.add_subplot(111)\n p1 = ax.plot(dates,mean_series,'-k', label = 'Mean Discharge')\n p3 = ax.fill(dates2,std2,facecolor = 'gray',label='Mean Variance')\n p2 = ax.plot(dates,year_series,'-r', label = year_label)\n ax.set_ylabel('$m^3/s$')\n ax.set_title('Brazos River Discharge Near Rosharon, TX')\n plt.ylim([0,max(year_series)+500])\n plt.legend(fontsize='x-small')\n idx = [i for i in range(dates.shape[0]) if (dates[i].day == 1)]\n dt_form = '%b'\n plt.xticks(dates[idx],[datetime.datetime.strftime(dates[i],dt_form) for i in idx])\n plt.savefig(savename)" ]
[ "0.5816549", "0.5654263", "0.5647554", "0.5641937", "0.5606311", "0.5547447", "0.5509094", "0.5484054", "0.54816675", "0.5443533", "0.5367257", "0.5338084", "0.52920187", "0.52789783", "0.5228553", "0.5223728", "0.52207625", "0.52193207", "0.5198728", "0.5193151", "0.518719", "0.51768726", "0.51384556", "0.51314616", "0.512458", "0.51170695", "0.50964254", "0.5092824", "0.509155", "0.5085633" ]
0.580105
1
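One plausible reading of the three trailing values is that the bracketed list holds the scores assigned to the 30 negatives, the standalone float (0.580105) is the score of the positive document, and the final integer is that document's 0-based rank, i.e. the count of negatives that score strictly higher. That counting rule is an assumption rather than something the record states, but it is consistent with the numbers above: only the top negative (0.5816549) outscores the document, which matches the final value of 1. A minimal Python sanity check, using the values copied from this record (the variable names mirror the record's fields; the scores are stored as strings in the record and written as floats here for brevity):

    # Sanity check of the trailing values in this record.
    # Assumption (not stated in the record): the final integer is a 0-based
    # rank, i.e. the number of negatives scoring strictly higher than the
    # positive document.
    negative_scores = [
        0.5816549, 0.5654263, 0.5647554, 0.5641937, 0.5606311, 0.5547447,
        0.5509094, 0.5484054, 0.54816675, 0.5443533, 0.5367257, 0.5338084,
        0.52920187, 0.52789783, 0.5228553, 0.5223728, 0.52207625, 0.52193207,
        0.5198728, 0.5193151, 0.518719, 0.51768726, 0.51384556, 0.51314616,
        0.512458, 0.51170695, 0.50964254, 0.5092824, 0.509155, 0.5085633,
    ]
    document_score = 0.580105

    rank = sum(score > document_score for score in negative_scores)
    print(rank)  # -> 1, matching the record's final value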