query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4 to 10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Return a custom boolean operator. This method is shorthand for calling ``op()`` with the ``is_comparison`` flag set to True. | def bool_op(
self,
opstring: str,
precedence: int = 0,
python_impl: Optional[Callable[..., Any]] = None,
) -> Callable[[Any], Operators]:
return self.op(
opstring,
precedence=precedence,
is_comparison=True,
python_impl=python_impl,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _op_bool(self, op: str, other: t.Any) -> bool:\n if hasattr(self.__members__, op):\n if isinstance(other, InspectableSet):\n other = other.__members__\n return getattr(self.__members__, op)(other)\n return NotImplemented",
"def is_binary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol(arg)\n # globally\n # ret-type operator symbol( arg1, arg2 )\n symbols = [\n ',', '()', '[]', '!=', '%', '%=', '&', '&&', '&=', '*', '*=', '+',\n '+=', '-', '-=', '->', '->*', '/', '/=', '<', '<<', '<<=', '<=', '=',\n '==', '>', '>=', '>>', '>>=', '^', '^=', '|', '|=', '||']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 1 == len(oper.arguments):\n return True\n else:\n return False\n else:\n if 2 == len(oper.arguments):\n return True\n else:\n return False",
"def isOperator(self, *args):\n return _libsbml.ASTBasePlugin_isOperator(self, *args)",
"def on_true(self) -> global___Expression:",
"def is_operator(cls, method_name):\n try:\n getattr(cls, method_name)\n except Exception:\n return False\n return Scenario.meta(cls, \"operator\", method_name, default=False)",
"def __or__(self, obj):\n return self._boolean_operation(obj, operator.__or__)",
"def _BoolOp(self, t):\n self.write(\"(\")\n s = \" %s \" % self.boolops[t.op.__class__]\n interleave(lambda: self.write(s), self.dispatch, t.values)\n self.write(\")\")",
"def pl_true(exp, model={}):\n op, args = exp.op, exp.args\n if exp == TRUE:\n return True\n elif exp == FALSE:\n return False\n elif is_prop_symbol(op):\n return model.get(exp)\n elif op == '~':\n p = pl_true(args[0], model)\n if p is None: return None\n else: return not p\n elif op == '|':\n result = False\n for arg in args:\n p = pl_true(arg, model)\n if p is True: return True\n if p is None: result = None\n return result\n elif op == '&':\n result = True\n for arg in args:\n p = pl_true(arg, model)\n if p is False: return False\n if p is None: result = None\n return result\n p, q = args\n if op == '>>':\n return pl_true(~p | q, model)\n elif op == '<<':\n return pl_true(p | ~q, model)\n pt = pl_true(p, model)\n if pt is None: return None\n qt = pl_true(q, model)\n if qt is None: return None\n if op == '<=>':\n return pt == qt\n elif op == '^':\n return pt != qt\n else:\n raise ValueError, \"illegal operator in logic expression\" + str(exp)",
"def isOperator(self):\n return _libsbml.ASTNode_isOperator(self)",
"def test04_boolean_operator(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n n = number(20)\n assert n\n\n n = number(0)\n assert not n",
"def evaluate(self, operand: object) -> bool:\n pass",
"def is_operator(formula):\n return is_binary_operator(formula) or isinstance(formula, Not)",
"def operator(self) -> Optional[LogicalOperator]:\n return self.__operator",
"def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)",
"def operator_present(input_str): # HELPER\n operator_list = ['+','-','*','/','**','<<','>>']\n\n if input_str in operator_list:\n return True\n else: return False",
"def is_operator(self, string):\n if string in '+-/*^()':\n return string\n else:\n return False",
"def _operators_conductor(operator_name, _bool=None):\n func = getattr(Series, operator_name)\n if _bool is None:\n # return bool series.\n _pre, _post = bool, bool\n else:\n # return ints.\n _pre, _post = int, int\n\n @wraps(func)\n def operator_method(self, other=None):\n if other is None:\n # for unary such as pos, neg, invert\n def not_(df: dF):\n return func(df.pipe(self.copy().pop())).apply(_post)\n\n return not_\n\n # if not isinstance(other, Condition):\n # raise TypeError(\"only conditions can add, got %r\" % type(other))\n\n def comb(df: dF) -> Series:\n return func(df.pipe(self).apply(_pre), df.pipe(other).apply(_pre)).apply(_post)\n\n return comb\n\n return operator_method",
"def operator(self):\n return self.__operator",
"def isoperator(token):\n\n # Token is an operator\n return token and token.lower() in Token.OPERATORS",
"def is_operator(operator):\n\t\tlist_of_operators = [\"+\", \"-\", \"*\", \"/\"]\n\t\treturn operator in list_of_operators",
"def isOp(self):\n return True",
"def _op(\n x: Union[bool, dts.Boolean, tps.BooleanValue],\n y: Union[bool, dts.Boolean, tps.BooleanValue],\n ) -> T:",
"def is_operator(self, symbol: str) -> bool:\n return symbol in self.operators",
"def __bool__(self):\n return (self.value == POS)",
"def __bool__(self):\n return bool(self.exp)",
"def __nonzero__(self):\n return self.__bool__()",
"def __nonzero__(self):\n return self.__bool__()",
"def operator(self):\n return self.data.get('operator', 'and')",
"def operator(self):\n return self.data.get('operator', 'and')",
"def my_operator(self):\n return self._my_operator"
] | [
"0.68767655",
"0.6805689",
"0.6731244",
"0.67064416",
"0.6659224",
"0.66318834",
"0.6631672",
"0.6612189",
"0.66057175",
"0.6603681",
"0.660212",
"0.6572331",
"0.654602",
"0.6541066",
"0.6471177",
"0.6452777",
"0.6418484",
"0.6383299",
"0.6374297",
"0.6358638",
"0.6356275",
"0.63521755",
"0.6342431",
"0.6323814",
"0.63101655",
"0.62823933",
"0.62823933",
"0.6282339",
"0.6282339",
"0.62715954"
] | 0.7286612 | 0 |
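To make the row above concrete, here is a minimal sketch (not part of the dataset) of how ``bool_op()`` is typically used; the ``~`` operator string and the rendered SQL in the comments are illustrative assumptions.

```python
# Minimal sketch: bool_op() returns a callable that builds a boolean expression.
from sqlalchemy import column

regex_match = column("a").bool_op("~")   # "~" is just an example operator string
expr = regex_match("pattern")
print(expr)   # roughly: a ~ :a_1   (the right-hand side becomes a bound parameter)
```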
Implement the ``<`` operator. In a column context, produces the clause ``a < b``. | def __lt__(self, other: Any) -> ColumnOperators:
return self.operate(lt, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __lt__(self, other):\n self.conds.append((self.name, '<', other))\n return self",
"def _builtin_lt(arg1, arg2, engine=None, **kwdargs):\n check_mode((arg1, arg2), ['gg'], functor='<', **kwdargs)\n a_value = arg1.compute_value(engine.functions)\n b_value = arg2.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value < b_value",
"def less_than(self) -> global___Expression:",
"def __lt__(self, other):\n return self.lessThan(other)",
"def __lt__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Less(self, other)",
"def __lt__(self, other: t.Any) -> bool:\n return self._op_bool('__lt__', other)",
"def __lt__(self, *args):\n return _ida_hexrays.cexpr_t___lt__(self, *args)",
"def __le__(self, other: Any) -> ColumnOperators:\n return self.operate(le, other)",
"def __gt__(self, other: Any) -> ColumnOperators:\n return self.operate(gt, other)",
"def __lt__(self, other):\n return self.f() < other.f()",
"def __lt__(self, other):\n return self.element() < other.element()",
"def lt(self, other):\n\n return self._get(\"lt\", other, Bool)",
"def __lt__(self, other):\n return less(self, other)",
"def __ge__(self, other: Any) -> ColumnOperators:\n return self.operate(ge, other)",
"def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )",
"def __lt__(self, rhs):\n return _table.Connection___lt__(self, rhs)",
"def less(x1, x2):\n return compare_chararrays(x1, x2, '<', True)",
"def lt(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"<\", __key, __and, kwargs.items())",
"def __lt__(self, *args):\n return _ida_hexrays.cdo_t___lt__(self, *args)",
"def _lt(self, node_a, node_b):\n node_a = self.__getitem__(node_a)\n node_b = self.__getitem__(node_b)\n if 'val' in node_a and 'val' in node_b:\n return node_a['val'] > node_b['val']\n else:\n return False",
"def __lt__(self, other):\n return self._obj_func() < other._obj_func()",
"def lt(self, e1, e2):\n return self._poset.lt(e1, e2)",
"def __lt__(self, other):\n return self <= other and not self >= other",
"def __lt__(self, *args):\n return _ida_hexrays.operand_locator_t___lt__(self, *args)",
"def less_than_or_equal(self) -> global___Expression:",
"def __lt__(self,other):\n return self.lvl < other.lvl",
"def __lt__(self, *args):\n return _ida_hexrays.cfor_t___lt__(self, *args)",
"def __lt__(self, other):\n return self._value < other.value_in_unit(self.unit)",
"def __lt__(self, *args):\n return _ida_hexrays.ccase_t___lt__(self, *args)",
"def lt(self, val):\n\t\treturn LessThan(self, val)"
] | [
"0.7481395",
"0.7145486",
"0.7140174",
"0.6991639",
"0.68957096",
"0.67781824",
"0.67484343",
"0.6713814",
"0.66865134",
"0.6664929",
"0.6620451",
"0.6612124",
"0.6578539",
"0.65778166",
"0.6572301",
"0.6556116",
"0.65221244",
"0.6521315",
"0.6477191",
"0.64536965",
"0.6428964",
"0.6415254",
"0.641129",
"0.6405686",
"0.6378935",
"0.6377217",
"0.63476145",
"0.63249934",
"0.6312441",
"0.62934387"
] | 0.8495016 | 0 |
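A small usage sketch for the ``<`` operator described above, assuming SQLAlchemy's lightweight ``column()`` construct; the SQL in the comments is the default string rendering.

```python
# Minimal sketch: "<" on a column expression produces "a < b".
from sqlalchemy import column

print(column("a") < column("b"))   # a < b
print(column("a") < 5)             # a < :a_1   (the literal 5 becomes a bound parameter)
```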
Implement the ``==`` operator. In a column context, produces the clause ``a = b``. If the target is ``None``, produces ``a IS NULL``. | def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override]
return self.operate(eq, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other):\n return super(Column, self).__eq__(tuple(other))",
"def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501\n if other is None or isinstance(other, expression.Null):\n if self.property.direction in [ONETOMANY, MANYTOMANY]:\n return ~self._criterion_exists()\n else:\n return _orm_annotate(\n self.property._optimized_compare(\n None, adapt_source=self.adapter\n )\n )\n elif self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"Can't compare a collection to an object or collection; \"\n \"use contains() to test for membership.\"\n )\n else:\n return _orm_annotate(\n self.property._optimized_compare(\n other, adapt_source=self.adapter\n )\n )",
"def eq(cls, __and=True, __key=None, **kwargs):\r\n null_handler = lambda c: (f\"{c} IS NULL\", [])\r\n return _queries(\"=\", __key, __and, kwargs.items(), null_handler)",
"def __eq__(self, rhs):\n return _table.Connection___eq__(self, rhs)",
"def is_(self, other: Any) -> ColumnOperators:\n return self.operate(is_, other)",
"def __eq__(self, other):\n self.conds.append((self.name, '==', other))\n return self",
"def where(self, column_or_label, value=None):\n column = self._get_column(column_or_label)\n if value is not None:\n column = column == value\n return self.take(np.nonzero(column)[0])",
"def __eq__(self, other):\n return (other is not None and\n ((not self.name and not other.name) or\n self.name == other.name) and\n ((not self.expressions and not other.expressions) or\n self.expressions == other.expressions) and\n self.fields == other.fields and\n dict.__eq__(self.attrs or {}, other.attrs or {}))",
"def __eq__(self, other):\n return ZeroaryOperator.__eq__(self, other) and \\\n self.relation_key == other.relation_key",
"def __eq__(self, other):\r\n if other is not None:\r\n return self.value() == other.value()\r\n else:\r\n return False",
"def eq(self, other):\n\n return self._get(\"eq\", other, Bool)",
"def eq(self, other):\n\n return self._get(\"eq\", other, Bool)",
"def __eq__(self, other):\n return (other is self) or (isinstance(other, Expr)\n and self.op == other.op and self.args == other.args)",
"def test_equality_method(self):\r\n wc1 = WhereClause('a', EqualsOperator(), 'c')\r\n wc2 = WhereClause('a', EqualsOperator(), 'c')\r\n assert wc1 == wc2",
"def __eq__(self, other):\n return (other is not None and\n self.field_name == other.field_name and\n self.field_type is other.field_type and\n dict.__eq__(self.field_attrs, other.field_attrs) and\n self.related_model == other.related_model)",
"def values_eq(self, a, b):\r\n return a == b",
"def _logical_equal(x, y):\n x_ = _static_value(x)\n y_ = _static_value(y)\n if x_ is None or y_ is None:\n return math_ops.equal(x, y)\n return constant_op.constant(np.array_equal(x_, y_))",
"def __eq__(self, other):\n if other is None:\n return False\n if self.value == other.value:\n return True\n return False",
"def __eq__(self, other: t.Any) -> bool:\n return self._op_bool('__eq__', other)",
"def __eq__(self, other):\n\n if not isinstance(other, PublishedDateTime):\n raise ValueError\n\n if isinstance(other, datetime.datetime):\n other = PublishedDateTime(other.year, other.month, other.day, other.timetz())\n\n return sql.and_(*[a == b for a, b in\n zip(self.__clause_element__().clauses,\n other.__composite_values__())])",
"def dialect_eq(lhs, rhs):\n # type: (csv.Dialect, csv.Dialect) -> bool\n return (lhs.delimiter == rhs.delimiter and\n lhs.quotechar == rhs.quotechar and\n lhs.doublequote == rhs.doublequote and\n lhs.escapechar == rhs.escapechar and\n lhs.quoting == rhs.quoting and\n lhs.skipinitialspace == rhs.skipinitialspace)",
"def __eq__(self, other):\n return self.value == other or self.value == other.value",
"def __eq__(self,other):\n\t\tif other != None:\n\t\t\treturn self.id==other.id and \\\n\t\t\t\t self.length == other.length and \\\n\t\t\t\t self.value==other.value\n\t\telse:\n\t\t\treturn False",
"def __eq__(self, other):\n if not isinstance(other, Expression):\n return False\n\n return self.evaluate() == other.evaluate()",
"def match(self, other: Any, **kwargs: Any) -> ColumnOperators:\n return self.operate(match_op, other, **kwargs)",
"def __eq__(self, other):\n if other != None:\n return self == other\n else:\n return False",
"def __eq__(self, other):\n if other != None:\n return self == other\n else:\n return False",
"def __eq__(self, other):\n return (other is not None and\n self.table_name == other.table_name and\n self.db_tablespace == other.db_tablespace and\n set(self.constraint_sigs) == set(other.constraint_sigs) and\n set(self.index_sigs) == set(other.index_sigs) and\n set(self.index_together) == set(other.index_together) and\n self.model_name == other.model_name and\n self.pk_column == other.pk_column and\n dict.__eq__(self._field_sigs, other._field_sigs) and\n not self.has_unique_together_changed(other))",
"def index_equivalent_value(indexer, obj1, attr1, obj2, attr2):\n eq_conds = indexer.index_by_type(ValueEquivalence)\n for cond in eq_conds:\n r = cond.relationship\n obj_list = r.obj_list\n attr_list = r.attr_list\n if obj1 in obj_list and obj2 in obj_list \\\n and attr1 in attr_list and attr2 in attr_list:\n return cond\n return None",
"def __eq__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Equal(self, other)"
] | [
"0.65228146",
"0.62469417",
"0.58913606",
"0.58078814",
"0.5791249",
"0.5780802",
"0.5736693",
"0.5728964",
"0.5647626",
"0.5644617",
"0.5643474",
"0.5643474",
"0.5619931",
"0.5608386",
"0.5597904",
"0.5576965",
"0.5540127",
"0.5503248",
"0.54818636",
"0.5457207",
"0.54542685",
"0.5442877",
"0.54199594",
"0.54126805",
"0.5367253",
"0.5361711",
"0.5361711",
"0.53549916",
"0.53402525",
"0.5329728"
] | 0.70723593 | 0 |
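A hedged sketch of the ``==`` behaviour described above, including the coercion of ``None`` to ``IS NULL``; the comments show the default rendering.

```python
# Minimal sketch: "==" renders "=", and comparison against None renders IS NULL.
from sqlalchemy import column

print(column("a") == column("b"))   # a = b
print(column("a") == None)          # a IS NULL   (intentional "== None", not ".is_(None)")
```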
Implement the ``IS DISTINCT FROM`` operator. Renders "a IS DISTINCT FROM b" on most platforms; on some such as SQLite may render "a IS NOT b". | def is_distinct_from(self, other: Any) -> ColumnOperators:
return self.operate(is_distinct_from, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_not_distinct_from(self, other: Any) -> ColumnOperators:\n return self.operate(is_not_distinct_from, other)",
"def isdistinct(seq):\n return len(seq) == len(set(seq))",
"def isdistinct(token):\n\n # Token is the distinct keyword\n return token and token.lower() in Token.DISTINCT",
"def test_distinct(self):\n self.Person(name=\"Mr Orange\", age=20).save()\n self.Person(name=\"Mr White\", age=20).save()\n self.Person(name=\"Mr Orange\", age=30).save()\n self.Person(name=\"Mr Pink\", age=30).save()\n assert set(self.Person.objects.distinct(\"name\")) == {\n \"Mr Orange\",\n \"Mr White\",\n \"Mr Pink\",\n }\n assert set(self.Person.objects.distinct(\"age\")) == {20, 30}\n assert set(self.Person.objects(age=30).distinct(\"name\")) == {\n \"Mr Orange\",\n \"Mr Pink\",\n }",
"def distinct(self):\n return DistinctQuery(self)",
"def test_no_duplicates(self):\n with Historical_ROAs_Table() as t:\n sql = f\"SELECT DISTINCT({','.join(t.columns[:-1])}) FROM {t.name}\"\n distinct = len(t.execute(sql))\n sql = f\"SELECT * FROM {t.name}\"\n assert len(t.execute(sql)) == distinct",
"def test_distinct(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.sql.add_all(pkgs)\n saved_pkgs = self.db.distinct()\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))",
"def returns_distinct_classes(self):\n assert simple_class() is not simple_class()",
"def is_distinct(n):\n nstr = str(n)\n return len(nstr) == len(set(nstr))",
"def UniqueIterator(iterator):\r\n so_far = set()\r\n def no_dups(x):\r\n if x in so_far:\r\n return False\r\n else:\r\n so_far.add(x)\r\n return True\r\n\r\n return IteratorFilter(iterator, no_dups)",
"def __eq__(self, *args):\n return _ida_hexrays.user_unions_iterator_t___eq__(self, *args)",
"def test_distinct(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n for pkg in pkgs:\n self.db.save(pkg)\n saved_pkgs = self.db.distinct()\n\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))",
"def __ne__(self, *args):\n return _ida_hexrays.user_unions_iterator_t___ne__(self, *args)",
"def test_distinct(self):\n pkgs = [\n make_package(factory=DynamoPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self._save_pkgs(*pkgs)\n saved_pkgs = self.db.distinct()\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))",
"def distinct(self):\n qs = copy(self)\n qs._distinct = True\n return qs",
"def distinct(iterable):\n\n def distincting(iterable_):\n set_of_distinct_values = set()\n for i in iterable_:\n if i not in set_of_distinct_values:\n set_of_distinct_values.add(i)\n yield i\n\n return distincting(iterable)",
"def get_distinct_disabled(self, field, query=None):\n logger.warn(u'get_distinct called field: %s, Query: %s',field,query)\n query = self._sanitize_query(query)\n logger.debug(u'Search sanitized query: %s ',query)\n result = []\n if field == 'artist':\n result = self._browse_artist(query)\n elif field == 'genre':\n result = self.get_genres()\n else:\n logger.info(u'get_distinct not fully implemented yet')\n result = []\n return set([v[0] for v in result])",
"def is_duplicate(self, **kwargs):\n return len(list(self.c.select(**kwargs))) > 0",
"def distinct(x):\n return list(set(x))",
"def distinct(self, columns, **kwds_filter):\n fn = lambda source: source.distinct(columns, **kwds_filter)\n results = (fn(source) for source in self._sources)\n results = itertools.chain(*results)\n return CompareSet(results)",
"def distinct(self, columns, **kwds_filter):\n if not nonstringiter(columns):\n columns = (columns,)\n self._assert_columns_exist(columns)\n select_clause = [self._normalize_column(x) for x in columns]\n select_clause = ', '.join(select_clause)\n select_clause = 'DISTINCT ' + select_clause\n\n cursor = self._execute_query(select_clause, **kwds_filter)\n return CompareSet(cursor)",
"def distinct(self):\n self.distinct_ = True\n\n return self",
"def distinct(self, columns, **kwds_filter):\n if not _is_nsiterable(columns):\n columns = (columns,)\n self._assert_columns_exist(columns)\n select_clause = [self._normalize_column(x) for x in columns]\n select_clause = ', '.join(select_clause)\n select_clause = 'DISTINCT ' + select_clause\n\n cursor = self._execute_query(select_clause, **kwds_filter)\n return CompareSet(cursor)",
"def __ne__(self, *args):\n return _ida_hexrays.user_cmts_iterator_t___ne__(self, *args)",
"def is_generic(self, A):\n return all_distinct(imap(lambda v: v[0], A))",
"def test_duplicates():\n\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"\"\" SELECT COUNT(CONCAT(song_title, ' ', artist_name)) \n FROM songs \"\"\")\n count1 = cur.fetchone()[0]\n cur.execute(\"\"\" SELECT COUNT(DISTINCT CONCAT(song_title, ' ', artist_name))\n FROM songs \"\"\")\n count2 = cur.fetchone()[0]\n assert count1-count2 == 0",
"def make_query(self, qsrc):\n\n g = self.world.as_rdflib_graph()\n\n r = g.query_owlready(qsrc)\n res_list = []\n for elt in r:\n # ensure that here each element is a sequences of lenght 1\n assert len(elt) == 1\n res_list.append(elt[0])\n\n # drop duplicates\n return set(res_list)",
"def unsorted_not_distinct(table1, table2, subset=False):\n\n only_in_table1 = []\n if subset:\n # When subset, a row in table1 is not subset,\n # if its contains more instances of a row than table2\n for row in table1:\n count1 = table1.count(row)\n count2 = table2.count(row)\n if count1 > count2 or None in row.values():\n dic = row.copy()\n dic['count'] = count1\n only_in_table1.append(dic)\n\n else: # not Subset\n for row in table1:\n count1 = table1.count(row)\n count2 = table2.count(row)\n if count1 != count2 or None in row.values():\n dic = row.copy()\n dic['count'] = count1\n only_in_table1.append(dic)\n\n return only_in_table1",
"def is_(a, b):\n return False",
"def __eq__(self, *args):\n return _ida_hexrays.user_cmts_iterator_t___eq__(self, *args)"
] | [
"0.6805806",
"0.57211715",
"0.55256224",
"0.5360086",
"0.5352851",
"0.5322726",
"0.5257902",
"0.52318513",
"0.52310133",
"0.5195459",
"0.5176963",
"0.51325774",
"0.5045675",
"0.500418",
"0.49898845",
"0.49424577",
"0.4938313",
"0.4881698",
"0.48579457",
"0.48537987",
"0.48064646",
"0.47978604",
"0.4791056",
"0.47851256",
"0.47836652",
"0.4781041",
"0.47616488",
"0.47586718",
"0.47330907",
"0.4731599"
] | 0.74369556 | 0 |
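An illustrative one-liner for ``is_distinct_from()``; the exact SQL is dialect-dependent, as the row above notes.

```python
# Minimal sketch: IS DISTINCT FROM under the default compiler.
from sqlalchemy import column

print(column("a").is_distinct_from(column("b")))   # a IS DISTINCT FROM b
```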
Implement the ``IS NOT DISTINCT FROM`` operator. Renders "a IS NOT DISTINCT FROM b" on most platforms; on some such as SQLite may render "a IS b". | def is_not_distinct_from(self, other: Any) -> ColumnOperators:
return self.operate(is_not_distinct_from, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_distinct_from(self, other: Any) -> ColumnOperators:\n return self.operate(is_distinct_from, other)",
"def __ne__(self, *args):\n return _ida_hexrays.user_unions_iterator_t___ne__(self, *args)",
"def __ne__(self, *args):\n return _ida_hexrays.user_cmts_iterator_t___ne__(self, *args)",
"def __invert__(self):\n return NotAny(self)",
"def __ne__(self, values):\n self = self.__eq__(values)\n return self.__invert__()",
"def is_not(self, other: Any) -> ColumnOperators:\n return self.operate(is_not, other)",
"def __ne__(self, G):\n return not self.__eq__(G)",
"def __ne__(self, *args):\n return _ida_hexrays.cdo_t___ne__(self, *args)",
"def __ne__(self, other):\n return tuple(self) != tuple(other)",
"def __ne__(self, other):\n\n if other is None:\n return sql.and_(*[a != None for a in self.__clause_element__().clauses])\n\n return sql.and_(*[a != b for a, b in\n zip(self.__clause_element__().clauses,\n other.__composite_values__())])",
"def __ne__(self, *args):\n return _ida_hexrays.ccase_t___ne__(self, *args)",
"def __ne__(self, other):\n return self.isNot(other)",
"def isNot(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNot)\n newq.setValue(value)\n return newq",
"def __neq__(self, other): \n return not self == other",
"def __ne__(self, other):\n return not self.__eq__(other)",
"def __ne__(self, other):\n return not self == other",
"def __ne__(self, other):\n return not_equal(self, other)",
"def __ne__(self, other):\n return not self == other",
"def are_not(self, value1, value2):\n (group_1, val_1) = self.get_val_tuple(value1)\n (group_2, val_2) = self.get_val_tuple(value2)\n f_arenot = Or(*[ And(self.X[group_1, val_1, idx], ~self.X[group_2, val_2, idx])\n for idx in range(0, self.items_per) ])\n\n return f_arenot",
"def __ne__(self, other):\n pass",
"def __ne__(self, other):\n return not(self == other)",
"def __ne__(self, other):\r\n return not self.__eq__(other)",
"def __ne__(self, other):\r\n return not self.__eq__(other)",
"def __ne__(self, other):\r\n return not self.__eq__(other)",
"def __ne__(self, other):\r\n return not self.__eq__(other)",
"def __ne__(self, other):\n return not self == other",
"def __ne__(self, other):\n return not self == other",
"def __ne__(self,other):\n return not (self == other)",
"def __ne__(self: _TT, other: object) -> bool:\n return self.ne(other) # type: ignore",
"def __ne__(self, other):\n return not (self == other)"
] | [
"0.6854851",
"0.6271306",
"0.60592026",
"0.59912914",
"0.59556234",
"0.5913235",
"0.5890726",
"0.58651143",
"0.5837896",
"0.5810431",
"0.5760489",
"0.574592",
"0.57227314",
"0.5712382",
"0.57120717",
"0.5675092",
"0.56723475",
"0.56513",
"0.5647014",
"0.5646091",
"0.56390554",
"0.56338614",
"0.56338614",
"0.56338614",
"0.56338614",
"0.5624807",
"0.5624807",
"0.5619619",
"0.56158245",
"0.5607673"
] | 0.78460455 | 0 |
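The negated form, sketched the same way; SQLite may instead render ``a IS b`` as noted above.

```python
# Minimal sketch: IS NOT DISTINCT FROM under the default compiler.
from sqlalchemy import column

print(column("a").is_not_distinct_from(column("b")))   # a IS NOT DISTINCT FROM b
```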
Implement the ``>`` operator. In a column context, produces the clause ``a > b``. | def __gt__(self, other: Any) -> ColumnOperators:
return self.operate(gt, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def greater_than(self) -> global___Expression:",
"def __lt__(self, other: Any) -> ColumnOperators:\n return self.operate(lt, other)",
"def __gt__(self, other):\n self.conds.append((self.name, '>', other))\n return self",
"def _builtin_gt(arg1, arg2, engine=None, **kwdargs):\n check_mode((arg1, arg2), ['gg'], functor='>', **kwdargs)\n a_value = arg1.compute_value(engine.functions)\n b_value = arg2.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value > b_value",
"def greater_than_or_equal(self) -> global___Expression:",
"def __gt__(self, other):\n return self.greaterThan(other)",
"def less_than(self) -> global___Expression:",
"def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )",
"def __gt__(self, *args):\n return _ida_hexrays.cexpr_t___gt__(self, *args)",
"def __ge__(self, other: Any) -> ColumnOperators:\n return self.operate(ge, other)",
"def gt(self, x, y):\n return self.lt(y,x)",
"def __gt__(self, other):\n return greater(self, other)",
"def _greater_than_op(spec):",
"def greater(x1, x2):\n return compare_chararrays(x1, x2, '>', True)",
"def __gt__(self, *args):\n return _ida_hexrays.cdo_t___gt__(self, *args)",
"def test_greater_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::gt\"},\n )",
"def __le__(self, other: Any) -> ColumnOperators:\n return self.operate(le, other)",
"def __gt__(self, other: t.Any) -> bool:\n return self._op_bool('__gt__', other)",
"def __gt__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Greater(self, other)",
"def gt(self, other):\n\n return self._get(\"gt\", other, Bool)",
"def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other",
"def cmpGreaterThan(self, conn1, sql1, conn2, sql2):\n for row in self.get_query_results(conn1, sql1):\n res1 = row[0]\n for row in self.get_query_results(conn2, sql2):\n res2 = row[0]\n self.log.info(\n \"cmpGreaterThan:: task: {}, value1: {}, value2: {}\".format(\n self.task_id, str(res1), str(res2)\n )\n )\n\n if res1 <= res2:\n raise AirflowException(\n \"EtlValidation cmpGreaterThanError: query {}\".format(sql1 + \"<=\" + sql2)\n )",
"def greater(lhs, rhs):\n return _make.greater(lhs, rhs)",
"def less_than_or_equal(self) -> global___Expression:",
"def __gt__(self, *args):\n return _ida_hexrays.cnumber_t___gt__(self, *args)",
"def __gt__(self, other):\n return self.element() > other.element()",
"def __gt__(self, *args):\n return _ida_hexrays.cwhile_t___gt__(self, *args)",
"def _greater_than_or_equal_to_op(spec):",
"def _builtin_lt(arg1, arg2, engine=None, **kwdargs):\n check_mode((arg1, arg2), ['gg'], functor='<', **kwdargs)\n a_value = arg1.compute_value(engine.functions)\n b_value = arg2.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value < b_value",
"def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)"
] | [
"0.7429546",
"0.7223679",
"0.7069552",
"0.6982694",
"0.69523406",
"0.6771465",
"0.66648924",
"0.6652993",
"0.66101515",
"0.660004",
"0.65316814",
"0.6488499",
"0.6462836",
"0.645463",
"0.64420253",
"0.64400244",
"0.64123285",
"0.6383191",
"0.63439715",
"0.63205564",
"0.6292407",
"0.62400943",
"0.62293154",
"0.6227282",
"0.6204612",
"0.6186555",
"0.6157592",
"0.6149813",
"0.61450076",
"0.6143736"
] | 0.8108457 | 0 |
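A slightly larger sketch placing ``>`` in a WHERE clause; the ``t`` table and its columns are made up for illustration.

```python
# Minimal sketch: ">" inside a SELECT ... WHERE clause.
from sqlalchemy import column, select, table

t = table("t", column("a"), column("b"))
print(select(t).where(t.c.a > t.c.b))
# roughly: SELECT t.a, t.b FROM t WHERE t.a > t.b
```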
Implement the ``>>`` operator. Not used by SQLAlchemy core, this is provided for custom operator systems which want to use ``>>`` as an extension point. | def __rshift__(self, other: Any) -> ColumnOperators:
return self.operate(rshift, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __lshift__(self, other: Any) -> ColumnOperators:\n return self.operate(lshift, other)",
"def __rshift__(self, other):\n other.set_upstream(self)\n # return other so a >> b >> c works\n return other",
"def __and__(self, other):\n return self >> (lambda _: other)",
"def __rshift__(self, next: 'IO[TResult]') -> 'IO[TResult]':\n return self.bind(lambda _: next)",
"def __rmod__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mod, other)",
"def __le__(self, other: Any) -> ColumnOperators:\n return self.operate(le, other)",
"def test_rshift():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = operator.rshift(value, 1)\n num_a.value >>= 1\n assert num_a.value == new_value",
"def __ror__(self, other):\n return self._operation_or(other)",
"def __rlshift__(self, other):\r\n return NotImplemented",
"def __rlshift__(self, other):\r\n return NotImplemented",
"def stream(_) -> int:\n return 1 << 9",
"def stream(_) -> int:\n return 1 << 9",
"def __rshift__(self, fn):\n if self is Nothing:\n return Nothing\n else:\n v = self.right if self.is_right() else self.left\n fn = liftF(fn, self.__class__)\n return unlift(fn(v))",
"def __mod__(self, other: Any) -> ColumnOperators:\n return self.operate(mod, other)",
"def bitwise_rshift(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_rshift_op, other)",
"def __ror__(self, other):\n return whitespaces.CURRENT.normalize(other) | self",
"def bitwise_lshift(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_lshift_op, other)",
"def pipe(self, func, *args, **kwargs):\n return func(self, *args, **kwargs)",
"def test_lshift():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = operator.lshift(value, 1)\n num_a.value <<= 1\n assert num_a.value == new_value",
"def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)",
"def __rrshift__(self, other):\n if isinstance(other, Callable):\n return self @ other\n else:\n return self(other) # Function application",
"def __iter__(self,):\n done = False\n li = LogIterator(self.fd)\n while True:\n if li.peek() == FIELD_DELIMITER: break\n with stream_context(li) as data:\n yield JournaledOperation.load(iter(li))",
"def __lshift__(self, x):\n if isinstance(x, list):\n self.f.write(make_escape(*x))\n self.f.flush()\n return self\n\n if isinstance(x, str):\n self.f.write(x)\n self.f.flush()\n return self\n\n raise TypeError",
"def __lshift__(self, other):\r\n # TODO: extend to secret offset\r\n if not isinstance(other, int):\r\n return NotImplemented\r\n\r\n return runtime.mul(self, 1<<other)",
"def __lshift__(self, value):\n\t\tif isinstance(value, str):\n\t\t\tself.setState(value)\n\t\telse:\n\t\t\tself.execute(value)\n\t\treturn self",
"def __lt__(self, other: Any) -> ColumnOperators:\n return self.operate(lt, other)",
"def __rshift__(self, other):\r\n # TODO: extend to secret offset\r\n if not isinstance(other, int):\r\n return NotImplemented\r\n\r\n return self.__floordiv__(1<<other)",
"def lshift(self, attr):\n return self.set_child_and_return(shifter.lshift(self.statement, attr))",
"def wrap(self, stream_generator):\n for lineno, token, value in stream_generator:\n if token in ignored_tokens:\n continue\n elif token == tokens.OPERATOR:\n token = operators[value]\n elif token == tokens.INTEGER:\n value = int(value)\n elif token == tokens.FLOAT:\n value = float(value)\n elif token == tokens.STRING:\n value = value[1:-1]\n yield Token(lineno, token, value)",
"def __rshift__(self, other):\r\n return NotImplemented"
] | [
"0.6102553",
"0.58244914",
"0.5716298",
"0.5712464",
"0.5459851",
"0.5423245",
"0.542072",
"0.538608",
"0.53498346",
"0.53498346",
"0.5293768",
"0.5293768",
"0.52721554",
"0.52561367",
"0.5249288",
"0.52367735",
"0.52328104",
"0.51850885",
"0.5160269",
"0.5149386",
"0.5122719",
"0.5112629",
"0.5108939",
"0.51040953",
"0.5039217",
"0.50028443",
"0.49953517",
"0.49930036",
"0.4988068",
"0.4987536"
] | 0.610098 | 1 |
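Since Core assigns no SQL to ``>>`` by default, the hypothetical sketch below spells the operator out with ``op()``; a custom operator system would typically override ``__rshift__`` to do the same.

```python
# Hypothetical sketch: Core leaves ">>" to custom operator systems, so the
# operator string is given explicitly via op() here.
from sqlalchemy import column

print(column("a").op(">>")(column("b")))   # a >> b
```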
Implement the 'concat' operator. In a column context, produces the clause ``a || b``, or uses the ``concat()`` operator on MySQL. | def concat(self, other: Any) -> ColumnOperators:
return self.operate(concat_op, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _rconcat(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(concat_op, other)",
"def concat(cls, c1, c2, op):\r\n if c1.clause and c2.clause:\r\n return cls('({}) {} ({})'.format(c1.clause, op, c2.clause), c1.params + c2.params)\r\n elif c1.clause:\r\n return c1\r\n elif c2.clause:\r\n return c2\r\n else:\r\n return cls('', ())",
"def _concat_values(concat_columns, column_values, delimiter):\n # Use the order of values that we got from concat_columns def.\n values = [\n column_values[item] for item in concat_columns if item in column_values\n ]\n return delimiter.join(values) or None",
"def assemble_col(c1, c2):\n c1.extend(c2)\n return c1",
"def concat_columns(df: DataFrame, columnName_name: str, *columns: str, union_char: str = '_') -> DataFrame:\n return df.withColumn(columnName_name, concat_ws(f'{union_char}', *columns))",
"def concat(*args, sep=\"/\"):\n return sep.join(args)",
"def concat(a, b):\n return torch.cat((a, b), 1)",
"def group_concat(df, gr_cols, col_concat):\n\n df_out = (\n df\n .groupby(gr_cols)[col_concat]\n .apply(lambda x: ' '.join(x))\n .to_frame()\n .reset_index()\n )\n\n return df_out",
"def concat_cols(df, cols, delim):\n\n cols_str = [df[x].astype(str) for x in cols]\n\n return reduce(lambda a, b: a + delim + b, cols_str)",
"def FE_concatenate_multiple_columns(df, cols, filler=\" \", drop=True):\r\n df = df.copy(deep=True)\r\n df['combined'] = df[cols].apply(lambda row: filler.join(row.values.astype(str)), axis=1)\r\n if drop:\r\n df.drop(cols, axis=1, inplace=True)\r\n return df",
"def concat(xs, axis=1):\n return Concat(axis=axis)(*xs)",
"def concatenate_columns(params: List[str]) -> str:\n convert_columns_to_string = [f'string({col})' for col in params]\n\n return f\"concat({','.join(convert_columns_to_string)})\"",
"def test_evaluate_concat_expression(self):\n value = self.evaluate_common(\"concat('starts','with')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value == \"startswith\")\n value = self.evaluate_common(\"concat('3.1',concat('4','159'))\")\n self.assertTrue(value.value == \"3.14159\")\n try:\n value = self.evaluate_common(\"concat('3.14',1)\")\n self.fail(\"integer as parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"concat('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"concat('3.1','4','159')\")\n self.fail(\"3 parameters\")\n except odata.EvaluationError:\n pass",
"def concat(seq1, seq2):\n if type_tag(seq1) == type_tag(seq2):\n return seq1 + seq2\n else:\n types = (type_tag(seq1), type_tag(seq2))\n if types in concat.adders:\n return concat.adders[types](seq1, seq2)",
"def concat(str1: str, str2: str) -> str:\n return str1 + str2",
"def concat(str1: str, str2: str) -> str:\n return str1 + str2",
"def concat(str1: str, str2: str) -> str:\n return str1 + str2",
"def reduce_join(df, columns,sep='_'):\n assert len(columns) > 1\n slist = [df[x].astype(str) for x in columns]\n return reduce(lambda x, y: x + sep + y, slist[1:], slist[0])",
"def concat_pattern():\n pattern = is_tuple(None)\n pattern = is_op(\"concatenate\")(pattern)\n\n return pattern",
"def concat(values, sep=', '):\n concat_str = None\n try:\n concat_str = sep.join([str(v) for v in values if not is_empty(v)])\n except Exception as e:\n pass\n return concat_str",
"def concat(vars, axis=-1):\n return concatenate(vars, axis)",
"def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)",
"def implode(self, column, glue=''):\n return glue.join(self.lists(column))",
"def _rewrite_concat(self, node: saldag.Concat):\n\n # Copy over columns from existing relation\n out_rel_cols = node.out_rel.columns\n\n # Combine per-column collusion sets\n for idx, col in enumerate(out_rel_cols):\n columns_at_idx = [in_rel.columns[idx] for in_rel in node.get_in_rels()]\n col.coll_sets = utils.coll_sets_from_columns(columns_at_idx)",
"def join_string(part1, part2, concatenation_string = 'AND', seperator=' '):\n\n if part1 == '':\n return part2\n\n elif part2 == '':\n return part1\n\n\n if part1[-1] == seperator:\n sep1 = ''\n else:\n sep1 = seperator\n\n\n if part2[0] == seperator:\n sep2 = ''\n else:\n sep2 = ' '\n\n\n return part1 + sep1 + concatenation_string + sep2 + part2",
"def concat_df(*args, **kwargs):\n return pd.concat(*args, **kwargs)",
"def _concat(self, *args, **kwargs):\n values = list(args)\n output = []\n for value in values:\n if not isinstance(value, (str, basestring)):\n value = unicode(value)\n else:\n value = unicode(value)\n value = value.strip()\n output.append(value)\n output = kwargs[\"delimiter\"].join(output)\n output = unicode(output)\n return output",
"def anchor():\n return 'concat'",
"def _concat_rows_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"ConcatRowsNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.ConcatRowsNode\"\n )\n common_columns = [c for c in op.columns_produced() if c != op.id_column]\n inputs = [self._compose_polars_ops(s, data_map=data_map) for s in op.sources]\n assert len(inputs) == 2\n inputs = [input_i.select(common_columns) for input_i in inputs] # get columns in same order\n if op.id_column is not None:\n inputs[0] = inputs[0].with_columns([_build_lit(op.a_name).alias(op.id_column)])\n inputs[1] = inputs[1].with_columns([_build_lit(op.b_name).alias(op.id_column)])\n res = pl.concat(inputs, how=\"vertical\")\n return res",
"def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)"
] | [
"0.7569585",
"0.6719773",
"0.66358083",
"0.618569",
"0.612748",
"0.6091729",
"0.58711517",
"0.5804198",
"0.57980394",
"0.57886857",
"0.5775912",
"0.56869364",
"0.5638573",
"0.5622331",
"0.55752325",
"0.55752325",
"0.55752325",
"0.55685467",
"0.55000454",
"0.54974973",
"0.54491156",
"0.5441292",
"0.54399073",
"0.54207665",
"0.5410734",
"0.54084754",
"0.53974074",
"0.53923",
"0.53814626",
"0.5351323"
] | 0.78436726 | 0 |
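A short sketch of ``concat()``; the ``||`` shown is the default rendering, while MySQL compiles the same expression to its ``concat()`` function, as the row above says.

```python
# Minimal sketch: concat() under the default compiler.
from sqlalchemy import column

print(column("a").concat(column("b")))   # a || b
```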
Implement an 'rconcat' operator. This is for internal use at the moment. | def _rconcat(self, other: Any) -> ColumnOperators:
return self.reverse_operate(concat_op, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __radd__(self, left_arr):\n concat_arr = left_arr.copy() # Create new instance to return\n concat_arr.extend(self)\n return concat_arr",
"def concat(self, other: Any) -> ColumnOperators:\n return self.operate(concat_op, other)",
"def concat_all(self):\n return self.merge(1)",
"def concatenate_data():",
"def assemble_row(r1, r2):\n r1.extend(r2)\n return r1",
"def concat(xs, axis=1):\n return Concat(axis=axis)(*xs)",
"def concat(seq1, seq2):\n if type_tag(seq1) == type_tag(seq2):\n return seq1 + seq2\n else:\n types = (type_tag(seq1), type_tag(seq2))\n if types in concat.adders:\n return concat.adders[types](seq1, seq2)",
"def _rewrite_concat(self, node: saldag.Concat):\n\n if node.is_lower_boundary():\n\n out_stored_with = node.out_rel.stored_with\n for par in node.parents:\n if not par.is_root():\n par.out_rel.stored_with = copy.copy(out_stored_with)\n node.is_mpc = False",
"def __add__(self, right_arr):\n concat_arr = self.copy() # Create new instance to return\n concat_arr.extend(right_arr)\n return concat_arr",
"def __rshift__(self, other):\n other.set_upstream(self)\n # return other so a >> b >> c works\n return other",
"def concat_immediate(self, other: \"Linked[T]\") -> None:\n self.forward.concat(other)",
"def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)",
"def concatv(*seqs):\n return concat(seqs)",
"def concat(self, other: \"Linked[T]\") -> None:\n first_self = self\n last_self = self.backward\n\n first_other = other\n last_other = other.backward\n # self ++ other\n # consider last_self and first_other\n last_self._join(first_other)\n last_other._join(first_self)",
"def _concat(self, partial: Optional[O], outputs: O):\n raise NotImplementedError",
"def __radd__(self, other):\n return asarray(add(numpy.asarray(other), self))",
"def concat(a, b):\n return torch.cat((a, b), 1)",
"def _concatenate_inner(self, chunks, direction):\n tmp_bucket = []\n source_chunks = chunks if direction else chunks[::-1]\n target_chunks = ChunkList()\n for chunk in source_chunks:\n if (\n # if the chunk has matched dependency, do concatenation.\n chunk.dependency == direction or\n # if the chunk is SPACE, concatenate to the previous chunk.\n (direction == False and chunk.is_space())\n ):\n tmp_bucket.append(chunk)\n continue\n tmp_bucket.append(chunk)\n if not direction: tmp_bucket = tmp_bucket[::-1]\n new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])\n chunk.update_word(new_word)\n target_chunks.append(chunk)\n tmp_bucket = []\n if tmp_bucket: target_chunks += tmp_bucket\n return target_chunks if direction else target_chunks[::-1]",
"def concat(cls, c1, c2, op):\r\n if c1.clause and c2.clause:\r\n return cls('({}) {} ({})'.format(c1.clause, op, c2.clause), c1.params + c2.params)\r\n elif c1.clause:\r\n return c1\r\n elif c2.clause:\r\n return c2\r\n else:\r\n return cls('', ())",
"def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)",
"def __concat(self, *args):\n \n if isinstance(args[0], list):\n items = args[0]\n else:\n items = list(args)\n\n items.insert(0, self)\n return Observable.concat(items)",
"def __add__(self, other):\n return self.concatenate(other)",
"def __add__(self, other):\n return self.concatenate(other)",
"def __rsub__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(sub, other)",
"def concatenate(self, other):\n return as_stream_iterator(_flatten_stream_from_reversed_list([other, self]))",
"def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)",
"def __radd__(self,that):\n return self.__opExpand2(that,np.add)",
"def reassemble(self, seq, buf):\n # XXX - fastpath properly sequenced data.\n if seq == self.cur and not self.q:\n self.cur += len(buf)\n return buf\n # XXX - favor newer data\n heapq.heappush(self.q, (seq, buf))\n l = []\n while self.q:\n if self.q[0][0] <= self.cur:\n seq, buf = heapq.heappop(self.q)\n if seq != self.cur:\n # Reverse overlap. Trim left (empty string on rexmit)...\n buf = buf[self.cur-seq:]\n l.append(buf)\n self.cur += len(buf)\n else:\n break\n return ''.join(l)",
"def brepalgo_ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)",
"def mconcat(a, b):\r\n if a is None:\r\n return b\r\n if b is None:\r\n return a\r\n for key in b.keyset:\r\n value=get(b,key)\r\n put(a,key,value)\r\n return a"
] | [
"0.698876",
"0.65194875",
"0.63239926",
"0.6302908",
"0.62483746",
"0.6195619",
"0.6139364",
"0.6137772",
"0.61146563",
"0.6076295",
"0.60584617",
"0.5972496",
"0.59362096",
"0.591057",
"0.59061617",
"0.58875877",
"0.5878507",
"0.5878187",
"0.5867767",
"0.5866643",
"0.58657575",
"0.5839344",
"0.5839344",
"0.58383",
"0.58281434",
"0.58111763",
"0.57923937",
"0.57497686",
"0.5733134",
"0.57304156"
] | 0.83142644 | 0 |
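``_rconcat`` is internal, so the sketch below only exercises the reverse concatenation path indirectly, by adding a Python string on the left of a ``String``-typed column; the exact bound-parameter name in the comment is an assumption.

```python
# Minimal sketch: a plain string on the left goes through the reverse operators
# (__radd__), which for String-typed expressions produces a concatenation.
from sqlalchemy import String, column

expr = "prefix-" + column("a", String)
print(expr)   # roughly: :a_1 || a   (the literal becomes a bound parameter)
```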
r"""Implement the ``like`` operator. | def like(
self, other: Any, escape: Optional[str] = None
) -> ColumnOperators:
return self.operate(like_op, other, escape=escape) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def like(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"LIKE\", __key, __and, [(k, f\"%{_escape_like(v)}%\") for k, v in kwargs.items()])",
"def ilike(\n self, other: Any, escape: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(ilike_op, other, escape=escape)",
"def postfix(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"LIKE\", __key, __and, [(k, f\"%{_escape_like(v)}\") for k, v in kwargs.items()])",
"def prefix(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"LIKE\", __key, __and, [(k, f\"{_escape_like(v)}%\") for k, v in kwargs.items()])",
"def is_like(self, q):\n q = q.lower()\n return q in self.title.lower() or q in self.url.lower() or q in self.media_type.lower()",
"def ILIKE(self, first, second, escape=None):\n if isinstance(second, Expression):\n second = self.expand(second, 'string')\n else:\n second = self.expand(second, 'string').lower()\n if escape is None:\n escape = '\\\\'\n second = second.replace(escape, escape*2)\n if second.startswith(\"n'\"):\n second = \"N'\" + second[2:]\n return \"(LOWER(%s) LIKE %s ESCAPE '%s')\" % (self.expand(first),\n second, escape)",
"def ILIKE(self, first, second, escape=None):\n if isinstance(second, Expression):\n second = self.expand(second, 'string')\n else:\n second = self.expand(second, 'string').lower()\n if escape is None:\n escape = '\\\\'\n second = second.replace(escape, escape*2)\n if second.startswith(\"n'\"):\n second = \"N'\" + second[2:]\n return \"(LOWER(%s) LIKE %s ESCAPE '%s')\" % (self.expand(first),\n second, escape)",
"def LIKE(self, first, second, escape=None):\n if isinstance(second, Expression):\n second = self.expand(second, 'string')\n else:\n second = self.expand(second, 'string')\n if escape is None:\n escape = '\\\\'\n second = second.replace(escape, escape * 2)\n return \"(%s LIKE %s ESCAPE '%s')\" % (self.expand(first),\n second, escape)",
"def ILIKE(self, first, second, escape=None):\n if isinstance(second, Expression):\n second = self.expand(second, 'string')\n else:\n second = self.expand(second, 'string').lower()\n if escape is None:\n escape = '\\\\'\n second = second.replace(escape, escape*2)\n return \"(LOWER(%s) LIKE %s ESCAPE '%s')\" % (self.expand(first),\n second, escape)",
"def LIKE(self, first, second, escape=None):\n if isinstance(second, Expression):\n second = self.expand(second, 'string')\n else:\n second = self.expand(second, 'string')\n if escape is None:\n escape = '\\\\'\n second = second.replace(escape, escape * 2)\n if first.type not in ('string', 'text', 'json'):\n return \"(%s LIKE %s ESCAPE '%s')\" % (\n self.CAST(self.expand(first), 'CHAR(%s)' % first.length),\n second, escape\n )\n else:\n return \"(%s LIKE %s ESCAPE '%s')\" % (self.expand(first), second, escape)",
"def Like(text, pattern):\n return fnmatch.fnmatch(text, pattern)",
"def test_apply_filter_like(app):\n with app.app_context():\n users = User.query\n users = apply_filter(users, User,\n {'column': 'username', 'type': 'like',\n 'value': 'user'})\n assert str(users.whereclause) == 'users.username LIKE :username_1'",
"def post_like(self, entry, **args):\n args.update(entry=entry)\n return self.fetch(\"/like\", post_args=args)",
"def not_like(\n self, other: Any, escape: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(not_like_op, other, escape=escape)",
"def ILIKE(self, first, second, escape=None):\n if isinstance(second, Expression):\n second = self.expand(second, 'string')\n else:\n second = self.expand(second, 'string')\n if escape is None:\n escape = '\\\\'\n second = second.replace(escape, escape * 2)\n if first.type not in ('string', 'text', 'json', 'list:string'):\n return \"(%s ILIKE %s ESCAPE '%s')\" % (\n self.CAST(self.expand(first), 'CHAR(%s)' % first.length),\n second, escape\n )\n else:\n return \"(%s ILIKE %s ESCAPE '%s')\" % (self.expand(first), second, escape)",
"def icontains(self, other):",
"def is_simple (self, phrase):\r\n\r\n return not self.contains(phrase,'()&|>#')",
"def prep_for_like_query(self, x):\n # http://msdn2.microsoft.com/en-us/library/ms179859.aspx\n return smart_text(x).replace('%', '\\%').replace('_', '\\_')",
"def from_like_context(ctx, param, value):\n if ctx.obj and ctx.obj.get(\"like\") and (value == \"like\" or ctx.obj.get(\"all_like\")):\n return ctx.obj[\"like\"][param.name]\n else:\n return None",
"def from_like_context(ctx, param, value):\n if ctx.obj and ctx.obj.get('like') and (\n value == 'like' or ctx.obj.get('all_like')):\n return ctx.obj['like'][param.name]\n else:\n return None",
"def test_wildcards_both_inside_and_outside_literal(self):\n qs = '\"Fo? t*\" said the *'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped,\n r'\"Fo\\? t\\*\" said the *',\n \"Wildcards in literal should be escaped\",\n )\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=r'\"Fo\\? t\\*\" said the *')),\n \"Wildcard Q object should be generated\",\n )",
"def test_findlike():\n parser = CmdParser([findlike])\n out = parser.parse(\"findlike . -name foo\")\n assert out[0].arguments[0].present == True\n assert out[0].arguments[0].value == \"foo\"\n assert out[0].arguments[1].present == True\n assert out[0].arguments[1].value == \".\"\n assert out[0].as_shell_string() == \"findlike . -name foo\"",
"def convert_to_like(column_value: str) -> str:\n like_query = \"%\".join(column_value)\n like_query = \"%\" + like_query + \"%\"\n return like_query",
"def more_like_text(text, klass):\n back = connections['default'].get_backend()\n\n if hasattr(back, 'conn'):\n query = {'query': {\n 'filtered': {\n 'query': {\n 'fuzzy_like_this': {\n 'like_text': text\n }\n },\n 'filter': {\n 'bool': {\n 'must': {\n 'term': {'django_ct': 'idea.idea'}\n }\n }\n }\n }\n }\n\n }\n results = back.conn.search(query)\n return back._process_results(results)['results']\n else:\n return []",
"def test_filter_users_like(app, add_ten_users):\n with app.app_context():\n add_ten_users()\n users = User.query\n users = apply_filter(users, User,\n {'column': 'username', 'type': 'like',\n 'value': '%name_1%'})\n result = users.all()\n assert len(result) == 1",
"def all_handler(ctx, param, value):\n if ctx.obj and ctx.obj.get('like') and value is not None:\n ctx.obj['all_like'] = value\n value = ctx.obj.get('like')\n return value",
"def like(self):\n self.like_count = self.like_count + 1 if self.like_count else 1",
"def test_match_any_wildcard_in_literal(self):\n qs = '\"Foo t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Foo t\\*\"', \"Wildcard should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Foo t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )",
"def icontains(self, other: Any, **kw: Any) -> ColumnOperators:\n return self.operate(icontains_op, other, **kw)",
"def advanced_search(self, pattern):\n pass"
] | [
"0.6908183",
"0.6637425",
"0.64561987",
"0.62338483",
"0.61270046",
"0.6034408",
"0.6034408",
"0.60227627",
"0.59732383",
"0.59676987",
"0.591182",
"0.5893983",
"0.57473695",
"0.5668938",
"0.56546885",
"0.5596868",
"0.5439247",
"0.5408136",
"0.5342852",
"0.5329303",
"0.53040093",
"0.5285632",
"0.5269149",
"0.52315605",
"0.5225596",
"0.52202815",
"0.52065736",
"0.5206204",
"0.51786184",
"0.5175472"
] | 0.72407717 | 0 |
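A small sketch of ``like()`` with and without the ``escape`` keyword; the parameter names in the comments follow the default anonymous naming.

```python
# Minimal sketch: LIKE, then LIKE ... ESCAPE.
from sqlalchemy import column

print(column("a").like("%foo%"))               # a LIKE :a_1
print(column("a").like("/%foo%", escape="/"))  # a LIKE :a_1 ESCAPE '/'
```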
Produce a bitwise XOR operation, typically via the ``^`` operator, or ``#`` for PostgreSQL. | def bitwise_xor(self, other: Any) -> ColumnOperators:
return self.operate(bitwise_xor_op, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xor(a, b):",
"def logical_xor(a, b):\n return bool(a) ^ bool(b)",
"def bitwise_xor(lhs, rhs):\n return _make.bitwise_xor(lhs, rhs)",
"def logical_xor(lhs, rhs):\n return _make.logical_xor(lhs, rhs)",
"def bitwise_xor(a, b):\n\n result = \"\"\n for i in range(0, len(a)):\n result += str(int(a[i]) ^ int(b[i]))\n return result",
"def xor(self, *args):\n return Xor(self, *args)",
"def _xor_str(self,s,t):\n \treturn \"\".join(chr(ord(a)^ord(b)) for a,b in zip(s,t))",
"def __xor__(self, obj):\n return self._boolean_operation(obj, operator.__xor__)",
"def __xor__(self, other):\r\n if self.field.characteristic == 2:\r\n return runtime.xor(self, other)\r\n\r\n return super().__xor__(other)",
"def sxor(s1, s2):\n return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(s1, s2))",
"def __xor__(self, other):\r\n return self + other - 2 * self * other",
"def __xor__(self, other):\n a, b = Trits.match_length(self, other)\n return Trits([x ^ y for x, y in zip(a, b)])",
"def _XOR(integer1, integer2):\n _checkInt(integer1, minvalue=0, description='integer1')\n _checkInt(integer2, minvalue=0, description='integer2')\n\n return integer1 ^ integer2",
"def xor_(l1, l2):\n return np.bitwise_xor(l1,l2)",
"def __rxor__(self, other):\n return self.runtime.xor(self, other)",
"def XOR(string1, string2):\r\n return \"\".join(chr(ord(s1) ^ ord(s2)) for s1, s2 in zip(string1, string2))",
"def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)",
"def f_xor(*args):\n f = Xor(*args).factor()\n return f if f in B else f.factor()",
"def _blockXOR(a, b):\n\tif len(a) != len(b):\n\t\traise ValueError(\"expected to strings with same length\")\n\tres = []\n\tfor i in xrange(len(a)):\n\t\tres.append(chr(ord(a[i]) ^ ord(b[i])))\n\treturn \"\".join(res)",
"def __xor__(self, other):\n return MyCustomNumber(self.value ^ other.value)",
"def xor_inplace(a,b):",
"def bitwise_xor(self):\n register = self.return_middle_registers(self.opcode)\n self.registers[register[0]] = (\n self.registers[register[0]] ^ self.registers[register[1]])\n logger.info(\"Bitwise XOR on V{} and V{} for {}\".format(\n register[0],\n register[1],\n self.registers[register[0]]))",
"def xor_bytes(a, b):\n return bytes(i^j for i, j in zip(a, b))",
"def xor(a: bool, b: bool) -> bool:\n return (a and not b) or (not a and b)",
"def xor_strings(s,t):\n return \"\".join(chr(ord(a)^ord(b)) for a,b in zip(s,t))",
"def xor_bytestring(a_b, b_b):\n return bytes(a ^ b for a, b in zip(a_b, b_b))",
"def b64_xor_crypt(self, data, key, mode):\n if mode == 'dec':\n data = base64.b64decode(data)\n elif mode == 'enc':\n data = base64.b64encode(data)\n data = data.decode()\n\n return ''.join(chr(ord(str(a)) ^ ord(str(b))) for (a, b) in zip(data, cycle(key)))",
"def bXor(byte_string_1,byte_string_2):\n return bytes([b1 ^ b2 for b1, b2 in zip(byte_string_1, byte_string_2)])",
"def fixed_xor(a, b):\n if len(a) != len(b):\n raise ValueError(f\"Expected a and b to be the same length; got {len(a)} vs {len(b)}\")\n\n return bytes([a[i] ^ b[i] for i in range(len(a))])",
"def xor(data1=None, data2=None):\n\n return bytearray(a ^ b for a, b in zip(*map(bytearray, [data1, data2])))"
] | [
"0.80635536",
"0.76373774",
"0.7607591",
"0.7467922",
"0.7443504",
"0.7433648",
"0.7301684",
"0.7291576",
"0.72389627",
"0.7238131",
"0.7196956",
"0.71916723",
"0.71875733",
"0.71541244",
"0.7134191",
"0.71074146",
"0.7057533",
"0.7027421",
"0.7010324",
"0.6973761",
"0.6957928",
"0.6957607",
"0.6915681",
"0.6908636",
"0.69054806",
"0.6896148",
"0.6894301",
"0.6884726",
"0.6874419",
"0.6834203"
] | 0.7803131 | 1 |
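A minimal usage sketch for the bitwise_xor method in the row above, assuming recent SQLAlchemy (2.0+, where the bitwise operators were added); the "flags" column name and the literal value are illustrative only, not part of the dataset:

    from sqlalchemy import column
    from sqlalchemy.dialects import postgresql

    expr = column("flags").bitwise_xor(3)
    print(expr)                                        # roughly: flags ^ :flags_1 (default dialect)
    print(expr.compile(dialect=postgresql.dialect()))  # PostgreSQL renders the # operator instead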
Produce a bitwise NOT operation, typically via the ``~`` operator. | def bitwise_not(self) -> ColumnOperators:
return self.operate(bitwise_not_op) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bitwise_not(data):\n return _make.bitwise_not(data)",
"def logical_not(data):\n return _make.logical_not(data)",
"def convert_logical_not(node, **kwargs):\n return create_basic_op_node('Not', node, kwargs)",
"def logical_not(x, f=None):\n return _cur_framework(x, f=f).logical_not(x)",
"def convert_logical_not(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0)\n g.add_node(op.output(\"Out\")[0], out)",
"def _logical_not(x):\n x_ = _static_value(x)\n if x_ is None:\n return math_ops.logical_not(x)\n return constant_op.constant(np.logical_not(x_))",
"def cnot(control: QubitInput, target: QubitInput) -> Instruction:\n return Instruction(CNot(), target=[control, target])",
"def not_(bits: int) -> int:\n # The `& ALL_` is necessary so python doesn't treat bits as 2's compliment\n return ~bits & ALL_",
"def is_not(self, other: Any) -> ColumnOperators:\n return self.operate(is_not, other)",
"def bitwise_not(self, destination):\n value = bytearray()\n\n value.append(0xf7) # F7 /2 \tNOT r/m32\n rm = get_register_encoding(destination)\n reg = 2 # F7 /2 \tNOT r/m32\n # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and\n # REG destination\n\n mod = 0b11\n modr_byte = (mod << 6) + (reg << 3) + (rm << 0)\n value.append(modr_byte)\n\n return value",
"def ccnot(control1: QubitInput, control2: QubitInput, target: QubitInput) -> Instruction:\n return Instruction(CCNot(), target=[control1, control2, target])",
"def negate(x):\n return x ^ 1",
"def negate_gate(wordlen, input='x', output='~x'):\n neg = bitwise_negate(wordlen, input, \"tmp\")\n inc = inc_gate(wordlen, \"tmp\", output)\n return neg >> inc",
"def __xor__(self, obj):\n return self._boolean_operation(obj, operator.__xor__)",
"def c_not(control_qbit, not_qbit):\n return (\n hadamard(not_qbit) |\n c_phase(control_qbit, not_qbit, 0.5) |\n hadamard(not_qbit))",
"def CNOT(self, qubit_expr):\n self.apply_gate_operation(cirq.ops.CNOT, qubit_expr)",
"def logical_xor(lhs, rhs):\n return _make.logical_xor(lhs, rhs)",
"def logical_xor(a, b):\n return bool(a) ^ bool(b)",
"def to_implies_not(formula: Formula) -> Formula:\r\n # Task 3.6c\r\n convert_and_op_1 = to_not_and(formula)\r\n and_formula_1 = Formula('->', Formula('p'), Formula('~', Formula('q')))\r\n and_formula_2 = Formula('->', Formula('~', Formula('p')), Formula('q'))\r\n\r\n map_and = {'&': Formula('~', Formula('->', and_formula_2, and_formula_1))}\r\n return convert_and_op_1.substitute_operators(map_and)",
"def RewriteNOT(self, expr):\n return None",
"def bitwise_xor(lhs, rhs):\n return _make.bitwise_xor(lhs, rhs)",
"def negate(self):\n self.formula = '!(' + self.formula + ')'",
"def to_nand(formula: Formula) -> Formula:\r\n # Task 3.6b\r\n not_in_nand = Formula('-&', Formula('p'), Formula('p'))\r\n and_in_nand_1 = Formula('-&', Formula('p'), Formula('q'))\r\n and_in_nand_2 = Formula('-&', and_in_nand_1, and_in_nand_1)\r\n map_not_and = {'~': not_in_nand, '&': and_in_nand_2}\r\n formula_not_and = to_not_and(formula)\r\n return formula_not_and.substitute_operators(map_not_and)",
"def negated(self):\n op = QueryCompound.Op.And if self.__op == QueryCompound.Op.Or else QueryCompound.Op.Or\n return QueryCompound(*self.__queries, op=op)",
"def test_bit_not(self):\n ops = [bitwise_operations.bit_not(self.five_255_bin, 0, 40, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 5)\n assert bins[self.five_255_bin] == expected_result",
"def NOT(expression):\n return {'$not': [expression]}",
"def to_not_and_or(formula: Formula) -> Formula:\r\n # Task 3.5\r\n\r\n map_operators = {'->': Formula.parse('(~p|q)'),\r\n '+': Formula.parse('((p&~q)|(~p&q))'),\r\n '<->': Formula.parse('~((p&~q)|(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~(p|q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)')}\r\n return formula.substitute_operators(map_operators)",
"def to_not_and(formula: Formula) -> Formula:\r\n # Task 3.6a\r\n map_operators = {'->': Formula.parse('~(~~p&~q)'),\r\n '+': Formula.parse('~(~(p&~q)&~(~p&q))'),\r\n '<->': Formula.parse('~~(~(p&~q)&~(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~~(~p&~q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)'),\r\n '|': Formula.parse('~(~p&~q)')}\r\n return formula.substitute_operators(map_operators)",
"def _negation_op(spec, expression):",
"def NOT(r):\n return lambda l, i: not r(l, i)"
] | [
"0.8585514",
"0.8135557",
"0.78399366",
"0.77473474",
"0.75731504",
"0.7467479",
"0.7228365",
"0.71585476",
"0.7043616",
"0.7034654",
"0.70159453",
"0.6926055",
"0.6918478",
"0.68926644",
"0.6876603",
"0.6829226",
"0.68107647",
"0.67881817",
"0.67658967",
"0.67029274",
"0.6675447",
"0.6674448",
"0.66261804",
"0.6590438",
"0.65883285",
"0.65362054",
"0.65262055",
"0.6518433",
"0.6511355",
"0.6500766"
] | 0.8449333 | 1 |
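A hedged sketch of the unary bitwise_not form from the row above, again assuming SQLAlchemy 2.0+ and a made-up "mask" column:

    from sqlalchemy import column

    print(column("mask").bitwise_not())   # roughly: ~mask on the default dialect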
Produce a bitwise LSHIFT operation, typically via the ``<<`` operator. | def bitwise_lshift(self, other: Any) -> ColumnOperators:
return self.operate(bitwise_lshift_op, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lshift(self, value):\n return self.clone().lshift_(value)",
"def __lshift__(self, other: Any) -> ColumnOperators:\n return self.operate(lshift, other)",
"def lshift(self):\n self.lcd_byte(0x18, LCD_CMD)",
"def test_lshift():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = operator.lshift(value, 1)\n num_a.value <<= 1\n assert num_a.value == new_value",
"def __lshift__(return_spec, argument_spec):\n return return_spec.fam.c_lshift(return_spec, argument_spec)",
"def __lshift__(self, other):\r\n # TODO: extend to secret offset\r\n if not isinstance(other, int):\r\n return NotImplemented\r\n\r\n return runtime.mul(self, 1<<other)",
"def lshift(self, count):\n self._c = self._c[count:] + (bitarray('0') * count)",
"def test_bit_lshift_wrap(self):\n ops = [bitwise_operations.bit_lshift(self.test_bin_ones, 0, 40, 8, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([1] * 4 + [0])\n assert bins[self.test_bin_ones] == expected_result",
"def __rlshift__(self, *args):\n return _libsbml.string___rlshift__(self, *args)",
"def lshift_(self, value):\n assert isinstance(value, int), \"lshift must take an integer argument.\"\n self.share <<= value\n return self",
"def leftshift(x, c):\n return x << c",
"def test_bit_lshift(self):\n ops = [bitwise_operations.bit_lshift(self.test_bin_ones, 0, 8, 3, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([8] * 1 + [1] * 4)\n assert bins[self.test_bin_ones] == expected_result",
"def lrshift(val, n) -> np.int64:\n return (val % (1 << 64)) >> n",
"def left_shift(key,shift):\n if shift > len(key):\n shift = shift % len(key)\n return key[shift:] + key[:shift]",
"def test_bit_lshift_across_bytes(self):\n ops = [bitwise_operations.bit_lshift(self.test_bin_ones, 4, 12, 3, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([8] * 2 + [1] * 3)\n assert bins[self.test_bin_ones] == expected_result",
"def right_shift(key,shift):\n if shift > len(key):\n shift = shift % len(key)\n return key[-shift:] + key[:-shift]",
"def RotL_64(x, N):\n #return (x << np.uint64(N & 63)) | (x >> np.uint64((64-N) & 63))\n return(np.left_shift(x, (N & 63), dtype=np.uint64) |\n np.right_shift(x, ((64-N) & 63), dtype=np.uint64))",
"def rotl(x, count):\n ret = 0\n for i in range(64):\n bit = (x >> i) & 1\n ret |= bit << ((i + count) % 64)\n return ret",
"def lshift(self, attr):\n return self.set_child_and_return(shifter.lshift(self.statement, attr))",
"def __lshift__(self,g):\r\n\t\t\r\n\t\treturn self.substitute(*g)",
"def __lshift__(self, other) -> 'MultiVector':\n return self.lc(other)",
"def __lshift__(self, other):\r\n return NotImplemented",
"def right_shift(lhs, rhs):\n return _make.right_shift(lhs, rhs)",
"def shift_left_bit_length(x: int) -> int:\n return 1 << (x - 1).bit_length()",
"def left_shift(lhs, rhs):\n return _make.left_shift(lhs, rhs)",
"def shift_bits(x, k):\n if (k >= 0):\n return x << k\n else:\n return x >> -k",
"def BitShift(n, shift):\n\n if shift > 0: #Right shift\n if n[0] == \"0\":\n n_ = \"\".join([\"0\"] * shift) + n\n else:\n n_ = \"\".join([\"1\"] * shift) + n\n return n_[:len(n)]\n else:\n n_ = n + \"\".join([\"0\"] * (-shift))\n return n_[-len(n):]",
"def test_bit_lshift_offset_out_of_range(self):\n ops = [bitwise_operations.bit_lshift(self.test_bin_ones, 41, 8, 1, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)",
"def shl_fix(data, i): \n data = str(data)\n try:\n d = int(data)\n return c_hex(d >> -i if i < 0 else d << i) \n except:\n if i == 0:\n return \"({0})\".format(data)\n if i > 0:\n return \"(({0}) << {1})\".format(data, i)\n if i < 0:\n return \"(({0}) >> {1})\".format(data, -i)",
"def test_bit_lshift_bit_size_too_large(self):\n ops = [bitwise_operations.bit_lshift(self.test_bin_ones, 0, 41, 1, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)"
] | [
"0.7053611",
"0.69038886",
"0.690206",
"0.6857887",
"0.6747391",
"0.67373693",
"0.6708853",
"0.6699808",
"0.6639052",
"0.65725446",
"0.64468867",
"0.6406199",
"0.63805234",
"0.6204717",
"0.61922",
"0.61788565",
"0.5997652",
"0.5979469",
"0.5960854",
"0.59601706",
"0.5958869",
"0.59561944",
"0.5946668",
"0.5924072",
"0.5916839",
"0.59007394",
"0.589632",
"0.58831567",
"0.5838611",
"0.5730876"
] | 0.7435746 | 0 |
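Illustrative only, under the same SQLAlchemy 2.0+ assumption; "n" is a hypothetical column used to show the left-shift rendering:

    from sqlalchemy import column

    print(column("n").bitwise_lshift(2))   # roughly: n << :n_1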
Produce a bitwise RSHIFT operation, typically via the ``>>`` operator. | def bitwise_rshift(self, other: Any) -> ColumnOperators:
return self.operate(bitwise_rshift_op, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def right_shift(lhs, rhs):\n return _make.right_shift(lhs, rhs)",
"def test_rshift():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = operator.rshift(value, 1)\n num_a.value >>= 1\n assert num_a.value == new_value",
"def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)",
"def rshift(self, value):\n return self.clone().rshift_(value)",
"def rshift_(self, value):\n assert isinstance(value, int), \"rshift must take an integer argument.\"\n self.share >>= value\n return self",
"def right_shift(key,shift):\n if shift > len(key):\n shift = shift % len(key)\n return key[-shift:] + key[:-shift]",
"def __rshift__(self, other):\r\n # TODO: extend to secret offset\r\n if not isinstance(other, int):\r\n return NotImplemented\r\n\r\n return self.__floordiv__(1<<other)",
"def rshift(val, n):\n return (val % 0x100000000) >> n",
"def rshift(val, n):\n return (val % 0x100000000) >> n",
"def test_bit_rshift_wrap(self):\n ops = [bitwise_operations.bit_rshift(self.test_bin_ones, 0, 40, 8, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] + [1] * 4)\n assert bins[self.test_bin_ones] == expected_result",
"def __rlshift__(self, *args):\n return _libsbml.string___rlshift__(self, *args)",
"def rshift(self):\n self.lcd_byte(0x1C, LCD_CMD)",
"def rshift(self, count):\n self._c = (bitarray('0') * count) + self._c[:-count]",
"def test_bit_rshift(self):\n ops = [bitwise_operations.bit_rshift(self.count_bin, 8, 8, 3, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([1] * 1 + [10] * 1 + [86] * 1 + [255] * 1 + [3] * 1)\n assert bins[self.count_bin] == expected_result",
"def right_shift(self):\n register = (self.opcode & 0xFFF) >> 8\n bits = self.registers[register]\n \"\"\"if bits & 0b1 == 1:\n self.registers[0xF] = 1\n else:\n self.registers[0xF] = 0\n \"\"\"\n self.registers[0xF] = bits & 0b1\n self.registers[register] = self.registers[register] >> 1\n logger.info(\"Shifted register V{} 1 bit to the right got {}\".format(\n register,\n hex(self.registers[register])))",
"def __rshift__(self, other):\n other.set_upstream(self)\n # return other so a >> b >> c works\n return other",
"def lrshift(val, n) -> np.int64:\n return (val % (1 << 64)) >> n",
"def rshift(val: int, n: int) -> int:\n return (val % 0x100000000) >> n",
"def test_bit_rshift_across_bytes(self):\n ops = [bitwise_operations.bit_rshift(self.test_bin_ones, 4, 16, 3, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] + [32] + [33] + [1] * 2)\n assert bins[self.test_bin_ones] == expected_result",
"def __rshift__(self, fn):\n if self is Nothing:\n return Nothing\n else:\n v = self.right if self.is_right() else self.left\n fn = liftF(fn, self.__class__)\n return unlift(fn(v))",
"def __rlshift__(self, other):\r\n return NotImplemented",
"def __rlshift__(self, other):\r\n return NotImplemented",
"def _rightshift(inputInteger):\n _checkInt(inputInteger, minvalue=0)\n\n shifted = inputInteger >> 1\n carrybit = inputInteger & 1\n return shifted, carrybit",
"def right_shift_quirk(self):\n register = self.return_middle_registers(self.opcode)\n bits = self.registers[register[1]]\n self.registers[0xF] = bits & 0b1\n self.registers[register[0]] = self.registers[register[1]] >> 1\n logger.info(\"Shifted register V{} to the right into V{}({})\".format(\n register[1],\n register[0],\n hex(self.registers[register[0]])))",
"def shift_right(self):\n self.pointer = (self.pointer + 1) % len(self.data)",
"def shift_right(n, b):\n return (n >> b), n & ((1 << b) - 1)",
"def __lshift__(self, other):\r\n # TODO: extend to secret offset\r\n if not isinstance(other, int):\r\n return NotImplemented\r\n\r\n return runtime.mul(self, 1<<other)",
"def __rshift__(self, other):\r\n return NotImplemented",
"def __rshift__(self,g):\r\n\t\t\r\n\t\treturn self.plug(*g)",
"def rshift(self, attr):\n return self.set_child_and_return(shifter.rshift(self.statement, attr))"
] | [
"0.73124385",
"0.72767174",
"0.7224724",
"0.7161987",
"0.71405077",
"0.71010065",
"0.70435566",
"0.70038354",
"0.70038354",
"0.6964151",
"0.6928426",
"0.682287",
"0.679018",
"0.67720765",
"0.6719027",
"0.67159736",
"0.6682019",
"0.66308916",
"0.65880835",
"0.65222585",
"0.64510643",
"0.64510643",
"0.6418187",
"0.64107686",
"0.6318511",
"0.6292666",
"0.62642664",
"0.6245549",
"0.6242721",
"0.62249076"
] | 0.7695628 | 0 |
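The mirror-image sketch for bitwise_rshift, with a hypothetical column and label, assuming SQLAlchemy 2.0+:

    from sqlalchemy import column, select

    stmt = select(column("n").bitwise_rshift(8).label("hi_byte"))
    print(stmt)   # roughly: SELECT n >> :n_1 AS hi_byte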
Implement the ``in`` operator. In a column context, produces the clause ``column IN <other>``. | def in_(self, other: Any) -> ColumnOperators:
return self.operate(in_op, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def where_in(self, column, wheres=[]):\n if not wheres:\n self._wheres += ((QueryExpression(0, \"=\", 1, \"value_equals\")),)\n\n elif isinstance(wheres, QueryBuilder):\n self._wheres += (\n (QueryExpression(column, \"IN\", SubSelectExpression(wheres))),\n )\n else:\n wheres = [str(x) for x in wheres]\n self._wheres += ((QueryExpression(column, \"IN\", wheres)),)\n return self",
"def in_(self, item):\r\n return WhereClause(unicode(self), InOperator(), item)",
"def isin(self, values: Union[list, dict, IColumn]):\n if isinstance(values, list):\n return self._fromdata(\n {\n self.dtype.fields[i]\n .name: ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[i].dtype,\n self._data.child_at(i),\n True,\n )\n .isin(values)\n for i in range(self._data.children_size())\n },\n self._mask,\n )\n if isinstance(values, dict):\n self._check_columns(values.keys())\n return self._fromdata(\n {n: c.isin(values[n]) for n, c in self._field_data.items()}\n )\n if isinstance(values, IDataFrame):\n self._check_columns(values.columns)\n return self._fromdata(\n {n: c.isin(values=list(values[n])) for n, c in self._field_data.items()}\n )\n else:\n raise ValueError(\n f\"isin undefined for values of type {type(self).__name__}.\"\n )",
"def _sample_using_a_list(\n self,\n column_name: str,\n value_list: list,\n ):\n return sa.column(column_name).in_(value_list)",
"def in_(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"IN\", __key, __and, [(k, vs) for k, vs in kwargs.items()])",
"def _where_in_sub(self, column, query, boolean, negate=False):\n if negate:\n type = 'not_in_sub'\n else:\n type = 'in_sub'\n\n self.wheres.append({\n 'type': type,\n 'column': column,\n 'query': query,\n 'boolean': boolean\n })\n\n self.merge_bindings(query)\n\n return self",
"def in_(self, value):\n if isinstance(value, (str, text_type)):\n value = [value]\n return Filter(self, value, 'in')",
"def make_where_in(cls, key, value_list):\n\n return \"%s IN (%s)\" % (\n cls.to_attr_str(key), \", \".join(cls.to_value_str_list(value_list)))",
"def in_(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsIn)\n\n if isinstance(value, orb.Collection):\n newq.setValue(value)\n elif not isinstance(value, (set, list, tuple)):\n newq.setValue((value,))\n else:\n newq.setValue(tuple(value))\n\n return newq",
"def _expression(self, expression):\n exp, values = _convert_expression(expression)\n if isinstance(exp, sqlalchemy.sql.expression.ClauseElement):\n return exp\n if exp is None:\n return sqlalchemy.sql.expression.literal(True)\n qbh = expr.QueryBuilderHelper(self.table)\n where_clause = qbh.where_clause(exp, values)\n subselect = sql.select([self.table.c.id]).select_from(qbh.from_clause())\n subselect = subselect.where(where_clause)\n return self.table.c.id.in_(subselect)",
"def isin(self, values, ignore_indices=False, **kwargs): # noqa: PR02\n shape_hint = kwargs.pop(\"shape_hint\", None)\n if isinstance(values, type(self)) and ignore_indices:\n # Pandas logic is that it ignores indexing if 'values' is a 1D object\n values = values.to_pandas().squeeze(axis=1)\n if shape_hint == \"column\":\n return SeriesDefault.register(pandas.Series.isin)(self, values, **kwargs)\n else:\n return DataFrameDefault.register(pandas.DataFrame.isin)(\n self, values, **kwargs\n )",
"def queryByAttributeIn(table, attribute, values, access=None, addtl=\"\"):\n if len(values) > MAX_IN_ELEMENTS:\n values1 = values[:MAX_IN_ELEMENTS]\n values2 = values[MAX_IN_ELEMENTS:]\n records1 = queryByAttributeIn(table, attribute, values1, access, addtl)\n records2 = queryByAttributeIn(table, attribute, values2, access, addtl)\n records1.extend(records2)\n return records1\n\n valueString = u\",\".join(u\"'\" + sqlapi.quote(val) + u\"'\" for val in values)\n condition = u\"%s IN (%s)\" % (attribute, valueString)\n records = sqlapi.RecordSet2(table, condition,\n access=access, access_persno=auth.persno,\n addtl=addtl)\n return [records]",
"def create(df,column,list_):\n return df[df[column].isin(list_)]",
"def _sql_pkey_in(cur, pkeys, ids, prefix=''):\n pkeys = ['%s%s' % (prefix, pk) for pk in pkeys]\n if ids.is_full:\n return None\n elif len(ids) == 0:\n return 'false' # can never be satisfied\n return '(%s) IN %s' % (', '.join(pkeys), cur.mogrify('%s', (tuple(ids),)))",
"def whereClause(table, field, values):\n\n # Add field delimiters\n fieldDelimited = arcpy.AddFieldDelimiters(arcpy.Describe(table).path, field)\n\n # Split multivalue at semicolons and strip quotes\n valueList = [value[1:-1]\n if (value.startswith(\"'\") and value.endswith(\"'\"))\n else value for value in values.split(';')]\n\n # Determine field type\n fieldType = arcpy.ListFields(table, field)[0].type\n\n # Add single-quotes for string field values\n if str(fieldType) == 'String':\n valueList = [\"'%s'\" % value for value in valueList]\n\n # Format WHERE clause in the form of an IN statement\n whereClause = \"%s IN (%s)\"%(fieldDelimited, ', '.join(valueList))\n return whereClause",
"def isin(hi):\n return finder.search(hi)",
"def in_list(value, arg):\r\n return value in arg",
"def in_(self, to, *edge_classes, **kwargs):\n\n condition_str = self._get_condition_str(**kwargs)\n if condition_str:\n sql_string = 'SELECT EXPAND( in({0}) ) FROM {1}{2}'.format(\n ','.join(Graph.coerce_class_names_to_quoted(edge_classes))\n , self.coerce_class_names(to), condition_str)\n else:\n sql_string = 'SELECT EXPAND( in({0}) ) FROM {1}'.format(\n ','.join(Graph.coerce_class_names_to_quoted(edge_classes))\n , self.coerce_class_names(to))\n records = self.client.query(sql_string, -1)\n return [self.vertex_from_record(v) for v in records] \\\n if records else []",
"def filter_by_isin(df: pd.DataFrame, column: str, values: Iterable) -> pd.DataFrame:\n # First, create a \"map\" series from all possible values in the column => whether they should pass the filter\n all_ids = df[column].unique()\n is_id_relevant = pd.Series(np.zeros(len(all_ids)), index=all_ids).astype('bool') # Default false\n is_id_relevant.loc[values] = True\n\n # Create a boolean mask for column, based on the mapping above. Grab the raw array.\n mask = is_id_relevant[df[column]].values\n # Apply mask\n return df[mask]",
"def set_in(self, val):\n if not contain_in_list_equal(val, PARAM_INS):\n raise ArgumentError(\"[WARNING] `in`, should be \" + \", \".join(PARAM_INS))\n self._in = val\n pass",
"def is_in(self, e):\n return e in self.vals",
"def is_isin(value):\n return True",
"def isin(self, val):\n\t\treturn IsIn(self, val)",
"def build_where_clause(table, field, valueList):\n # Add DBMS-specific field delimiters\n fieldDelimited = arcpy.AddFieldDelimiters(arcpy.Describe(table).path, field)\n # Determine field type\n fieldType = arcpy.ListFields(table, field)[0].type\n # Add single-quotes for string field values\n if str(fieldType) == 'String':\n valueList = [\"'%s'\" % value for value in valueList]\n # Format WHERE clause in the form of an IN statement\n whereClause = \"%s IN(%s)\" % (fieldDelimited, ', '.join(map(str, valueList)))\n return whereClause",
"def in_(self, other):\n if hasattr(other, 'cypher'):\n results = other.all()\n t = []\n for x in results:\n t.append(getattr(x, self.label))\n else:\n t = other\n return InClauseElement(self, t)",
"def ifinlist(parser, token):\n return do_ifinlist(parser, token, False)",
"def column_values_in_list(col, test_list):\n test = np.array([c_i in test_list for c_i in col])\n return test",
"def test_array(self):\n q = big_query_query.Query()\n q.filter_in('multi', [1, 1.2, True, False, 'test\"test'])\n\n self.assertEqual('(multi IN (1, 1.2, true, false, \"test\\\\\"test\"))',\n q.get_where_clause())",
"def in_(self, other: Any) -> NoReturn:\n raise NotImplementedError(\n \"in_() not yet supported for \"\n \"relationships. For a simple \"\n \"many-to-one, use in_() against \"\n \"the set of foreign key values.\"\n )",
"def contains(self, other: Any, **kw: Any) -> ColumnOperators:\n return self.operate(contains_op, other, **kw)"
] | [
"0.7235667",
"0.6667996",
"0.64266074",
"0.62029254",
"0.61262214",
"0.61216533",
"0.6065823",
"0.60128945",
"0.5977825",
"0.5869149",
"0.5831549",
"0.5813662",
"0.5813355",
"0.5795436",
"0.57192147",
"0.5686568",
"0.5668566",
"0.5650912",
"0.5643218",
"0.5553692",
"0.55065244",
"0.5497459",
"0.5460937",
"0.53848004",
"0.5367979",
"0.52474195",
"0.5206834",
"0.51994234",
"0.5195473",
"0.51175284"
] | 0.70170045 | 1 |
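A short hedged example of the in_() operator described in the row above; the "id"/"status" columns and the value list are invented for illustration:

    from sqlalchemy import column, select

    stmt = select(column("id")).where(column("status").in_(["new", "open"]))
    # On SQLAlchemy 1.4+/2.0 the IN list compiles to an expanding bind parameter;
    # rendering with literal binds shows the familiar clause, e.g. status IN ('new', 'open').
    print(stmt.compile(compile_kwargs={"literal_binds": True}))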
implement the ``NOT IN`` operator. This is equivalent to using negation with :meth:`.ColumnOperators.in_`, i.e. ``~x.in_(y)``. | def not_in(self, other: Any) -> ColumnOperators:
return self.operate(not_in_op, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def not_in_(self, other):\n if hasattr(other, 'cypher'):\n results = other.all()\n t = []\n for x in results:\n t.append(getattr(x, self.label))\n else:\n t = other\n return NotInClauseElement(self, t)",
"def notIn(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNotIn)\n\n if isinstance(value, orb.Collection):\n newq.setValue(value)\n elif not isinstance(value, (set, list, tuple)):\n newq.setValue((value,))\n else:\n newq.setValue(tuple(value))\n\n return newq",
"def value_not_in(self, value_not_in):\n\n self._value_not_in = value_not_in",
"def make_where_not_in(cls, key, value_list):\n\n return \"%s NOT IN (%s)\" % (\n cls.to_attr_str(key), \", \".join(cls.to_value_str_list(value_list)))",
"def id_not_in(self, id_not_in):\n\n self._id_not_in = id_not_in",
"def id_not_in(self, id_not_in):\n\n self._id_not_in = id_not_in",
"def id_not_in(self, id_not_in):\n\n self._id_not_in = id_not_in",
"def id_not_in(self, id_not_in):\n\n self._id_not_in = id_not_in",
"def id_not_in(self, id_not_in):\n\n self._id_not_in = id_not_in",
"def is_not(self, other: Any) -> ColumnOperators:\n return self.operate(is_not, other)",
"def isNot(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNot)\n newq.setValue(value)\n return newq",
"def where_not_in(self, column, wheres=[]):\n if isinstance(wheres, QueryBuilder):\n self._wheres += (\n (QueryExpression(column, \"NOT IN\", SubSelectExpression(wheres))),\n )\n else:\n wheres = [str(x) for x in wheres]\n self._wheres += ((QueryExpression(column, \"NOT IN\", wheres)),)\n return self",
"def __ne__(self, *args):\n return _ida_hexrays.qlist_cinsn_t___ne__(self, *args)",
"def name_not_in(self, name_not_in):\n\n self._name_not_in = name_not_in",
"def name_not_in(self, name_not_in):\n\n self._name_not_in = name_not_in",
"def name_not_in(self, name_not_in):\n\n self._name_not_in = name_not_in",
"def key_not_in(self, key_not_in):\n\n self._key_not_in = key_not_in",
"def __ne__(self, *args):\n return _ida_hexrays.user_unions_iterator_t___ne__(self, *args)",
"def negated(self):\n op = QueryCompound.Op.And if self.__op == QueryCompound.Op.Or else QueryCompound.Op.Or\n return QueryCompound(*self.__queries, op=op)",
"def not_in(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"not_in\")",
"def exclude(self, **query):\n\n if self._query != '':\n query = '(%s) AND NOT (%s)' % (self._query, self._build_query(**query))\n else:\n query = 'NOT (%s)' % self._build_query(**query)\n\n return QueryList(self.model,\n query,\n order_by=self._order_by,\n fields=self._fields,\n limit=self._limit,\n offset=self._offset,\n links_to_names=self._links_to_names)",
"def assertNotIn(self, observed, expected, *args, **kwargs):\n return super(AsyncNbviewerTestCase, self).assertNotIn(\n to_unicode(observed),\n to_unicode(expected),\n *args,\n **kwargs\n )",
"def __ne__(self, *args):\n return _ida_hexrays.qlist_cinsn_t_iterator___ne__(self, *args)",
"def filter_not(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QCombination)):\n self.filter(QNot(arguments[0]))\n else:\n self.filter(QNot(Q(**kwargs)))\n\n return self",
"def __ne__(self, values):\n self = self.__eq__(values)\n return self.__invert__()",
"def exclude(self, *args, **kwargs):\n return self.filter(~F(*args, **kwargs))",
"def test_searchNot(self):\n return self._messageSetSearchTest('NOT 3', [1, 2, 4, 5])",
"def vds_num_not_in(self, vds_num_not_in):\n\n self._vds_num_not_in = vds_num_not_in",
"def logical_not(data):\n return _make.logical_not(data)",
"def bitwise_not(self) -> ColumnOperators:\n\n return self.operate(bitwise_not_op)"
] | [
"0.762016",
"0.7430412",
"0.70646805",
"0.68484217",
"0.68356097",
"0.68356097",
"0.68356097",
"0.68356097",
"0.68356097",
"0.68231535",
"0.67760336",
"0.67754763",
"0.6743561",
"0.6693501",
"0.6693501",
"0.6693501",
"0.66579396",
"0.65956944",
"0.6590181",
"0.6560453",
"0.65536827",
"0.65358794",
"0.6522556",
"0.6490891",
"0.646664",
"0.6453702",
"0.6421135",
"0.6393995",
"0.6373173",
"0.6368724"
] | 0.84240085 | 0 |
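The equivalence stated in the query above can be checked directly; a hedged sketch with a hypothetical "status" column:

    from sqlalchemy import column

    a = column("status").not_in(["closed", "archived"])
    b = ~column("status").in_(["closed", "archived"])
    print(a)   # status NOT IN (...) via an expanding bind parameter
    print(b)   # the negated in_() form compiles to the same NOT IN clause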
implement the ``NOT LIKE`` operator. This is equivalent to using negation with :meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``. | def not_like(
self, other: Any, escape: Optional[str] = None
) -> ColumnOperators:
return self.operate(not_like_op, other, escape=escape) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_searchNot(self):\n return self._messageSetSearchTest('NOT 3', [1, 2, 4, 5])",
"def not_ilike(\n self, other: Any, escape: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(not_ilike_op, other, escape=escape)",
"def RewriteNOT(self, expr):\n return None",
"def is_not(self, other: Any) -> ColumnOperators:\n return self.operate(is_not, other)",
"def doesNotMatch(self, value, caseSensitive=True):\n newq = self.copy()\n newq.setOp(Query.Op.DoesNotMatch)\n newq.setValue(value)\n newq.setCaseSensitive(caseSensitive)\n return newq",
"def exclude(self, *q, **kwargs):\n return self._filter_or_exclude(*q, _inverse=True, **kwargs)",
"def negations(self) -> str:",
"def privacy_pass_phrase_not_contains(self, privacy_pass_phrase_not_contains):\n\n self._privacy_pass_phrase_not_contains = privacy_pass_phrase_not_contains",
"def isNot(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNot)\n newq.setValue(value)\n return newq",
"def NOT(expression):\n return {'$not': [expression]}",
"def privacy_pass_phrase_not_starts_with(self, privacy_pass_phrase_not_starts_with):\n\n self._privacy_pass_phrase_not_starts_with = privacy_pass_phrase_not_starts_with",
"def __ne__(self, *args):\n return _libsbml.string___ne__(self, *args)",
"def username_not_contains(self, username_not_contains):\n\n self._username_not_contains = username_not_contains",
"def username_not_contains(self, username_not_contains):\n\n self._username_not_contains = username_not_contains",
"def operator_nre(s, pattern):\n return not re.search(pattern, s)",
"def bitwise_not(self) -> ColumnOperators:\n\n return self.operate(bitwise_not_op)",
"def test_only_pos_that_not_match(self):\n eq_(None, grepit(\"naranja\", [\"ob\"]))",
"def logical_not(data):\n return _make.logical_not(data)",
"def test_both_exist_pos_match_neg_no_match(self):\n eq_(\"foobar\",grepit(\"foobar\",[\"foo\"],[\"nomatch\"]))",
"def filter_not(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QCombination)):\n self.filter(QNot(arguments[0]))\n else:\n self.filter(QNot(Q(**kwargs)))\n\n return self",
"def test_negation():\n char1 = Character(court=['winter'])\n char2 = Character()\n char3 = Character(court=['summer'])\n res = npc.commands.find_characters([\"court~:winter\"], [char1, char2, char3])\n assert char1 not in res\n assert char2 in res\n assert char3 in res",
"def exclude(self, *args, **kwargs):\n return self.filter(~F(*args, **kwargs))",
"def name_not_contains(self, name_not_contains):\n\n self._name_not_contains = name_not_contains",
"def name_not_contains(self, name_not_contains):\n\n self._name_not_contains = name_not_contains",
"def name_not_contains(self, name_not_contains):\n\n self._name_not_contains = name_not_contains",
"def Not(*conditions):\n def notPred(db):\n matches = Or(*conditions)(db)\n return Result((k, v) for k, v in db.items() if k not in matches)\n\n return notPred",
"def negated(self):\n op = QueryCompound.Op.And if self.__op == QueryCompound.Op.Or else QueryCompound.Op.Or\n return QueryCompound(*self.__queries, op=op)",
"def negate(self):\n self.formula = '!(' + self.formula + ')'",
"def auth_pass_phrase_not_contains(self, auth_pass_phrase_not_contains):\n\n self._auth_pass_phrase_not_contains = auth_pass_phrase_not_contains",
"def __ne__(self, other):\n self.conds.append((self.name, '!=', other))\n return self"
] | [
"0.65409845",
"0.6489854",
"0.6365467",
"0.63564676",
"0.627446",
"0.610524",
"0.60887253",
"0.60735446",
"0.601609",
"0.5970636",
"0.59192866",
"0.5917322",
"0.5914209",
"0.5914209",
"0.59089214",
"0.5901702",
"0.5880488",
"0.5878728",
"0.58678925",
"0.5858922",
"0.58414394",
"0.58346045",
"0.58335817",
"0.58335817",
"0.58335817",
"0.57888955",
"0.57567984",
"0.57329834",
"0.5731498",
"0.57240564"
] | 0.73303384 | 0 |
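A sketch of the negated LIKE form from the row above; the "name" column and pattern are illustrative:

    from sqlalchemy import column

    expr = column("name").not_like("%test%")   # roughly: name NOT LIKE :name_1
    same = ~column("name").like("%test%")      # negating like() yields the same clause
    print(expr)
    print(same)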
implement the ``NOT ILIKE`` operator. This is equivalent to using negation with :meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``. | def not_ilike(
self, other: Any, escape: Optional[str] = None
) -> ColumnOperators:
return self.operate(not_ilike_op, other, escape=escape) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def not_like(\n self, other: Any, escape: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(not_like_op, other, escape=escape)",
"def test_searchNot(self):\n return self._messageSetSearchTest('NOT 3', [1, 2, 4, 5])",
"def RewriteNOT(self, expr):\n return None",
"def operator_nre(s, pattern):\n return not re.search(pattern, s)",
"def test_only_pos_that_not_match(self):\n eq_(None, grepit(\"naranja\", [\"ob\"]))",
"def is_not(self, other: Any) -> ColumnOperators:\n return self.operate(is_not, other)",
"def doesNotMatch(self, value, caseSensitive=True):\n newq = self.copy()\n newq.setOp(Query.Op.DoesNotMatch)\n newq.setValue(value)\n newq.setCaseSensitive(caseSensitive)\n return newq",
"def test_both_exist_pos_match_neg_no_match(self):\n eq_(\"foobar\",grepit(\"foobar\",[\"foo\"],[\"nomatch\"]))",
"def username_not_contains(self, username_not_contains):\n\n self._username_not_contains = username_not_contains",
"def username_not_contains(self, username_not_contains):\n\n self._username_not_contains = username_not_contains",
"def privacy_pass_phrase_not_contains(self, privacy_pass_phrase_not_contains):\n\n self._privacy_pass_phrase_not_contains = privacy_pass_phrase_not_contains",
"def negations(self) -> str:",
"def name_not_contains(self, name_not_contains):\n\n self._name_not_contains = name_not_contains",
"def name_not_contains(self, name_not_contains):\n\n self._name_not_contains = name_not_contains",
"def name_not_contains(self, name_not_contains):\n\n self._name_not_contains = name_not_contains",
"def privacy_pass_phrase_not_starts_with(self, privacy_pass_phrase_not_starts_with):\n\n self._privacy_pass_phrase_not_starts_with = privacy_pass_phrase_not_starts_with",
"def isNot(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNot)\n newq.setValue(value)\n return newq",
"def __ne__(self, *args):\n return _libsbml.string___ne__(self, *args)",
"def NOT(expression):\n return {'$not': [expression]}",
"def test_negation():\n char1 = Character(court=['winter'])\n char2 = Character()\n char3 = Character(court=['summer'])\n res = npc.commands.find_characters([\"court~:winter\"], [char1, char2, char3])\n assert char1 not in res\n assert char2 in res\n assert char3 in res",
"def not_in(self, other: Any) -> ColumnOperators:\n return self.operate(not_in_op, other)",
"def not_found(\n expr: str\n ) -> bool:\n return expr.startswith(\"N\")",
"def ip_not_starts_with(self, ip_not_starts_with):\n\n self._ip_not_starts_with = ip_not_starts_with",
"def ip_not_contains(self, ip_not_contains):\n\n self._ip_not_contains = ip_not_contains",
"def auth_pass_phrase_not_contains(self, auth_pass_phrase_not_contains):\n\n self._auth_pass_phrase_not_contains = auth_pass_phrase_not_contains",
"def logical_not(data):\n return _make.logical_not(data)",
"def exclude(self, *q, **kwargs):\n return self._filter_or_exclude(*q, _inverse=True, **kwargs)",
"def test_searchNotMessageSet(self):\n return self._messageSetSearchTest('NOT 2:*', [1])",
"def name_not_in(self, name_not_in):\n\n self._name_not_in = name_not_in",
"def name_not_in(self, name_not_in):\n\n self._name_not_in = name_not_in"
] | [
"0.70431954",
"0.6704028",
"0.64000976",
"0.63810587",
"0.63433325",
"0.6245004",
"0.6223399",
"0.6219486",
"0.61507857",
"0.61507857",
"0.61485255",
"0.6127695",
"0.6118989",
"0.6118989",
"0.6118989",
"0.6017641",
"0.59710455",
"0.5954014",
"0.59344053",
"0.59319",
"0.59183747",
"0.5870793",
"0.5840429",
"0.58060145",
"0.57900316",
"0.57444185",
"0.57419604",
"0.5719",
"0.5714725",
"0.5714725"
] | 0.6880071 | 1 |
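For the not_ilike row above, a hedged sketch; "email" is a made-up column, and the rendering note reflects how SQLAlchemy generally treats ILIKE (native on PostgreSQL, emulated with lower() elsewhere):

    from sqlalchemy import column
    from sqlalchemy.dialects import postgresql

    expr = column("email").not_ilike("%@example.com")
    print(expr.compile(dialect=postgresql.dialect()))   # roughly: email NOT ILIKE %(email_1)s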
Implements a database-specific 'match' operator. | def match(self, other: Any, **kwargs: Any) -> ColumnOperators:
return self.operate(match_op, other, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_simple_match(self):\n query = Query().match('')\n expected = '\\n'.join((\n 'MATCH (_a)',\n 'RETURN _a',\n ))\n self.assertEqual(str(query), expected)\n\n query = Query().match('SomeLabel')\n expected = '\\n'.join((\n 'MATCH (_a:SomeLabel)',\n 'RETURN _a',\n ))\n self.assertEqual(str(query), expected)\n\n query = Query().match('SomeLabel', 'var')\n expected = '\\n'.join((\n 'MATCH (var:SomeLabel)',\n 'RETURN var',\n ))\n self.assertEqual(str(query), expected)\n\n query = Query().match('SomeLabel:OtherLabel')\n expected = '\\n'.join((\n 'MATCH (_a:SomeLabel:OtherLabel)',\n 'RETURN _a',\n ))\n self.assertEqual(str(query), expected)",
"def _select_match(matches):\n # TOOD: add user friendly representation to of each relation\n raise NotImplementedError()",
"def isMatched(expr):\n pass",
"def match(self, other):",
"def match(self, dc):\n raise NotImplemented",
"def match(self, table_name, field, regex=None, test=None):\n table = self.db.table(table_name)\n if test is not None:\n LOGGER.debug('%r: search(where(%r).test(%r))' % (table_name, field, test))\n return table.search(where(field).test(test))\n elif regex is not None:\n LOGGER.debug('%r: search(where(%r).matches(%r))' % (table_name, field, regex))\n return table.search(where(field).matches(regex))\n else:\n LOGGER.debug(\"%r: search(where(%r).matches('.*'))\" % (table_name, field))\n return table.search(where(field).matches('.*'))",
"def test_regex_case_insensitive_match(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'ABC'\", 'a.*', False)\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertEqual(cursor.fetchone()[0], 'TRUE')\n finally:\n self.dbh.rollback()\n cursor.close()",
"def matches(self, value, caseSensitive=True):\n newq = self.copy()\n newq.setOp(Query.Op.Matches)\n newq.setValue(value)\n newq.setCaseSensitive(caseSensitive)\n return newq",
"def match(self, field, table_name=None, regex=None, test=None):\n return self._get_storage().match(field, table_name=table_name, regex=regex, test=test)",
"def similar(text, database):\n # TODO\n pass",
"def test_regex_case_sensitive_match(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'abc'\", 'a.*')\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertEqual(cursor.fetchone()[0], 'TRUE')\n finally:\n self.dbh.rollback()\n cursor.close()",
"def match(data, query):\n ast = parser.parse(query)\n\n dq = DataQueryVisitor(ast)\n return dq.evaluate(data)",
"def test_match_sub_eq(self, subdocument):\n assert subdocument.match({\"and.the\": \"drake\"})\n assert not subdocument.match({\"and.no\": \"drake\"})",
"def match(self, item):",
"def match(self) -> \"MatchResult\":\n raise NotImplementedError",
"def test_match_gte(self, document):\n assert document.match({\"_id\": {\"$gte\": 1}})\n assert document.match({\"_id\": {\"$gte\": 0}})\n assert not document.match({\"_id\": {\"$gte\": 2}})",
"def match(self) -> bool:",
"def regexp_match(\n self, pattern: Any, flags: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(regexp_match_op, pattern, flags=flags)",
"def test_multi_match_return_expr(self):\n eq_(self.line,line_matches_greps(self.line,[\"foo\",\"bar\"]))",
"def matches(self, actual: Any) -> MatchResult:\n raise NotImplementedError()",
"def match(self, query, annotations):\n # XXX Note that we are not inspecting 'action' \n\n # TMP CACHE DEBUG\n #import pdb\n #pdb.set_trace()\n\n # The object without namespace to compare with the rule\n if ':' in query.object:\n obj = query.object.split(':')[-1] \n else:\n obj = query.object\n\n # Test if the object of the Query matches the object of the Rule\n if self.object != '*' and not str(self.object) == str(obj):\n return False\n\n #print \"rule.match between these objects: self.object = %s - query.object %s\" % (self.object,obj)\n query_fields_R = set()\n query_fields_R |= query.get_select()\n query_fields_R |= query.get_where().get_field_names()\n\n query_fields_W = set()\n query_fields_W |= set(query.get_params().keys())\n\n query_fields_RW = set()\n query_fields_RW |= query_fields_R\n query_fields_RW |= query_fields_W\n\n if self.access == 'R':\n return ('*' in self.fields and query_fields_R) or query_fields_R.intersection(self.fields)\n elif self.access == 'W':\n return ('*' in self.fields and query_fields_W) or query_fields_W.intersection(self.fields)\n elif self.access == 'RW':\n return ('*' in self.fields and query_fields_RW) or query_fields_RW.intersection(self.fields)",
"def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(eq, other)",
"def test_regex_query_shortcuts(self):\n person = self.Person(name=\"Guido van Rossum\")\n person.save()\n\n # Test contains\n obj = self.Person.objects(name__contains=\"van\").first()\n assert obj == person\n obj = self.Person.objects(name__contains=\"Van\").first()\n assert obj is None\n\n # Test icontains\n obj = self.Person.objects(name__icontains=\"Van\").first()\n assert obj == person\n\n # Test startswith\n obj = self.Person.objects(name__startswith=\"Guido\").first()\n assert obj == person\n obj = self.Person.objects(name__startswith=\"guido\").first()\n assert obj is None\n\n # Test istartswith\n obj = self.Person.objects(name__istartswith=\"guido\").first()\n assert obj == person\n\n # Test endswith\n obj = self.Person.objects(name__endswith=\"Rossum\").first()\n assert obj == person\n obj = self.Person.objects(name__endswith=\"rossuM\").first()\n assert obj is None\n\n # Test iendswith\n obj = self.Person.objects(name__iendswith=\"rossuM\").first()\n assert obj == person\n\n # Test exact\n obj = self.Person.objects(name__exact=\"Guido van Rossum\").first()\n assert obj == person\n obj = self.Person.objects(name__exact=\"Guido van rossum\").first()\n assert obj is None\n obj = self.Person.objects(name__exact=\"Guido van Rossu\").first()\n assert obj is None\n\n # Test iexact\n obj = self.Person.objects(name__iexact=\"gUIDO VAN rOSSUM\").first()\n assert obj == person\n obj = self.Person.objects(name__iexact=\"gUIDO VAN rOSSU\").first()\n assert obj is None\n\n # Test wholeword\n obj = self.Person.objects(name__wholeword=\"Guido\").first()\n assert obj == person\n obj = self.Person.objects(name__wholeword=\"rossum\").first()\n assert obj is None\n obj = self.Person.objects(name__wholeword=\"Rossu\").first()\n assert obj is None\n\n # Test iwholeword\n obj = self.Person.objects(name__iwholeword=\"rOSSUM\").first()\n assert obj == person\n obj = self.Person.objects(name__iwholeword=\"rOSSU\").first()\n assert obj is None\n\n # Test regex\n obj = self.Person.objects(name__regex=\"^[Guido].*[Rossum]$\").first()\n assert obj == person\n obj = self.Person.objects(name__regex=\"^[guido].*[rossum]$\").first()\n assert obj is None\n obj = self.Person.objects(name__regex=\"^[uido].*[Rossum]$\").first()\n assert obj is None\n\n # Test iregex\n obj = self.Person.objects(name__iregex=\"^[guido].*[rossum]$\").first()\n assert obj == person\n obj = self.Person.objects(name__iregex=\"^[Uido].*[Rossum]$\").first()\n assert obj is None\n\n # Test unsafe expressions\n person = self.Person(name=\"Guido van Rossum [.'Geek']\")\n person.save()\n\n obj = self.Person.objects(name__icontains=\"[.'Geek\").first()\n assert obj == person",
"def test_match_bad_operator(self, document):\n with pytest.raises(ValueError) as exc:\n document.match({\"_id\": {\"$voici_voila\": 0}})\n\n assert \"Operator '$voici_voila' is not supported\" in str(exc.value)",
"def match(self, product):\n\n raise NotImplementedError, 'need impletent match method'",
"def handleMatch(self, m):\r\n pass",
"def test_match_table_post(self):\n pass",
"def test_match_lte(self, document):\n assert document.match({\"_id\": {\"$lte\": 2}})\n assert document.match({\"_id\": {\"$lte\": 1}})\n assert not document.match({\"_id\": {\"$lte\": 0}})",
"def _match(df: DataFrame,\r\n prob_mod: mlc.Model,\r\n method: str,\r\n metric_col: str,\r\n match_kwargs: Optional[dict] = None):\r\n\r\n functions_dict = {\r\n 'assignment': _assignment_match,\r\n 'quantile': _quantile_match\r\n }\r\n # _assignment_match doesnt currently have any kwargs, so match_kwargs should be empty\r\n df, match_info = functions_dict[method](df, prob_mod, metric_col, **match_kwargs)\r\n\r\n return df, match_info",
"def match_source_key(self, match):\n raise NotImplementedError"
] | [
"0.66808915",
"0.6366497",
"0.6265464",
"0.6193781",
"0.6153604",
"0.59592897",
"0.5906559",
"0.58791506",
"0.58500457",
"0.5849632",
"0.58387184",
"0.58373845",
"0.5770488",
"0.5739091",
"0.5731",
"0.5670912",
"0.5592207",
"0.5586597",
"0.5584879",
"0.55615056",
"0.5538527",
"0.55115646",
"0.55050343",
"0.55011654",
"0.54781616",
"0.5466671",
"0.5449586",
"0.54465455",
"0.54410374",
"0.5440249"
] | 0.7020061 | 0 |
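Because match() is backend-specific, a usage sketch only makes sense per dialect; the "body" column and search term are invented, and the exact PostgreSQL tsquery function varies by SQLAlchemy version:

    from sqlalchemy import column
    from sqlalchemy.dialects import mysql, postgresql

    expr = column("body").match("sqlalchemy")
    print(expr.compile(dialect=mysql.dialect()))       # roughly: MATCH (body) AGAINST (%s IN BOOLEAN MODE)
    print(expr.compile(dialect=postgresql.dialect()))  # roughly: body @@ to_tsquery/plainto_tsquery(...)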
Implements a database-specific 'regexp match' operator. | def regexp_match(
self, pattern: Any, flags: Optional[str] = None
) -> ColumnOperators:
return self.operate(regexp_match_op, pattern, flags=flags) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isMatched(expr):\n pass",
"def main(self, regex_string):\n sql_sen = regex_string[0][0]\n reg = \"\\$\\w+\"\n if re.search(reg, sql_sen, re.I):\n\n p = re.compile(reg)\n match = p.findall(sql_sen)\n return match\n return None",
"def test_regex_case_insensitive_match(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'ABC'\", 'a.*', False)\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertEqual(cursor.fetchone()[0], 'TRUE')\n finally:\n self.dbh.rollback()\n cursor.close()",
"def test_regex_case_sensitive_match(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'abc'\", 'a.*')\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertEqual(cursor.fetchone()[0], 'TRUE')\n finally:\n self.dbh.rollback()\n cursor.close()",
"def regexp_predicate(value):\n return re.compile(value).match",
"def match(self, field, table_name=None, regex=None, test=None):\n return self._get_storage().match(field, table_name=table_name, regex=regex, test=test)",
"def match(self, table_name, field, regex=None, test=None):\n table = self.db.table(table_name)\n if test is not None:\n LOGGER.debug('%r: search(where(%r).test(%r))' % (table_name, field, test))\n return table.search(where(field).test(test))\n elif regex is not None:\n LOGGER.debug('%r: search(where(%r).matches(%r))' % (table_name, field, regex))\n return table.search(where(field).matches(regex))\n else:\n LOGGER.debug(\"%r: search(where(%r).matches('.*'))\" % (table_name, field))\n return table.search(where(field).matches('.*'))",
"def convertSQL_LIKE2REGEXP(sql_like_pattern):\n # Replace '_' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)_', '.', sql_like_pattern)\n # Replace '%' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)%', '.*', regexp)\n # Set regexp to ignore cases; SQL patterns are case-insensitive by default.\n regexp = \"(?i)^(\" + regexp + \")$\"\n return regexp",
"def test_simple_match(self):\n query = Query().match('')\n expected = '\\n'.join((\n 'MATCH (_a)',\n 'RETURN _a',\n ))\n self.assertEqual(str(query), expected)\n\n query = Query().match('SomeLabel')\n expected = '\\n'.join((\n 'MATCH (_a:SomeLabel)',\n 'RETURN _a',\n ))\n self.assertEqual(str(query), expected)\n\n query = Query().match('SomeLabel', 'var')\n expected = '\\n'.join((\n 'MATCH (var:SomeLabel)',\n 'RETURN var',\n ))\n self.assertEqual(str(query), expected)\n\n query = Query().match('SomeLabel:OtherLabel')\n expected = '\\n'.join((\n 'MATCH (_a:SomeLabel:OtherLabel)',\n 'RETURN _a',\n ))\n self.assertEqual(str(query), expected)",
"def testResolveRegEx(self):\n predicate = \"metadata:predicate\"\n subject = \"aff4:/metadata:10\"\n\n # Check we can specify a timestamp\n data_store.DB.Set(subject, predicate, \"3\", timestamp=1000, token=self.token)\n results = [x for x in data_store.DB.ResolveRegex(subject, \"metadata:pred.*\",\n timestamp=(0, 2000),\n token=self.token)]\n\n self.assertEqual(len(results), 1)\n # Timestamp\n self.assertEqual(results[0][2], 1000)\n # Value\n self.assertEqual(results[0][1], \"3\")\n # Predicate\n self.assertEqual(results[0][0], predicate)",
"def test_regex_query_shortcuts(self):\n person = self.Person(name=\"Guido van Rossum\")\n person.save()\n\n # Test contains\n obj = self.Person.objects(name__contains=\"van\").first()\n assert obj == person\n obj = self.Person.objects(name__contains=\"Van\").first()\n assert obj is None\n\n # Test icontains\n obj = self.Person.objects(name__icontains=\"Van\").first()\n assert obj == person\n\n # Test startswith\n obj = self.Person.objects(name__startswith=\"Guido\").first()\n assert obj == person\n obj = self.Person.objects(name__startswith=\"guido\").first()\n assert obj is None\n\n # Test istartswith\n obj = self.Person.objects(name__istartswith=\"guido\").first()\n assert obj == person\n\n # Test endswith\n obj = self.Person.objects(name__endswith=\"Rossum\").first()\n assert obj == person\n obj = self.Person.objects(name__endswith=\"rossuM\").first()\n assert obj is None\n\n # Test iendswith\n obj = self.Person.objects(name__iendswith=\"rossuM\").first()\n assert obj == person\n\n # Test exact\n obj = self.Person.objects(name__exact=\"Guido van Rossum\").first()\n assert obj == person\n obj = self.Person.objects(name__exact=\"Guido van rossum\").first()\n assert obj is None\n obj = self.Person.objects(name__exact=\"Guido van Rossu\").first()\n assert obj is None\n\n # Test iexact\n obj = self.Person.objects(name__iexact=\"gUIDO VAN rOSSUM\").first()\n assert obj == person\n obj = self.Person.objects(name__iexact=\"gUIDO VAN rOSSU\").first()\n assert obj is None\n\n # Test wholeword\n obj = self.Person.objects(name__wholeword=\"Guido\").first()\n assert obj == person\n obj = self.Person.objects(name__wholeword=\"rossum\").first()\n assert obj is None\n obj = self.Person.objects(name__wholeword=\"Rossu\").first()\n assert obj is None\n\n # Test iwholeword\n obj = self.Person.objects(name__iwholeword=\"rOSSUM\").first()\n assert obj == person\n obj = self.Person.objects(name__iwholeword=\"rOSSU\").first()\n assert obj is None\n\n # Test regex\n obj = self.Person.objects(name__regex=\"^[Guido].*[Rossum]$\").first()\n assert obj == person\n obj = self.Person.objects(name__regex=\"^[guido].*[rossum]$\").first()\n assert obj is None\n obj = self.Person.objects(name__regex=\"^[uido].*[Rossum]$\").first()\n assert obj is None\n\n # Test iregex\n obj = self.Person.objects(name__iregex=\"^[guido].*[rossum]$\").first()\n assert obj == person\n obj = self.Person.objects(name__iregex=\"^[Uido].*[Rossum]$\").first()\n assert obj is None\n\n # Test unsafe expressions\n person = self.Person(name=\"Guido van Rossum [.'Geek']\")\n person.save()\n\n obj = self.Person.objects(name__icontains=\"[.'Geek\").first()\n assert obj == person",
"async def match_regex(text, opts):\n\n def is_case_sensitive():\n if opts[\"case_sensitive\"]:\n return False\n return regex.IGNORECASE\n\n if opts[\"matching_condition\"].lower() == \"search\":\n matched_regex = regex.search(opts[\"expression\"], text, is_case_sensitive())\n elif opts[\"matching_condition\"].lower() == \"fullmatch\":\n matched_regex = regex.fullmatch(opts[\"expression\"], text, is_case_sensitive())\n else:\n matched_regex = regex.match(opts[\"expression\"], text, is_case_sensitive())\n return matched_regex",
"def test_match(self):\n\n # Test of the rematch case.\n regex = r\"([a-z]{1,})\\s([a-z]{1,})\\s\"\n expected = \"is\"\n actual = Regex(self.data, regex, rematch=True, group=1).match()\n\n self.assertEqual(expected, actual)\n\n # Test of the group case\n regex = \"e\"\n expected = \"e\"\n actual = Regex(self.data, regex, group=0).match()\n\n self.assertEqual(expected, actual)",
"def test_regex_case_sensitive_nomatch(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'ABC'\", 'a.*')\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertIsNone(cursor.fetchone())\n finally:\n self.dbh.rollback()\n cursor.close()",
"def test_multi_match_return_expr(self):\n eq_(self.line,line_matches_greps(self.line,[\"foo\",\"bar\"]))",
"def test_match_regexp_including_start():\r\n runmatch(lcode)",
"def match(self, other: Any, **kwargs: Any) -> ColumnOperators:\n return self.operate(match_op, other, **kwargs)",
"def match_regex_2(s, r):\n s_len = len(s)\n r_len = len(r)\n @memoize\n def match(s_idx, r_idx):\n \"\"\"Matches string s[s_idx:] to regex r[r_idx:].\"\"\"\n # Case: string is empty.\n if s_idx == s_len:\n if r_idx == r_len:\n return True\n if r[r_idx] == '*':\n return match(s_idx, r_idx + 1)\n return False\n # Case: string is not empty.\n if r_idx == r_len:\n return False\n regex_instruction = r[r_idx]\n if regex_instruction in ('.', s[s_idx]):\n return match(s_idx + 1, r_idx + 1)\n if regex_instruction == '*':\n return match(s_idx + 1, r_idx + 1) or match(s_idx + 1, r_idx)\n return False\n return match(0, 0)",
"def upy_re_match(regex,value):\n reg = re.compile(regex)\n return reg.match(value)",
"def get_match_with_re(pattern, unknown):\n pattern, unknown = _check_params(pattern, unknown)\n regex = re.compile(pattern)\n if not regex.search(unknown):\n return False\n return True",
"def test_expression_regex(self):\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.label.op(\"REGEXP\")(\"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_3\"}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label REGEXP /pattern/ with models.Network.label=\"network_3\" (1)\"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label.op(\"REGEXP\")(\"(network_3|network_2)\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_3\"}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label REGEXP /pattern/ with models.Network.label=\"network_3\" (2)\"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label.op(\"REGEXP\")(\"(network_1|network_2)\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_3\"}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label REGEXP /pattern/ with models.Network.label=\"network_3\" (3)\"\"\")",
"def test_match_any_wildcard_in_literal(self):\n qs = '\"Foo t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Foo t\\*\"', \"Wildcard should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Foo t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )",
"def regexp_replace(\n self, pattern: Any, replacement: Any, flags: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(\n regexp_replace_op,\n pattern,\n replacement=replacement,\n flags=flags,\n )",
"def test_multiple_match_any_wildcard_in_literal(self):\n qs = '\"Fo*o t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Fo\\*o t\\*\"', \"Both wildcards should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Fo\\*o t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )",
"def checked_regexp(regexp, value, label):\n if isinstance(regexp, (unicode, str)):\n match = re.match(regexp, value)\n else:\n match = regexp.match(value)\n if match is None:\n flash(label + \" Is Incorrectly Formatted\")\n return None\n else:\n return match",
"def match(self, s):\n self.matches = self.re.search(s)\n return self.matches",
"def similar(text, database):\n # TODO\n pass",
"def test_regex_bad_case_sensitivity(self):\n with self.assertRaises(despydb.UnknownCaseSensitiveError):\n self.dbh.get_regex_clause(\"'ABC'\", 'a.*', 'F')",
"def test_match_right_regexp_to_none():\r\n runmatch(lcode)",
"def match(self, regexp):\n try:\n self.rematch = regexp.match(self.matchstring)\n except AttributeError:\n self.rematch = re.match(regexp, self.matchstring)\n return bool(self.rematch)"
] | [
"0.66413236",
"0.6616442",
"0.6545228",
"0.6535642",
"0.6511754",
"0.62631065",
"0.6258171",
"0.6247489",
"0.6175121",
"0.6082263",
"0.60702616",
"0.60619074",
"0.5965493",
"0.59089625",
"0.589825",
"0.5823971",
"0.5823638",
"0.58095807",
"0.57967544",
"0.5730419",
"0.5729448",
"0.57074434",
"0.5675925",
"0.5673778",
"0.56250125",
"0.5612563",
"0.55974257",
"0.5595583",
"0.55903316",
"0.55844903"
] | 0.7697757 | 0 |
Implements a databasespecific 'regexp replace' operator. | def regexp_replace(
self, pattern: Any, replacement: Any, flags: Optional[str] = None
) -> ColumnOperators:
return self.operate(
regexp_replace_op,
pattern,
replacement=replacement,
flags=flags,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_re_replace(val: AnyStr, pattern: str, repl: str) -> str:\n return re.sub(pattern, repl, str(val))",
"def replace_params(self):\n raw_sql = self.raw_sql\n for placeholder in self.to_replace:\n newreg = re.compile(placeholder)\n repl = self.get_replacement_value(placeholder)\n if repl:\n raw_sql = newreg.sub(str(repl), raw_sql)\n self.sql = raw_sql",
"def find_values_to_replace(self):\n regexp = re.compile(self.raw_pattern)\n self.to_replace = regexp.findall(self.raw_sql)",
"def convertSQL_LIKE2REGEXP(sql_like_pattern):\n # Replace '_' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)_', '.', sql_like_pattern)\n # Replace '%' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)%', '.*', regexp)\n # Set regexp to ignore cases; SQL patterns are case-insensitive by default.\n regexp = \"(?i)^(\" + regexp + \")$\"\n return regexp",
"def add_replace_filter(source, args, index):\n original = args.get('replace-pattern%02d' % index)\n replacement = args.get('replace-value%02d' % index)\n tags = args.get('replace-tags%02d' % index)\n use_regex = args.get('replace-regex%02d' % index)\n row_query = args.get('replace-where%02d' % index)\n return source.replace_data(original, replacement, tags, use_regex, queries=row_query)",
"def do_repl(self, args):\n args = args.split()\n if len(args) != 2:\n print 'usage: scan pat'\n return\n pat = args[0]\n repl = args[1]\n self.regexprutils.replace(pat, repl)",
"def sqlq(v):\n if not isinstance(v, (bytes, str)):\n return v\n for value, replacement in _sql_replacements:\n v = v.replace(value, replacement)\n return v",
"def test_evaluate_replace_expression(self):\n value = self.evaluate_common(\"replace('startswith','tart','cake')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value == \"scakeswith\")\n value = self.evaluate_common(\"replace('startswith','t','x')\")\n self.assertTrue(value.value == \"sxarxswixh\")\n # not case insensitive\n value = self.evaluate_common(\"replace('sTartswith','t','x')\")\n self.assertTrue(value.value == \"sTarxswixh\")\n value = self.evaluate_common(\"replace('startswith','t','tx')\")\n self.assertTrue(value.value == \"stxartxswitxh\")\n try:\n value = self.evaluate_common(\"replace('3.14','1',2)\")\n self.fail(\"integer as parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"replace('3.14','1')\")\n self.fail(\"2 parameter\")\n except odata.EvaluationError:\n pass",
"def REGEXREPLACE(text, regular_expression, replacement):\n return re.sub(regular_expression, replacement, text)",
"def redacorator(func):\n def _replace(match):\n ori = match.group()\n text = match.group().strip().lower()\n return func(text, ori)\n return _replace",
"def regex_replace(s, old, new, count=0):\n\n return re.sub(old, new, s, count=count)",
"def test_substitutions_with_regex_chars(self):\n m = strutils.MultiReplace({'cat.+': 'kedi', r'purple': 'mor', })\n self.assertEqual(m.sub('The cat.+ is purple'), 'The kedi is mor')",
"def lreplace(pattern, sub, string):\n return re.sub('^%s' % pattern, sub, string)",
"def regexp_match(\n self, pattern: Any, flags: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(regexp_match_op, pattern, flags=flags)",
"def _glob_to_sql(self, string):\n\n # What's with the chr(1) and chr(2) nonsense? It's a trick to\n # hide \\* and \\? from the * and ? substitutions. This trick\n # depends on the substitutiones being done in order. chr(1)\n # and chr(2) were picked because I know those characters\n # almost certainly won't be in the input string\n table = ((r'\\\\', chr(1)), (r'\\*', chr(2)), (r'\\?', chr(3)),\n (r'%', r'\\%'), (r'?', '_'), (r'*', '%'),\n (chr(1), r'\\\\'), (chr(2), r'\\*'), (chr(3), r'\\?'))\n\n for (a, b) in table:\n string = string.replace(a,b)\n\n string = string[1:] if string.startswith(\"^\") else \"%\" + string\n string = string[:-1] if string.endswith(\"$\") else string + \"%\"\n\n return string",
"def replace_with(*, replacement, f=DECORATED):\n return replacement",
"def replace_data(self, original, replacement, pattern=None, use_regex=False, queries=[]):\n import hxl.filters\n replacement = hxl.filters.ReplaceDataFilter.Replacement(original, replacement, pattern, use_regex)\n return hxl.filters.ReplaceDataFilter(self, [replacement], queries=queries)",
"def add_substitution(self, pattern, repl):\r\n\r\n self.substitutions.append( (re.compile(pattern), repl) )",
"def Replace(expression, find, replace, start=1, count=-1):\n if find:\n return expression[:start - 1] + expression[start - 1:].replace(find, replace, count)\n else:\n return expression",
"def _update_date_by_regexp(connection, regexp, new_value):\n\n request_skeleton = \"\"\"\n UPDATE custom_attribute_values AS cav JOIN\n custom_attribute_definitions AS cad ON\n cav.custom_attribute_id = cad.id\n SET cav.attribute_value = {new_value}\n WHERE cad.attribute_type = 'Date' AND\n cav.attribute_value REGEXP '{regexp}'\n \"\"\"\n connection.execute(request_skeleton.format(new_value=new_value,\n regexp=regexp))",
"def replacement(cls, search_pattern: str, replacement: str) -> PhonTransform:\n sub_func = lambda match: replacement\n return cls(search_pattern, sub_func)",
"def test_replace(self):\n\n regex = \"th\"\n expected = \"Hello, htis is Fun Ilrys. I just wanted to know how htings goes around hte tests.\" # pylint: disable=line-too-long\n actual = Regex(self.data, regex, replace_with=\"ht\").replace()\n\n self.assertEqual(expected, actual)\n\n # Test of the case that there is not replace_with\n regex = \"th\"\n expected = self.data\n actual = Regex(self.data, regex).replace()\n\n self.assertEqual(expected, actual)",
"def _assign_regex(literal, regex):\n if regex:\n return regex.lower().strip()\n else:\n return r'\\b%s\\b'%literal.lower().strip()",
"def regex_search_and_replace(string, regex, replace):\n return re.sub(regex, replace, string)",
"def replacements(input_str, query, replace=\"\", num=0):\n check_parentheses = re.findall(\"\\([^()]*\\)\", query)\n check_replacement = re.findall(r\"\\\\[0-9]+\", replace)\n check_replacement = sorted([int(match[1:]) for match in check_replacement])\n if check_replacement and check_replacement[-1] > len(check_parentheses):\n raise AttributeError(\"There are more replacement match values specified than query parenthesized groups\")\n\n if num < 0:\n if check_replacement:\n for indx in sorted(range(check_replacement[-1]), reverse=True):\n indx += 1\n replace = re.sub(r\"\\\\%s\" % indx, r\"\\\\%s\" % (indx + 1), replace)\n right_replace = \"\\\\%s\" % (len(check_replacement) + 2)\n else:\n right_replace = \"\\\\2\"\n leftmost = str(input_str)\n new_str = str(input_str)\n rightmost = \"\"\n hash_to_split_on = \"UPNFSZ7FQ6RBhfFzwt0Cku4Yr1n2VvwVUG7x97G7\"\n for _ in range(abs(num)):\n if leftmost == \"\":\n break\n new_str = re.sub(r\"(.*)%s(.*)\" % query,\n r\"\\1%s%s%s\" % (hash_to_split_on, replace, right_replace), leftmost, 1)\n new_str = new_str.split(hash_to_split_on)\n if len(new_str) == 2:\n leftmost = new_str[0]\n rightmost = new_str[1] + rightmost\n new_str = leftmost + rightmost\n else:\n new_str = leftmost + rightmost\n break\n else:\n new_str = re.sub(query, replace, input_str, num)\n\n return new_str",
"def replacevals(self, stmt):\n if 'materialize' in stmt:\n stmt = self.process_materialize(stmt)\n if 'listagg' in stmt:\n stmt = process_aggregates(stmt)\n if 'select USER, table_name' in stmt and stmt.count('UNION') == 3:\n return \"select user,table_name,preference from ingest_test\"\n if '.nextval from dual' in stmt and 'connect by' in stmt:\n self.num = int(stmt[stmt.rfind('<') + 1:])\n return None\n for k, v in self.repl.items():\n stmt = stmt.replace(k, v)\n return stmt",
"def replace(text,pattern,replace=\"\"):\n\n thisFunc = inspect.currentframe().f_code.co_name\n result = re.sub(pattern,replace,text)\n return result",
"def regex_replace_value(val, val_new, pattern,\n val_exception=np.nan):\n try:\n if not bool(re.match(pattern, val)):\n return val_new\n else:\n return val\n except:\n return val_exception",
"def regexp(self, regexp):\n\n self._regexp = regexp",
"def replace(\n haystack : Exp, haystack_context : Context, haystack_pool : Pool,\n needle : Exp, needle_context : Context, needle_pool : Pool,\n replacement : Exp) -> Exp:\n return _Replacer(haystack_context, haystack_pool, needle, needle_context, needle_pool, replacement).visit(haystack)"
] | [
"0.63767254",
"0.62250006",
"0.6183154",
"0.6116867",
"0.5989531",
"0.59528315",
"0.59289795",
"0.58843",
"0.5872477",
"0.5823016",
"0.5805267",
"0.57211936",
"0.56842566",
"0.5672672",
"0.56666964",
"0.56602126",
"0.56288123",
"0.56245446",
"0.5611972",
"0.5604826",
"0.5587793",
"0.55548495",
"0.5551875",
"0.5549347",
"0.5534803",
"0.5533979",
"0.5526949",
"0.5503645",
"0.5485154",
"0.54828167"
] | 0.77711844 | 0 |
Implement the ``+`` operator. In a column context, produces the clause ``a + b`` if the parent object has nonstring affinity. If the parent object has a string affinity, produces the concatenation operator, ``a || b`` | def __add__(self, other: Any) -> ColumnOperators:
return self.operate(add, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)",
"def concat(self, other: Any) -> ColumnOperators:\n return self.operate(concat_op, other)",
"def _rconcat(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(concat_op, other)",
"def concat(cls, c1, c2, op):\r\n if c1.clause and c2.clause:\r\n return cls('({}) {} ({})'.format(c1.clause, op, c2.clause), c1.params + c2.params)\r\n elif c1.clause:\r\n return c1\r\n elif c2.clause:\r\n return c2\r\n else:\r\n return cls('', ())",
"def __add__(self, other):\n if type(other) == str:\n return str(self) + other",
"def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)",
"def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)",
"def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)",
"def __add__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value + other.value),\"\")",
"def __radd__(self, other):\n return self + other",
"def __radd__(self, other):\n return self + other",
"def __add__(self, other):\n return self.concatenate(other)",
"def __add__(self, other):\n return self.concatenate(other)",
"def __add__(self, other):\n if not other:\n return self.clone()\n else:\n return self.using(join(self, other))",
"def __iadd__(self, other):\n\n return self + other",
"def test_operator_adapt(self):\n\n # test string concatenation\n expr = test_table.c.data + \"somedata\"\n assert testing.db.execute(select([expr])).scalar() == \"somedatasomedata\"\n\n expr = test_table.c.id + 15\n assert testing.db.execute(select([expr])).scalar() == 16\n\n # test custom operator conversion\n expr = test_table.c.avalue + 40\n assert expr.type.__class__ is test_table.c.avalue.type.__class__\n\n # value here is calculated as (250 - 40) / 10 = 21\n # because \"40\" is an integer, not an \"avalue\"\n assert testing.db.execute(select([expr.label('foo')])).scalar() == 21\n\n expr = test_table.c.avalue + literal(40, type_=MyCustomType)\n \n # + operator converted to -\n # value is calculated as: (250 - (40 * 10)) / 10 == -15\n assert testing.db.execute(select([expr.label('foo')])).scalar() == -15\n\n # this one relies upon anonymous labeling to assemble result\n # processing rules on the column.\n assert testing.db.execute(select([expr])).scalar() == -15",
"def __radd__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(value + self._real, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(value._real + self._real, value._imag + self._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n value.__class__.__name__, self.__class__.__name__\r\n )\r\n )",
"def __add__(self, other):\n\n return self._binary_elementwise_op(other, np.add)",
"def Addition(self, paren=False):\n left = self.Term(paren)\n while self.currtok[1].name in {\"PLUS\", \"MINUS\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Term(paren)\n left = BinaryExpr(op, left, right, paren)\n return left",
"def __add__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(self._real + value, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(self._real + value._real, self._imag + value._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )",
"def plus(self, other):\n return self | other",
"def plus(self, a, b):\n return a + b",
"def __radd__(self, other):\n if other is Ellipsis:\n return SkipTo(self)(\"_skipped\") + self\n\n return whitespaces.CURRENT.normalize(other) + self",
"def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left",
"def __add__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Add, value)\n return out",
"def __iadd__(self, other):\n return (hasattr(other, '__iter__') and self.applyMaterFunc or self.applyScalarFunc)(other, '__add__')",
"def __add__(self, other):\n result = self.__class__()\n result._terms.extend(self)\n\n if isinstance(other, self._term_class):\n if any(\n isinstance(other, term.__class__) and other.name == term.name\n for term in self._terms\n ):\n msg = (\n f\"There is already a term of type {other.__class__} with name \"\n f\"'{other.name}' in {self.__class__}. Please provide a different \"\n f\"name for {other}.\"\n )\n raise ValueError(msg)\n else:\n result._terms.append(other)\n elif isinstance(other, self.__class__):\n for term in other:\n result += term\n else:\n msg = f\"Unsupported operand type(s) for +: {type(self)} and {type(other)}.\"\n raise TypeError(msg)\n\n return result",
"def plus(x, y):\n x[:] += y[:]\n return x",
"def _append_operator(self, operator):",
"def __add__(\n self,\n other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],\n ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:\n return TensorWrappedPhiTensorPointer._apply_op(self, other, \"__add__\")"
] | [
"0.6905442",
"0.6812871",
"0.63020706",
"0.62879044",
"0.6166348",
"0.6156531",
"0.61488134",
"0.6112693",
"0.601093",
"0.58894515",
"0.58894515",
"0.58422416",
"0.58422416",
"0.58286184",
"0.5785112",
"0.57387686",
"0.5712233",
"0.57024205",
"0.569837",
"0.5680506",
"0.5651719",
"0.56439155",
"0.5601804",
"0.5589671",
"0.5558083",
"0.555472",
"0.54757154",
"0.5472365",
"0.5443158",
"0.53992236"
] | 0.7208119 | 0 |
Implement the ``%`` operator. In a column context, produces the clause ``a % b``. | def __mod__(self, other: Any) -> ColumnOperators:
return self.operate(mod, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mod(a: Decimal, b: Decimal) -> Decimal:\n return a % b",
"def __floordiv__(self, other: Any) -> ColumnOperators:\n return self.operate(floordiv, other)",
"def __rmod__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mod, other)",
"def __mod__(self, other):\r\n T = type(other)\r\n # vec4%scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x%other, self.y%other, self.z%other, self.w%other)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for %\"",
"def modulus(x, y):\n return x % y",
"def _sample_using_mod(\n self,\n column_name,\n mod: int,\n value: int,\n ):\n return sa.column(column_name) % mod == value",
"def __mod__(self, other):\r\n T = type(other)\r\n # mat4%scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x%other, self.mlist))\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for %\"",
"def divmod(self, other, **kwargs):\n return SeriesDefault.register(pandas.Series.divmod)(self, other=other, **kwargs)",
"def mod(num1, num2):\n\n return num1 % num2",
"def mod(num1, num2):\n return num1 % num2",
"def LIKE(self, first, second, escape=None):\n if isinstance(second, Expression):\n second = self.expand(second, 'string')\n else:\n second = self.expand(second, 'string')\n if escape is None:\n escape = '\\\\'\n second = second.replace(escape, escape * 2)\n return \"(%s LIKE %s ESCAPE '%s')\" % (self.expand(first),\n second, escape)",
"def instruction_mod(self, register, a, b):\n if Vm.is_register(a):\n a = self.get_register(a)\n\n if Vm.is_register(b):\n b = self.get_register(b)\n\n self.set_register(register, (a % b) % MAX_INT)",
"def div(self, a, b):\n return (a / b, a % b)",
"def __mod__(self, other):\n return MyCustomNumber(self.value % other.value)",
"def division_algo(a, b):\n return a / b, a % b",
"def binary_operator_string(self, binary):\n return binary.operator == '%' and 'mod' or binary.operator",
"def true_div(a, b):\r\n # see decorator for function body\r",
"def __rfloordiv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(floordiv, other)",
"def modulo(val1, val2):\n if coerce_to_int(val2) == 0:\n return None\n return coerce_to_int(val1) % coerce_to_int(val2)",
"def mod(num1, num2):\n\n remainder = num1 % num2\n return remainder",
"def mod(num1, num2):\n remainder = num1 % num2\n return remainder",
"def __mod__( self, value ):\r\n\t\tif ( type( value ) == type( self ) ):\r\n\t\t\treturnvalue = fraction( self )\r\n\t\t\tif ( returnvalue < 0 ):\r\n\t\t\t\twhile ( returnvalue < -value ): returnvalue += value\r\n\t\t\telse:\r\n\t\t\t\twhile ( returnvalue > value ): returnvlaue -= value\r\n\t\t\treturn returnvalue\r\n\t\telif ( type( value ) in ( types.IntType, types.LongType ) ):\r\n\t\t\treturn fraction( self.numerator % ( value * self.denominator ), self.denominator )\r\n\t\telif ( type ( value ) == types.FloatType ):\r\n\t\t\treturn float( self ) % value\r\n\t\telse: return NotImplemented",
"def the_remainder_of_the_division(numb1, numb2):\r\n return f\"Your result: {numb1%numb2}\"",
"def LIKE(self, first, second, escape=None):\n if isinstance(second, Expression):\n second = self.expand(second, 'string')\n else:\n second = self.expand(second, 'string')\n if escape is None:\n escape = '\\\\'\n second = second.replace(escape, escape * 2)\n if first.type not in ('string', 'text', 'json'):\n return \"(%s LIKE %s ESCAPE '%s')\" % (\n self.CAST(self.expand(first), 'CHAR(%s)' % first.length),\n second, escape\n )\n else:\n return \"(%s LIKE %s ESCAPE '%s')\" % (self.expand(first), second, escape)",
"def div_mod_p(self, a, b):\n a = a % self.p\n b = b % self.p\n return a * self.pow_mod_p(b, self.p - 2, self.p) % self.p",
"def like(\n self, other: Any, escape: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(like_op, other, escape=escape)",
"async def modulus(message, number1: ParamType.NUMBER, number2: ParamType.NUMBER):\n try:\n rem = number1 % number2\n return \"remainder = \" + str(rem)\n except:\n return \"failed to perform modulo operation on provided values.\"",
"def percentage(a, b):\n return (a * 100.0) / b",
"def percent_of(part, whole):\n return part * 100 / whole",
"def resol_modulo(a,b, mod):\r\n\tfor i in range(mod): # Pour tous les nombres du modulo\r\n\t\tif (a*i) % mod == b: # Si a*i modulo mod = b\r\n\t\t\treturn i # Alors on a trouvé ! On renvoit i\r\n\treturn None"
] | [
"0.5719018",
"0.5704813",
"0.5701771",
"0.56699693",
"0.55854285",
"0.55826694",
"0.5576752",
"0.5486577",
"0.5443312",
"0.54423",
"0.5435979",
"0.5395203",
"0.538293",
"0.53065777",
"0.5273658",
"0.5256355",
"0.52500427",
"0.52453357",
"0.51927376",
"0.51926243",
"0.5174666",
"0.51745933",
"0.5162638",
"0.5131344",
"0.510094",
"0.5070713",
"0.50653315",
"0.50417626",
"0.5013811",
"0.50052565"
] | 0.6100919 | 0 |
Implement the ``//`` operator. In a column context, produces the clause ``a / b``, which is the same as "truediv", but considers the result type to be integer. | def __floordiv__(self, other: Any) -> ColumnOperators:
return self.operate(floordiv, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __rfloordiv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(floordiv, other)",
"def exquo(self, a, b):\n return a // b",
"def __truediv__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n if value == 0:\r\n if self.is_zero():\r\n raise InvalidOperationError(\r\n 'zero divided by zero is indeterminate'\r\n )\r\n raise DivisionByZero('division by zero')\r\n return self.__class__(self._real / value, self._imag / value)\r\n elif isinstance(value, self.__class__):\r\n # (a + bj)/(c + dj) = ((ac + bd) + (bc - ad)*j) / (c*c + d*d)\r\n dr = value._real\r\n di = value._imag\r\n hy = dr.fma(dr, di*di)\r\n if hy == 0:\r\n if self.is_zero():\r\n raise InvalidOperationError(\r\n 'zero divided by zero is indeterminate'\r\n )\r\n raise DivisionByZero('division by zero')\r\n qr = self._real.fma(dr, self._imag*di) / hy\r\n qi = self._imag.fma(dr, -self._real*di) / hy\r\n return self.__class__(qr, qi)\r\n raise TypeError(\r\n 'unsupported operand type(s) for /: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )",
"def exquo(self, a, b):\n return a / b",
"def __rtruediv__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n rr = dec.Decimal(value)\r\n ri = dec.Decimal('0')\r\n elif isinstance(value, self.__class__):\r\n rr = value._real\r\n ri = value._imag\r\n else:\r\n raise TypeError(\r\n 'unsupported operand type(s) for /: {!r} and {!r}'.format(\r\n value.__class__.__name__, self.__class__.__name__\r\n )\r\n )\r\n # (a + bj)/(c + dj) = ((ac + bd) + (bc - ad)*j) / (c*c + d*d)\r\n dr = self._real\r\n di = self._imag\r\n hy = dr.fma(dr, di*di)\r\n if hy == 0:\r\n if rr == 0 and ri == 0:\r\n raise InvalidOperationError(\r\n 'zero divided by zero is indeterminate'\r\n )\r\n raise DivisionByZero('division by zero')\r\n qr = rr.fma(dr, ri*di) / hy\r\n qi = ri.fma(dr, -rr*di) / hy\r\n return self.__class__(qr, qi)",
"def __truediv__(self, other):\n # other is a scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Vector([i / other for i in self.data], self.column)\n # other is not a scalar\n else:\n raise TypeError('Argument is not a number')",
"def dividir(value, arg):\n return int(value) /int(arg)",
"def __div__(self, other):\r\n T = type(other)\r\n # vec4/scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x/other, self.y/other, self.z/other, self.w/other)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for /\"",
"def __truediv__(self, other: Any) -> ColumnOperators:\n return self.operate(truediv, other)",
"def convert_div_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)",
"def test_mixeddiv():\r\n i = iscalar()\r\n d = dscalar()\r\n assert 0 == function([i, d], d * (i // (i + 1)))(3, 1.0)",
"def __div__(self, other):\n if isinstance(other, (int, float)):\n return self * (1 / other)\n else:\n raise TypeError(\"Cannot divide vector by {}\".format(other))",
"def convert_rdiv_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)",
"def test_scalar_division(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n\n a2 = a1 / 2\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))",
"def __rtruediv__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Div.apply(other, self)",
"def test_rtruediv():\n truediv = _MathExpression() / 2\n rtruediv = 9 / _MathExpression()\n assert truediv(9) == rtruediv(2)",
"def __truediv__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Div.apply(self, other)",
"def __truediv__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during division of {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Divide(self, other)",
"def __rtruediv__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during division by {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Divide(other, self)",
"def test_truediv(self):\n a = int(3)\n self.assertEqual(a / 2, 1) # since \"from __future__ import division\"\n # is in effect\n self.assertEqual(type(a / 2), int)\n\n b = int(2)\n self.assertEqual(a / b, 1) # since \"from __future__ import division\"\n # is in effect\n self.assertEqual(type(a / b), int)\n\n c = int(3) / b\n self.assertEqual(c, 1)\n self.assertTrue(isinstance(c, int))\n\n d = int(5)\n d /= 5\n self.assertEqual(d, 1)\n self.assertTrue(isinstance(d, int))\n\n e = int(10)\n f = int(20)\n e /= f\n self.assertEqual(e, 0)\n self.assertTrue(isinstance(e, int))",
"def __rtruediv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(truediv, other)",
"def division(a, b):\n if b != 0:\n return a//b",
"def __div__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Divide, value)\n return out",
"def __truediv__(self, other):\n # other is scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Matrix([[p / other for p in row] for row in self.data])\n else:\n raise TypeError('Matrix can only be divided by a scalar')",
"def divide(self, a, b):\n return a / b",
"def __div__(self, other):\r\n T = type(other)\r\n # mat4/scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x/other, self.mlist))\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for /\"",
"def __int__( self ):\r\n\t\treturnvalue = self.numerator / self.denominator\r\n\t\tif ( type( returnvalue ) == types.ComplexType ):\r\n\t\t\treturnvalue = int( abs( returnvalue ) )\r\n\t\telse:\r\n\t\t\treturnvalue = int( returnvalue )\r\n\t\treturn returnvalue",
"def __truediv__(self, other):\n return MyCustomNumber(self.value / other.value)",
"def the_division_is_aimed(numb1, numb2):\r\n return f\"Your result: {numb1//numb2}\"",
"def __rdiv__(self, _scalar):\n\t\treturn self / _scalar"
] | [
"0.65181756",
"0.65117353",
"0.63063335",
"0.6280956",
"0.62741816",
"0.61978835",
"0.6117819",
"0.60942596",
"0.6050787",
"0.6042001",
"0.60112303",
"0.6004055",
"0.5971862",
"0.59563917",
"0.59391904",
"0.59379375",
"0.5923627",
"0.5915135",
"0.58833855",
"0.5875041",
"0.58683574",
"0.58523965",
"0.58060586",
"0.5805454",
"0.58020204",
"0.57940596",
"0.57928175",
"0.5789144",
"0.57810277",
"0.5776012"
] | 0.680153 | 0 |
Verifies the bot by solving the website's captcha | def solve_captcha(self):
# Switch to the Captcha's iframe
captcha = CapatchaSolver(self.driver)
while True:
self.driver.switch_to.frame(self.driver.find_element_by_tag_name("iframe"))
captcha.solve_captcha()
# Check if we passed the captcha part by checking the page title
wait = WebDriverWait(self.driver, 10)
try:
wait.until_not(EC.title_is(consts.BLOCKED))
break
except TimeoutException:
self.driver.refresh() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def captcha_validation(token: str):\n url = \"https://www.google.com/recaptcha/api/siteverify\"\n secret = json.loads(get_secret(\"CAPTCHA_SECRET\"))['CAPTCHA_SECRET']\n payload = {\n \"secret\": secret,\n \"response\": token\n }\n response_raw = requests.post(url, data=payload)\n response_text = response_raw.text\n logger.debug(response_text)\n response = json.loads(response_text)\n return response['success']",
"def captcha(self):\n notification.send_sms(message=message)\n notification.send_emails(emails=email, message=message)\n sleep(25)\n\n ### this code snippet is for reference only, not to be used ###\n # sleep(3)\n # captcha = self.driver.find_element_by_xpath('/html/body/div/iframe[0]')\n # self.driver.switch_to.frame(captcha)\n # captcha_loc = captcha.location\n # print(captcha_loc)\n # captcha_x = captcha_loc[\"x\"]\n # captcha_y = captcha_loc[\"y\"]\n # self.actions.tap_and_hold(captcha_x, captcha_y)\n # sleep(5)\n # self.actions.release(captcha_x, captcha_y)\n # self.search_input()",
"def twocaptcha_solver():\n SITE_URL = get_site_settings()[1]\n SITE_KEY = get_site_settings()[0] # osrs site key\n API_KEY = get_user_settings()[2] # api key read from settings.ini\n if not API_KEY:\n raise ValueError(\"No API key was found in settings.ini.\")\n\n s = requests.Session()\n\n # here we post and parse site key to 2captcha to get captcha ID\n try:\n captcha_id = s.post(f\"http://2captcha.com/in.php?key={API_KEY}\"\n f\"&method=userrecaptcha&googlekey={SITE_KEY}\"\n f\"&pageurl={SITE_URL}\").text.split('|')[1]\n except IndexError:\n print(\"You likely don't have a valid 2captcha.com API key with funds\"\n \" in your settings.ini file. Fix and re-run the program.\")\n\n # then we parse gresponse from 2captcha response\n recaptcha_answer = s.get(\n f\"http://2captcha.com/res.php?key={API_KEY}\"\n f\"&action=get&id={captcha_id}\").text\n print(\"Solving captcha...\")\n while 'CAPCHA_NOT_READY' in recaptcha_answer:\n sleep(6)\n recaptcha_answer = s.get(\n f\"http://2captcha.com/res.php?key={API_KEY}\"\n f\"&action=get&id={captcha_id}\").text\n try:\n recaptcha_answer = recaptcha_answer.split('|')[1]\n except IndexError:\n print(\"2captcha failed to solve this one.. Returning a blank response \"\n \"If the program fails to continue, please msg Gavin with error.\")\n recaptcha_answer = ''\n else:\n return recaptcha_answer",
"def bypass_captcha(self, rps):\n viewstate_pattern = r\"id=\\\"__VIEWSTATE\\\".*\\\"(.*)\\\"\"\n viewstategenerator_pattern = r\"id=\\\"__VIEWSTATEGENERATOR\\\".*\\\"(.*)\\\"\"\n CAPTCHA_PATTERN = r\"id=\\\"ctl00_ContentPlaceHolder1_ctl00_lblCapcha\\\".*?>(.*?)<\\/span>\"\n viewstate = re.search(viewstate_pattern, rps)\n if viewstate:\n viewstate = viewstate.group(1)\n else:\n print(\"VIEWSTATE value not found!\")\n viewstategenerator = re.search(viewstategenerator_pattern, rps)\n if viewstategenerator:\n viewstategenerator = viewstategenerator.group(1)\n captcha = re.search(CAPTCHA_PATTERN, rps)\n if captcha:\n captcha_text = captcha.group(1)\n print(\"[*] CAPTCHA -> [{}]\".format(captcha_text))\n payload = {\n 'ctl00$ContentPlaceHolder1$ctl00$txtCaptcha':captcha_text,\n '__VIEWSTATE':viewstate,\n '__VIEWSTATEGENERATOR':viewstategenerator,\n '__EVENTARGUMENT':'',\n '__EVENTTARGET':'',\n 'ctl00$ContentPlaceHolder1$ctl00$btnXacNhan': 'Vào website'\n }\n rps = self.session.post(url = home_url, headers = BROWSER_HEADERS, data=payload)\n if CAPTCHA_ELEMENT_ID not in rps.text:\n print(\"[*] CAPTCHA BYPASSED\")\n return True\n else:\n print(\"CAPTCHA NOT BYPASSED! PLEASE REPORT TO DEVELOPER BACHVKHOA!\")\n else:\n print(\"[*] CAPTCHA NOT FOUND\")\n return False",
"def askForCaptcha(self, url):\n try:\n import webbrowser\n wikipedia.output(u'Opening CAPTCHA in your web browser...')\n if webbrowser.open(url):\n return wikipedia.input(\n u'What is the solution of the CAPTCHA that is shown in '\n u'your web browser?')\n else:\n raise\n except:\n wikipedia.output(u'Error in opening web browser: %s'\n % sys.exc_info()[0])\n wikipedia.output(\n u'Please copy this url to your web browser and open it:\\n %s'\n % url)\n return wikipedia.input(\n u'What is the solution of the CAPTCHA at this url ?')",
"def handle_captcha(self):\n self.webdriver.save_screenshot('./out/captcha.png')\n sleep(20)\n\n with open('./out/captcha', 'r') as f:\n try:\n self.webdriver.find_element_by_xpath(\"//input[@aria-label='Type the text you hear or see']\").send_keys(f.read())\n except:\n log.error('Captcha input failed. Possibly incorrect captcha?')\n raise\n\n self.webdriver.find_element_by_xpath('//*[@id=\"identifierNext\"]').click()\n sleep(4)\n\n self.webdriver.find_element_by_css_selector(\"input[type=password]\").send_keys(self.bot.getPassword())",
"async def enter_captcha(self, url, sid):\n raise VkCaptchaNeeded(url, sid)",
"def solve_captcha_manual(gid):\n image = auth.get_captcha_image(gid)\n # FIXME: Use Python's temp file interface.\n image.save(\"./test.png\")\n webbrowser.open_new_tab(\"./test.png\")\n text = input('solve_captcha --->')\n return text",
"def _validate_captcha(data):\n settings = api.config.get_settings()[\"captcha\"]\n\n post_data = urllib.parse.urlencode(\n {\n \"secret\": settings[\"reCAPTCHA_private_key\"],\n \"response\": data[\"g-recaptcha-response\"],\n \"remoteip\": flask.request.remote_addr,\n }\n ).encode(\"utf-8\")\n\n request = urllib.request.Request(settings[\"captcha_url\"], post_data, method=\"POST\")\n response = urllib.request.urlopen(request).read().decode(\"utf-8\")\n parsed_response = json.loads(response)\n return parsed_response[\"success\"] is True",
"def handle_verify_code(self, code):\n r = self.session.get(self.image_url_format.format(code=code))\n\n # FIXME use terminal better\n img_path = os.path.expanduser('~/') + 'pansh.{}.vcode.png'.format(hash(self.username))\n with open(img_path, mode='wb') as fp:\n fp.write(r.content)\n print(\"Saved verification code to {}\".format(os.path.dirname(img_path)))\n vcode = raw_input(\"Please input the captcha:\\n\")\n return vcode",
"def tela_inicial_do_challenge_1():\r\n # primeiro\r\n _url_site = \"http://rpachallenge.com/\"\r\n _current_url = _browser.current_url\r\n\r\n assert _current_url == _url_site",
"def _handle_verify_code(self):\n while True:\n # r = self.session.get(self._genimage_url.format(code=self.codestring))\n try:\n self.headers[\"Cookie\"] = \"__jsluid=%s; __jsl_clearance=%s; JSESSIONID=%s\" % (self._jsluid, self._jsl_clearance, self.jsessionid)\n vfcode_url = \"http://www.miitbeian.gov.cn/getVerifyCode?%s\" % random.randint(10, 90)\n logger.info(\"Downloading verification code pic: %s\", vfcode_url)\n request = urllib2.Request(vfcode_url,headers=self.headers)\n r = self.opener.open(request, timeout=20)\n s = r.read()\n for cookie in self.cookiejar:\n logger.info(\"Get Cookie step2: %s, %s\", cookie.name, cookie.value)\n if cookie.name == \"JSESSIONID\":\n self.jsessionid = cookie.value\n img_path = \"miitVerf/code.png\"\n with open(img_path, mode='wb') as fp:\n fp.write(s)\n fp.close()\n logger.info(\"Saved verification code to %s\", format(os.path.dirname(img_path)))\n break\n except Exception,e:\n logger.info(e)\n self.vcode = raw_input(\"Please input the captcha:\\n\")\n return self.vcode",
"def test_cadastros_de_registros_no_site_rpa_challenge():",
"def get_captcha_challenge(http_body, \n captcha_base_url='http://www.google.com/accounts/'):\n contains_captcha_challenge = False\n captcha_parameters = {}\n for response_line in http_body.splitlines():\n if response_line.startswith('Error=CaptchaRequired'):\n contains_captcha_challenge = True\n elif response_line.startswith('CaptchaToken='):\n # Strip off the leading CaptchaToken=\n captcha_parameters['token'] = response_line[13:]\n elif response_line.startswith('CaptchaUrl='):\n captcha_parameters['url'] = '%s%s' % (captcha_base_url,\n response_line[11:])\n if contains_captcha_challenge:\n return captcha_parameters\n else:\n return None",
"def check(request, response_key='response'):\n response = (request.POST.get(response_key, None)\n or request.GET.get(response_key, None))\n remote_ip = get_ip(request)\n return base_check(settings.RECAPTCHA_SHARED_SECRET,\n response,\n remote_ip)",
"def verify():",
"def get_captcha_reply(captcha):\n def get_char_at(pos, captcha):\n char_chars = [line[pos-1:pos] for line in captcha.split(b'\\n')]\n key = ''.join([ str(s, 'ascii') for s in char_chars])\n if key == ' | ':\n return get_char_at(pos+2, captcha)\n if key == ' | .\\\\ ':\n return get_char_at(pos+2, captcha)\n return chars[key]\n\n pos = 1\n\n a, size = get_char_at(pos, captcha)\n pos += size\n pwn.log.info(\"a=%d\" % a)\n\n op, size = get_char_at(pos, captcha)\n pos += size\n pwn.log.info('op=%s' % op)\n\n b, size = get_char_at(pos, captcha)\n pos += size\n pwn.log.info('b=%d' % b)\n \n if op == '-':\n return a - b\n if op == '*':\n return a * b\n if op == '/':\n return a / b\n if op == '+':\n return a + b\n pwn.log.error(\"Ops not found (%s)\" % op)",
"def solve_image_captcha(self, captcha_tmp_path):\n # Get solution and apply it\n for i in range(1, 4):\n print(f\"Attempt #{i} for recaptcha solution\")\n solution = self.obtain_image_captcha(captcha_tmp_path)\n print(f'this {solution}')\n if solution and ERROR not in solution.upper():\n break\n\n if solution is None or ERROR in solution.upper():\n if not solution:\n message = f\"2Captcha service didn't return a response for the captcha\"\n else:\n message = f\"Error in captcha solution from 2Captcha: {solution}\"\n return None\n\n print(\"Captcha solution: {}\".format(solution))\n return solution",
"def is_recaptcha_valid(request):\n return requests.post(\n current_app.config[\"GOOGLE_VERIFY_RECAPTCHA_URL\"],\n data={\n 'secret': current_app.config[\"GOOGLE_VERIFY_RECAPTCHA_KEY\"],\n 'response': request.form['g-recaptcha-response'],\n },\n verify=True\n ).json().get(\"success\", False)",
"def test_client_submit_empty_input(self, mock_urlopen):\n result = client.submit('', '', '')\n self.assertFalse(result.is_valid)\n self.assertEqual(['incorrect-captcha-sol'], result.error_codes)",
"def __init__(self, anticaptcha_key, gb=True):\n self.solver = hCaptchaProxyless()\n self.solver.set_key(anticaptcha_key)\n self.solver.set_website_url(\"https://2ch.hk/\")\n # self.solver.set_website_url(\"https://2ch.pm/\")\n # self.solver.set_verbose(1) # debug\n self.solver.set_website_key(\"248cebfd-9b3f-4d8c-88b5-f812daf51261\") # 2ch google captcha site key\n\n if gb:\n self.get_balance()",
"def check(secret,\n response,\n remote_ip=None,\n check_url=DEFAULT_RECAPTCHA_CHECK_URL):\n return check_detailed(secret,\n response,\n remote_ip,\n check_url)['success']",
"def telegram_check():\n hotp = pyotp.HOTP('base32secret3232')\n random_seed = random.randint(9999, 99999)\n tkinter.messagebox.showinfo(\"\", \"Ga naar: http://t.me/BevFietsBot\" + \"\\nen stuur deze code: \" + hotp.at(random_seed)\n + \"\\nGa na versturen verder.\")\n telegram_output = telegram_read()\n\n if hotp.verify(telegram_output, random_seed):\n return 1\n else:\n tkinter.messagebox.showinfo(\"\", \"Inlog gegevens niet correct\")\n return 0",
"async def check(self,ctx):\r\n try:\r\n check = verify.check(ctx.message.author.id)\r\n except Exception as e:\r\n await self.bot.say('Error: ' +str(e)+'\\n\\nIf your match hasn\\'t registered yet, wait 5-10 minutes or check http://discord.me/EZLBot for updates. Else, signup again with {}verify <ign> <region>'.format(self.bot.command_prefix[0]))\r\n return\r\n await self.bot.say(\"OK. {}. You can now enter the matchmaker with {}vgtinder to find people to party with.\".format(check,self.bot.command_prefix[0]))",
"def send_verification(self):\n pass",
"def _verify_code_after_captcha(\n self,\n mock_add_credentials: Any,\n mock_request_user_sync: Any,\n mock_sendmail: Any,\n mock_recaptcha: Any,\n data1: Optional[dict] = None,\n email: str = '[email protected]',\n ):\n mock_add_credentials.return_value = True\n mock_request_user_sync.return_value = True\n mock_sendmail.return_value = True\n mock_recaptcha.return_value = True\n\n with self.session_cookie_anon(self.browser) as client:\n\n with self.app.test_request_context():\n with client.session_transaction() as sess:\n data = {\n 'email': email,\n 'recaptcha_response': 'dummy',\n 'tou_accepted': True,\n 'csrf_token': sess.get_csrf_token(),\n }\n if data1 is not None:\n data.update(data1)\n\n client.post('/trycaptcha', data=json.dumps(data), content_type=self.content_type_json)\n\n if data1 is None:\n # lower because we are purposefully calling it with a mixed case mail address in tests\n send_verification_mail(email.lower())\n\n signup_user = self.app.private_userdb.get_user_by_pending_mail_address(email)\n response = client.get('/verify-link/' + signup_user.pending_mail_address.verification_code)\n return json.loads(response.data)",
"def require_auth_captcha(self, response, query_params,\n login_form_data, http_session):\n logger.info('Captcha is needed. Query params: %s', query_params)\n form_text = response.text\n\n action_url = parse_form_action_url(form_text)\n logger.debug('form action url: %s', action_url)\n if not action_url:\n raise VkAuthError('Cannot find form action url')\n\n captcha_sid, captcha_url = parse_captcha_html(\n html=response.text, response_url=response.url)\n logger.info('Captcha url %s', captcha_url)\n\n login_form_data['captcha_sid'] = captcha_sid\n login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)\n\n response = http_session.post(action_url, login_form_data)\n return response",
"async def verify(self,ctx,ign='',region=''):\r\n if ign =='' or region =='':\r\n await self.bot.say(\"Please type in a ign and region.\")\r\n return\r\n if not ctx.message.channel.is_private: #Makes sure channel is private\r\n await self.bot.say('Sorry. But this process must be done in a private message, to continue please dm the bot ```{}```'.format(ctx.message.content))\r\n return\r\n try:\r\n pattern = verify.start(ctx.message.author.id, ign,region)\r\n except Exception as e:\r\n await self.bot.say('Error: ' + str(e)+'\\n\\nJoin http://discord.me for more info.')\r\n return\r\n pattern_ = '{} Halcyon Potions, {} Weapon Infusions, and {} Crystal Infusions'.format(str(pattern.count(0)), str(pattern.count(1)), str(pattern.count(2)))\r\n await self.bot.say(\"Awesome. To complete the authorization process.\\n• Enter a **blitz** match\\n• Buy **{}** for your first {} items.\\n• **You can sell them immediately at the same price.**\\n• This must be your next match.\\n• **Once you are done please type {}check to complete authorization process.** Once this is done, your account will be linked and authenticated permanantly.\".format(pattern_,len(pattern), self.bot.command_prefix[0]))\r\n\r\n await asyncio.sleep(345)\r\n\r\n await self.bot.send_message(ctx.message.author, verify.check(ctx.message.author.id))",
"def generate_challenge(self):\n return None",
"def get_image_response(self, captcha_id):\n url = 'http://2captcha.com/res.php'\n data = {'key': self.api_key, 'action': 'get',\n 'id': captcha_id, 'json': 1}\n response = self.session.post(url, data=data)\n json_response = json.loads(response.text)\n recaptcha_answer = json_response[\"request\"]\n finished = False\n for _ in range(20): # For making up to 120 seconds of waits\n if 'CAPCHA_NOT_READY' not in response.text:\n finished = True\n break\n # Time Requested by the web page\n sleep(6)\n response = self.session.post(url, data=data)\n json_response = json.loads(response.text)\n recaptcha_answer = json_response[\"request\"]\n\n if not finished:\n return False\n\n return recaptcha_answer"
] | [
"0.66799676",
"0.6658235",
"0.6610101",
"0.6559271",
"0.6540222",
"0.65375984",
"0.64021313",
"0.61327654",
"0.5926861",
"0.5926746",
"0.5910555",
"0.58881336",
"0.58064806",
"0.5769368",
"0.5726972",
"0.5697831",
"0.56768423",
"0.566876",
"0.5651989",
"0.56504005",
"0.5627242",
"0.56188107",
"0.5582711",
"0.5574426",
"0.55490977",
"0.55280906",
"0.5520481",
"0.5513409",
"0.5503811",
"0.5475712"
] | 0.72698504 | 0 |
Obtains a generic title for a review for a product | def get_review_title(self, language):
comment_generator = CommentGenerator(language)
return comment_generator.generateTitle() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def title(self) -> float:\n return self.product.name if self.product else self.name",
"def get_title():",
"def review_details(product_id):\n\n # Gets the product's specifications from the database\n product = mongo.db.products.find_one({\"_id\": ObjectId(product_id)})\n\n # Sets the page title\n page_title = product[\"name\"] + \" Review\"\n\n # Sets the current user\n\n if session.get('user'):\n current_user = \"{} {}\".format(session['user']['first_name'],\n session['user']['last_name'])\n\n else:\n current_user = None\n\n \"\"\"\n Gets the product's reviews from the database and sorts them. Sort method is\n from https://docs.mongodb.com/manual/reference/method/cursor.sort\n /index.html\n \"\"\"\n reviews = list((mongo.db.reviews.find(\n {\"product\": product[\"name\"]})).sort(\"date_added\", -1))\n\n \"\"\"\n Updates the date_added value in the review dictionary to be\n in the correct format. Code is from https://www.w3schools.com/python/\n python_datetime.asp\n \"\"\"\n for review in reviews:\n review['date_added'] = review['date_added'].strftime(\"%d %B %Y\")\n\n \"\"\"\n Calculates the ratings as percentages and returns a dictionary containing\n the ratings values\n \"\"\"\n\n ratings = ratings_percentages(product, len(reviews))\n\n return render_template(\"review_details.html\",\n page_title=page_title,\n product=product,\n ratings=ratings,\n reviews=reviews,\n current_user=current_user)",
"def title(self) -> Optional[str]:\n if self._title is not None:\n return self._title\n if self._target_object is not None and isinstance(\n self._target_object, pystac.Catalog\n ):\n return self._target_object.title\n return None",
"def getTitle(self, item):\n return item.Title() or item.getId()",
"def get_title(rating):\n title = \"\"\n if rating < 1200:\n title = [\"Newbie\", \"grey-text\"]\n elif rating < 1400:\n title = [\"Pupil\", \"light-green-text\"]\n elif rating < 1600:\n title = [\"Specialist\", \"cyan-text\"]\n elif rating < 1900:\n title = [\"Expert\", \"indigo-text\"]\n elif rating < 2100:\n title = [\"Candidate Master\", \"purple-text\"]\n elif rating < 2300:\n title = [\"Master\", \"amber-text\"]\n elif rating < 2400:\n title = [\"International Master\", \"orange-text\"]\n elif rating < 2600:\n title = [\"Grandmaster\", \"red-text\"]\n elif rating < 3000:\n title = [\"International Grandmaster\", \"red-text\"]\n else:\n title = [\"Legendary Grandmaster\", \"red-text\"]\n return title",
"def fetch_name(self, product_id):\n product_url = urljoin(self.endpoint, str(product_id)) + \"?excludes={}\".format(self.excludes) + \"&key={}\".format(self.key)\n\n result = requests.get(product_url)\n\n if result.status_code != requests.codes[\"ok\"]:\n raise ProductNotFoundError(\"could not find product name for ID {}\".format(product_id))\n\n data = result.json()\n\n try:\n name = data[\"product\"][\"item\"][\"product_description\"][\"title\"]\n except KeyError:\n name = None\n\n return name",
"def title_or_id(context):\n title = getattr(context, 'title', '')\n if not title:\n if hasattr(context, '__name__'):\n title = getattr(context, '__name__', '')\n elif hasattr(context, 'getId'):\n title = context.getId()\n return title",
"def get_title(rating):\n\ttitle = \"\"\n\tif rating < 1200:\n\t\ttitle = [\"Newbie\", \"grey-text\"]\n\telif rating < 1400:\n\t\ttitle = [\"Pupil\", \"light-green-text\"]\n\telif rating < 1600:\n\t\ttitle = [\"Specialist\", \"cyan-text\"]\n\telif rating < 1900:\n\t\ttitle = [\"Expert\", \"indigo-text\"]\n\telif rating < 2100:\n\t\ttitle = [\"Candidate Master\", \"purple-text\"]\n\telif rating < 2300:\n\t\ttitle = [\"Master\", \"amber-text\"]\n\telif rating < 2400:\n\t\ttitle = [\"International Master\", \"orange-text\"]\n\telif rating < 2600:\n\t\ttitle = [\"Grandmaster\", \"red-text\"]\n\telif rating < 3000:\n\t\ttitle = [\"International Grandmaster\", \"red-text\"]\n\telse:\n\t\ttitle = [\"Legendary Grandmaster\", \"red-text\"]\n\treturn title",
"def get_title(self) -> str:\n pass",
"def get_title(article):\n title = article.find(\"div\", class_=\"col-sm-6 product_main\").h1.text\n return title",
"def get_product_name(container) -> str:\r\n title_container = container.findAll(\"a\", {\"class\": \"item-title\"})\r\n # product_title: List[] = title_container[0].text\r\n return title_container[0].text",
"def get_title(self):\n return self.metadata['title']",
"def get_title(self):\n return self.title",
"def get_title(self):\n return self.title",
"def get_title(self):\n return self.title",
"def get_title(self) -> Optional[str]:\n return self.title",
"def get_recipe_title(soup_recipe):\n return soup_recipe.find(\"h1\", {\"itemprop\": \"name\"}).get_text()",
"def get_detail_title(soort, edit, obj):\n naam_ev = get_names_for_type(soort)[0]\n if edit == 'new':\n return _('Nieuw(e) ') + str(naam_ev)\n try:\n title = \" \".join((naam_ev.capitalize(), obj.naam))\n except AttributeError:\n title = \" \".join((naam_ev.capitalize(), obj.nummer))\n return title",
"def get_product_name(self):\n\n try:\n product_name = self.trees.get_element_by_id(\"productTitle\").text\n except:\n pass\n if product_name is None:\n product_name = \"Not available\"\n product_name = product_name.replace(\"\\n\", \"\")\n return product_name",
"def show(self, title):\n\n return Product.query.filter_by(title=title).first()",
"async def title(self):\n if not hasattr(self, \"_title\"):\n self._title = await Stack.fetch_stack_value(self, \"http://purl.org/dc/terms/title\", await self.uuid)\n return self._title",
"def title(self) -> str:\n return self._search_in_properties(ATTR_TITLE)",
"def get_title(self):\n\n return self.title",
"def get_recipe_title(soup_recipe):\n return soup_recipe.find(\"h1\", {\"itemprop\": \"name\"}).get_text().strip()",
"def _title(hit: DD) -> str:\n return hit[\"_source\"][\"title\"]",
"def get_review_type(self):\n return self.currentText()",
"def extract_title(soup):\r\n section = soup.find(\"div\", attrs={\"class\": \"col-sm-6 product_main\"})\r\n title = section.find(\"h1\")\r\n return title.text",
"def get_title(self):\n return self.run_command('get_title')[0]",
"def context_title(self):\n return self.request.POST.get(\"context_title\", self.context_id)"
] | [
"0.663245",
"0.6504558",
"0.6229481",
"0.6229021",
"0.6203045",
"0.61636335",
"0.6156097",
"0.6150632",
"0.6149885",
"0.60980564",
"0.60932887",
"0.599226",
"0.59651464",
"0.59595495",
"0.59595495",
"0.59595495",
"0.5944728",
"0.594012",
"0.5929832",
"0.58571696",
"0.584751",
"0.58432007",
"0.58352417",
"0.5827949",
"0.58202994",
"0.58184636",
"0.57926023",
"0.5786574",
"0.5775524",
"0.57515204"
] | 0.69112676 | 0 |
Leaves a review in a product page | def leave_review(self, product_url, review, review_title):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def leave_review(book_id):\n \n if request.method == 'POST':\n mongo.db.books.find_one_and_update(\n {\"_id\": ObjectId(book_id)}, \n {\"$push\": {\"reviews\": request.form.to_dict()['reviews']} }\n )\n return redirect(url_for('library'))\n \n else:\n return render_template('leave_review.html', \n book=mongo.db.books.find_one({'_id': ObjectId(book_id)}),\n reviews=mongo.db.books.reviews.find())",
"def delete_prod_review(request, pk):\n review = get_object_or_404(ProductReview, pk=pk)\n product = review.product_id\n if review.user == request.user:\n review.delete()\n sweetify.success(\n request,\n \"Review deleted\",\n icon='success',\n timer='2500',\n toast='true',\n position='center',\n background='#181818',\n )\n return redirect(single_prod, product)",
"def review_prod(request, pk):\n product = get_object_or_404(Product, pk=pk)\n user = request.user\n if request.method == \"POST\":\n if ProductReview.objects.filter(user=user, product=product).exists():\n form = ProdReviewForm(request.POST)\n sweetify.error(\n request,\n \"Already reviewed this product\",\n icon='info',\n timer='2500',\n toast='true',\n position='center',\n background='#181818',\n )\n return redirect(single_prod, product.pk)\n else:\n form = ProdReviewForm(request.POST)\n if form.is_valid():\n review = form.save(commit=False)\n review.product = product\n form.instance.user = request.user\n review.save()\n sweetify.success(\n request,\n \"Review added, thanking you\",\n icon='success',\n timer='2500',\n toast='true',\n position='top',\n )\n return redirect(single_prod, product.pk)\n else:\n form = ProdReviewForm()\n return render(request, 'prodreview.html', {\n 'form': form, 'product': product.pk\n }\n )",
"def product_detail(request, pk):\n product = get_object_or_404(Product, pk=pk)\n if request.method == \"POST\":\n if not request.user.is_authenticated:\n return redirect('login')\n\n else:\n form = ReviewForm(request.POST)\n if form.is_valid():\n review = form.save(commit=False)\n review.user = request.user\n review.product = product\n review.save()\n messages.success(request, \"Thanks for your review, it has been sent for approval!\")\n\n return redirect(product_detail, product.pk)\n\n else:\n form = ReviewForm()\n review_count = product.reviews.filter(approved=True).count()\n sum = 0 \n avg = 0 \n if review_count > 0:\n for score in product.reviews.filter(approved=True).values(\"score\"):\n sum += score[\"score\"]\n avg = sum / review_count \n\n is_favourite = False\n if request.user.is_authenticated:\n user = request.user \n if Favourite.objects.filter(user=user, product=product).count() > 0:\n is_favourite = True\n return render(request, \"productdetail.html\", {'product': product,\n 'form': form,\n 'is_favourite': is_favourite,\n 'score': avg,\n 'review_count': review_count})",
"def add_review(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n\n if request.method == 'POST': \n review_form = ReviewForm(request.POST)\n if review_form.is_valid():\n review = review_form.save(commit=False)\n review.product = product\n review.user = request.user\n review.save()\n messages.info(request, \"Your review has been received! Thank you for your interest.\")\n return redirect(reverse('product_detail', args=[product_id]))\n else:\n print(review_form.errors)\n \n return redirect(reverse('product_detail', args=[product_id]))",
"def holdingpenreview():\n objectid = request.values.get('objectid', 0, type=int)\n approved = request.values.get('approved', False, type=bool)\n ticket = request.values.get('ticket', False, type=bool)\n if not objectid:\n abort(400)\n workflow_object = workflow_object_class.get(objectid)\n workflow_object.extra_data[\"approved\"] = approved\n workflow_object.extra_data[\"ticket\"] = ticket\n workflow_object.save()\n db.session.commit()\n\n resume.delay(workflow_object.id)\n\n return render_template('authors/forms/new_review_accepted.html',\n approved=approved)",
"def go_product_reviews_page(self, driver, product_id, website):\n try:\n tab_list = driver.find_element_by_id(\"divProductDetailsCustomerReviewOptions\")\n review_tab = tab_list.find_element_by_id(\"tabProductDetailCustomerReviewNav1\")\n review_tab.click()\n except (NoSuchElementException, ElementNotVisibleException):\n pass\n time.sleep(1)",
"def edit_review_prod(request, pk):\n review = get_object_or_404(ProductReview, pk=pk)\n product = review.product_id\n if request.method == \"POST\":\n form = ProdReviewForm(request.POST, instance=review)\n if form.is_valid():\n review = form.save(commit=False)\n form.instance.user = request.user\n review.save()\n sweetify.success(\n request,\n \"Review updated\",\n icon='success',\n timer='2500',\n toast='true',\n position='top',\n )\n return redirect(single_prod, product)\n else:\n form = ProdReviewForm(instance=review)\n\n return render(request, 'editprodreview.html', {\n 'form': form, 'product': product\n }\n )",
"def review(self, review):\n self._review = review",
"def new_review(request):\n user_profile = UserProfile.objects.get(user=request.user)\n\n if request.user.is_authenticated:\n if request.method == 'POST':\n review_form = ReviewForm(request.POST)\n if review_form.is_valid():\n if len(request.POST[\"review_content\"]) <= 0 or len(\n request.POST[\"product\"]) <= 0:\n messages.error(\n request, \"You haven't completed the review form! \\\n Please add content and try again.\")\n return redirect(reverse(\"gallery\"))\n new_review = review_form.save(commit=False)\n new_review.user_profile = user_profile\n review_form.save()\n messages.success(request, 'Your review has \\\n been added.')\n return redirect(reverse(\"gallery\"))\n else:\n messages.error(request, 'Your review could not be added. \\\n Please check that your review is valid.')\n\n template = 'gallery/gallery.html'\n context = {\n 'review_form': review_form,\n }\n\n return render(request, template, context)",
"def add_review(self, review):\n review_issue = IParentGetter(review).get_parent_object_of_type(\"Issue\")\n if review_issue is None:\n review_issue = IParentGetter(review).get_parent_object_of_type(\"Volume\")\n if self.current_issue != review_issue:\n if self.current_issue:\n self.finish_issue()\n self.current_issue = review_issue\n self.reviews_xml.append(review.restrictedTraverse(self.xml_view_name)())",
"def review():\r\n\r\n # Ensure isbn_number is submitted\r\n if not request.form.get(\"isbn_number\"):\r\n return apology(\"Invalid book\", 403)\r\n\r\n # Ensure review is submitted\r\n if not request.form.get(\"review\"):\r\n return apology(\"Text is not submitted\", 403)\r\n\r\n # Check if book exist, if not error out\r\n\r\n # add review to db\r\n\r\n return redirect(url_for(details, isbn_number=request.form.get(\"isbn_number\")))",
"def product_detail(request, product_id):\n \n product = get_object_or_404(Product, pk=product_id)\n review_form = ReviewForm()\n reviews = Review.objects.filter(product_id=product_id).order_by('-created_at')\n\n context = {\n 'product': product,\n 'review_form': review_form,\n 'reviews': reviews,\n }\n\n return render(request, 'products/product_detail.html', context)",
"def subproduct_add_case_edit_skip(request):\n session = request.session.get('new_product', {})\n if not session:\n raise Http404()\n\n gtin = session.get('gtin', 0)\n try:\n product = Product.objects.get(gtin=gtin)\n except:\n return redirect(reverse('products:products_list'))\n\n # we remove subproducts, we move to the next step\n session['sub_products'] = []\n return redirect(reverse('products:fulledit_js', args=(product.id,)))",
"def save_review():\n prod_id = int(request.vars.prod_id)\n logger.info(\"saving review on prod_id {%s}\" %prod_id)\n content = request.vars.content\n db.reviews.update_or_insert(\n (db.reviews.prod_id == prod_id) & (db.reviews.user_email == auth.user.email),\n prod_id = prod_id,\n user_email = auth.user.email,\n review_content = content\n )\n return \"ok\" # Might be useful in debugging.",
"def review(self, review: object):\n\n self._review = review",
"def review_details(product_id):\n\n # Gets the product's specifications from the database\n product = mongo.db.products.find_one({\"_id\": ObjectId(product_id)})\n\n # Sets the page title\n page_title = product[\"name\"] + \" Review\"\n\n # Sets the current user\n\n if session.get('user'):\n current_user = \"{} {}\".format(session['user']['first_name'],\n session['user']['last_name'])\n\n else:\n current_user = None\n\n \"\"\"\n Gets the product's reviews from the database and sorts them. Sort method is\n from https://docs.mongodb.com/manual/reference/method/cursor.sort\n /index.html\n \"\"\"\n reviews = list((mongo.db.reviews.find(\n {\"product\": product[\"name\"]})).sort(\"date_added\", -1))\n\n \"\"\"\n Updates the date_added value in the review dictionary to be\n in the correct format. Code is from https://www.w3schools.com/python/\n python_datetime.asp\n \"\"\"\n for review in reviews:\n review['date_added'] = review['date_added'].strftime(\"%d %B %Y\")\n\n \"\"\"\n Calculates the ratings as percentages and returns a dictionary containing\n the ratings values\n \"\"\"\n\n ratings = ratings_percentages(product, len(reviews))\n\n return render_template(\"review_details.html\",\n page_title=page_title,\n product=product,\n ratings=ratings,\n reviews=reviews,\n current_user=current_user)",
"def edit_review(request, review_id):\n user_profile = get_object_or_404(UserProfile, user=request.user)\n review = get_object_or_404(UserReview, id=review_id)\n review_form = ReviewForm(instance=review)\n\n if request.user == user_profile.user:\n if request.method == 'POST':\n review_form = ReviewForm(request.POST, instance=review)\n if review_form.is_valid():\n if len(request.POST[\"product\" or \"review_content\"]) <= 0:\n messages.error(\n request, \"You have not completed the review form. \\\n Please add content and try again.\")\n return redirect(reverse(\"gallery\"))\n else:\n review = review_form.save(commit=False)\n user_profile = user_profile\n review_form.save()\n messages.success(request, 'Your review has \\\n been updated.')\n return redirect(reverse(\"gallery\"))\n else:\n review_form = ReviewForm(instance=review)\n\n template = 'gallery/edit_review.html'\n context = {\n 'review_form': review_form,\n 'user_profile': user_profile,\n 'review': review,\n }\n\n return render(request, template, context)",
"def add_review(product_id):\n if request.method == 'POST':\n \"\"\"\n Gets the next search perameter from the URL. Code is from https://\n blog.tecladocode.com/handling-the-next-url-when-logging-in-with-flask/\n \"\"\"\n next_url = request.form.get('next')\n\n \"\"\"\n Gets the product's ratings from the database and counts the number of\n reviews in the database for the product. Count method is from https://\n docs.mongodb.com/manual/reference/method/db.collection.count/\n \"\"\"\n product_ratings = mongo.db.products.find_one(\n {\"_id\": ObjectId(product_id)}, product_ratings_query())\n\n product_count = mongo.db.reviews.count(\n {\"product\": product_ratings['name']})\n\n \"\"\"\n Adds the details entered into the form to a dictionary. Datetime\n method is from https://www.w3schools.com/python/python_datetime.asp\n \"\"\"\n review = {\n \"overall_rating\": int(request.form.get('overall_rating')),\n \"performance_rating\": int(request.form.get('performance_rating')),\n \"usability_rating\": int(request.form.get('usability_rating')),\n \"price_rating\": int(request.form.get('price_rating')),\n \"quality_rating\": int(request.form.get('quality_rating')),\n \"review_title\": request.form.get('review_title'),\n \"review\": request.form.get('review'),\n \"product\": product_ratings['name'],\n \"date_added\": datetime.datetime.now(),\n \"reviewed_by\": \"{} {}\".format(session['user']['first_name'],\n session['user']['last_name'])\n }\n\n \"\"\"\n Calculates the product's new ratings and updates them in the database.\n Update one method is from https://docs.mongodb.com/manual/\n reference/method/db.collection.updateOne/\n \"\"\"\n new_ratings = add_ratings(product_ratings, product_count, review)\n\n mongo.db.products.update_one(\n {'_id': ObjectId(product_id)}, {\"$set\": new_ratings})\n\n mongo.db.products.update_one(\n {'_id': ObjectId(product_id)},\n star_rating(new_rating=int(request.form.get('overall_rating'))))\n\n # Adds the review to the database\n mongo.db.reviews.insert_one(review)\n\n \"\"\"\n Code for message categories is from https://flask.palletsprojects.com/\n en/1.1.x/patterns/flashing/\n \"\"\"\n flash(\"Review Successfully Added\", \"success\")\n\n return redirect(next_url)\n\n else:\n \"\"\"\n Aborts the request and returns a 400 status code if the URL does not\n contain a next search perameter. Code is from https://www.kite.com/\n python/answers/how-to-get-parameters-from-a-url-using-flask-in-python\n and https://flask.palletsprojects.com/en/1.1.x/api/#flask.abort\n \"\"\"\n if request.args.get('next') is None:\n abort(400)\n\n \"\"\"\n Gets the product's details from the products databse and aborts the\n request and returns a 404 status code if the product does not exist.\n Code is from https://flask.palletsprojects.com/en/1.1.x/api\n /#flask.abort\n \"\"\"\n product = mongo.db.products.find_one({'_id': ObjectId(product_id)})\n\n if product is None:\n abort(404)\n\n return render_template(\"add_review.html\", page_title=\"Add Review\",\n product_id=product_id)",
"def product_review_form(request):\n if request.method=='POST':\n service = ServiceProvider.objects.filter(creator=request.user).first()\n product = Product.objects.get(created_by=service)\n form=ReviewCreationForm(request.POST)\n form.instance.created_by = request.user\n# This is for service provider reviews it self not product so no need for it\n# form.instance.review_of=service\n form.instance.product= product\n form.save()\n return redirect('public:product_detail', product.pk)\n form=ReviewCreationForm()\n return render(request, 'product_detail.html', {'form':form})",
"def edit_review(review_id):\n if request.method == 'POST':\n \"\"\"\n Gets the next search perameter from the URL. Code is from https://\n blog.tecladocode.com/handling-the-next-url-when-logging-in-with-flask/\n \"\"\"\n next_url = request.form.get('next')\n\n # Gets the review's and product's ratings from the database\n user_ratings = mongo.db.reviews.find_one(\n {'_id': ObjectId(review_id)}, user_ratings_query())\n\n product_ratings = mongo.db.products.find_one(\n {\"name\": user_ratings['product']}, product_ratings_query())\n\n \"\"\"\n Counts the number of reviews in the database for the product.\n Count method is from https://docs.mongodb.com/manual/\n reference/method/db.collection.count/\n \"\"\"\n product_count = mongo.db.reviews.count(\n {\"product\": user_ratings['product']})\n\n \"\"\"\n Adds the details entered into the form to a dictionary. Datetime method\n is from https://www.w3schools.com/python/python_datetime.asp\n \"\"\"\n review = {\n \"overall_rating\": int(request.form.get('overall_rating')),\n \"performance_rating\": int(request.form.get('performance_rating')),\n \"usability_rating\": int(request.form.get('usability_rating')),\n \"price_rating\": int(request.form.get('price_rating')),\n \"quality_rating\": int(request.form.get('quality_rating')),\n \"review_title\": request.form.get('review_title'),\n \"review\": request.form.get('review'),\n \"date_added\": datetime.datetime.now(),\n }\n\n \"\"\"\n Calculates the product's new ratings and updates them in the database.\n Update one method is from https://docs.mongodb.com/manual/reference\n /method/db.collection.updateOne/\n \"\"\"\n new_ratings = edit_ratings(\n user_ratings, product_ratings, product_count, review)\n\n mongo.db.products.update_one(\n {'_id': product_ratings['_id']}, {\"$set\": new_ratings})\n\n if (int(request.form.get('overall_rating')) != user_ratings\n ['overall_rating']):\n\n mongo.db.products.update_one({\"_id\": review_id}, star_rating(\n request.form.get('overall_rating'), user_ratings\n ['overall_review']))\n\n mongo.db.reviews.update_one(\n {'_id': ObjectId(review_id)}, {\"$set\": review})\n\n \"\"\"\n Code for message categories is from https://flask.palletsprojects.com/\n en/1.1.x/patterns/flashing/\n \"\"\"\n flash(\"Review Successfully Updated\", \"success\")\n\n return redirect(next_url)\n\n else:\n \"\"\"\n Aborts the request and returns a 400 status code if the URL does not\n contain a next search perameter. 
Code is from https://www.kite.com/\n python/answers/how-to-get-parameters-from-a-url-using-flask-in-python\n and https://flask.palletsprojects.com/en/1.1.x/api/#flask.abort\n \"\"\"\n if request.args.get('next') is None:\n abort(400)\n\n \"\"\"\n Gets the product's details from the products databse and aborts the\n request and returns a 404 status code if no review is found or\n a 403 status if the review author is not the user currently signed in.\n Code is from https://flask.palletsprojects.com/en/1.1.x/api\n /#flask.abort and https://docs.mongodb.com/manual/tutorial/\n project-fields-from-query-results/\n \"\"\"\n review = mongo.db.reviews.find_one(\n {\"_id\": ObjectId(review_id)}, {\"reviewed_by\": 1, \"_id\": 0})\n\n if review is None:\n return abort(404)\n\n elif \"{} {}\".format(session['user']['first_name'], session['user']\n ['last_name']) != review['reviewed_by']:\n return abort(403)\n\n else:\n # Gets the review from the database\n review = mongo.db.reviews.find_one({'_id': ObjectId(review_id)})\n\n return render_template('edit_review.html',\n page_title='Edit Review', review=review)",
"def delete_review(review_id):\n\n next_url = request.args.get('next')\n\n if next_url is None:\n abort(400)\n\n \"\"\"\n Gets the review author's details from the database and aborts the\n request and returns a 403 status code if the review author is not the\n user currently signed in. Code is from https://flask.palletsprojects.com/en\n /1.1.x/api/#flask.abort and https://docs.mongodb.com/manual/tutorial/\n project-fields-from-query-results/\n \"\"\"\n review = mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)}, {\n \"reviewed_by\": 1, \"_id\": 0})\n\n if \"{} {}\".format(session['user']['first_name'], session['user']\n ['last_name']) != review.get('reviewed_by'):\n return abort(403)\n\n else:\n # Gets the review's and product's ratings from the database\n user_ratings = mongo.db.reviews.find_one(\n {'_id': ObjectId(review_id)}, user_ratings_query())\n\n product_ratings = mongo.db.products.find_one(\n {\"name\": user_ratings['product']}, product_ratings_query())\n\n \"\"\"\n Counts the number of reviews in the database for the product.\n Code for the count method from https://docs.mongodb.com/manual/\n reference/method/db.collection.count/\n \"\"\"\n product_count = mongo.db.reviews.count_documents(\n {\"product\": user_ratings['product']})\n\n \"\"\"\n Calculates the product's new ratings and updates them in the database.\n Code for the update one method is from https://docs.mongodb.com/manual/\n reference/method/db.collection.updateOne/\n \"\"\"\n new_ratings = delete_ratings(\n user_ratings, product_ratings, product_count)\n\n mongo.db.products.update_one(\n {'_id': product_ratings['_id']}, {\"$set\": new_ratings})\n\n mongo.db.products.update_one({\"name\": user_ratings['product']},\n star_rating(\n prev_rating=user_ratings['overall_rating']))\n\n # Deletes the review from the database\n mongo.db.reviews.delete_one({\"_id\": ObjectId(review_id)})\n\n \"\"\"\n Code for message categories is from https://flask.palletsprojects.com/\n en/1.1.x/patterns/flashing/\n \"\"\"\n flash(\"Review Successfully Deleted\", \"success\")\n\n return redirect(next_url)",
"def delete_review(id):\n review = Reviews()\n try:\n review.delete(id)\n flash('You have successfully deleted the review.')\n except:\n print(\"Cant delete review.\")\n\n # redirect to the departments page\n return redirect(url_for('review.index'))\n\n return render_template(title=\"Delete Department\")",
"def delete_review(request, review_id):\n review = get_object_or_404(UserReview, id=review_id)\n user_profile = get_object_or_404(UserProfile, user=request.user)\n\n if request.user.is_authenticated:\n if request.user == user_profile.user:\n review.delete()\n messages.success(request, 'Your review has \\\n been deleted.')\n return redirect(reverse(\"gallery\"))\n\n elif request.user.is_superuser:\n review.delete()\n messages.success(request, 'You have deleted this review.')\n return redirect(reverse(\"gallery\"))\n\n else:\n messages.error(request, 'This review can only be deleted \\\n by the author.')\n return redirect(reverse(\"gallery\"))\n\n else:\n messages.error(request, 'You must be signed in.')\n return redirect(reverse(\"gallery\"))\n\n template = 'gallery/gallery.html'\n context = {\n 'review': review,\n 'user_profile': user_profile,\n }\n\n return render(request, template, context)",
"def review(site, token, page):\n revid = page.latest_revision_id\n request = Request(site=site,\n action=\"review\",\n token=token,\n revid=revid)\n request.submit()",
"def post_review(self, form):\n comments_file = form.cleaned_data.get('comments', None)\n return_code = form.cleaned_data.get('return_code', None)\n\n # Update the review\n self.object.post_review(comments_file, return_code=return_code)\n if return_code:\n self.revision.return_code = return_code\n\n verb = None\n # If every reviewer has posted comments, close the reviewers step\n if self.object.role == 'reviewer':\n qs = Review.objects \\\n .filter(document=self.document) \\\n .filter(revision=self.revision.revision) \\\n .filter(role='reviewer') \\\n .exclude(closed_on=None)\n if qs.count() == self.revision.reviewers.count():\n self.revision.end_reviewers_step(save=False)\n verb = Activity.VERB_CLOSED_REVIEWER_STEP\n\n # If leader, end leader step\n elif self.object.role == 'leader':\n self.revision.end_leader_step(save=False)\n verb = Activity.VERB_CLOSED_LEADER_STEP\n\n # If approver, end approver step\n elif self.object.role == 'approver':\n self.revision.end_review(save=False)\n verb = Activity.VERB_CLOSED_APPROVER_STEP\n\n self.revision.save(update_document=True)\n\n if verb:\n activity_log.send(verb=verb,\n target=self.revision,\n sender=do_batch_import,\n actor=self.request.user)",
"def reviewhandler():\n objectid = request.values.get('objectid', 0, type=int)\n if not objectid:\n abort(400)\n\n form = AuthorUpdateForm(formdata=request.form)\n visitor = DataExporter()\n visitor.visit(form)\n\n workflow_object = workflow_object_class.get(objectid)\n workflow_object.extra_data[\"approved\"] = True\n workflow_object.extra_data[\"ticket\"] = request.form.get('ticket') == \"True\"\n workflow_object.extra_data['formdata'] = visitor.data\n workflow_object.data = formdata_to_model(workflow_object, visitor.data)\n workflow_object.save()\n db.session.commit()\n\n resume.delay(workflow_object.id)\n\n return render_template('authors/forms/new_review_accepted.html',\n approved=True)",
"def apparel(request):\n results = Product.objects.filter(category__icontains='A')\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': results,\n 'stars': stars\n }\n if not results:\n messages.error(request, \"No apparel as of yet, that will change soon!\")\n return redirect(reverse('products'))\n else:\n return render(request, \"products.html\", context)",
"def edit_review(review_id):\n form = EditReviewForm()\n try:\n review = Review.from_mongo(**mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)}))\n except Exception as e:\n raise Exception(e)\n else:\n game = Game.from_mongo(**mongo.db.games.find_one({\"_id\": ObjectId(str(review.game_id))}))\n user_name = session.get('username')\n if user_name == review.author_ref['author_name']:\n user = User.from_mongo(**mongo.db.users.find_one({\"name\": user_name}))\n\n if form.validate_on_submit():\n review.name = form.title.data\n review.text = form.review_text.data\n review_ref = review.create_review_ref()\n review.update_review()\n for game_review in game.reviews:\n if game_review.get('review_pub_date') == review.pub_date:\n game.reviews.remove(game_review)\n game.reviews.append(review_ref)\n game.update_game()\n for user_review in user.reviews:\n if user_review.get('review_pub_date') == review.pub_date:\n user.reviews.remove(user_review)\n user.reviews.append(review_ref)\n user.update_user()\n return redirect(url_for('review', review_id=review_id))\n\n elif request.method == \"GET\":\n form.title.data = review.name\n form.review_text.data = review.text\n\n return render_template('edit_review.html.jinja',\n title='Edit Review',\n review_id=review_id,\n form=form\n )",
"def review(user_id, item_id, text, rating):\n if Review.objects.filter(user=user_id, item=item_id):\n return \"You already wrote a review!\"\n\n form = ReviewForm({\n 'user': user_id,\n 'item': item_id,\n 'text': text,\n 'rating': rating,\n 'agrees': 0,\n 'thanks': 0\n })\n if form.is_valid():\n form.save()\n return False\n return \"Something was wrong with the review you submitted!\""
] | [
"0.6768317",
"0.6521801",
"0.6329526",
"0.6132767",
"0.61207914",
"0.59795856",
"0.59374976",
"0.59345794",
"0.590007",
"0.58561623",
"0.5785104",
"0.5780705",
"0.5748374",
"0.5738451",
"0.56478876",
"0.56427765",
"0.563428",
"0.5569598",
"0.5549924",
"0.5507052",
"0.55060875",
"0.5500801",
"0.54400146",
"0.5425914",
"0.538724",
"0.53494036",
"0.5305458",
"0.53043115",
"0.5283211",
"0.5245908"
] | 0.7806384 | 0 |
Wait for the current page to change | def wait_for_page_change(self, current_page):
WebDriverWait(self.driver, 5).until(EC.url_changes(current_page)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wati_until_page_change(driver, url):\n while driver.current_url == url:\n time.sleep(10)",
"def wait_for_page_load(self):\n pass",
"def wait_for_page_load(self):\n # For right now, just wait for 2 seconds since webdriver returns when loaded.\n # TODO: switch to waiting for network idle\n time.sleep(2)",
"def page_changed(self):\n if self.current >= 0:\n if self.not_again:\n self.not_again = False\n return\n ok = self.check_oldpage(self.current)\n if not ok:\n self.not_again = True\n self.nb.setCurrentIndex(self.current)\n return\n self.current = self.nb.currentIndex()\n go = self.nb.currentWidget()\n if go.first_time:\n go.first_time = False\n go.create_widgets()\n go.create_actions()\n msg = go.refresh_screen(self.current_data)\n if msg:\n qtw.QMessageBox.information(self, self.title, msg)\n self.current = 0\n self.nb.setCurrentIndex(self.current)\n go.refresh_screen(self.current_data)",
"def on_page(self, wait_for_page_to_load=False):\n # TODO: fix this\n # that is really dumb, but seems Safari driver has some issues\n # with current_url method, which stuck sometimes\n # adding this simple 0,1 delay helped to solve that\n # but I would better fix this later\n if wait_for_page_to_load:\n pass\n time.sleep(0.1)\n if self.get_relative_path() == self.url:\n return True\n else:\n return False",
"def nav(self, url):\r\n\r\n self.driver.get(url)\r\n time.sleep(3) # wait for page load\r",
"def update_page(self, waittime):\n if not self.runningtask.get():\n return\n if self.vars[\"enabled\"].get():\n logger.trace(\"Updating page\")\n self.display_item_set()\n self.load_display()\n self.after(waittime, lambda t=waittime: self.update_page(t))",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def wait(self):\n pass",
"def wait(self):\n pass",
"def wait(self):\n time.sleep(self.next())",
"def second_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.third_page.wait_for_page()",
"def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()",
"def update():\n print(\"current page is \", wikiPageStackTrace[-1].getTitle())\n if wikiPageStackTrace[-1].getUrl() != goalPage.getUrl(): # no victory\n eel.addRoundNumber()\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(\n wikiPageStackTrace[-1].getFirstSentence())\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.hideLoader()\n elif wikiPageStackTrace[-1].getUrl() == goalPage.getUrl(): # victory\n eel.hideLoader()\n eel.addRoundNumber()\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.showVictory()\n # we need to do this because overwise the JS is not fat egoth to respond so we get an infinit loading\n time.sleep(0.5)\n eel.hideLoader()",
"def next_page():\n\tprint('-> \\nClicking next page')\n\told_html = driver.find_element_by_tag_name('html').text\n\tlink = driver.find_element_by_xpath(XPATHS['next_page']) \n\tlink.click()\n\treturn wait_for(old_html)",
"def wait_for_page_load(self, timeout=30):\n old_page = self.driver.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.driver, timeout).until(\n staleness_of(old_page)\n )",
"def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")",
"def third_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fourth_page.wait_for_page()",
"def wait():\n time.sleep(1)",
"def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result",
"def wait_page_loaded(self, timeout=10):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support import expected_conditions as ec\n\n old_page = self.selenium.find_element(By.TAG_NAME, \"html\")\n yield\n # Wait for the next page to be loaded\n self.wait_until(ec.staleness_of(old_page), timeout=timeout)\n self.wait_page_ready(timeout=timeout)",
"def wait_for(old_html, timeout=60):\n\tstart_time = time.time() \n\twhile time.time() < start_time + timeout: \n\t\tif check_new_page_loaded(old_html): \n\t\t\treturn time.time() - start_time \n\t\telse: \n\t\t\ttime.sleep(0.1) \n\traise Exception('WebPage Load Timeout')",
"def new_page(page_link):\n\told_param = old_param = driver.find_element_by_tag_name('html').text\n\tdriver.get(page_link)\n\treturn wait_for(old_param)",
"def seeHome(self, waitFor=0):\n print (\"seeHome\")\n self.driver.get(self.base_url)\n time.sleep(waitFor)",
"def first_page_execution(self):\n self.errors_and_correct_input_values_helper(wrong_pattern_error=True)\n self.utility_page.click_next_button()\n self.utility_page.click_next_button()\n self.second_page.wait_for_page()",
"def wait(self):\n time.sleep(0.010)",
"def wait_progress(self):\n pass"
] | [
"0.7532723",
"0.73459953",
"0.6734942",
"0.66200167",
"0.655229",
"0.65445894",
"0.6482647",
"0.63733417",
"0.63733417",
"0.63733417",
"0.63733417",
"0.63710386",
"0.63710386",
"0.6363439",
"0.6355523",
"0.631563",
"0.63007927",
"0.62691486",
"0.6256945",
"0.6223538",
"0.62201375",
"0.6203515",
"0.62026274",
"0.61883056",
"0.6173241",
"0.61711985",
"0.61634004",
"0.613874",
"0.61219454",
"0.6105831"
] | 0.8245497 | 0 |
Verify the mail sent to the mail service | def verify_mail(self):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)",
"def test_send_mail(self):\n response = self.client.post(reverse('contact-form'), self.valid_data, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, self.valid_data['subject'])\n self.assertEqual(mail.outbox[0].from_email, self.valid_data['sender_email'])\n self.assertEqual(mail.outbox[0].to[1], self.valid_data['sender_email'])",
"def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()",
"def test_sendEmailVerification(self, testUser):\n with mail.record_messages() as outbox:\n testUser.send_email_verification()\n assert len(outbox) == 1\n msg = outbox[0]\n assert \"[email protected]\" in msg.recipients\n assert msg.subject == 'Ask Your Peeps: Email Verification'\n assert 'To verify your email' in msg.body\n assert 'Dear John' in msg.body",
"def __verify(self):\r\n code = self.request.get('code')\r\n email = None\r\n error = False\r\n # resend if code is not given or in case of some error\r\n if code is not None and code != '':\r\n email = User.verify(code, self.request.remote_addr)\r\n if email is None:\r\n error = True\r\n\r\n if email is None:\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'error': error\r\n }\r\n template = self.jinja2_env.get_template('verification.html')\r\n self.response.out.write(template.render(template_values))\r\n\r\n # message\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'message': self.gettext('THANK_YOU')\r\n }\r\n template = self.jinja2_env.get_template('staticmessage.html')\r\n self.response.out.write(template.render(template_values))",
"def verify(\n self,\n email,\n from_host='example.com',\n from_email='[email protected]'\n ):\n if DEBUG:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n if not EMAIL_RE.search(email):\n logging.debug(f\"'{email}' is not a valid email\")\n return self.EMAIL_NOT_FOUND\n\n try:\n hostname = email.strip().split('@')[1]\n socket.gethostbyname(hostname)\n mail_exchangers = query_mx(hostname)\n except Exception as e:\n logging.debug(e)\n raise e\n\n logging.debug(f\"Found mail exchangers: {mail_exchangers}\")\n for i, mx in enumerate(mail_exchangers):\n mx_name = mx[1]\n logging.debug(f\"Testing {mx_name} (#{i})...\")\n\n logging.debug(f\"\\tConnecting to {mx_name}\")\n server = self.connect(mx_name)\n\n if not server:\n logging.debug(\"\\tCould not get connected to server.\")\n continue\n\n if DEBUG:\n server.set_debuglevel(1)\n\n logging.debug(\"\\tDo helo...\")\n try:\n code, resp = server.helo(mx_name)\n if code != 250:\n if not self.unverifiable(resp):\n raise UnableToVerifyException()\n continue\n except:\n pass\n\n logging.debug(\"\\tDo mail:\")\n try:\n code, resp = server.mail(from_email)\n logging.debug(f\"Code: {code}\")\n logging.debug(f\"Response: {resp}\")\n if code != 250:\n if not self.unverifiable(resp):\n raise UnableToVerifyException()\n continue\n except:\n pass\n\n try:\n logging.debug(\"\\tDo rcpt:\")\n code, resp = server.rcpt(email)\n logging.debug(f\"\\t\\tCode: {code}\")\n logging.debug(f\"\\t\\tResponse: {resp}\")\n\n if code != 250:\n if self.nonexistent(resp):\n return self.EMAIL_NOT_FOUND\n elif self.unverifiable(resp):\n raise UnableToVerifyException()\n else:\n continue\n except:\n pass\n\n try:\n logging.debug(\"\\tDo data:\")\n code, resp = server.data('Ahoy. Are you there? Testing my python3 port of the package ;) {0}.{0}'.format(_smtp.CRLF))\n logging.debug(f\"\\t\\tCode: {code}\")\n logging.debug(f\"\\t\\tResponse: {resp}\")\n if code != 250:\n if self.nonexistent(resp):\n return self.EMAIL_NOT_FOUND\n elif self.unverifiable(resp):\n raise UnableToVerifyException()\n elif code == 250:\n return self.EMAIL_FOUND\n except:\n pass\n\n raise UnableToVerifyException()",
"def check_mail(self, update=False):\r\n return self.check_mail_dir(update=update)",
"def test_email_sent_on_failure(self):\n self._authorize()\n data = {\n 'Subject_Number': '000-1111',\n 'Pin_Code': '1234',\n 'Date_Enrolled': datetime.datetime.now().strftime('%b %d %Y '),\n 'Mobile_Number': '2223334444',\n }\n patient = self.create_xml_patient(data)\n payload = self.create_xml_payload([patient])\n response = self._post(payload)\n self.assertEqual(response.status_code, 500)\n self.assertEqual(len(mail.outbox), 1)",
"def testMailSent(self):\n self.sendEmail()\n messages = self.mail_stub.get_sent_messages(to='[email protected]')\n self.assertEqual(1, len(messages))\n self.assertEqual('[email protected]', messages[0].to)",
"def run_mailcheck (self):\n\t\t# TODO: add function in backend to check if all needed things are set\n\t\t# like server/pass/user/... - if not, show error\n\t\t# if it is not currently refreshing\n\t\tif not self.__mailbackend.refreshing:\n\t\t\tself.__status = mail.MailCheckStatus.REFRESH \n\t\t\tself.redraw_canvas()\n\t\t\tself.__mailbackend.start()\n\t\treturn False\t# in case we are run as a timeout",
"def test_failed_email(self):\n self.assertEqual(send_email(\"testtestcom\", \"test\", \"test\"), 'There was an error sending')",
"def test_send_verification_mail(self):\n self.email_verification = {\"user\": {\n \"username\": \"Ronny\",\n \"email\": \"[email protected]\",\n \"password\": \"myPass123!\"\n }\n }\n response = self.client.post(\n self.reg_url,\n self.email_verification,\n format=\"json\")\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"Activate your account.\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_send_verification_mail(self):\n self.email_verification = {\"user\": {\n \"username\": \"Ronny\",\n \"email\": \"[email protected]\",\n \"password\": \"myPass123!\"\n }\n }\n response = self.client.post(\n self.reg_url,\n self.email_verification,\n format=\"json\")\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"Activate your account.\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este não envia e-mail\n self.assertEqual(1, len(mail.outbox))",
"def test_email_good(get_email, capsys):\n e = get_email\n e.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out",
"def _send_verify_email(request, preferences, db_entry, rnd_hash, new_entry):\n\n location = reverse(\"KursAnmeldung-verify_email\", kwargs={\"hash\":rnd_hash})\n verify_link = request.build_absolute_uri(location)\n\n # FIXME: convert to users local time.\n now = datetime.datetime.utcnow()\n\n email_context = {\n \"verify_link\": verify_link,\n \"db_entry\": db_entry,\n \"now\": now,\n }\n\n # Render the internal page\n emailtext = render_to_string(\"kurs_anmeldung/verify_mailtext.txt\", email_context)\n\n # Get the preferences from the database:\n raw_notify_list = preferences[\"notify\"]\n notify_list = raw_notify_list.splitlines()\n notify_list = [i.strip() for i in notify_list if i]\n\n email_kwargs = {\n \"from_email\": preferences[\"from_email\"],\n \"subject\": preferences[\"email_subject\"],\n \"body\": emailtext,\n \"to\": [db_entry.email],\n \"bcc\": notify_list,\n }\n\n if MAIL_DEBUG == True:\n msg = u\"MAIL_DEBUG is on: No Email was sended!\"\n request.page_msg(msg)\n db_entry.log(request, msg)\n db_entry.mail_sended = False\n\n request.page_msg(\"django.core.mail.EmailMessage kwargs:\")\n request.page_msg(email_kwargs)\n\n request.page_msg(\"debug mail text:\")\n request.page_msg(mark_safe(\"<pre>%s</pre>\" % emailtext))\n return\n\n # We can't use django.core.mail.send_mail, because all members\n # of the recipient list will see the others in the 'To' field.\n # But we would like to notify the admins via 'Bcc' field.\n\n connection = SMTPConnection(fail_silently=False)\n email = EmailMessage(**email_kwargs)\n\n try:\n sended = email.send(fail_silently=False)\n except Exception, err:\n msg = \"Error sending mail: %s\" % err\n LogEntry.objects.log_action(app_label=\"kurs_anmeldung\", action=\"error\",\n message=msg\n )\n db_entry.log(request, msg)\n db_entry.mail_sended = False\n if settings.DEBUG or request.user.is_staff:\n db_entry.save()\n raise\n else:\n db_entry.mail_sended = sended\n db_entry.log(request, \"mail sended: %s\" % sended)",
"def test_email(self):\n # No email should be send\n self.assertEqual(len(mail.outbox), 0)\n\n # enable plugin and set mail setting to true\n plugin = registry.plugins.get('inventreecorenotificationsplugin')\n plugin.set_setting('ENABLE_NOTIFICATION_EMAILS', True)\n NotificationUserSetting.set_setting(\n key='NOTIFICATION_METHOD_MAIL',\n value=True,\n change_user=self.user,\n user=self.user,\n method=InvenTreeCoreNotificationsPlugin.EmailNotification.METHOD_NAME\n )\n\n # run through\n self._notification_run(InvenTreeCoreNotificationsPlugin.EmailNotification)\n\n # Now one mail should be send\n self.assertEqual(len(mail.outbox), 1)",
"def test_wrong_mail(self):\n resp = DeleteTest.client.post('/api/deleteuser/',{\"token\":DeleteTest.valid_token,\"email\":\"[email protected]\"})\n self.assertEqual(json.loads(resp.content),\"No user found.\",\"Mail Verification is not True\")",
"def test_check_email(self):\n url = reverse('check_email')\n data = {\"emails\": [\"[email protected]\"]}\n response_data = {\"results\": [{\"email\": \"[email protected]\", \"blocked\": True}], \"success\": True}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, response_data)",
"def test_send_mail_to_student(self):\r\n\r\n d = {'message': 'message_type_that_doesn\\'t_exist'}\r\n\r\n send_mail_ret = send_mail_to_student('[email protected]', d)\r\n self.assertFalse(send_mail_ret)",
"def test_compose_email_good(self): \n pass",
"def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)",
"async def verify(token: TextData, background_tasks: BackgroundTasks):\n token_data = token.data\n mail, subject, body = await AccountProcessor.confirm_email(token_data)\n background_tasks.add_task(Utility.validate_and_send_mail, email=mail, subject=subject, body=body)\n return {\"message\": \"Account Verified!\"}",
"def send_verification(self):\n pass",
"def __send_verification(self, email):\r\n user = User.getUser(email.lower())\r\n if user is None or user.verified:\r\n self.set_error(constants.STATUS_BAD_REQUEST, message=None, url=\"/\")\r\n return\r\n user.verificationCode = b64encode(CryptoUtil.get_verify_code(), \"*$\")\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'code': user.verificationCode,\r\n 'url': constants.VERIFICATION_URL\r\n }\r\n template = self.jinja2_env.get_template('verificationemail.jinja')\r\n message = mail.EmailMessage()\r\n message.sender = constants.SENDER_ADDRESS\r\n message.to = user.email\r\n message.subject = 'Please verify your address'\r\n message.body = template.render(template_values)\r\n message.send()\r\n user.put()",
"def send_mail_when_failed(self, body):\r\n pass",
"def test_send_email(self):\n self.register()\n response = self.client.post(self.password_reset_url,\n self.email,\n format=\"json\")\n self.assertEqual(response. status_code, status.HTTP_200_OK)\n self.assertEqual(json.loads(response.content), {'message':\n 'Successfully sent.Check your email'})",
"def verify_email(uid, token):\n return True",
"def test_sendmail(self):\n assert self.rc_conf.has_key('sendmail_enable')\n assert self.rc_conf['sendmail_enable'] == '\"NONE\"'",
"def test_verify_email(live_server):\n user = get_user_model().objects.create_user(username=\"test\")\n email = models.EmailAddress.objects.create(\n address=\"[email protected]\", user=user\n )\n verification = models.EmailVerification.objects.create(email=email)\n\n data = {\"token\": verification.token}\n url = f\"{live_server}/rest/email-verifications/\"\n response = requests.post(url, data)\n\n assert response.status_code == 201\n assert response.json() == {}"
] | [
"0.7377472",
"0.7287148",
"0.70876795",
"0.7085418",
"0.7074369",
"0.692471",
"0.6871539",
"0.6840313",
"0.6790432",
"0.67411506",
"0.6728825",
"0.66644526",
"0.66644526",
"0.66586095",
"0.6594762",
"0.653111",
"0.6484644",
"0.6457894",
"0.6408082",
"0.6403068",
"0.63938534",
"0.6375025",
"0.6335079",
"0.63216347",
"0.63207406",
"0.6303698",
"0.6298405",
"0.6275374",
"0.62681746",
"0.62271285"
] | 0.82257116 | 0 |
Returns a list of the positions of all locations that meet the target_func criteria | def get_x_in_range(self, start, target_func, max_distance, sort_func=None):
if sort_func is None:
targets = []
for x in range(-max_distance, max_distance + 1):
for y in range(-max_distance, max_distance + 1):
distance = abs(x) + abs(y)
if distance > max_distance:
continue
pos = Position(start.x + x, start.y + y)
if target_func(pos, distance):
targets.append(pos)
return targets
else:
targets = PriorityQueue()
for x in range(-max_distance, max_distance + 1):
for y in range(-max_distance, max_distance + 1):
distance = abs(x) + abs(y)
if distance > max_distance:
continue
pos = Position(start.x + x, start.y + y)
if target_func(pos, distance):
targets.enqueue(sort_func(pos, distance), pos)
return targets.to_list() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_lists(classes, target):\r\n coords = list()\r\n xs = list()\r\n ys = list()\r\n\r\n for element in classes:\r\n if classes[element] == target:\r\n xs.append(element[0])\r\n ys.append(element[1])\r\n\r\n coords.append(xs)\r\n coords.append(ys)\r\n return coords",
"def find_all_elements(grid, target):\n \n indices = []\n \n ### This pattern of iterating through row and col indices is very common\n for row_number in range(len(grid)):\n for col_number in range(len(grid[row_number])):\n \n if grid[row_number][col_number] == target:\n indices.append((row_number, col_number))\n \n return indices",
"def get_locs(self):\n self.update_filter_inds()\n return self.locs.iloc[self.filter_inds.ravel(), :].reset_index(drop=True)",
"def checked_positions():\n for base_position in chain([me.shipyard], me.get_dropoffs()):\n x_shipyard = base_position.position.x\n y_shipyard = base_position.position.y\n for x in range(-search_range, search_range):\n for y in range(-search_range, search_range):\n yield hlt.Position(\n x=x_shipyard + x,\n y=y_shipyard + y)",
"def get_all_possible_locations(loc: int, mask: Mask) -> List[str]:\n mask_loc = apply_mask_to_location(loc, mask)\n mask_loc_ary = np.array(list(mask_loc))\n possible_locs = []\n float_values = [[0, 1] for _ in range(mask.num_floats)]\n for float_value in product(*float_values):\n mask_loc_ary = np.array(list(mask_loc))\n mask_loc_ary[mask_loc_ary == \"X\"] = np.array(float_value)\n possible_locs.append(\"\".join(list(mask_loc_ary)))\n return possible_locs",
"def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc",
"def _FindLocations(input_api, search_regexes, files_to_check, files_to_skip):\n def FilterFile(affected_file):\n return input_api.FilterSourceFile(\n affected_file,\n files_to_check=files_to_check,\n files_to_skip=files_to_skip)\n\n no_presubmit = r\"// no-presubmit-check\"\n locations = []\n for f in input_api.AffectedSourceFiles(FilterFile):\n for line_num, line in f.ChangedContents():\n for search_regex in search_regexes:\n if (input_api.re.search(search_regex, line) and\n not input_api.re.search(no_presubmit, line)):\n locations.append(\" %s:%d\" % (f.LocalPath(), line_num))\n break\n return locations",
"def compute_loc_targets(boxes, gt_boxes, weights=(1.0, 1.0, 1.0, 1.0)):\r\n ex_widths = boxes[:, 2] - boxes[:, 0] + 1.0\r\n ex_heights = boxes[:, 3] - boxes[:, 1] + 1.0\r\n ex_ctr_x = boxes[:, 0] + 0.5 * ex_widths\r\n ex_ctr_y = boxes[:, 1] + 0.5 * ex_heights\r\n\r\n gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + 1.0\r\n gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + 1.0\r\n gt_ctr_x = gt_boxes[:, 0] + 0.5 * gt_widths\r\n gt_ctr_y = gt_boxes[:, 1] + 0.5 * gt_heights\r\n\r\n wx, wy, ww, wh = weights\r\n targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths\r\n targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights\r\n targets_dw = ww * np.log(gt_widths / ex_widths)\r\n targets_dh = wh * np.log(gt_heights / ex_heights)\r\n\r\n targets = np.vstack((targets_dx, targets_dy, targets_dw,\r\n targets_dh)).transpose()\r\n return targets",
"def hittable_targets(self):\n return [self.current_level.getPlayer()]",
"def _xy_locs(mask):\n y, x = mask.nonzero()\n return list(zip(x, y))",
"def get_candidate_locations(cur_location, radius, row_num, col_num):\n cur_y, cur_x = cur_location\n delta = int(radius)\n max_x = cur_x + delta if cur_x + delta < col_num else col_num - 1\n min_x = cur_x - delta if cur_x - delta >= 0 else 0\n max_y = cur_y + delta if cur_y + delta < row_num else row_num - 1\n min_y = cur_y - delta if cur_y - delta >= 0 else 0\n candidates = []\n for x in range(min_x, max_x + 1):\n for y in range(min_y, max_y + 1):\n if distance(cur_x, cur_y, x, y) < radius:\n candidates.append((y, x))\n return candidates",
"def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)",
"def __get_position(self, value, state):\n coords = np.argwhere(state == value).flatten()\n return coords",
"def extract_target_pixel_location(self):\n #Respective Image location\n pixel_array = self.imageprepare(self.image_path)\n\n #Select less_than_target color point --> must be calibrated\n #?? Should we use an abstract class here instead of an if statment ??\n if self.color == \"g\":\n less_than_target = .15\n else:\n raise ValueError(\"Unknown color value\")\n\n #Chooses target pixels as well as it's location\n target_pixels = []\n for pixel in enumerate(pixel_array):\n if pixel[1] < less_than_target:\n target_pixels.append(pixel[0])\n\n return target_pixels",
"def find_obstacle_loc(self, obstacle_list):\n\n x_obst = []\n y_obst = []\n #x_obst_append = x_obst.append\n #y_obst_append = y_obst.append\n locs = []\n\n for x in obstacle_list:\n if x < self.width:\n x_obst.append(x*self.resolution + self.resolution/2)\n else:\n x_obst.append((x % self.width)*self.resolution + self.resolution/2)\n\n for y in obstacle_list:\n y_obst.append((y/self.width)*self.resolution + self.resolution/2)\n\n locs = map(lambda x: x, zip(x_obst, y_obst))\n\n return(locs)",
"def get_coord_in_classpath(cp, targets):\n conf_art_tuples_ex = cp.get_classpath_entries_for_targets(targets)\n simple_coords = set(x[1].coordinate.simple_coord for x in conf_art_tuples_ex)\n return simple_coords",
"def get_valid_locations(location_list, grid, shape):",
"def squareSearch( self, tTopLeft, tBottomRight, function, argsList ): #by LOQ\n\t\ttPaintedList = []\n\t\tresult = None\n\t\tfor x in range(tTopLeft[0], tBottomRight[0]+1):\n\t\t\tfor y in range(tTopLeft[1], tBottomRight[1]+1, -1): # edead: added -1, not sure why it didn't work before\n\t\t\t\tresult, bPaintPlot, bContinueSearch = function((x, y), result, argsList)\n\t\t\t\tif bPaintPlot: # paint plot\n\t\t\t\t\ttPaintedList.append((x, y))\n\t\t\t\tif not bContinueSearch: # goal reached, so stop\n\t\t\t\t\treturn result, tPaintedList\n\t\treturn result, tPaintedList",
"def get_all_locations(self):",
"def find_at(self, x, y):\n return list(self.ifind_at(x, y))",
"def get_target_indexes(self, dataset):\n targets = []\n for i, fobj in enumerate(dataset.files):\n for criterion in self.target_criteria:\n if self._is_valid_target_int(criterion):\n if i == criterion:\n targets.append(i)\n elif self._is_valid_target_str(criterion):\n if re.match(criterion, str(fobj)):\n targets.append(i)\n else:\n raise TypeError(\n \"Unrecognized type for 'applies_to()' target criteria\"\n )\n return targets",
"def targets(self) -> List[Point2]:\n return self._targets",
"def find_index(vec_vals,target):\n target=np.atleast_1d(target) #turn scalar into iterable, no op if already array\n vec_vals=np.array(vec_vals)\n index_list=[]\n for item in target:\n first_index=np.argmin(np.abs(vec_vals - item))\n index_list.append(first_index)\n return index_list",
"def getSearchSpaceCoords(self):",
"def check_location_confidence(self):\n\t\t## not the best way of doing things, but since the number of targets is fairly small its not a big deal\n\t\tepsilon_pixels = .05 * self.horizontal_resolution #arbitrary confidence factor\n\t\tepsilon_meters = .08\n\t\tpixel_distances = []\n\t\tactual_distances = []\n\t\tnum_observed = 0\n\t\tfor ti in self.targs:\n\t\t\tif ti.props_are_set:\n\t\t\t\tfor tj in self.targs:\n\t\t\t\t\tif tj.props_are_set: \n\t\t\t\t\t\tpixel_dist = np.linalg.norm(tj.position_camera - ti.position_camera)\n\t\t\t\t\t\tactual_dist = np.abs(tj.d_cam_image - ti.d_cam_image)\n\t\t\t\t\t\tif pixel_dist == 0:\n\t\t\t\t\t\t\tpixel_dist = 10000 #ignore two of the same points\n\t\t\t\t\t\t\tactual_dist = 10000\n\t\t\t\t\t\tpixel_distances.append(pixel_dist)\t\n\t\t\t\t\t\tactual_distances.append(actual_dist)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\t\tactual_distances.append(10000)\n\t\t\telse:\n\t\t\t\tfor _ in self.targs:\n\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\tactual_distances.append(10000)\n\t\tmin_ind_pixel = np.argmin(pixel_distances)\n\t\tmin_ind_actual = np.argmin(actual_distances)\n\t\t#min_ind is encoded in base (num_targets); decode it to find the closest two points\n\t\tbest_guys = [self.targs[min_ind_pixel/len(self.targs)],self.targs[min_ind_pixel%len(self.targs)]]\n\t\tif pixel_distances[min_ind_pixel] > epsilon_pixels or actual_distances[min_ind_actual] > epsilon_meters:\n\t\t\t#measurements are not trustworthy, return nothing\n\t\t\treturn None\n\n\t\treturn best_guys",
"def targets(self, predicate=None):\r\n return filter(predicate, self._targets)",
"def research_pos(self, map_list, character): \n list_pos = []\n for y in range(15): \n for x, c in enumerate(map_list[y]):\n if character in c and c == character:\n list_pos.append((x*50, y*50)) \n return list_pos",
"def moveFunction(target, rays):\r\n for ray in rays:\r\n ray.hitTarget(target)",
"def searchRange4(self, nums: List[int], target: int) -> List[int]:\n def bisearch_l() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] >= target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n def bisearch_r() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] > target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n return [bisearch_l(), bisearch_r()]",
"def FindQualifiedTargets(target, qualified_list):\n return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]"
] | [
"0.59461826",
"0.58725774",
"0.58292496",
"0.58218",
"0.5733502",
"0.5714609",
"0.56952053",
"0.5680164",
"0.5623801",
"0.5623321",
"0.56142974",
"0.5596879",
"0.5581851",
"0.55673",
"0.5537235",
"0.5523585",
"0.55194414",
"0.5513604",
"0.54883385",
"0.542974",
"0.5423209",
"0.54222584",
"0.541129",
"0.5408355",
"0.54005474",
"0.5393616",
"0.5379796",
"0.5351044",
"0.5344575",
"0.53302497"
] | 0.6564029 | 0 |
Decorator to modify the docstring of an object. For all provided strings, unused empty lines are removed, and the indentation of the first nonempty line is removed from all lines if possible. This allows better indentation when used as a decorator. Unused empty lines means initial empty lines for ``pre``, and final empty lines for ``post``. | def docstring(
docstring: str = None, *, pre: str = None, post: str = None
) -> Callable[[U], U]:
def edit_docstring(obj: U) -> U:
obj.__doc__ = "".join(
(
clean_docstring(pre or "", unused="pre"),
clean_docstring(docstring or (obj.__doc__ or "")),
clean_docstring(post or "", unused="post"),
)
)
return obj
return edit_docstring | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_docstring(doc: str, unused: Literal[\"pre\", \"post\"] = None) -> str:\n doc = doc.split(\"\\n\")\n if unused == \"pre\":\n try:\n index = next(i for i, l in enumerate(doc) if l.strip())\n doc = doc[index:]\n except StopIteration:\n doc = []\n elif unused == \"post\":\n try:\n index = next(i for i, l in enumerate(reversed(doc)) if l.strip())\n doc = doc[: len(doc) - index]\n except StopIteration:\n doc = []\n if doc:\n first_line = doc[0]\n index = len(first_line) - len(first_line.lstrip())\n indent = first_line[:index]\n if all(l.startswith(indent) for l in doc if l.strip()):\n doc = [(l[index:] if l.strip() else l) for l in doc]\n return \"\\n\".join(doc)",
"def docfmt(**kwargs):\n kwargs = {k: v.lstrip() for k, v in kwargs.items()}\n\n def outer(fn):\n buf = []\n if fn.__doc__ is None:\n return fn\n formatsiter = string.Formatter().parse(fn.__doc__)\n for literal, field, fmtspec, conv in formatsiter:\n assert conv is None\n assert not fmtspec\n buf.append(literal)\n if field is not None:\n # get indentation\n lines = literal.rsplit(\"\\n\", 1)\n if _only_spaces(lines[-1]):\n indent = \" \" * len(lines[-1])\n valuelines = kwargs[field].splitlines(True)\n # first line\n buf.append(valuelines[0])\n # subsequent lines are indented\n buf.extend([indent + ln for ln in valuelines[1:]])\n else:\n buf.append(kwargs[field])\n fn.__doc__ = \"\".join(buf)\n return fn\n\n return outer",
"def docstring_formatter(*args, **kwargs):\n\n def dec(obj):\n obj.__doc__ = obj.__doc__.format(*args, **kwargs)\n return obj\n\n return dec",
"def indent_docstring_by_1(s):\r\n # In reST, it's useful to have strings that are similarly-indented.\r\n # If we have a classdoc indented by 2 next to an __init__ funcdoc indented\r\n # by 4, reST doesn't format things nicely. Oh, totally-dedenting doesn't\r\n # format nicely either.\r\n\r\n # Docstring indentation: more gnarly than you'd think:\r\n # http://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation\r\n if not s: return s\r\n # Convert tabs to spaces (following the normal Python rules)\r\n # and split into a list of lines:\r\n lines = s.expandtabs().splitlines()\r\n # Determine minimum indentation (first line doesn't count):\r\n indent = 999\r\n for line in lines[1:]:\r\n stripped = line.lstrip()\r\n if stripped:\r\n indent = min(indent, len(line) - len(stripped))\r\n # Remove indentation (first line is special):\r\n trimmed = [lines[0].strip()]\r\n if indent < 999:\r\n for line in lines[1:]:\r\n trimmed.append(line[indent:].rstrip())\r\n # Strip off trailing and leading blank lines:\r\n while trimmed and not trimmed[-1]:\r\n trimmed.pop()\r\n while trimmed and not trimmed[0]:\r\n trimmed.pop(0)\r\n # Return a single string:\r\n return '\\n'.join([\" \" + t for t in trimmed])",
"def docstring_hack():\n pass",
"def _docs_params(**kwds):\n\n def dec(obj):\n obj.__orig_doc__ = obj.__doc__\n obj.__doc__ = dedent(obj.__doc__).format_map(kwds)\n return obj\n\n return dec",
"def docstring_format(*values):\n\n def _decorator_(function):\n function.__doc__ = function.__doc__.format(*values).replace('_', '\\_')\n return function\n\n return _decorator_",
"def format_docstring_to_markdown(docstr: str) -> str:\n r = re.compile(r\"\\s\\s+\", re.MULTILINE)\n clean_docstr_list = []\n prev_line = None\n in_code_block = False\n in_param = False\n first_code_indentation = None\n\n # Parse each line to determine if it needs formatting\n for original_line in docstr.split(\"\\n\"):\n # Remove excess spaces from lines formed by concatenated docstring lines.\n line = r.sub(\" \", original_line)\n # In some old docstrings, this indicates the start of an example block.\n if line.strip() == \"::\":\n in_code_block = True\n clean_docstr_list.append(\"```\")\n\n # All of our parameter/arg/etc lists start after a line ending in ':'.\n elif line.strip().endswith(\":\"):\n in_param = True\n # This adds a blank line before the header if one doesn't already exist.\n if prev_line != \"\":\n clean_docstr_list.append(\"\")\n # Turn the line into an H4 header\n clean_docstr_list.append(f\"#### {line.strip()}\")\n elif line.strip() == \"\" and prev_line != \"::\":\n # All of our parameter groups end with a line break, but we don't want to exit a parameter block due to a\n # line break in a code block. However, some code blocks start with a blank first line, so we want to make\n # sure we aren't immediately exiting the code block (hence the test for '::' on the previous line.\n in_param = False\n # Add the markdown indicator to close a code block, since we aren't in one now.\n if in_code_block:\n clean_docstr_list.append(\"```\")\n in_code_block = False\n first_code_indentation = None\n clean_docstr_list.append(line)\n else:\n if in_code_block:\n # Determine the number of spaces indenting the first line of code so they can be removed from all lines\n # in the code block without wrecking the hierarchical indentation levels of future lines.\n if first_code_indentation == None and line.strip() != \"\":\n first_code_indentation = len(\n re.match(r\"\\s*\", original_line, re.UNICODE).group(0)\n )\n if line.strip() == \"\" and prev_line == \"::\":\n # If the first line of the code block is a blank one, just skip it.\n pass\n else:\n # Append the line of code, minus the extra indentation from being written in an indented docstring.\n clean_docstr_list.append(original_line[first_code_indentation:])\n elif \":\" in line.replace(\":ref:\", \"\") and in_param:\n # This indicates a parameter. arg. or other definition.\n clean_docstr_list.append(f\"- {line.strip()}\")\n else:\n # This indicates a regular line of text.\n clean_docstr_list.append(f\"{line.strip()}\")\n prev_line = line.strip()\n clean_docstr = \"\\n\".join(clean_docstr_list)\n return clean_docstr",
"def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)",
"def trim_docstring(docstring):\r\n lines = docstring.expandtabs().splitlines()\r\n\r\n # Find minimum indentation of any non-blank lines after first line.\r\n from sys import maxint\r\n margin = maxint\r\n for line in lines[1:]:\r\n content = len(line.lstrip())\r\n if content:\r\n indent = len(line) - content\r\n margin = min(margin, indent)\r\n\r\n # Remove indentation.\r\n if lines:\r\n lines[0] = lines[0].lstrip()\r\n if margin < maxint:\r\n for i in range(1, len(lines)):\r\n lines[i] = lines[i][margin:]\r\n\r\n # Remove any trailing or leading blank lines.\r\n while lines and not lines[-1]:\r\n lines.pop()\r\n while lines and not lines[0]:\r\n lines.pop(0)\r\n return '\\n'.join(lines)",
"def description_from_docstring(self, fn):\n docstring = getattr(fn, '__doc__', None) or ''\n description = normalize_indent(docstring)\n return self.description(description)(fn)",
"def rewriteDocstringForPerl (docstring):\n\n # Get rid of the /** ... */ and leading *'s.\n docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')\n\n # Get rid of indentation\n p = re.compile('^\\s+(\\S*\\s*)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of paragraph indentation not caught by the code above.\n p = re.compile('^[ \\t]+(\\S)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of blank lines.\n p = re.compile('^[ \\t]+$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Get rid of the %foo quoting.\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'I<\\1>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1I<\\2>', docstring)\n\n docstring = docstring.replace('<ul>', '\\n=over\\n')\n docstring = docstring.replace('<li> ', '\\n=item\\n\\n')\n docstring = docstring.replace('</ul>', '\\n=back\\n')\n\n docstring = docstring.replace('@return', 'Returns')\n docstring = docstring.replace(' < ', ' E<lt> ').replace(' > ', ' E<gt> ')\n docstring = re.sub('<code>([^<]*)</code>', r'C<\\1>', docstring)\n docstring = re.sub('<b>([^<]*)</b>', r'B<\\1>', docstring) \n\n return docstring",
"def old_function_with_docstring(x, y):\n return x + y",
"def wrapper(func):\n docstring = func.__doc__\n helpdict = parse_docstring(\n docstring, key_symbol=key_symbol,\n description_symbol=description_symbol)\n func.helpdict = helpdict\n # remove markers\n docstring = docstring.replace(key_symbol, '')\n func.__doc__ = docstring.replace(description_symbol, '')\n return func",
"def describe_docstring(doc_string, indentation=None):\n text = escape_triple_quotes(doc_string)\n text = u'\"\"\"\\n' + text + '\\n\"\"\"\\n'\n\n if indentation:\n text = indent(text, indentation)\n return text",
"def _format_obj(cls, **kwargs):\n def doc_rebuilder(obj):\n if kwargs.pop('_VOID_',False):\n return ''\n try:\n doc = getattr(obj,'__doc__')\n assert doc\n except:\n return ''\n else:\n return doc.format(**kwargs) # str(doc).format(**kwargs)\n return doc_rebuilder",
"def docstring_parameter(*args, **kwargs):\n\n def dec(obj):\n obj.__doc__ = obj.__doc__.format(*args, **kwargs)\n return obj\n\n return dec",
"def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal",
"def update_docstring(instance):\n try:\n docstring = instance.api_map['doc']\n except (KeyError, TypeError):\n docstring = 'No docstring provided.'\n\n instance.__class__.__doc__ = docstring\n instance.__class__.__call__.__signature__ = construct_signature(instance)\n\n return docstring",
"def convert_doxygen_docstring(lines, name):\n\n lines = lines[:]\n newlines = []\n indent = 0\n reading_desc = False\n\n while lines:\n line = lines.pop(0)\n if line.startswith(\"////\"):\n continue\n\n line = line.rstrip()\n if line.startswith('///<'):\n strline = line[4:]\n else:\n strline = line\n\n strline = strline.lstrip('/ \\t')\n\n if strline == \"**\" or strline == \"*/\":\n continue\n\n if strline.startswith(\"** \"):\n strline = strline[3:]\n elif strline.startswith(\"* \"):\n strline = strline[2:]\n elif strline == \"*\":\n strline = \"\"\n\n strline = strline.lstrip(' \\t')\n\n if strline.startswith('@'):\n special = strline.split(' ', 1)[0][1:]\n if special == 'par' and strline.endswith(':') and lines and '@code' in lines[0]:\n newlines.append(' '*indent + strline[5:] + ':')\n newlines.append('')\n line = lines.pop(0)\n offset = line.index('@code')\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"verbatim\" or special == \"code\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. code-block:: guess')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"f[\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. math::')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@f]' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == 'param':\n #TODO\n #if extra is not None:\n # _, name, desc = strline.split(' ', 2)\n # extra['param:' + name] = desc\n continue\n elif special == 'deprecated':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n\n # I'd love to use the proper Sphinx deprecated tag, but it\n # requires a version number, whereas Doxygen doesn't.\n newlines.append('*Deprecated:* ' + convert_doxygen_format(value, name))\n newlines.append('')\n continue\n elif special in ('brief', 'return', 'returns'):\n #TODO\n #if extra is not None:\n # _, value = strline.split(' ', 1)\n # extra[special] = value\n continue\n elif special == 'details':\n strline = strline[9:]\n elif special == 'sa' or special == 'see':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n values = value.split(',')\n\n for i, value in enumerate(values):\n result = resolve_reference(value.partition('(')[0], name)\n if result:\n values[i] = ':{0}:`{1}`'.format(*result)\n else:\n values[i] = ':obj:`{0}`'.format(value)\n\n if special == 'see':\n newlines.append('See {}.'.format(', '.join(values)))\n else:\n newlines.append('See also {}.'.format(', '.join(values)))\n newlines.append('')\n continue\n elif special in ('note', 'warning'):\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. %s:: ' % (special))\n newlines.append('')\n newlines.append(' ' + convert_doxygen_format(strline[2 + len(special):], name))\n while lines and lines[0].strip(' *\\t/'):\n line = lines.pop(0).lstrip(' *\\t')\n newlines.append(' ' + convert_doxygen_format(line, name))\n\n newlines.append('')\n continue\n elif special == 'since':\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. 
versionadded:: ' + strline[7:])\n newlines.append('')\n continue\n else:\n print(\"Unhandled documentation tag: @\" + special)\n\n if strline or len(newlines) > 0:\n newlines.append(' '*indent + convert_doxygen_format(strline, name))\n\n return newlines",
"def doc_string():\n pass # pass does nothing",
"def print_doc1(*args, **kwargs):\n # output settings from kwargs or take defaults\n color = kwargs.get('color', blue)\n bold = kwargs.get('bold', False)\n prefix = kwargs.get('prefix', '')\n tail = kwargs.get('tail', '\\n')\n\n def real_decorator(func):\n '''real decorator function'''\n @wraps(func)\n def wrapper(*args, **kwargs):\n '''the wrapper function'''\n try:\n prgf = first_paragraph(func.__doc__)\n print(color(prefix + prgf + tail, bold))\n except AttributeError as exc:\n name = func.__name__\n print(red(flo('{name}() has no docstring')))\n raise(exc)\n return func(*args, **kwargs)\n return wrapper\n\n invoked = bool(not args or kwargs)\n if not invoked:\n # invoke decorator function which returns the wrapper function\n return real_decorator(func=args[0])\n\n return real_decorator",
"def print_doc1(*args, **kwargs):\n # output settings from kwargs or take defaults\n color = kwargs.get('color', blue)\n bold = kwargs.get('bold', False)\n prefix = kwargs.get('prefix', '')\n tail = kwargs.get('tail', '\\n')\n\n def real_decorator(func):\n '''real decorator function'''\n @wraps(func)\n def wrapper(*args, **kwargs):\n '''the wrapper function'''\n try:\n prgf = first_paragraph(func.__doc__)\n print(color(prefix + prgf + tail, bold))\n except AttributeError as exc:\n name = func.__name__\n print(red(flo('{name}() has no docstring')))\n raise(exc)\n return func(*args, **kwargs)\n return wrapper\n\n invoked = bool(not args or kwargs)\n if not invoked:\n # invoke decorator function which returns the wrapper function\n return real_decorator(func=args[0])\n\n return real_decorator",
"def descr(text=None, **kwargs):\n\n def decorator(func):\n func.short_description = text or func.__name__\n if \"allow_tags\" not in kwargs:\n kwargs[\"allow_tags\"] = True\n for attr, value in kwargs.iteritems():\n setattr(func, attr, value)\n return func\n return decorator",
"def strip_docstring(blob):\n docstring = True\n while docstring == True:\n match_docstring = re.search('\\n\\s*\"\"\"[^\"\"\"]*\"\"\"', blob)\n if not match_docstring:\n docstring = False\n else:\n blob = blob.replace(blob[match_docstring.span()[0]:match_docstring.span()[1]], '')\n return blob",
"def trim(docstring):\n if not docstring:\n return ''\n\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = docstring.expandtabs().splitlines()\n\n # Determine minimum indentation (first line doesn't count):\n try:\n indent = min(len(l) - len(l.lstrip()) for l in lines[1:] if l)\n except ValueError:\n indent = 0\n\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n for line in lines[1:]:\n trimmed.append(line[indent:].rstrip())\n\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n\n return '\\n'.join(trimmed) + '\\n'",
"def add_notice_to_docstring(\n doc, instructions, no_doc_str, suffix_str, notice):\n if not doc:\n lines = [no_doc_str]\n else:\n lines = doc.splitlines()\n lines[0] += ' ' + suffix_str\n\n notice = [''] + notice + [instructions]\n\n if len(lines) > 1:\n # Make sure that we keep our distance from the main body\n if lines[1].strip():\n notice.append('')\n\n lines[1:1] = notice\n else:\n lines += notice\n\n return '\\n'.join(lines)",
"def md_docstring(docstring):\n content = []\n lines = textwrap.dedent(docstring).splitlines()\n content.append(md_escape(lines[0]))\n lines = lines[1:]\n while lines and (not lines[0] or lines[0].isspace()):\n lines = lines[1:]\n\n if not all(l.isspace() for l in lines):\n content.append(md_code('\\n'.join(lines), language=None))\n content.append('')\n return content",
"def main_docstring():",
"def empty_fn_docstr_pass():\n pass"
] | [
"0.69348717",
"0.6336572",
"0.61864924",
"0.61855805",
"0.6176088",
"0.6044348",
"0.6012267",
"0.59857607",
"0.59638166",
"0.5815031",
"0.5807178",
"0.5780133",
"0.5778427",
"0.5748335",
"0.57374907",
"0.573107",
"0.5676643",
"0.55951655",
"0.55807894",
"0.5573089",
"0.5561191",
"0.5543156",
"0.5543156",
"0.55266607",
"0.5503974",
"0.54804176",
"0.5479028",
"0.54700786",
"0.54255533",
"0.54166937"
] | 0.74438393 | 0 |
Class decorator to autoformat string arguments in the __init__ method. Modify the class __init__ method in place by wrapping it. The wrapped class will call the format() method of the arguments specified in `params` that exist in the original signature, passing all other arguments as a dictionary to str.format() | def autoformat(
cls: Type[U] = None,
/,
params: Union[str, Iterable[str]] = ( # pylint: disable=unsubscriptable-object
"message",
"msg",
),
):
if isinstance(params, str):
params = (params,)
if cls is None:
return functools.partial(autoformat, params=params)
orig_init = cls.__init__
signature = inspect.signature(orig_init)
params = signature.parameters.keys() & set(params)
@functools.wraps(orig_init)
def init(*args, **kwargs):
bounds = signature.bind(*args, **kwargs)
bounds.apply_defaults()
pre_formatted = {
name: bounds.arguments.pop(name)
for name in params
if name in bounds.arguments
}
formatted = {
name: string.format(**bounds.arguments)
for name, string in pre_formatted.items()
}
for name, arg in formatted.items():
bounds.arguments[name] = arg
return orig_init(*bounds.args, **bounds.kwargs)
# init.__signature__ = signature
setattr(cls, "__init__", init)
return cls | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __format__(self, *args, **kwargs): # real signature unknown\r\n pass",
"def __init__(**params):",
"def format(cls, **kwargs):\n def _decorator(obj):\n if inspect.isclass(obj):\n _class_decorator = cls.format_class(**kwargs) \n return _class_decorator(obj)\n else:\n _func_decorator = cls.format_method(**kwargs) \n return _func_decorator(obj)\n return _decorator",
"def format(self, *args, **kwargs) -> String:\n pass",
"def params(cls):\n def method_decorator(method):\n @wraps(method)\n def wrapper(self, *args):\n return method(self, *map(cls, args))\n return wrapper\n return method_decorator",
"def auto_repr(cls=None, /, *, args=None, kwargs=None):\n if cls is not None: # infer parameters and return decorated class\n kwargs = [key for key in signature(cls.__init__).parameters if key is not 'self'] # skip self\n setattr(cls, '__repr__', make_repr(kwargs=kwargs))\n return cls\n\n # Otherwise we were passed the parameters. Return a decorator function that uses them\n def decorator(to_decorate):\n setattr(to_decorate, '__repr__', make_repr(args=args, kwargs=kwargs))\n return to_decorate\n\n return decorator",
"def __init__(self, class_object: type, method_str: str, *args, **kwargs):\n self.class_object = class_object\n self.method_str = method_str\n self.args = args\n self.kwargs = kwargs",
"def format(self, *args, **kwargs):\n raise NotImplementedError()",
"def __call__(self, cls):\n cls_dict = dict(cls.__dict__)\n\n def wrap_str(w_self):\n return self.pformat(w_self)\n\n cls_dict['__repr__'] = wrap_str\n return type(cls.__name__, cls.__bases__ if hasattr(cls, \"__bases__\") else (), cls_dict)",
"def format(self, *args, **kwargs):\n return self._format(args, kwargs)",
"def fromParams(self, params):\n args = dict(map(lambda kv: (str(kv[0]),kv[1]), params.items()))\n return self(**args)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, fmt, datefmt=None):\n logging.Formatter.__init__(self, fmt, datefmt)",
"def __init__(self, *args, **kwargs):\n\n self.logger = util.get_logger()\n self.args = args\n self.kwargs = kwargs\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def decorate(cls, attr_names=None, **kwargs):\n if attr_names is None:\n attr_names = ()\n elif not isinstance(attr_names,(list,tuple)):\n raise DecoError('type {} not accepted for list/tuple of decorated members'.format(type(attr_names)))\n # various local objects\n obj_decorator = cls._format_obj(**kwargs)#analysis:ignore\n format_decorator = cls.format(**kwargs)#analysis:ignore\n str_format = lambda s: s.format(**kwargs) if s is not None else '' \n special_members = ['__metaclass__','__module__','__weakref__','__dict__','__class__']#analysis:ignore\n def decorator(obj, obj_name=None):\n # deal with special '__doc__' member\n if obj_name=='__doc__':\n try: \n return str_format(obj)\n except: return obj or ''\n # don't consider other special members and other special members unless \n # it is explicitely to decorate them (e.g. __init__)\n elif obj_name in special_members: \\\n # or (obj_name.startswith('__') and obj_name.endswith('__') and obj_name not in attr_names):\n return obj\n # deal with properties\n elif isinstance(obj, property): \n try: \n return property(obj.__get__, obj.__set__, obj.__delattr__, str_format(obj.__doc__))\n except: return obj # e.g. property not decorated\n # deal with class members\n elif inspect.isclass(obj):\n try: \n return cls.format_class(**kwargs)(obj) \n except: return obj\n # deal with method members\n elif inspect.isroutine(obj): # inspect.ismethod(obj):\n try: \n return cls.format_method(**kwargs)(obj) \n except: return obj\n ## deal with attribute members\n else: \n try: # whenever __doc__ is writeable\n obj.__doc__ = obj_decorator(obj)\n return obj\n except: \n return obj\n return class_decorator(decorator, *attr_names)",
"def make_string_repr(instance):\n arg_list = [] if args is None else [getattr(instance, arg) for arg in args if hasattr(instance, arg)]\n\n kwarg_dict = {} if kwargs is None else {key: getattr(instance, key) for key in kwargs if hasattr(instance, key)}\n\n # Check that we could bind the args/kwargs that found matches to the __init__ method\n # Basically this is checking that any arguments we didn't find on the instance have default values\n signature(instance.__class__).bind(*arg_list, **kwarg_dict)\n\n return instance.__class__.__name__ + '(' + format_arguments(*arg_list, **kwarg_dict) + ')'",
"def __init__(self, *args, **kwargs):\n for dictionary in [_ for _ in args if isinstance(_, dict)]:\n for key in dictionary:\n setattr(self, key, dictionary[key])\n for key in kwargs:\n setattr(self, key, kwargs[key])",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__args = args\n self.__kwargs = kwargs",
"def initialize_params(self, params):\n pass",
"def init_params(cls, *args, **kwargs):\n sig = inspect.signature(cls.__init__)\n # The 'self' parameter needs to be removed or the first *args will be\n # assigned to it\n self_param = sig.parameters.get(\"self\")\n new_params = list(sig.parameters.values())\n new_params.remove(self_param)\n sig = sig.replace(parameters=new_params)\n boundargs = sig.bind_partial(*args, **kwargs)\n boundargs.apply_defaults()\n return boundargs.arguments",
"def __init__(self, **parameters):\n self.parameters = parameters",
"def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs"
] | [
"0.6549212",
"0.6235333",
"0.6221562",
"0.6095933",
"0.59771514",
"0.59307534",
"0.5884502",
"0.5831265",
"0.58071184",
"0.57440454",
"0.57300246",
"0.5687825",
"0.5687825",
"0.5687825",
"0.5687825",
"0.5687825",
"0.5687825",
"0.5687825",
"0.5687825",
"0.5687825",
"0.568708",
"0.56631684",
"0.5608551",
"0.5604235",
"0.55653983",
"0.5538371",
"0.55015695",
"0.5481746",
"0.5471861",
"0.5469104"
] | 0.8336647 | 0 |
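A minimal usage sketch of the autoformat decorator from the row above; the exception class and message below are hypothetical and only illustrate the documented behaviour (the named string argument is formatted with the remaining __init__ arguments):

    @autoformat
    class MismatchError(Exception):
        def __init__(self, message: str = "expected {expected!r}, got {actual!r}", expected=None, actual=None):
            super().__init__(message)
            self.expected = expected
            self.actual = actual

    err = MismatchError(expected=1, actual=2)
    # str(err) == 'expected 1, got 2'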
Removes a parameter from a Signature object. If param is an int, remove the parameter at that position, else remove any parameter with that name | def _sig_without(sig: inspect.Signature, param: Union[int, str]) -> inspect.Signature:
if isinstance(param, int):
params = list(sig.parameters.values())
params.pop(param)
else:
params = [p for name, p in sig.parameters.items() if name != param]
return sig.replace(parameters=params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def removeParameter(cTag, name): #@NoSelf",
"def removeParameter(self, *args):\n return _libsbml.KineticLaw_removeParameter(self, *args)",
"def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()",
"def removeParameter(self, *args):\n return _libsbml.Model_removeParameter(self, *args)",
"def remove_param(self, step_id, name):\n if step_id in self._params:\n del self._params[step_id][name]",
"def remove_parameter(self, pkey):\n if pkey not in self.parameters:\n raise KeyError(\"Parameter not found at object key: {}\".format(pkey))\n del self.parameters[pkey]",
"def delete_parametertype(request, parametertype, **_kwargs):\n pass",
"def delete_parameter(request, parameter, **_kwargs):\n pass",
"def removeParameter(self, name):\r\n try:\r\n self._parameters.pop(name).destroy()\r\n except KeyError:\r\n raise InvalidRequest('Can not remove a non existent node '\r\n \"'{0}' from the container.\".format(name))",
"def remove_parameter(self, obj):\n try:\n index = self.parameters.index(obj)\n self.parameters.pop(index)\n return True\n except ValueError:\n # the object cannot be removed because it is not present\n logger.warn(\"Parameter {0} not present, can't be remove from the list\".format(obj))\n return False",
"def view_removeParameter(self, user, cTag, name):\r\n try:\r\n user.containers[cTag].removeParameter(name)\r\n except KeyError:\r\n raise InvalidRequest('Can not remove Parameter, because Container '\r\n '{0} does not exist.'.format(cTag))\r\n\r\n # TODO: Return some info about success/failure of request\r",
"def remove_parameters(self):\n self.parameters = []",
"def removeLocalParameter(self, *args):\n return _libsbml.KineticLaw_removeLocalParameter(self, *args)",
"def remove(self, p_int, p_int_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def _clean_init_signature(node: sphinx.addnodes.desc) -> None:\n signode = cast(sphinx.addnodes.desc_signature, node.children[0])\n # Remove first parameter.\n for param in signode.traverse(condition=sphinx.addnodes.desc_parameter):\n if param.children[0].astext() == 'self':\n param.parent.remove(param)\n break\n\n # Remove return type.\n for node in signode.traverse(condition=sphinx.addnodes.desc_returns):\n node.parent.remove(node)",
"def removeParameter(self):\n row = self.ui.parameterList.currentRow()\n\n if row != -1:\n self.ui.parameterList.removeRow(row)",
"def remove_im_params(model, im):\n for param in model.parameters:\n # If the node doesn't exist e.g., it may have already been removed),\n # skip over the parameter without error\n try:\n im.remove_node(param.name)\n except:\n pass",
"def delete_params(self, base_key, *params):\n self.params[\n base_key + '.no_' + '|'.join(params)] = delete_params(\n self.params[base_key], *params)",
"def delete_params(self, base_key, *params):\n self.params[base_key + \".no_\" + \"|\".join(params)] = self.delete_params_s(self.params[base_key], params)",
"def delete_query_parameter(url, param_name):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params.pop(param_name, None)\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n\n return url",
"def remove(self, *args):\n return _libsbml.ListOfParameters_remove(self, *args)",
"def remove_parameter_check(self, parameter):\n if isinstance(parameter, list):\n [self.remove_parameter_check(p) for p in parameter]\n elif parameter in self.parameter_checks:\n self.parameter_checks.pop(parameter)\n else:\n log.warning(f'No check was configured for parameter {parameter}.')",
"def delete_params(s, *params):\n patt = '(?s)' + '|'.join(\n r'(?<=\\n)' + s + r'\\s*:.+?\\n(?=\\S+|$)' for s in params)\n return re.sub(patt, '', '\\n' + s.strip() + '\\n').strip()",
"def delete_params_s(s, params):\n patt = \"(?s)\" + \"|\".join(\"(?<=\\n)\" + s + \"\\s*:.+?\\n(?=\\S+|$)\" for s in params)\n return re.sub(patt, \"\", \"\\n\" + s.strip() + \"\\n\").strip()",
"def remove_info(config):\n clean_config = copy.deepcopy(config)\n\n if 'parameters' in clean_config:\n params = clean_config['parameters']\n for name in params:\n val = params[name]\n if isinstance(val, dict):\n # This should not generally happen since we deal with it in update_param_info, but just in case\n if 'val' not in val:\n raise ConfigurationError(\n \"Parameter info remove error.\"\n \" Parameter that is defined by a dictionary must contain 'val' field that\"\n \" defines its default value. Found this definition: %s=%s\" % (name, val)\n )\n params[name] = val['val']\n\n return clean_config",
"def deleteParameter(self, session: Session, name: str) -> None:\n\n try:\n p = self._globalParametersDbHandler.getParameter(session, name)\n\n self._logger.debug('Deleting parameter [%s]' % (name))\n\n session.delete(p)\n\n session.commit()\n\n self._logger.info('Deleted parameter [%s]' % name)\n except TortugaException:\n session.rollback()\n raise\n except Exception as ex:\n session.rollback()\n self._logger.exception(str(ex))\n raise",
"def remove(self, x) -> None:\n pass",
"def discard(self, param):\n self._data.discard(param)",
"def clear(parameter: int):\n if parameter > 0 and parameter % 2 == 0:\n return parameter\n raise ArgumentError('parameter N should be integer, positive and even')",
"def delete_param(param, verbose=False):\n try:\n if param == GLOBALNS:\n # not allowed to delete the root of the tree as it must always\n # have a value. the equivalent command is setting the root to an\n # empty dictionary\n get_param_server().setParam(GLOBALNS, {})\n if verbose:\n print(\"deleted ENTIRE parameter server\")\n else:\n get_param_server().deleteParam(param)\n if verbose:\n print(\"deleted parameter [%s]\"%param)\n except socket.error:\n raise RosParamIOException(\"Unable to communicate with master!\")"
] | [
"0.7307341",
"0.6964018",
"0.694667",
"0.6563779",
"0.6451144",
"0.6374244",
"0.6289401",
"0.6283459",
"0.62516946",
"0.6223711",
"0.6092202",
"0.60898453",
"0.60846263",
"0.6074248",
"0.6055217",
"0.5930937",
"0.5896282",
"0.58926374",
"0.58634466",
"0.5804568",
"0.57228655",
"0.5687137",
"0.56479627",
"0.5561358",
"0.54974383",
"0.54824126",
"0.54750925",
"0.5422528",
"0.5413916",
"0.5409512"
] | 0.8153545 | 0 |
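A short illustration of _sig_without from the row above; the example function is made up:

    import inspect

    def example(self, x, y=0):
        ...

    sig = inspect.signature(example)
    print(_sig_without(sig, 0))    # (x, y=0)  -- dropped by position
    print(_sig_without(sig, "y"))  # (self, x) -- dropped by name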
Merges two signature objects, dropping the return annotations | def _sig_merge(lsig: inspect.Signature, rsig: inspect.Signature) -> inspect.Signature:
return inspect.Signature(
sorted(
list(lsig.parameters.values()) + list(rsig.parameters.values()),
key=lambda param: param.kind,
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_two_calls(self) -> None:",
"def mergeWith(self, others):",
"def _merge(self):\n raise NotImplementedError",
"def merge(): #Status: WIP\r\n pass",
"def merge(a: dict, b: dict) -> dict:\n return __merge(a, b)",
"def merge(self, other):\n # todo: Using the return value None to denote the identity is a\n # bit dangerous, since a function with no explicit return statement\n # also returns None, which can lead to puzzling bugs. Maybe return\n # a special singleton Identity object instead?\n raise NotImplementedError",
"def variant_add(v1: dict, v2: dict) -> Dict[str, Any]:\n left = set(v1.keys()).difference(v2.keys())\n right = set(v2.keys()).difference(v1.keys())\n joint = set(v1.keys()) & set(v2.keys())\n\n # deal with __migrator: ordering\n if \"__migrator\" in v2:\n ordering = v2[\"__migrator\"].get(\"ordering\", {})\n operation = v2[\"__migrator\"].get(\"operation\")\n # handle special operations\n if operation:\n return VARIANT_OP[operation](v1, v2)\n else:\n ordering = {}\n\n # special keys\n if \"__migrator\" in right:\n right.remove(\"__migrator\")\n\n # special keys in joint\n special_variants = {}\n if \"pin_run_as_build\" in joint:\n # For run_as_build we enforce the migrator's pin\n # TODO: should this just be a normal ordering merge, favoring more exact pins?\n joint.remove(\"pin_run_as_build\")\n special_variants[\"pin_run_as_build\"] = {\n **v1[\"pin_run_as_build\"],\n **v2[\"pin_run_as_build\"],\n }\n\n if \"zip_keys\" in joint:\n # zip_keys is a bit weird to join on as we don't have a particularly good way of identifying\n # a block. Longer term having these be named blocks would make life WAY simpler\n # That does require changes to conda-build itself though\n #\n # A zip_keys block is deemed mergeable if zkₛ,ᵢ ⊂ zkₘ,ᵢ\n zk_out = []\n zk_l = {frozenset(e) for e in v1[\"zip_keys\"]}\n zk_r = {frozenset(e) for e in v2[\"zip_keys\"]}\n\n for zk_r_i in sorted(zk_r, key=lambda x: -len(x)):\n for zk_l_i in sorted(zk_l, key=lambda x: -len(x)):\n # Merge the longest common zk first\n if zk_l_i.issubset(zk_r_i):\n zk_l.remove(zk_l_i)\n zk_r.remove(zk_r_i)\n zk_out.append(zk_r_i)\n break\n else:\n # Nothing to do\n pass\n\n zk_out.extend(zk_l)\n zk_out.extend(zk_r)\n zk_out = sorted(\n [sorted(zk) for zk in zk_out], key=lambda x: (len(x), str(x))\n )\n\n joint.remove(\"zip_keys\")\n special_variants[\"zip_keys\"] = zk_out\n\n joint_variant = {}\n for k in joint:\n v_left, v_right = ensure_list(v1[k]), ensure_list(v2[k])\n joint_variant[k] = variant_key_add(\n k, v_left, v_right, ordering=ordering.get(k, None)\n )\n\n out = {\n **toolz.keyfilter(lambda k: k in left, v1),\n **toolz.keyfilter(lambda k: k in right, v2),\n **joint_variant,\n **special_variants,\n }\n\n return out",
"def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()",
"def _merge_two(self, obj1, obj2):\r\n for uniq_ident in obj2.keys():\r\n if (uniq_ident not in obj1) \\\r\n or (obj1[uniq_ident]['modified'] \\\r\n < obj2[uniq_ident]['modified']):\r\n obj1[uniq_ident] = obj2[uniq_ident]\r\n\r\n return obj1 # self._dict_to_list(obj1)\r",
"def _merge_tensor_signatures(self, signatures):\n sorted_update = []\n if self._num_signature_dimensions() > 1:\n signature_indices = self._signature_types()\n for _, val in sorted(signatures.items(),\n key=lambda item: signature_indices[item[0]]):\n sorted_update.append(val)\n updates = array_ops_stack.stack(\n sorted_update, axis=0, name='merge_single_op_signatures')\n elif self._num_signature_dimensions() == 1:\n # Avoid stack operation if there is only a single signature.\n (_, val), = signatures.items()\n updates = val\n else:\n raise ValueError('Cannot merge 0 signatures. Check the value passed for '\n 'flag --signatures.')\n return updates",
"def merge(self, first, second):\n return second if self.failed(first) else first",
"def merge(self, *other):\n # Compute union of Fingerprints\n union = set().union(self, *other)\n # Create new fingerprint from union\n result = super(Fingerprint, type(self)).__new__(type(self), union)\n # Set n_flows to combination of self and other\n result.__setattr__('n_flows', self.n_flows + sum(o.n_flows for o in other))\n # Return result\n return result",
"def merge(*args):\n from ..operators.observable.merge import merge_\n return merge_(*args)",
"def merge(self, obj):\n pass",
"def make_union(self, *args, **kwargs): # real signature unknown\n pass",
"def merge_extras(extras1, extras2):\n if not extras1:\n return extras2\n if not extras2:\n return extras1\n return tuple(sorted(set(extras1) | set(extras2)))",
"def canBeMergedWith(self, other):",
"def merge(a, b):\n if isinstance(a, CONFIG_VALID) \\\n and isinstance(b, CONFIG_VALID):\n # dict update\n if isinstance(a, dict) and isinstance(b, dict):\n a.update(b)\n return a\n # list update\n _a = list(a)\n for x in list(b):\n if x not in _a:\n _a.append(x)\n return _a\n if a and b:\n raise Exception(\"Cannot merge\")\n raise NotImplementedError",
"def merge_two_dicts(self, x, y):\n z = x.copy()\n z.update(y)\n return z",
"def merge_schema(first, second):\n if not (type(first) == type(second) == dict):\n raise ValueError(\"Argument is not a schema\")\n\n if not (first.get('type') == second.get('type') == 'object'):\n raise NotImplementedError(\"Unsupported root type\")\n\n return merge_objects(first, second)",
"def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )",
"def _merge(x, y):\n for key in x:\n if key in y:\n x[key] = _merge(x[key], y[key])\n y[key] = None\n for key in y:\n if y[key] is not None:\n x[key] = y[key]\n return x",
"def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)",
"def test_merge_repl(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n self.assertEqual(ars.merge(ars2, 'LastLine', 'signature'), '')",
"def __finalize__(self, other, method=None, **kwargs):\n self = super().__finalize__(other, method=method, **kwargs)\n # merge operation: using metadata of the left object\n if method == \"merge\":\n for name in self._metadata:\n print(\"self\", name, self.au_columns, other.left.au_columns)\n object.__setattr__(self, name, getattr(other.left, name, None))\n # concat operation: using metadata of the first object\n elif method == \"concat\":\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\n return self",
"def __finalize__(self, other, method=None, **kwargs):\r\n # merge operation: using metadata of the left object\r\n if method == 'merge':\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other.left, name, None))\r\n # concat operation: using metadata of the first object\r\n elif method == 'concat':\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\r\n else:\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other, name, None))\r\n return self",
"def merge(self, a, b, path=None):\n if path is None: path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n if key == 'attributes':\n self.merge_attribute_defs(b, a)\n else:\n self.merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n # raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))\n self.append_or_replace(a,b,key, '/'.join(path + [str(key)]));\n else:\n a[key] = b[key]\n return a",
"def union(self, other): # -> BaseGeometry:\n ...",
"def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)",
"def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")"
] | [
"0.6488625",
"0.6069725",
"0.6052872",
"0.5887509",
"0.587492",
"0.5822402",
"0.57430786",
"0.5727546",
"0.5721868",
"0.5715418",
"0.568721",
"0.56440103",
"0.56340355",
"0.5614331",
"0.559623",
"0.55736935",
"0.5567006",
"0.55511916",
"0.5545177",
"0.55353576",
"0.55132633",
"0.54373896",
"0.5436827",
"0.54139453",
"0.5413089",
"0.5387118",
"0.5377879",
"0.5375054",
"0.5369311",
"0.5365474"
] | 0.7194756 | 0 |
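A sketch of _sig_merge from the row above applied to two made-up functions; parameters are re-sorted by kind and the return annotations are dropped:

    import inspect

    def f(a, *, flag=False): ...
    def g(b=1, **extra): ...

    merged = _sig_merge(inspect.signature(f), inspect.signature(g))
    print(merged)  # (a, b=1, *, flag=False, **extra)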
Class decorator to automatically support __post_init__() on classes. This is useful for attr.s decorated classes, because __attr_post_init__() doesn't support additional arguments. This decorator wraps the class __init__ in a new function that accepts the merged arguments and dispatches them to __init__ and then __post_init__() | def post_init(cls: Type[U]) -> Type[U]:
if not isinstance(cls, type):
raise TypeError("Can only decorate classes")
if not hasattr(cls, "__post_init__"):
raise TypeError("The class must have a __post_init__() method")
# Ignore the first argument which is the "self" argument
sig = init_sig = _sig_without(inspect.signature(cls.__init__), 0)
previous = [(cls, "__init__", sig)]
for parent in reversed(cls.__mro__):
if hasattr(parent, "__post_init__"):
post_sig = _sig_without(
inspect.signature(getattr(parent, "__post_init__")), 0
)
try:
sig = _sig_merge(sig, post_sig)
except Exception as err:
# find the incompatibility
for parent, method, psig in previous:
try:
_sig_merge(psig, post_sig)
except Exception:
break
else:
raise TypeError(
"__post_init__ signature is incompatible with the class"
) from err
raise TypeError(
f"__post_init__() is incompatible with {parent.__qualname__}{method}()"
) from err
# No exception
previous.append((parent, "__post_init__", post_sig))
# handles type annotations and defaults
# inspired by the dataclasses modules
params = list(sig.parameters.values())
localns = (
{
f"__type_{p.name}": p.annotation
for p in params
if p.annotation is not inspect.Parameter.empty
}
| {
f"__default_{p.name}": p.default
for p in params
if p.default is not inspect.Parameter.empty
}
| cls.__dict__
)
for i, p in enumerate(params):
if p.default is not inspect.Parameter.empty:
p = p.replace(default=Variable(f"__default_{p.name}"))
if p.annotation is not inspect.Parameter.empty:
p = p.replace(annotation=f"__type_{p.name}")
params[i] = p
new_sig = inspect.Signature(params)
# Build the new __init__ source code
self_ = "self" if "self" not in sig.parameters else "__post_init_self"
init_lines = [
f"def __init__({self_}, {_sig_to_def(new_sig)}) -> None:",
f"__original_init({self_}, {_sig_to_call(init_sig)})",
]
for parent, method, psig in previous[1:]:
if hasattr(parent, "__post_init__"):
if parent is not cls:
init_lines.append(
f"super({parent.__qualname__}, {self_}).{method}({_sig_to_call(psig)})"
)
else:
init_lines.append(f"{self_}.{method}({_sig_to_call(psig)})")
init_src = "\n ".join(init_lines)
# Build the factory function source code
local_vars = ", ".join(localns.keys())
factory_src = (
f"def __make_init__(__original_init, {local_vars}):\n"
f" {init_src}\n"
" return __init__"
)
# Create new __init__ with the factory
globalns = inspect.getmodule(cls).__dict__
ns: dict[str, Any] = {}
exec(factory_src, globalns, ns)
init = ns["__make_init__"](cls.__init__, **localns)
self_param = inspect.Parameter(self_, inspect.Parameter.POSITIONAL_ONLY)
init.__signature__ = inspect.Signature(
parameters=[self_param] + list(sig.parameters.values()), return_annotation=None
)
setattr(cls, "__init__", init)
return cls | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __post_init__(self, *args, **kwargs) -> None:\n # add other __init__ items here ...\n pass",
"def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls.__init__ = _wrap_init(cls.__init__, cls.__post_init_check)",
"def decorate_init(cls, f):\n def wrap(*args, **kwargs):\n if not hasattr(cls, '_init'):\n f(*args, **kwargs)\n cls._init = True\n return wrap",
"def __init__(self, decorated):\n self.decorated = decorated",
"def __init__(self, decorated):\n self.decorated = decorated",
"def __init__(self, decorated):\n self.decorated = decorated",
"def _set_init(cls):\n init = cls.class_type.instance_type.methods['__init__']\n init_sig = utils.pysignature(init)\n # get postitional and keyword arguments\n # offset by one to exclude the `self` arg\n args = _getargs(init_sig)[1:]\n cls._ctor_sig = init_sig\n ctor_source = _ctor_template.format(args=', '.join(args))\n glbls = {\"__numba_cls_\": cls}\n exec(ctor_source, glbls)\n ctor = glbls['ctor']\n cls._ctor = njit(ctor)",
"def __init__(self, classx, method_name, decorator_func):\n self.method_name = method_name\n self.decorator_func = decorator_func\n self.classx = classx\n self.patched_by_me = False",
"def kwargs_to_parent(cls):\n original_init = cls.__init__\n\n def new_init(self, *args, **kwargs):\n # pass only those kwargs to the dataclass which are expected\n dataclass_kwargs = {\n key: value\n for key, value in kwargs.items()\n if key in [f.name for f in dataclasses.fields(cls)]\n }\n\n # pass args and kwargs to the dataclasses' __init__\n original_init(self, *args, **dataclass_kwargs)\n\n # update kwargs with default arguments\n kwargs.update(dataclasses.asdict(self))\n\n # Pass only those arguments to solph component's __init__ that\n # are expected.\n init_expected_args = list(\n inspect.signature(super(cls, self).__init__).parameters\n )\n\n kwargs_expected = {\n key: value\n for key, value in kwargs.items()\n if key in init_expected_args\n }\n\n kwargs_unexpected = {\n key: value\n for key, value in kwargs.items()\n if key not in init_expected_args\n }\n\n if \"custom_attributes\" in init_expected_args:\n kwargs_expected[\"custom_attributes\"] = kwargs_unexpected\n\n if kwargs_unexpected and \"custom_attributes\" not in init_expected_args:\n warnings.warn(\n f\"No custom_attributes in parent class {cls.__mro__[1]}\"\n )\n\n super(cls, self).__init__(\n **kwargs_expected,\n )\n\n if not kwargs.get(\"build_solph_components\") is False:\n self.build_solph_components()\n\n cls.__init__ = new_init\n return cls",
"def __post_init__(self):\n super().__post_init__()",
"def after_class_creation(cls):\n pass",
"def __attrs_post_init__(self):",
"def autoprops_decorate(cls, # type: Type[T]\n include=None, # type: Union[str, Tuple[str]]\n exclude=None # type: Union[str, Tuple[str]]\n ):\n # type: (...) -> Type[T]\n # first check that we do not conflict with other known decorators\n check_known_decorators(cls, '@autoprops')\n\n # retrieve and filter the names\n init_fun = cls.__init__\n selected_names, init_fun_sig = read_fields_from_init(init_fun, include=include, exclude=exclude,\n caller=\"@autoprops\")\n\n # perform the class mod\n execute_autoprops_on_class(cls, init_fun=init_fun, init_fun_sig=init_fun_sig, prop_names=selected_names)\n\n return cls",
"def wrapper(self, *args, **kwargs):\n self.__wrapped_init__(*args, **kwargs)\n _process_dependencies(self)",
"def wrapper(self, *args, **kwargs):\n self.__wrapped_init__(*args, **kwargs)\n _process_dependencies(self)",
"def set_init_args(self, args_obj):\n if self and self[0][0] == '__init__':\n print(\"Only one __init__ step is allowed\")\n return\n self.insert(0, ('__init__', args_obj))",
"def __call__ (self, cls):\n # Define a wrapper function to capture the actual instantiation and __init__ params\n @wraps(cls)\n def wrapper_f(*args, **kwargs):\n #print(f'type of cls is {type(cls)}')\n peripheral = self.peripheral_type(**self.kwargs)\n o = cls(*args, **kwargs)\n o.message_debug(f\"Decorating class {cls.__name__} with {self.peripheral_type.__name__}\")\n o.attach_sensor(peripheral)\n return o\n return wrapper_f",
"def __init_subclass__(*args, **kwargs): # real signature unknown\n pass",
"def __init_subclass__(*args, **kwargs): # real signature unknown\n pass",
"def __init_subclass__(*args, **kwargs): # real signature unknown\n pass",
"def __init_subclass__(*args, **kwargs): # real signature unknown\n pass",
"def __init_subclass__(*args, **kwargs): # real signature unknown\n pass",
"def __init_subclass__(*args, **kwargs): # real signature unknown\n pass",
"def test_with_hook_init_param(self):\n class h_dup(funhook.Hook):\n def __init__(self, n):\n super(h_dup, self).__init__(n)\n self.accept_kwargs = False\n self.accept_pos_args = True\n self.accept_ret = False\n\n self._n = n\n\n def before(self, bnd, n):\n return (n+self._n, )\n\n\n class cls_pp(object):\n @funhook.attach_([h_dup(501)])\n def func(self, n):\n return n+1\n\n class cls_p1(cls_pp):\n pass\n\n class cls_p2(cls_pp):\n pass\n\n @funhook.setup_([adapt_hook_from()]) \n class cls_chd(cls_p1, cls_p2):\n def func(self, n):\n return n-1\n\n self.assertEqual(cls_pp().func(1), 503)\n self.assertEqual(cls_chd().func(1), 501)",
"def __init__(self):\n\n super(DynaField, self).__init__(**init_args)",
"def __init__(self, decoratedObj):\n\n self.__decoratedObj = decoratedObj",
"def __init_subclass__(cls, **kwargs):\n\n super().__init_subclass__(**kwargs)\n if hasattr(cls, \"suspicion_func_num\"):\n cls.runnable_managers.append(cls)",
"def __post_init__(self):\n pass",
"def __post_init__(self) -> 'None':",
"def __subclasshook__(*args):"
] | [
"0.7525738",
"0.7044511",
"0.6475061",
"0.61861414",
"0.61861414",
"0.61861414",
"0.61847025",
"0.6122355",
"0.6048974",
"0.59707534",
"0.5969232",
"0.5939027",
"0.59256953",
"0.59035474",
"0.59035474",
"0.5886067",
"0.58421826",
"0.579057",
"0.579057",
"0.579057",
"0.579057",
"0.579057",
"0.579057",
"0.5782987",
"0.5778398",
"0.57767624",
"0.5776394",
"0.57496107",
"0.57287574",
"0.5702693"
] | 0.78774583 | 0 |
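A hedged usage sketch of the post_init decorator from the row above; the class is hypothetical and shows only the documented effect (the generated __init__ accepts the merged arguments and forwards them to __init__ and then __post_init__):

    @post_init
    class Point:
        def __init__(self, x: int, y: int) -> None:
            self.x, self.y = x, y

        def __post_init__(self, scale: int = 1) -> None:
            self.x *= scale
            self.y *= scale

    p = Point(2, 3, scale=10)
    # p.x == 20, p.y == 30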
split an iterable based on the truth value of the function for each element. Arguments: func, a callable to apply to each element in the iterable; iterable, an iterable of elements to split. Returns: falsy, truthy, two tuples, the first with the elements e of the iterable where func(e) returns false, the second with the elements of the iterable for which func(e) is true | def split(func, iterable):
falsy, truthy = [], []
for e in iterable:
if func(e):
truthy.append(e)
else:
falsy.append(e)
return tuple(falsy), tuple(truthy) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_cond(f, iterable):\n split_point = [i for i, e in enumerate(iterable) if f(e)]\n split_point += [len(iterable)]\n return [iterable[i:j] for i, j in zip(split_point[:-1], split_point[1:])]",
"def split(iterator, criterion):\n a = []\n b = []\n for x in iterator:\n if criterion(x):\n a.append(x)\n else:\n b.append(x)\n\n return a, b",
"def split_if(seq, pred):\n\n retval = []\n for key, group in itertools.groupby(\n sorted(seq, key=pred, reverse=True), key=pred):\n retval.append(list(group))\n return retval",
"def partition(iterable, predicate):\n passes = list()\n fails = list()\n for element in iterable:\n if predicate(element):\n passes.append(element)\n else:\n fails.append(element)\n return passes, fails",
"def split_on(iterable, predicate):\n it = iter(iterable)\n\n # Initialize the chunk list with an item\n # StopIteration will be thrown if there are no further items in the iterator\n chunk = [it.next()]\n\n while True:\n try:\n item = it.next()\n\n if predicate(item):\n # If the next item should be in a new chunk then return the current chunk\n yield chunk\n # Then rest the chunk list\n chunk = [item]\n else:\n # Simply append the item to current chunk if it doesn't match the predicate\n chunk.append(item)\n\n except StopIteration:\n # If the end of the iterator is reached then simply return the current chunk\n yield chunk\n break",
"def split_list(items, pred):\n\n thisresult = []\n results = [thisresult]\n for i in items:\n thisresult.append(i)\n if pred(i):\n thisresult = []\n results.append(thisresult)\n return results",
"def partition(iterable : Iterable[T], predicate : Callable[[T], bool]) -> Tuple[Iterable[T], Iterable[T]]:\n\n iter1, iter2 = tee(iterable)\n return filterfalse(predicate, iter1), filter(predicate, iter2)",
"def partition(is_included_fn, items):\n item_by_exclusion = { True : [], False : [] }\n for item in items:\n # \"not\" to normalise all values to either True or False\n item_by_exclusion[not is_included_fn(item)].append(item)\n return (item_by_exclusion[False], item_by_exclusion[True])",
"def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]",
"def partition(pred, iterable):\n stream = list(iterable)\n matched = list(itertools.takewhile(pred, stream))\n unmatched = list(itertools.dropwhile(pred, stream))\n return matched, unmatched",
"def split_every(n, iterable):\r\n iterator = iter(iterable)\r\n return takewhile(bool, (list(islice(iterator, n)) for _ in repeat(None)))",
"def isplit(iterable, splitters):\n return [list(g) for k,g in itertools.groupby(iterable,lambda x:x in splitters) if not k]",
"def partition_strict(function, items):\n left = []\n right = []\n for item in items:\n (left if function(item) else right).append(item)\n return (left, right)",
"def every(lst, fn):\n return reduce(lambda acc, elem: acc and fn(elem), lst, True)",
"def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result",
"def lookahead(iterable):\n it = iter(iterable)\n data = next(it)\n for val in it:\n yield itertools.chain(data, (False, ))\n data = val\n\n yield itertools.chain(data, (True, ))",
"def split_list_by(lst, sepfunc, includesep):\n\tblocks = []\n\tblock = []\n\tfor elem in lst:\n\t\tif sepfunc(elem):\n\t\t\tif includesep:\n\t\t\t\tblock.append(elem)\n\t\t\tblocks.append(block)\n\t\t\tblock = []\n\t\telse:\n\t\t\tblock.append(elem)\n\tif len(block):\n\t\tblocks.append(block)\n\treturn blocks",
"def test_splitlist():\n lst = [4, 2, 3, 1, 6, 7]\n lt, pi, gt = splitlist(lst)\n if lt == [2, 3, 1] and pi == 4 and gt == [6, 7]:\n print(\"test splitlist OK!\")\n else:\n print(\"test splitlist Failed!\")",
"def test_split_by_iterable_is_empty(self):\n integers = []\n predicates = [predicate_1, predicate_2]\n\n r = list(multi_split_by(integers, predicates))\n self.assertEqual(1 + len(predicates), len(r))\n\n a, b, c = r\n self.assertIsNotNone(a)\n self.assertIsNotNone(b)\n self.assertIsNotNone(c)\n\n a = _consume(a)\n b = _consume(b)\n c = _consume(c)\n\n self.assertEqual([], a)\n self.assertEqual([], b)\n self.assertEqual([], c)",
"def test_split_by_predicates_is_empty(self):\n integers = [1, 2, 3, 4]\n predicates = []\n\n r = tuple(multi_split_by(integers, predicates))\n self.assertEqual(1 + len(predicates), len(r))\n\n a, = r\n self.assertIsNotNone(a)\n a = _consume(a)\n self.assertEqual([1, 2, 3, 4], a)",
"def partition(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n successes = []\n failures = []\n\n for item in seq:\n if iteratee(item):\n successes.append(item)\n else:\n failures.append(item)\n\n return successes, failures",
"def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list",
"def sync_filter(func, *iterables):\n return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len(\n iterables\n )",
"def ft_filter(function_to_apply, list_of_inputs):\n if not callable(function_to_apply):\n exit(\"First param should be a Function\")\n try:\n object_iter = iter(list_of_inputs)\n except TypeError:\n exit(\"Second Argument must be iterable\")\n lst = []\n for item in list_of_inputs:\n if function_to_apply(item) == True: \n lst.append(item)\n return lst",
"def lookahead(iterable):\n\n # Cf. http://stackoverflow.com/a/1630350/654755\n\n it = iter(iterable)\n\n # next(it) in Python 3\n last = it.next()\n\n for val in it:\n yield last, False\n last = val\n\n yield last, True",
"def separate_by(self, *criteria):\n def is_a(seq): return all(c(seq) for c in criteria)\n \n def op_separate(s):\n if s is None: return None, None\n return [s for s in s if is_a(s)], [s for s in s if not is_a(s)]\n tuple_array = self.element_wise(op_separate)\n\n return tuple_array.element_wise(lambda x: x[0]), tuple_array.element_wise(lambda x: x[1])",
"def partition(lst, pred):\n start = []\n append = start.append\n\n while lst:\n x, lst_ = lst.uncons\n if pred(x):\n break\n lst = lst_\n append(x)\n\n return List(start), lst",
"def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))",
"def chunks(iterable, size=20):\n stop = object()\n\n for chunk in recipes.grouper(size, iterable, fillvalue=stop):\n if chunk[-1] is stop:\n is_not_stop = functools.partial(operator.is_not, stop)\n\n yield tuple(itertools.takewhile(is_not_stop, chunk))\n\n break\n\n yield chunk",
"def custom_filter(function, iterable):\n map_list = []\n\n for i in iterable:\n if function(i):\n map_list.append(i)\n\n return map_list"
] | [
"0.72967714",
"0.6812678",
"0.6628116",
"0.6506657",
"0.63730377",
"0.6313598",
"0.6198382",
"0.5958652",
"0.59502995",
"0.59330523",
"0.589644",
"0.5895313",
"0.5894667",
"0.58582944",
"0.579485",
"0.5768882",
"0.57654256",
"0.570171",
"0.5645428",
"0.56209177",
"0.5508018",
"0.54942334",
"0.5492828",
"0.54870665",
"0.54841775",
"0.54279566",
"0.534375",
"0.5278326",
"0.5261207",
"0.52395946"
] | 0.8402982 | 0 |
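Example of the split helper from the row above: elements for which the predicate is falsy come first, truthy second.

    falsy, truthy = split(lambda x: x % 2, range(6))
    # falsy  == (0, 2, 4)
    # truthy == (1, 3, 5)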
Filter multiple iterables at once, selecting values at index i such that func(iterables[0][i], iterables[1][i], ...) is True | def sync_filter(func, *iterables):
return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len(
iterables
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))",
"def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result",
"def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]",
"def ft_filter(function_to_apply, list_of_inputs):\n if not callable(function_to_apply):\n exit(\"First param should be a Function\")\n try:\n object_iter = iter(list_of_inputs)\n except TypeError:\n exit(\"Second Argument must be iterable\")\n lst = []\n for item in list_of_inputs:\n if function_to_apply(item) == True: \n lst.append(item)\n return lst",
"def custom_filter(function, iterable):\n map_list = []\n\n for i in iterable:\n if function(i):\n map_list.append(i)\n\n return map_list",
"def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n raise NotImplementedError",
"def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list",
"def filter_(f: Callable[[A], Maybe[bool]], iterable: Iterable[A]\n ) -> Maybe[Iterable[A]]:\n return cast(Maybe[Iterable[A]], filter_m_(Just, f, iterable))",
"def filter_n(function, iterable, **kwargs) -> iter:\n n_pass, n_fail = 0, 0\n\n for item in iterable:\n if function(item, **kwargs):\n yield item\n n_pass += 1\n else:\n n_fail += 1\n\n LOGGER.info(\"Filter %s: output %s rows (dropped %s rows)\", function.__name__, n_pass, n_fail)",
"def filter(iterable, filter_func):\n for item in iterable:\n item = filter_func(item)\n if item is not None:\n yield item",
"def my_filter(function,lst):\n return list(x for x in lst if function(x))",
"def every(lst, fn):\n return reduce(lambda acc, elem: acc and fn(elem), lst, True)",
"def filter_compose(*fns: T.Callable[[T.Any], bool]):\n def composite(x):\n for f in fns:\n if not f(x):\n return False\n return True\n\n return composite",
"def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))",
"def filter_generic(mt_list, func):\r\n return [mt for mt in mt_list if func(mt)]",
"def filter(iterable, predicate):\n\n for x in iterable:\n if predicate(x):\n yield x",
"def filter(iteratee, seq):\n return _filter(fnc.iteratee(iteratee), seq)",
"def __map_and_filter(_input: MutableSequence[T],\n _map: Callable[[T], Any] = lambda x: x,\n _filter: Callable[[T], bool] = lambda x: True) -> MutableSequence[Any]:\n\n return [_map(x) for x in _input if _filter(x)]",
"def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt",
"def split_cond(f, iterable):\n split_point = [i for i, e in enumerate(iterable) if f(e)]\n split_point += [len(iterable)]\n return [iterable[i:j] for i, j in zip(split_point[:-1], split_point[1:])]",
"def filter_or(filters):\n def filt(item):\n for f in filters:\n if f(item):\n return True\n return False\n return filt",
"def ifilter_c(func):\n return functools.partial(ifilter, func)",
"def partition(iterable : Iterable[T], predicate : Callable[[T], bool]) -> Tuple[Iterable[T], Iterable[T]]:\n\n iter1, iter2 = tee(iterable)\n return filterfalse(predicate, iter1), filter(predicate, iter2)",
"def filter(x):\r\n # Is `x` a container we can iterate on?\r\n iter_on = None\r\n if isinstance(x, list) or isinstance(x, tuple):\r\n iter_on = x\r\n elif isinstance(x, dict):\r\n iter_on = x.iteritems()\r\n if iter_on is not None:\r\n return all(filter(y) for y in iter_on)\r\n else:\r\n return (isinstance(x, theano.Variable) or\r\n isinstance(x, theano.scan_module.until))",
"def filtered(self, keys, lst=None, func=\"all\"):\n lst = self if lst is None else lst\n if len(lst) == 0:\n raise ValueError(\"No rows in list\")\n return [row for row in self.filter(keys, lst, func=func)]",
"def multimap(funcs, iterable):\n\n for f in funcs:\n iterable = map(f, iterable)\n\n return iterable",
"def filterRows(function, rows):\n return [y for y in rows if function(y)]",
"def cofilter(function, iterator):\n results = []\n\n def checkFilter(notfiltered, item):\n if notfiltered == True:\n results.append(item)\n\n def dofilter(item):\n d = maybeDeferred(function, item)\n d.addCallback(checkFilter, item)\n return d\n\n d = _CoFunCaller(resultCollector=dofilter).coiterate(iterator)\n d.addCallback(lambda _: results)\n return d",
"def filter(self, func=bool, *args, **kwargs):\n return self.apply(func, *args, **kwargs).apply(bool) == True",
"def filter(self, func: Callable[[Tuple[keyType, valueType]], Tuple[keyType, valueType]]) -> List[Tuple[keyType, valueType]]:\n result = []\n it = self.__iter__()\n while True:\n try:\n key, value = next(it)\n pair = (key, value)\n tmp = func(pair)\n if not (tmp is None):\n result.append(tmp)\n except StopIteration:\n break\n return result"
] | [
"0.7254669",
"0.70515805",
"0.6633785",
"0.6542129",
"0.65260065",
"0.6445253",
"0.6422799",
"0.6301492",
"0.628052",
"0.6261402",
"0.6222613",
"0.62073445",
"0.6094451",
"0.60292643",
"0.60198414",
"0.5971072",
"0.59694105",
"0.5959575",
"0.5932348",
"0.5888483",
"0.5888037",
"0.5815205",
"0.5801351",
"0.57704926",
"0.5685822",
"0.5672363",
"0.5665609",
"0.5664323",
"0.5649664",
"0.5608547"
] | 0.781177 | 0 |
report a runtime error | def runtime_error(self, error: 'LoxRuntimeError'):
output = f'{error.get_message()}{os.linesep}[line {error.token.line}]'
print(output, file=sys.stderr)
    self.had_runtime_error = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error():\r\n raise RuntimeError('admin ticket generator at your service')",
"def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])",
"def serious_error(self, e):\n pass",
"def error(self):\n pass",
"def unexpected_error(self, exception):",
"def error(error):\n print(\"Error\", error)\n erlport.erlang.cast(this.erlang_pid, (erlport.erlterms.Atom(b'python_error'), str(error)))",
"def error(self):\n ...",
"def _error(self, *args, **kwargs):\n print(\"[{}]\".format(self.type), *args, file=sys.stderr, **kwargs)\n sys.exit(1)",
"def report_unexpected_exception(self, *args, **kwargs):\n pass",
"def getCompilerError():",
"def error(self, error):\n pass",
"def error_occured(self) -> None:\r\n \r\n warnings.warn(\r\n '''An Error has occured when processing this photo!\r\n The plants are too emerged in some places to analyze.''',\r\n RuntimeWarning)",
"def error(msg):\n print 'ERROR: %s' % msg\n sys.exit(1)",
"def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()",
"def _RaiseFatal(cls, sub, subargs, errorcode, *args):\n ScriptForge.InvokeSimpleScript('ScriptForge.SF_Utils._EnterFunction', sub, subargs)\n cls.RaiseFatal(errorcode, *args)\n raise RuntimeError(\"The execution of the method '\" + sub.split('.')[-1] + \"' failed. Execution stops.\")",
"def raise_error(Err):\n raise Err()",
"def handle_error():\n print \"An error occurred. Trace:\\n\"\n traceback.print_exc()",
"def error(msg):\n\n raise Exception(msg)",
"def test_init_err_size(self):\n with self.assertRaises(InitializationException):\n pyint = Interpreter(size=INVALID_LOOP)",
"def raise_(err):\n raise err",
"def provoke_and_handle_TypeError():\n try:\n print(\"loetungdusohn\" + 3)\n except TypeError as te:\n print(f\"Sorry! {te}\")",
"def indicate_error(self):\n pass",
"def error(msg: str) -> None:\n print('ERROR: {msg}'.format(msg=msg))\n sys.exit()",
"def ReportError(text):\n raise IOError(text)",
"def error(self, message, location):\n raise CompilerError(message, loc=location)",
"def errFunc(runType):\n logger.error('Execution type not recognized! {}'.format(runType))\n raise InvalidExecutionType('{} is not a valid command'.format(runType))",
"def _check(error: int) -> None:\n if error < 0:\n raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode())",
"def test_does_not_crash(self):\n py_function(6)",
"def error(ctx, flow):\n ctx.log(\"error\")",
"def error(self, msg, *args, **kwargs):\n pass"
] | [
"0.68166107",
"0.679111",
"0.6713061",
"0.64715713",
"0.6364697",
"0.63401216",
"0.6333252",
"0.6314151",
"0.62917364",
"0.6244229",
"0.61968243",
"0.6178209",
"0.6150258",
"0.61317104",
"0.6127837",
"0.6092866",
"0.6092197",
"0.60902953",
"0.60429615",
"0.6041189",
"0.6018859",
"0.60057044",
"0.599984",
"0.5996369",
"0.5976035",
"0.5958245",
"0.5944744",
"0.59354067",
"0.59213483",
"0.5920147"
] | 0.7311913 | 0 |
Note: whether SMTPTimeoutError or SMTPConnectError is raised here depends on processing time. | async def test_timeout_error_with_no_server(event_loop):
client = SMTP(hostname="127.0.0.1", port=65534, loop=event_loop)
with pytest.raises(SMTPTimeoutError):
await client.connect(timeout=0.000000001) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _connect_smtp(self):\n smtp = None\n try:\n smtp = smtplib.SMTP(self.servername, timeout = self.timeout)\n except smtplib.SMTPException as err:\n log.critical('smtp service at {} is not currently available'.format(self.servername))\n log.critical(err)\n except Exception as err:\n log.critical('smtp other error {} is not currently available'.format(self.servername))\n log.critical(err)\n \n if self.auth is not None:\n try:\n smtp.login(self.auth[0], self.auth[1])\n except smtplib.SMTPException as err:\n log.warn('smtp service login error for {}'.format(self.servername))\n log.warn(err)\n return smtp",
"def Check_SMTP(name, my_ip):\n\n if nslookup(name)[0] != 0:\n add_info (name, SMTP_SERVER, \"cannot resolve SMTP server\")\n return 1\n if ping_machine(name) != 0:\n add_info(name, SMTP_SERVER, \"cannot ping SMTP server\")\n return 2\n\n status, err = tryconnect(name, SMTP_PORT)\n if status == 1 or status == 2:\n add_info(name, SMTP_SERVER, err)\n if status == 1:\n # if we time'd out, things can still be OK (say reverse DNS problems)\n # so return only an error if no timeout\n return 3\n\n stat, out = port_talker.TCPTalk(name, SMTP_PORT,\n 60, # timeout (>30sec for messed up servers)\n \"HELO \" + my_ip + \"\\r\\nQUIT\\r\\n\",\n None, # terminator\n 1024, # max len\n 1) # use external resolver\n\n # expected answer:\n #220 'mail.forobozz.com' ESMTP\n #250 mail.frobozz.com Hello grue.frobozz.com [192.168.0.21], pleased to meet ya\n #221 mail.frobozz.com closing connection\n\n # Each line can be repeated several times, so we check that all codes appear\n # and that no other codes appear\n codes = map(lambda x: x[:4], string.split(out, '\\n'))\n valid_codes = ('220 ', '250 ', '221 ', '')\n try:\n for code in codes:\n assert(code in valid_codes)\n for valid in valid_codes:\n assert(valid in codes)\n except:\n # If we wanted, we could check whether reverse DNS lookup is not working.\n # This would be the most likely explanation\n add_info(name, SMTP_SERVER, \"cannot HELO SMTP server\")\n return 4\n add_info(name, SMTP_SERVER, \"OK\")\n return 0",
"def send_mail_when_failed(self, body):\r\n pass",
"def EmailError(error_messages, to_address, from_address, smtp_server, subject):\n error_string = '\\n'.join(error_messages)\n if( None not in [error_string, to_address, from_address, smtp_server] ):\n email_message = MIMEMultipart('alternative')\n email_message['Subject'] = subject\n email_message['From'] = from_address\n email_message['To'] = to_address\n\n html_message = ''\n for error in error_messages:\n html_message = '%s<br/><h4>%s</h4><p>%s</p>' % (html_message, \n error.split('\\n')[0], error.lstrip('%s\\n' % \\\n error.split('\\n')[0]).replace('\\n','<br/>'))\n html_message = '<html><head></head><body>%s</body></html>' % html_message\n\n email_message.attach(MIMEText(error_string, 'plain'))\n email_message.attach(MIMEText(html_message, 'html'))\n\n smtp_handle = None\n try:\n smtp_handle = smtplib.SMTP(smtp_server)\n except socket.gaierror:\n print '%s is an invalid smtp server.' % smtp_server\n except smtplib.SMTPConnectError:\n print 'Failed to connect to %s.' % smtp_server\n if( smtp_handle is not None ):\n try:\n smtp_handle.sendmail(from_address,[to_address],\n email_message.as_string())\n except smtplib.SMTPRecipientsRefused:\n print '%s is an invalid email address.' % to_address\n smtp_handle.quit()",
"async def test_connect_error_with_no_server(event_loop):\n client = SMTP(hostname=\"127.0.0.1\", port=65534, loop=event_loop)\n\n with pytest.raises(SMTPConnectError):\n await client.connect(timeout=0.1)",
"def test_failed_email(self):\n self.assertEqual(send_email(\"testtestcom\", \"test\", \"test\"), 'There was an error sending')",
"def test_smtp(self):\n self._endpointServerTest(\"smtp\", protocols.SMTPFactory)",
"def connect(smtp_url: str, timeout: Optional[float] = None) -> smtplib.SMTP:\n return smtplib.SMTP(smtp_url, timeout=timeout)",
"def SendTimeout(self) -> int:",
"def SendTimeout(self) -> int:",
"async def test_expn_error(\n smtp_client: SMTP, smtpd_server: asyncio.AbstractServer\n) -> None:\n async with smtp_client:\n with pytest.raises(SMTPResponseException):\n await smtp_client.expn(\"a-list\")",
"def test_conn_err_retry(self, retry, get_conn):\r\n get_conn.return_value.open.side_effect = SMTPConnectError(424, \"Bad Connection\")\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPConnectError)",
"def test_endpointSMTP(self):\n self._endpointTest(\"smtp\")",
"def postprocess():\n if ERRORS:\n address = '[email protected]'\n body = '\\n\\n'.join( ERRORS )\n msg = create_message( body, address )\n send_mail( msg, address )",
"def send_mail_raise_smtp(messages):\n raise SMTPRecipientsRefused(recipients=messages[0].recipients())",
"def send_error_email(receiver_email, subject, body):\n\n sender_email = \"[email protected]\"\n\n with open(CWD(\"mailcreds.txt\"), \"r\") as file:\n password = file.read()\n\n # Create a multipart message and set headers\n message = MIMEMultipart()\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n message[\"Subject\"] = subject\n\n # Add body to email\n message.attach(MIMEText(body, \"plain\"))\n\n text = message.as_string()\n\n # Log in to server using secure context and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, text)",
"def connection_timedout(self):\n\n try:\n message = (\n f\"Connection to {self.channel} timed out, was the channel\"\n \" spelt correctly and is port 6667 open?\\n\"\n )\n self.send_to_outputfield(message)\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Exception : \")\n self.close()",
"def test_data_err_retry(self, retry, get_conn):\r\n get_conn.return_value.send_messages.side_effect = SMTPDataError(455, \"Throttling: Sending rate exceeded\")\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # Test that we retry upon hitting a 4xx error\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPDataError)",
"def send_failed(self, message, exc=None):\n with self.app.peers_lock:\n self.declare_no_connection(self.app.peers[message.to])\n return None",
"def __init__(self, smtp_server, smtp_user, smtp_password,\n smtp_port=25, is_with_tls=False):\n self.smtp_server = smtp_server\n self.smtp_port = smtp_port\n self.smtp_user = smtp_user\n self.smtp_password = smtp_password\n self.is_with_tls = is_with_tls",
"def send_email(self, text):\n msg_text = MIMEText(text)\n msg_text['Subject'] = '[WebSite Watchdog] Failure'\n msg_text['From'] = self.from_email\n msg_text['To'] = self.to_email\n \n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(self.from_email, [self.to_email], msg_text.as_string())\n s.quit()",
"def send(self):\n answers = dns.resolver.query(self.domain, 'MX')\n try:\n for answer in answers:\n ex = answer.exchange.to_text()\n server = smtplib.SMTP(ex)\n server.set_debuglevel(self.verbose)\n server.sendmail(self.sender, [self.recipient], self.message.as_string())\n server.quit()\n except OSError as e:\n if e.errno is errno.ENETUNREACH:\n print('Looks like port 25 is blocked')\n raise e",
"def _setup_smtp_server(self):\n\n # Init; Attempt to use external first\n target = 'external'\n\n # ============================================================\n # Attempt (1): External mail server\n # ============================================================\n\n if target == 'external':\n # Assume it's a machine external to company network.\n # We will use an external email account that requires a login.\n\n # msg = f'_setup_smtp_server(): Attempting to launch session as external machine...'\n # fancy_print(msg, fg=COMMUNICATOR_MSG_COLOR, bold=True)\n\n self.host = EXTERNAL_HOST\n self.port = EXTERNAL_PORT\n self.sender_address = EXTERNAL_USER_NAME\n self.sender_pwd = EXTERNAL_USER_PWD\n\n try:\n sess = smtplib.SMTP(host=self.host, port=self.port)\n sess.starttls()\n sess.login(self.sender_address, self.sender_pwd)\n return sess\n except:\n target = 'internal'\n\n # ============================================================\n # Attempt (2): Company internal mail server\n # ============================================================\n\n if target == 'internal':\n # Assume machine is internal to company network.\n # Current user should already be authenticated.\n\n # msg = f'_setup_smtp_server(): Attempting to launch session as internal Cooper machine...'\n # fancy_print(msg, fg=COMMUNICATOR_MSG_COLOR, bold=True)\n\n self.host = INTERNAL_HOST\n self.port = INTERNAL_PORT\n self.sender_address = INTERNAL_USER_NAME\n self.sender_pwd = INTERNAL_USER_PWD\n\n try:\n sess = smtplib.SMTP(self.host)\n return sess\n except:\n msg = f'COMMUNICATOR WARNING: Could not establish SMTP connection. Check configuration.'\n fancy_print(msg, fg=COMMUNICATOR_WARN_COLOR)\n\n msg = f'Could not establish SMTP connection'\n raise ConnectionError(msg)",
"def test_send_email_with_incomplete_payload(app, session, email_msg):\n # TEST\n with pytest.raises(QueueException) as excinfo:\n worker.send_email(email_msg, None)\n\n assert 'Unsuccessful sending email' in str(excinfo)",
"def tcp_error(self, flow: mitmproxy.tcp.TCPFlow):",
"async def connect_async(smtp_url,\n timeout: Optional[float] = None) -> aiosmtplib.SMTP:\n return await aiosmtplib.SMTP(smtp_url, timeout=timeout)",
"def check_smtp_server_connection(self):\n try:\n connected = True\n\n while not self.config:\n time.sleep(1)\n\n # Create SMTP server and handshake\n server = smtplib.SMTP(self.config.smtp_host + ':' + self.config.smtp_port)\n server.connect(self.config.smtp_host + ':' + self.config.smtp_port)\n\n self.logger.info(MODULE_NAME + '::check_smtp_server_connection::Successfully '\n 'connected to the configured SMTP server and port at: ' + self.config.smtp_host + ':' + self.config.smtp_port)\n\n server.quit()\n\n return connected\n\n except Exception as e:\n self.logger.error(MODULE_NAME + '::check_smtp_server_connection()::The following '\n 'unhandled exception occurred: ' + e.message)\n connected = False\n return connected",
"def _test_retry_after_unlimited_retry_error(self, exception):\r\n num_emails = 8\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n # Note that because celery in eager mode will call retries synchronously,\r\n # each retry will increase the stack depth. It turns out that there is a\r\n # maximum depth at which a RuntimeError is raised (\"maximum recursion depth\r\n # exceeded\"). The maximum recursion depth is 90, so\r\n # num_emails * expected_retries < 90.\r\n expected_retries = 10\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Cycle through N throttling errors followed by a success.\r\n get_conn.return_value.send_messages.side_effect = cycle(\r\n chain(repeat(exception, expected_retries), [None])\r\n )\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_nomax=(expected_retries * num_emails)\r\n )",
"def raise_timeout_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.Timeout",
"def raise_timeout_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.Timeout"
] | [
"0.67359835",
"0.6189997",
"0.6074653",
"0.60194963",
"0.59302324",
"0.58749896",
"0.58332354",
"0.58151186",
"0.58065194",
"0.58065194",
"0.57931924",
"0.57492477",
"0.5701048",
"0.5651594",
"0.56239885",
"0.5590371",
"0.5548968",
"0.54814523",
"0.545041",
"0.5434488",
"0.5432996",
"0.54319906",
"0.5427672",
"0.5393198",
"0.53886634",
"0.5386925",
"0.52865934",
"0.5262578",
"0.5209261",
"0.5209261"
] | 0.6397967 | 1 |
Register the device with the provisioning service. This is a synchronous call, meaning that this function will not return until the registration process has completed successfully or the attempt has resulted in a failure. Before returning the client will also disconnect from the Hub. If a registration attempt is made while a previous registration is in progress it may throw an error. | def register(self):
logger.info("Registering with Hub...")
register_complete = Event()
def on_register_complete(result=None, error=None):
# This could be a failed/successful registration result from the HUB
# or a error from polling machine. Response should be given appropriately
if result is not None:
if result.status == "assigned":
logger.info("Successfully registered with Hub")
else: # There be other statuses
logger.error("Failed registering with Hub")
if error is not None: # This can only happen when the polling machine runs into error
logger.info(error)
register_complete.set()
self._polling_machine.register(callback=on_register_complete)
register_complete.wait() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_device():\n payload = request.get_json()\n return _register_device(payload)",
"def RegisterDeviceAndSendResponse(self, msg, username):\n device_id = self.GetUniqueParam('deviceid')\n if not device_id:\n return (400, 'Missing device identifier')\n\n token_info = self.server.RegisterDevice(\n device_id, msg.machine_id, msg.type, username)\n\n # Send back the reply.\n response = dm.DeviceManagementResponse()\n response.register_response.device_management_token = (\n token_info['device_token'])\n response.register_response.machine_name = token_info['machine_name']\n response.register_response.enrollment_type = token_info['enrollment_mode']\n\n return (200, response)",
"def register_device(self, expiry: int) -> str:\n # pylint: disable=c0103\n sr = self._id_scope + \"%2Fregistrations%2F\" + self._device_id\n sig_no_encode = DeviceRegistration.compute_derived_symmetric_key(self._key, sr + \"\\n\" + str(expiry))\n sig_encoded = parse.quote(sig_no_encode, \"~()*!.'\")\n auth_string = \"SharedAccessSignature sr=\" + sr + \"&sig=\" + sig_encoded + \"&se=\" + str(expiry) + \"&skn=registration\"\n\n headers = {\n \"content-type\": \"application/json; charset=utf-8\",\n \"user-agent\": \"iot-central-client/1.0\",\n \"Accept\": \"*/*\",\n }\n\n if auth_string is not None:\n headers[\"authorization\"] = auth_string\n\n body = {\"registrationId\": self._device_id}\n\n uri = \"https://%s/%s/registrations/%s/register?api-version=%s\" % (\n self._dps_endpoint,\n self._id_scope,\n self._device_id,\n self._dps_api_version,\n )\n target = parse.urlparse(uri)\n\n self._logger.info(\"Connecting...\")\n self._logger.info(\"URL: \" + target.geturl())\n self._logger.info(\"body: \" + json.dumps(body))\n print(\"headers: \" + json.dumps(headers))\n\n response = self.__run_put_request_with_retry(target.geturl(), body, headers)\n\n data = None\n try:\n data = response.json()\n except Exception as e:\n err = \"ERROR: non JSON is received from \" + self._dps_endpoint + \" => \" + str(response) + \" .. message : \" + str(e)\n self._logger.error(err)\n raise DeviceRegistrationError(err)\n\n if \"errorCode\" in data:\n err = \"DPS => \" + str(data)\n self._logger.error(err)\n raise DeviceRegistrationError(err)\n\n time.sleep(1)\n return self._loop_assign(data[\"operationId\"], headers)",
"def register(self):\n if self.hub.is_connected:\n if self._private_key is not None:\n raise SAMPClientError(\"Client already registered\")\n\n result = self.hub.register(self.hub.lockfile[\"samp.secret\"])\n\n if result[\"samp.self-id\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.self-id was not set by the hub.\"\n )\n\n if result[\"samp.private-key\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.private-key was not set by the hub.\"\n )\n\n self._public_id = result[\"samp.self-id\"]\n self._private_key = result[\"samp.private-key\"]\n self._hub_id = result[\"samp.hub-id\"]\n\n if self._callable:\n self._set_xmlrpc_callback()\n self._declare_subscriptions()\n\n if self._metadata != {}:\n self.declare_metadata()\n\n self._is_registered = True\n\n else:\n raise SAMPClientError(\n \"Unable to register to the SAMP Hub. Hub proxy not connected.\"\n )",
"def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))",
"def register():\n user_gender = request.form['user_gender']\n user_age = request.form['user_age']\n sensors = {\n 'accelerometer': request.form.get('accelerometer', False),\n 'ambient_temperature': request.form.get('ambient_temperature', False),\n 'gravity': request.form.get('gravity', False),\n 'gyroscope': request.form.get('gyroscope', False),\n 'light': request.form.get('light', False),\n 'linear_accelerometer': request.form.get('linear_accelerometer', False),\n 'magnetic_field': request.form.get('magnetic_field', False),\n 'orientation': request.form.get('orientation', False),\n 'pressure': request.form.get('pressure', False),\n 'proximity': request.form.get('proximity', False),\n 'relative_humidity': request.form.get('relative_humidity', False),\n 'rotation_vector': request.form.get('rotation_vector', False),\n 'temperature': request.form.get('temperature', False)\n }\n\n device, token = create_subject(user_gender, user_age, sensors)\n\n response = jsonify(status=\"Register Success\", message=\"Your device has been registered.\",\n device=device, token=token)\n\n response.status_code = 201\n\n return response",
"async def _perform_register(self):\n data = {\"username\": self.user, \"password\": self.password}\n return await self._perform_request(\"register\", data, lambda r: r.text())",
"def ProcessRegister(self, msg):\n policy = self.server.GetPolicies()\n # Check the auth token and device ID.\n auth = self.CheckGoogleLogin()\n if not auth:\n return (403, 'No authorization')\n\n if ('managed_users' not in policy):\n return (500, 'Error in config - no managed users')\n username = self.server.ResolveUser(auth)\n if ('*' not in policy['managed_users'] and\n username not in policy['managed_users']):\n return (403, 'Unmanaged')\n\n return self.RegisterDeviceAndSendResponse(msg, username)",
"def RegisterDevice(self, device_id, machine_id, type, username):\n dmtoken_chars = []\n while len(dmtoken_chars) < 32:\n dmtoken_chars.append(random.choice('0123456789abcdef'))\n dmtoken = ''.join(dmtoken_chars)\n allowed_policy_types = {\n dm.DeviceRegisterRequest.BROWSER: [\n 'google/chrome/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.USER: [\n 'google/chromeos/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.DEVICE: [\n 'google/chromeos/device',\n 'google/chromeos/publicaccount',\n 'google/chrome/extension',\n 'google/chromeos/signinextension'\n ],\n dm.DeviceRegisterRequest.ANDROID_BROWSER: [\n 'google/android/user'\n ],\n dm.DeviceRegisterRequest.TT: ['google/chromeos/user',\n 'google/chrome/user'],\n }\n if machine_id in KIOSK_MACHINE_IDS:\n enrollment_mode = dm.DeviceRegisterResponse.RETAIL\n else:\n enrollment_mode = dm.DeviceRegisterResponse.ENTERPRISE\n self._registered_tokens[dmtoken] = {\n 'device_id': device_id,\n 'device_token': dmtoken,\n 'allowed_policy_types': allowed_policy_types[type],\n 'machine_name': 'chromeos-' + machine_id,\n 'machine_id': machine_id,\n 'enrollment_mode': enrollment_mode,\n 'username': username,\n }\n self.WriteClientState()\n return self._registered_tokens[dmtoken]",
"def register_device(project_id, credentials, device_model_id, device_id):\n base_url = '/'.join([DEVICE_API_URL, 'projects', project_id, 'devices'])\n device_url = '/'.join([base_url, device_id])\n session = google.auth.transport.requests.AuthorizedSession(credentials)\n r = session.get(device_url)\n print(device_url, r.status_code)\n if r.status_code == 404:\n print('Registering....', end='', flush=True)\n r = session.post(base_url, data=json.dumps({\n 'id': device_id,\n 'model_id': device_model_id,\n 'client_type': 'SDK_LIBRARY'\n }))\n if r.status_code != 200:\n raise Exception('failed to register device: ' + r.text)\n print('\\rDevice registered.')",
"def registerDevice(device, device_config):\n raise NotImplementedError(\"All inherited classes of DeviceRegisterer must implement registerDevice.\")",
"def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))",
"def test_register_device(self):\n resp = self.client.post(reverse('ios-notifications-device-create'),\n {'token': self.device_token,\n 'service': self.service.id})\n\n self.assertEqual(resp.status_code, 201)\n self.assertTrue(isinstance(resp, JSONResponse))\n content = resp.content\n device_json = json.loads(content)\n self.assertEqual(device_json.get('model'), 'ios_notifications.device')",
"def register_user(self):\n response = self.client.post(self.register_url, self.register_data, format='json')\n return response",
"def register(self, voice=False):\n payload = {\"type\": \"register\", \"username\": self.username, \"voice\": voice}\n self._send_command(payload)",
"def register(self, dbus_path, uuid, codec, capabilities):\n self._media_proxy.proxy.RegisterEndpoint(\n dbus_path,\n {\n \"UUID\": uuid,\n \"Codec\": Byte(codec),\n \"Capabilities\": Array(capabilities, signature=\"y\")\n })",
"def register(self, device_token, alias=None, tags=None, badge=None):\n url = DEVICE_TOKEN_URL + device_token\n payload = {}\n if alias is not None:\n payload['alias'] = alias\n if tags is not None:\n payload['tags'] = tags\n if badge is not None:\n payload['badge'] = badge\n if payload:\n body = json.dumps(payload)\n content_type = 'application/json'\n else:\n body = ''\n content_type = None\n\n status, response = self._request('PUT', body, url, content_type)\n if not status in (200, 201):\n raise AirshipFailure(status, response)\n return status == 201",
"def RegisterProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_device_registration(self):\n sensor = self._get_dummy_sensor()\n responses = []\n sensor.set_response_callback(func=lambda response: responses.append(response))\n sensor.register()\n time.sleep(wait_seconds)\n for response in responses:\n print(json.loads(response.content.decode()))\n assert len(responses) > 0\n assert json.loads(responses[0].content.decode())['module_name'] == 'test_get_sensor'\n sensor.stopped.set()",
"def attempt_to_register(self, message: Message):\n\t\tlogger.info(\"Attempting to register client.\")\n\n\t\tsuccessful_parse = re.match(r'\\/regi (.{1,30})', message.body)\n\n\t\tif successful_parse and self.validate_name(successful_parse.group(1)):\n\t\t\tlogger.info(\"Client successfully registered.\")\n\t\t\tself.registry.register(successful_parse.group(1), message.sender)\n\t\telse:\n\t\t\tlogger.info(\"Client not registered\") # Ignore the message",
"def connect(self, register = False, on_success=None, on_fail=None):\n JabberClient.connect(self, register)\n if register:\n s = self.stream\n s.registration_callback = self.process_registration_form\n s.registration_error_callback = on_fail\n s.registration_success_callback = lambda: (self.disconnect(), on_success())",
"def register():\n response = jsonify(msg='unauthorized'), 400\n if request.method == 'POST' and request.get_json():\n data = request.get_json()\n valid = validator.user_registration(data)\n\n if valid['status']:\n ecomap_user.register(data['first_name'],\n data['last_name'],\n data['email'],\n data['password'])\n msg = 'added %s %s' % (data['first_name'],\n data['last_name'])\n response = jsonify({'status_message': msg}), 201\n else:\n response = Response(json.dumps(valid),\n mimetype='application/json'), 400\n return response",
"def ProcessCertBasedRegister(self, signed_msg):\n # Unwrap the request\n try:\n req = self.UnwrapCertificateBasedDeviceRegistrationData(\n signed_msg.signed_request)\n except (IOError):\n return(400, 'Invalid request')\n\n # TODO(drcrash): Check the certificate itself.\n if req.certificate_type != dm.CertificateBasedDeviceRegistrationData.\\\n ENTERPRISE_ENROLLMENT_CERTIFICATE:\n return(403, 'Invalid certificate type for registration')\n\n register_req = req.device_register_request\n username = None\n\n if (register_req.flavor == dm.DeviceRegisterRequest.\n FLAVOR_ENROLLMENT_ATTESTATION_USB_ENROLLMENT):\n enrollment_token = self.CheckEnrollmentToken()\n policy = self.server.GetPolicies()\n if not enrollment_token:\n return (401, 'Missing enrollment token.')\n\n if ((not policy['token_enrollment']) or\n (not policy['token_enrollment']['token']) or\n (not policy['token_enrollment']['username'])):\n return (500, 'Error in config - no token-based enrollment')\n if policy['token_enrollment']['token'] != enrollment_token:\n return (403, 'Invalid enrollment token')\n username = policy['token_enrollment']['username']\n\n return self.RegisterDeviceAndSendResponse(register_req, username)",
"def registration(self):\n response = self.app.get(\"/registration\")\n self.assertTrue(response.status_code, 200)\"\"\"\"\"\"",
"def async_register_device(self, device):\n self._entities.append(device)\n\n @asyncio.coroutine\n def async_shutdown(event):\n \"\"\"Stop ffmpeg process.\"\"\"\n yield from device.async_stop_ffmpeg()\n\n self.hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_STOP, async_shutdown)\n\n # start on startup\n if device.initial_state:\n @asyncio.coroutine\n def async_start(event):\n \"\"\"Start ffmpeg process.\"\"\"\n yield from device.async_start_ffmpeg()\n yield from device.async_update_ha_state()\n\n self.hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_START, async_start)",
"def register():\n result = register_helper(User)\n return jsonify(result[0]), result[1]",
"def users_register(self):\n content = request.form\n if not USERS_REGISTER_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug((messages.MISSING_FIELDS_ERROR % (USERS_REGISTER_MANDATORY_FIELDS - set(content.keys()))))\n return messages.ERROR_JSON % (\n messages.MISSING_FIELDS_ERROR % (USERS_REGISTER_MANDATORY_FIELDS - set(content.keys()))), 400\n photo = None\n if 'photo' in request.files:\n photo = Photo.from_bytes(request.files['photo'].stream)\n try:\n self.auth_server.user_register(email=content[\"email\"], fullname=content[\"fullname\"],\n phone_number=content[\"phone_number\"], photo=photo,\n plain_password=content[\"password\"])\n except UserAlreadyRegisteredError:\n self.logger.debug(messages.USER_ALREADY_REGISTERED_MESSAGE % content[\"email\"])\n return messages.ERROR_JSON % messages.USER_ALREADY_REGISTERED_MESSAGE % content[\"email\"], 400\n except InvalidRegisterFieldError as e:\n self.logger.debug(str(e))\n return messages.ERROR_JSON % str(e), 400\n return messages.SUCCESS_JSON, 200",
"async def attempt_register(username, password, email, expect=201):\n\n response = await database_sync_to_async(UserCreateView.as_view())(factory.post('/register', {\"username\": username, \"password\": password, \"email\": email}))\n assert response.status_code == expect",
"def registerServiceAsync(self, uri, identity=None, correlationId=None,\n options=None):\n if correlationId is None:\n correlationId = CorrelationId()\n if options is None:\n options = ServiceRegistrationOptions()\n\n _ExceptionUtil.raiseOnError(\n internals.blpapi_ProviderSession_registerServiceAsync(\n self.__handle,\n uri,\n get_handle(identity),\n get_handle(correlationId),\n get_handle(options)\n ))\n\n return correlationId",
"def post(self):\n data = request.json\n\n register(data)\n return \"User Successfully Registered\", 200"
] | [
"0.7048597",
"0.6574808",
"0.63383627",
"0.6312321",
"0.6306717",
"0.6257553",
"0.6199068",
"0.61455584",
"0.60931623",
"0.60073423",
"0.5975581",
"0.5859668",
"0.57837015",
"0.57369715",
"0.57089573",
"0.5657908",
"0.5651063",
"0.56088966",
"0.55459535",
"0.5511144",
"0.54968065",
"0.5454494",
"0.5407802",
"0.5407348",
"0.54046065",
"0.53984326",
"0.5394558",
"0.5392012",
"0.53647506",
"0.534053"
] | 0.7058231 | 0 |
This is a synchronous call, meaning that this function will not return until the cancellation process has completed successfully or the attempt has resulted in a failure. Before returning the client will also disconnect from the Hub. In case there is no registration in process it will throw an error as there is no registration process to cancel. | def cancel(self):
logger.info("Cancelling the current registration process")
cancel_complete = Event()
def on_cancel_complete():
cancel_complete.set()
logger.info("Successfully cancelled the current registration process")
self._polling_machine.cancel(callback=on_cancel_complete)
cancel_complete.wait() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disconnect(self):\n if self.is_connected:\n try:\n self.client.unregister()\n finally:\n if self.client.is_running:\n self.client.stop()\n self.hub.disconnect()",
"async def async_cancel(self):\n raise NotImplementedError",
"def cancel(self) -> asyncio.Future:\n pass # pragma: no cover",
"def test_cancel_sync_handle_call_during_execution(serve_instance):\n running_signal_actor = SignalActor.remote()\n cancelled_signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n await running_signal_actor.send.remote()\n await send_signal_on_cancellation(cancelled_signal_actor)\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n # Send a request and wait for it to start executing.\n r = h.remote()\n ray.get(running_signal_actor.wait.remote(), timeout=10)\n\n # Cancel it and verify that it is cancelled via signal.\n r.cancel()\n ray.get(cancelled_signal_actor.wait.remote(), timeout=10)\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r.result()",
"def register(self):\n if self.hub.is_connected:\n if self._private_key is not None:\n raise SAMPClientError(\"Client already registered\")\n\n result = self.hub.register(self.hub.lockfile[\"samp.secret\"])\n\n if result[\"samp.self-id\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.self-id was not set by the hub.\"\n )\n\n if result[\"samp.private-key\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.private-key was not set by the hub.\"\n )\n\n self._public_id = result[\"samp.self-id\"]\n self._private_key = result[\"samp.private-key\"]\n self._hub_id = result[\"samp.hub-id\"]\n\n if self._callable:\n self._set_xmlrpc_callback()\n self._declare_subscriptions()\n\n if self._metadata != {}:\n self.declare_metadata()\n\n self._is_registered = True\n\n else:\n raise SAMPClientError(\n \"Unable to register to the SAMP Hub. Hub proxy not connected.\"\n )",
"def register_signal(client):\n def process_failure_signal(sender, task_id, *args, **kwargs):\n client.captureException(\n extra={\n 'task_id': task_id,\n 'task': sender,\n }\n )\n task_failure.connect(process_failure_signal, weak=False)",
"async def test_cancelled_task(self) -> None:\n cancelledMessage: str = \"I have been cancelled\"\n\n class CancelHandler(Handler):\n async def getName(self) -> str:\n raise asyncio.CancelledError(\n cancelledMessage\n ) # Pretend that this is some await call that gets cancelled\n\n async with TestServer(handler=CancelHandler(), ip=\"::1\") as sa:\n ip, port = sa.ip, sa.port\n assert ip and port\n async with get_client(TestingService, host=ip, port=port) as client:\n with self.assertRaises(ApplicationError) as ex:\n await client.getName()\n self.assertEqual(\n ex.exception.message,\n f\"Application was cancelled on the server with message: {cancelledMessage}\",\n )",
"async def disconnect(self):",
"def test_cancel_generator_sync(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n yield \"hi\"\n await send_signal_on_cancellation(signal_actor)\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True, stream=True)\n\n # Send a request and wait for it to start executing.\n g = h.remote()\n\n assert next(g) == \"hi\"\n\n # Cancel it and verify that it is cancelled via signal.\n g.cancel()\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n next(g)\n\n ray.get(signal_actor.wait.remote(), timeout=10)",
"async def cancel():\n await asyncio.get_running_loop().run_in_executor(None, cancel_inner)",
"def register(self):\n logger.info(\"Registering with Hub...\")\n register_complete = Event()\n\n def on_register_complete(result=None, error=None):\n # This could be a failed/successful registration result from the HUB\n # or a error from polling machine. Response should be given appropriately\n if result is not None:\n if result.status == \"assigned\":\n logger.info(\"Successfully registered with Hub\")\n else: # There be other statuses\n logger.error(\"Failed registering with Hub\")\n if error is not None: # This can only happen when the polling machine runs into error\n logger.info(error)\n\n register_complete.set()\n\n self._polling_machine.register(callback=on_register_complete)\n\n register_complete.wait()",
"def test_cancel_async_handle_call_during_execution(serve_instance):\n running_signal_actor = SignalActor.remote()\n cancelled_signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Downstream:\n async def __call__(self, *args):\n await running_signal_actor.send.remote()\n await send_signal_on_cancellation(cancelled_signal_actor)\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True)\n\n async def __call__(self, *args):\n # Send a request and wait for it to start executing.\n r = self._h.remote()\n await running_signal_actor.wait.remote()\n\n # Cancel it and verify that it is cancelled via signal.\n r.cancel()\n await cancelled_signal_actor.wait.remote()\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n await r\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n h.remote().result() # Would raise if test failed.",
"async def new_coro():\n try:\n await coro\n except asyncio.CancelledError:\n pass",
"async def close(self) -> None:\n logger.debug(\n f\"['{self._instance_connection_string}']: Waiting for _current to be cancelled\"\n )\n self._current.cancel()\n logger.debug(\n f\"['{self._instance_connection_string}']: Waiting for _next to be cancelled\"\n )\n self._next.cancel()\n logger.debug(\n f\"['{self._instance_connection_string}']: Waiting for _client_session to close\"\n )\n await self._client_session.close()",
"def test_cancel_sync_handle_call_during_assignment(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment(max_concurrent_queries=1)\n class Ingress:\n def __init__(self):\n self._num_requests = 0\n\n async def __call__(self, *args):\n self._num_requests += 1\n await signal_actor.wait.remote()\n\n return self._num_requests\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n # Send a request and wait for it to be ongoing so we know that further requests\n # will block trying to assign a replica.\n initial_response = h.remote()\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 1)\n\n # Make a second request, cancel it, and verify that it is cancelled.\n second_response = h.remote()\n second_response.cancel()\n with pytest.raises(concurrent.futures.CancelledError):\n second_response.result()\n\n # Now signal the initial request to finish and check that the second request\n # never reached the replica.\n ray.get(signal_actor.send.remote())\n assert initial_response.result() == 1\n for i in range(2, 12):\n assert h.remote().result() == i",
"def guiding_disconnect():\r\n try:\r\n app.guider.disconnect()\r\n return jsonify({\"status\": True})\r\n except Exception as e:\r\n return jsonify(\r\n {\"status\": False, \"error\": \"Failed disconnecting from guider: %s\" % e}\r\n )",
"def test_only_relevant_task_is_cancelled(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n await signal_actor.wait.remote()\n return \"ok\"\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n r1 = h.remote()\n r2 = h.remote()\n\n # Wait for both requests to be executing.\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 2)\n\n r1.cancel()\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r1.result()\n\n # Now signal r2 to run to completion and check that it wasn't cancelled.\n ray.get(signal_actor.send.remote())\n assert r2.result() == \"ok\"",
"def test_cancel_generator_async(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Downstream:\n async def __call__(self, *args):\n yield \"hi\"\n await send_signal_on_cancellation(signal_actor)\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True, stream=True)\n\n async def __call__(self, *args):\n # Send a request and wait for it to start executing.\n g = self._h.remote()\n assert await g.__anext__() == \"hi\"\n\n # Cancel it and verify that it is cancelled via signal.\n g.cancel()\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n assert await g.__anext__() == \"hi\"\n\n await signal_actor.wait.remote()\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n h.remote().result() # Would raise if test failed.",
"async def disconnect(self):\n self.client.close()",
"def registerServiceAsync(self, uri, identity=None, correlationId=None,\n options=None):\n if correlationId is None:\n correlationId = CorrelationId()\n if options is None:\n options = ServiceRegistrationOptions()\n\n _ExceptionUtil.raiseOnError(\n internals.blpapi_ProviderSession_registerServiceAsync(\n self.__handle,\n uri,\n get_handle(identity),\n get_handle(correlationId),\n get_handle(options)\n ))\n\n return correlationId",
"def attempt_to_register(self, message: Message):\n\t\tlogger.info(\"Attempting to register client.\")\n\n\t\tsuccessful_parse = re.match(r'\\/regi (.{1,30})', message.body)\n\n\t\tif successful_parse and self.validate_name(successful_parse.group(1)):\n\t\t\tlogger.info(\"Client successfully registered.\")\n\t\t\tself.registry.register(successful_parse.group(1), message.sender)\n\t\telse:\n\t\t\tlogger.info(\"Client not registered\") # Ignore the message",
"async def async_disconnect(self) -> None:\n\n def stop() -> None:\n \"\"\"Stop the MQTT client.\"\"\"\n # Do not disconnect, we want the broker to always publish will\n self._mqttc.loop_stop()\n\n def no_more_acks() -> bool:\n \"\"\"Return False if there are unprocessed ACKs.\"\"\"\n return not any(not op.is_set() for op in self._pending_operations.values())\n\n # stop waiting for any pending subscriptions\n await self._subscribe_debouncer.async_cleanup()\n # reset timeout to initial subscribe cooldown\n self._subscribe_debouncer.set_timeout(INITIAL_SUBSCRIBE_COOLDOWN)\n # stop the unsubscribe debouncer\n await self._unsubscribe_debouncer.async_cleanup()\n # make sure the unsubscribes are processed\n await self._async_perform_unsubscribes()\n\n # wait for ACKs to be processed\n async with self._pending_operations_condition:\n await self._pending_operations_condition.wait_for(no_more_acks)\n\n # stop the MQTT loop\n async with self._paho_lock:\n await self.hass.async_add_executor_job(stop)",
"async def disconnect_callback_async(self, excep):\r\n _LOGGER.debug(\" ........... attempting reconnection\")\r\n await self.service_panel_stop(excep)\r\n await self.service_panel_start(excep)",
"async def wait():\n try:\n await asyncio.get_running_loop().run_in_executor(None, wait_inner)\n except asyncio.CancelledError:\n await cancel()\n raise\n finally:\n kernel32.CloseHandle(timer)\n kernel32.CloseHandle(cancel_event)",
"async def on_disconnect(self) -> None:",
"def test_cancel_on_http_client_disconnect_during_assignment(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment(max_concurrent_queries=1)\n class Ingress:\n def __init__(self):\n self._num_requests = 0\n\n async def __call__(self, *args):\n self._num_requests += 1\n await signal_actor.wait.remote()\n\n return self._num_requests\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n # Send a request and wait for it to be ongoing so we know that further requests\n # will block trying to assign a replica.\n initial_response = h.remote()\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 1)\n\n # Intentionally time out on the client, causing it to disconnect.\n with pytest.raises(requests.exceptions.ReadTimeout):\n requests.get(\"http://localhost:8000\", timeout=0.5)\n\n # Now signal the initial request to finish and check that the request sent via HTTP\n # never reaches the replica.\n ray.get(signal_actor.send.remote())\n assert initial_response.result() == 1\n for i in range(2, 12):\n assert h.remote().result() == i",
"def connect_never_retry():\n try:\n messaging_service = MessagingService.builder().from_properties(boot.broker_properties()) \\\n .with_reconnection_retry_strategy(RetryStrategy.never_retry()).build()\n future = messaging_service.connect_async()\n\n return future.result()\n\n except PubSubPlusClientError as exception:\n raise exception\n\n finally:\n messaging_service.disconnect_async()",
"def test_out_of_band_task_is_not_cancelled(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Downstream:\n async def hi(self):\n await signal_actor.wait.remote()\n return \"ok\"\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True)\n self._out_of_band_req = self._h.hi.remote()\n\n async def __call__(self, *args):\n await self._h.hi.remote()\n\n async def get_out_of_band_response(self):\n return await self._out_of_band_req\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n\n # Send a request, wait for downstream request to start, and cancel it.\n r1 = h.remote()\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 2)\n\n r1.cancel()\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r1.result()\n\n # Now signal out of band request to run to completion and check that it wasn't\n # cancelled.\n ray.get(signal_actor.send.remote())\n assert h.get_out_of_band_response.remote().result() == \"ok\"",
"async def _disconnect(self):\n if self._heart_beat_task:\n self._heart_beat_task.cancel()\n try:\n await self._heart_beat_task\n except asyncio.CancelledError:\n pass\n self._heart_beat_task = None\n if self._response_handler_task:\n self._response_handler_task.cancel()\n try:\n await self._response_handler_task\n except asyncio.CancelledError:\n pass\n self._response_handler_task = None\n if self._command_queue_task:\n self._command_queue_task.cancel()\n try:\n await self._command_queue_task\n except asyncio.CancelledError:\n pass\n self._command_queue_task = None\n if self._expected_responses:\n self._expected_responses.cancel_tasks()\n if self._writer:\n self._writer.close()\n self._writer = None\n self._reader = None\n self._sequence = 0\n self._command_queue.clear()",
"def test_cancel_async_handle_call_during_assignment(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment(max_concurrent_queries=1)\n class Downstream:\n def __init__(self):\n self._num_requests = 0\n\n async def __call__(self, *args):\n self._num_requests += 1\n await signal_actor.wait.remote()\n\n return self._num_requests\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True)\n\n async def __call__(self, *args):\n # Send a request and wait for it to be ongoing so we know that further\n # requests will block trying to assign a replica.\n initial_response = self._h.remote()\n\n async def one_waiter():\n return await signal_actor.cur_num_waiters.remote() == 1\n\n await async_wait_for_condition(one_waiter)\n\n # Make a second request, cancel it, and verify that it is cancelled.\n second_response = self._h.remote()\n second_response.cancel()\n with pytest.raises(asyncio.CancelledError):\n await second_response\n\n # Now signal the initial request to finish and check that the second request\n # never reached the replica.\n await signal_actor.send.remote()\n assert await initial_response == 1\n for i in range(2, 12):\n assert await self._h.remote() == i\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n h.remote().result() # Would raise if test failed."
] | [
"0.561611",
"0.53545636",
"0.53502023",
"0.52480394",
"0.5240914",
"0.5077308",
"0.50643533",
"0.49984068",
"0.49921355",
"0.498722",
"0.49676874",
"0.49660704",
"0.49642637",
"0.49493033",
"0.49309194",
"0.49151906",
"0.4906097",
"0.4895857",
"0.48792404",
"0.48342264",
"0.47956735",
"0.4792095",
"0.4781637",
"0.47739178",
"0.47660327",
"0.4757601",
"0.46935606",
"0.46833223",
"0.4681466",
"0.46604437"
] | 0.6079265 | 0 |
Ask a yes/no/quit question via raw_input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits <Enter>. It must be "yes" (the default), "no", "quit" or None (meaning an answer is required of the user). The "answer" return value is one of "yes", "no" or "quit". | def query_yes_no_quit(question, default="yes"):
valid = {"yes":"yes", "y":"yes", "ye":"yes", "no":"no", "n":"no", "quit":"quit", "qui":"quit", "qu":"quit", "q":"quit"}
if default == None:
prompt = " [y/n/q] "
elif default == "yes":
prompt = " [Y/n/q] "
elif default == "no":
prompt = " [y/N/q] "
elif default == "quit":
prompt = " [y/n/Q] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes', 'no' or 'quit'.\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_yes_no_quit(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\",\n \"quit\":\"quit\", \"qui\":\"quit\", \"qu\":\"quit\", \"q\":\"quit\"}\n if default == None:\n prompt = \" [y/n/q] \"\n elif default == \"yes\":\n prompt = \" [Y/n/q] \"\n elif default == \"no\":\n prompt = \" [y/N/q] \"\n elif default == \"quit\":\n prompt = \" [y/n/Q] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes', 'no' or 'quit'.\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while 1:\n sys.stdout.write(question + prompt)\n if sys.version_info[0]==2:\n choice = raw_input().lower()\n elif sys.version_info[0]>2:\n choice = input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(self,question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\", \"no\":\"no\", \"n\":\"no\"}\n\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"no\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = { \"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError( _(\"invalid default answer:\") + \" '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write( _(\"Please respond with 'yes' or 'no' \") + \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \" \\\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\r\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n\tvalid = {\"yes\": True, \"y\": True, \"ye\": True,\n\t\t\t \"no\": False, \"n\": False}\n\tif default is None:\n\t\tprompt = \" [y/n] \"\n\telif default == \"yes\":\n\t\tprompt = \" [Y/n] \"\n\telif default == \"no\":\n\t\tprompt = \" [y/N] \"\n\telse:\n\t\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n\twhile True:\n\t\tsys.stdout.write(question + prompt)\n\t\tchoice = raw_input().lower()\n\t\tif default is not None and choice == '':\n\t\t\treturn valid[default]\n\t\telif choice in valid:\n\t\t\treturn valid[choice]\n\t\telse:\n\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no' \"\n\t\t\t\t\t\t\t \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default='no'):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None: \n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n print(question, end=' ')\n sys.stdout.write(prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid: \n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n return",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\r\n \"no\":False, \"n\":False}\r\n if default == None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\r\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\r\n \"no\":False, \"n\":False}\r\n if default == None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\r\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def query_yes_no(question, default=\"yes\"):\n\tvalid = {\"yes\": True, \"y\": True, \"ye\": True,\n\t\t\t \"no\": False, \"n\": False}\n\tif default is None:\n\t\tprompt = \" [y/n] \"\n\telif default == \"yes\":\n\t\tprompt = \" [Y/n] \"\n\telif default == \"no\":\n\t\tprompt = \" [y/N] \"\n\telse:\n\t\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n\twhile True:\n\t\t# sys.stdout.write(question + prompt)\n\t\tchoice = raw_input(question + prompt).lower()\n\t\t# print(choice)\n\t\tif default is not None and choice == '':\n\t\t\treturn valid[default]\n\t\telif choice in valid:\n\t\t\treturn valid[choice]\n\t\telse:\n\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no' \"\n\t\t\t\t\t\t\t \"(or 'y' or 'n').\\n\")"
] | [
"0.8437878",
"0.8223772",
"0.8212373",
"0.8156623",
"0.8149691",
"0.81355673",
"0.81233436",
"0.8122114",
"0.8120957",
"0.8120957",
"0.8120957",
"0.8120957",
"0.8120957",
"0.8120957",
"0.8120957",
"0.8120957",
"0.81196237",
"0.81192315",
"0.81192315",
"0.81192315",
"0.81192315",
"0.81192315",
"0.81175584",
"0.81165403",
"0.8115645",
"0.8111393",
"0.8111262",
"0.8111262",
"0.81042784",
"0.81026244"
] | 0.834716 | 1 |
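The record above and its negatives all target Python 2 (`raw_input`, string return values). For context, a minimal Python 3 adaptation of the same yes/no/quit prompt might look like the sketch below; it mirrors the record's prompt text and return values but is an illustration, not part of the dataset.

```python
import sys

def query_yes_no_quit(question, default="yes"):
    """Prompt for yes/no/quit on stdin; Python 3 sketch of the record above."""
    valid = {"yes": "yes", "y": "yes", "ye": "yes",
             "no": "no", "n": "no",
             "quit": "quit", "qui": "quit", "qu": "quit", "q": "quit"}
    prompts = {None: " [y/n/q] ", "yes": " [Y/n/q] ",
               "no": " [y/N/q] ", "quit": " [y/n/Q] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        sys.stdout.flush()
        choice = input().lower()  # input() replaces Python 2's raw_input()
        if default is not None and choice == '':
            return default
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes', 'no' or 'quit'.\n")
```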
Compute the mean absolute error on the test set given X, y, and the model parameter w. | def mean_absolute_error(w, X, y):
    # Predicted targets are X·w; the MAE is the mean absolute residual.
    # (_error is assumed to be a helper that returns the elementwise residuals.)
temp = np.dot(X, w)
err = np.mean(np.abs(_error(y, temp)))
return err | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mean_absolute_error(w, X, y):\n #####################################################\n # TODO 1: Fill in your code here #\n #####################################################\n if w is None:\n return None\n\n err = None\n yhat = np.dot(X , w)\n err = np.abs(np.subtract(yhat,y)).mean()\n return err",
"def mean_absolute_error(y, y_pred, w):\n return np.average(np.abs(y_pred - y), weights=w)",
"def evaluate_mean_tst_loss(model, X_test, y_test):\n\ttest_losses = []\n\tfor j in range(len(X_test)):\n\t\tsingle_mol_as_array = np.array(X_test[j:j+1])\n\t\tsingle_y_as_array = np.reshape(y_test[j], (1, -1))\n\t\tsloss = model.test_on_batch(single_mol_as_array, single_y_as_array)\n\t\ttest_losses.append(sloss)\n\n\tmean_test_loss = np.mean(test_losses)\n\treturn mean_test_loss",
"def mean_square_error(y, y_pred, w):\n return np.average(((y_pred - y) ** 2), weights=w)",
"def mean_absolute_error(y_true, y_pred, *, sample_weight=..., multioutput=...):\n ...",
"def mse(datax,datay,w):\n return np.mean((datax.dot(w.T)-datay)**2)",
"def mae(t, y):\n\treturn mean_absolute_error(t, y)",
"def root_mean_square_error(y, y_pred, w):\n return np.sqrt(np.average(((y_pred - y) ** 2), weights=w))",
"def calculate_mean_squared_error(self, X, y):\n mserror = 0\n results = self.predict(X)\n \n mserror = mean_squared_error(results,y)\n #print(error)\n return mserror",
"def MeanSquaredError(y_data, y_model):\n\tn = np.size(y_model)\n\tMSE = (1/n)*np.sum((y_data-y_model)**2)\n\n\treturn MSE",
"def mae(self, x_train, y_train):\n # number of training examples\n m = x_train.shape[0]\n error = 0\n for pair, r in zip(x_train, y_train):\n u, i = pair\n error += abs(r - np.dot(self.P[u], self.Q[i]))\n return error / m",
"def mae(y_true: np.ndarray, y_pred: np.ndarray):\n return np.mean(np.abs(y_true - y_pred))",
"def calculate_mean_squared_error(self, X, y):\r\n mean_squared_error = np.square(np.subtract(y,self.predict(X))).mean()\r\n return mean_squared_error",
"def mse(actual,expected):\n return np.mean(se(actual,expected))",
"def relative_mean_absolute_error(y_true, y_pred, sample_weight=None):\n diff = np.abs(y_pred - y_true) / np.abs(y_true)\n rmae = np.average(diff, weights=sample_weight, axis=0)\n return rmae",
"def eval_regression_model(model: NeuralNetwork, X_test: np.ndarray, y_test: np.ndarray):\n preds = model.forward(X_test)\n preds = preds.reshape(-1, 1)\n print(\"Mean absolute error: {:.2f}\".format(mae(preds, y_test)))\n print()\n print(\"Root mean squared error {:.2f}\".format(rmse(preds, y_test)))",
"def mean_absolute_percentage_error(y_true, y_pred, sample_weight=..., multioutput=...):\n ...",
"def compute_mean_squared_error(self, X_data, y_data):\n #assert isinstance(X_data, np.ndarray)\n #assert isinstance(y_data, np.ndarray)\n #assert X_data.shape[0] == y_data.shape[0]\n \n return np.square(np.subtract(X_data, y_data)).mean()",
"def mae(y_true, y_pred):\n return K.mean(K.abs(y_true - y_pred))",
"def mean_absolute_error(self):\n print('Mean absolute error regression loss: ' + str(mean_absolute_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))",
"def modelmean(self, model_params, this_data, this_suff_stat):\n pass",
"def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:\n return np.mean(np.power(y_true - y_pred, 2))",
"def mean_baseline(d, mode='test'):\n m = d.trainY.mean()\n y = getattr(d, mode + \"Y\")\n preds = np.array([m] * y.shape[0])\n return (get_mse(d, preds, mode), get_mae(d, preds, mode),\n get_mape(d, preds, mode))",
"def mean_squared_error(y_true, y_pred, *, sample_weight=..., multioutput=..., squared=...):\n ...",
"def score(self, test_index):\n y_pred = self.predict(test_index)\n mae = mean_absolute_error(self._y[test_index], y_pred)\n return mae",
"def score(self, test_index):\n y_pred = self.predict(test_index)\n mae = mean_absolute_error(self._y[test_index], y_pred)\n return mae",
"def RMSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sqrt(np.sum(np.power((y - f), 2)) / n)\r\n return J",
"def avg_abs_err(fitx, std0, target_cov, n):\n err = np.empty(n)\n for i in range(n):\n err[i] = get_abs_err(fitx, std0, target_cov)\n print(fitx, err.mean())\n return err.mean()",
"def mean_baseline(self):\n train_mean = np.mean(self.data.loc[self.train_index, self.target_name])\n rmse = np.sqrt(\n np.mean(np.square(self.data.loc[self.test_index, self.target_name] - train_mean)))\n print 'mean baseline RMSE: {}'.format(rmse)",
"def error_calculation_test(self):\n dataOrg = [[1,1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,8], [7.3, 5], [8, 0], [9,10]]\n dataCalc = [[1,3], [2,5], [3,0], [4,3], [5,5], [6.1,6], [7,3], [7.3, 5], [8, 0], [9,9]]\n # abs difference: 2 3 3 1 0 NA 5 0 NA 1\n # local errors: 200 150 200 50 0 NA 125 0 NA 20\n # sum: 745\n\n tsOrg = TimeSeries.from_twodim_list(dataOrg)\n tsCalc = TimeSeries.from_twodim_list(dataCalc)\n\n wmape = WeightedMeanAbsolutePercentageError()\n wmape.initialize(tsOrg, tsCalc)\n assert str(wmape.get_error())[:6] == \"93.125\""
] | [
"0.77339864",
"0.76291317",
"0.7148356",
"0.7034834",
"0.68424296",
"0.67941666",
"0.66706353",
"0.6591392",
"0.6557777",
"0.6557774",
"0.6482599",
"0.6372792",
"0.63640726",
"0.6351532",
"0.63476115",
"0.63377297",
"0.63268167",
"0.6277267",
"0.6224823",
"0.61900854",
"0.616818",
"0.6135212",
"0.61208963",
"0.6112047",
"0.6108037",
"0.6108037",
"0.61037517",
"0.607309",
"0.6059663",
"0.60321563"
] | 0.7912273 | 0 |
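The record's solution fills in a course template, so the `_error` helper it calls is not shown. As a self-contained sketch of the same computation (assuming `_error` returns the elementwise residuals), the mean absolute error reduces to a single NumPy expression; the example data below is made up for illustration.

```python
import numpy as np

def mean_absolute_error(w, X, y):
    # MAE = mean(|X·w - y|); equivalent to np.mean(np.abs(_error(y, X @ w)))
    # in the record, assuming _error returns elementwise residuals.
    return np.mean(np.abs(X @ w - y))

# tiny usage example
X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
w = np.array([2.0, -1.0])
y = np.array([2.5, -1.0, 0.5])
print(mean_absolute_error(w, X, y))  # |[2,-1,1] - [2.5,-1,0.5]| averaged -> 1/3
```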
Iterate over modes. A synchronized iterator that yields the modes of the current order, from -order to +order. | def modes(self):
try:
order = self._current_order
except AttributeError:
raise AttributeError('Cannot iterate over modes without iterating over orders!') from None
mode = -order
while mode <= order:
yield mode
mode += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\n return iter([v for k, v in sorted(self._modes.items())])",
"def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]",
"async def _load_modes(self) -> None:\n modes: List[Dict[str, Any]] = await self._api_request(\"modes\")\n _LOGGER.debug(\"Loaded modes\")\n self._modes = [Mode(m) for m in modes]",
"def iter_mode(n, obj='ndarray'):\n for mode in cap[obj][MODE]:\n for char in fmtdict[mode]:\n yield randitems(n, obj, mode, char)",
"def iter_mode(n, obj='ndarray'):\n for mode in cap[obj][MODE]:\n for char in fmtdict[mode]:\n yield randitems(n, obj, mode, char)",
"def values(self):\n return self._modes.values()",
"def _get_modes(self):\n return self.__modes",
"def get_modes(self):\n modes = set()\n for er in self.exercise_recordings:\n if er.mode not in modes:\n modes.add(er.mode)\n return list(modes)",
"def modes(self, modes):\n\n self._modes = modes",
"def modes(self, modes):\n\n self._modes = modes",
"def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError('Cannot locate modes in output.dat file.')\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.molecule['input'])) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n\n all_modes = [float(val) for val in structures]\n\n return array(all_modes)",
"def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open(\"output.dat\", \"r\") as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError(\"Cannot locate modes in output.dat file.\")\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.atoms)) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += (\n lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n )\n\n all_modes = [float(val) for val in structures]\n\n return np.array(all_modes)",
"def _sort_modes(self):\n sort_idx = np.lexsort((self.modes[:, 1], self.modes[:, 0], self.modes[:, 2]))\n self._modes = self.modes[sort_idx]",
"def get_modes(self):\n return self.circuit.get_modes()",
"def getModes(this):\n\t\tthis.checkInit()\n\t\t\n\t\t# On sauvegarde la config actuelle\n\t\tinit = this.config(get=True)\n\t\t\n\t\t# Ensembles de modes\n\t\tformats = Camera.formats.copy()\n\t\tmodes = set()\n\t\t\n\t\t# On averti du départ\n\t\tprint '\\nLooping modes for the camera... (%d modes)' % (len(formats))\n\t\t\t\n\t\t# Pour chaques formats\n\t\twhile formats:\n\t\t\t\n\t\t\t# On récupère le format à tester\n\t\t\tformat = formats.pop()\n\t\t\t\n\t\t\t# Configuration actuelle\n\t\t\tmode = this.config(\n\t\t\t\theight = float(format[1]),\n\t\t\t\twidth = float(format[0])\n\t\t\t)\n\t\t\t\n\t\t\t# On enregistre le mode\n\t\t\tcurrentFormat = (mode['width'], mode['height'])\n\t\t\tmodes.add(currentFormat)\n\t\t\tif currentFormat in formats:\n\t\t\t\tformats.remove(currentFormat)\n\t\t\t\n\t\t\t# On affiche l'itération courante\n\t\t\tprintf('%d%5s\\r' % (len(formats), ''))\n\t\t###\n\t\t\n\t\t# On remet comme avant et on retourne la liste de modes\n\t\tthis.config(params=init); print 'Done, found %d.' % (len(modes))\n\t\treturn [(int(mode[0]), int(mode[1])) for mode in modes]",
"def modes(self) -> List[str]:\n return [m.name for m in self._modes]",
"def modes(self):\n return np.hstack(tuple(self.operator.modes))",
"def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes",
"def __iter__(self):\n for x in self._order:\n yield x",
"def sort_modes(self):\n # sorts by l, then n, then freq\n ind = np.lexsort((self.modes['freq'], self.modes['n'],self.modes['l']))\n self.modes = np.array([self.modes[i] for i in ind],dtype=modetype)",
"def modes_for_course(cls, course_id):\r\n now = datetime.now(pytz.UTC)\r\n found_course_modes = cls.objects.filter(Q(course_id=course_id) &\r\n (Q(expiration_datetime__isnull=True) |\r\n Q(expiration_datetime__gte=now)))\r\n modes = ([Mode(\r\n mode.mode_slug,\r\n mode.mode_display_name,\r\n mode.min_price,\r\n mode.suggested_prices,\r\n mode.currency,\r\n mode.expiration_datetime\r\n ) for mode in found_course_modes])\r\n if not modes:\r\n modes = [cls.DEFAULT_MODE]\r\n return modes",
"def test_modes_for_course_multiple(self):\r\n mode1 = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None)\r\n mode2 = Mode(u'verified', u'Verified Certificate', 0, '', 'usd', None)\r\n set_modes = [mode1, mode2]\r\n for mode in set_modes:\r\n self.create_mode(mode.slug, mode.name, mode.min_price, mode.suggested_prices)\r\n\r\n modes = CourseMode.modes_for_course(self.course_key)\r\n self.assertEqual(modes, set_modes)\r\n self.assertEqual(mode1, CourseMode.mode_for_course(self.course_key, u'honor'))\r\n self.assertEqual(mode2, CourseMode.mode_for_course(self.course_key, u'verified'))\r\n self.assertIsNone(CourseMode.mode_for_course(self.course_key, 'DNE'))",
"def get_all_servers_modes():\n return _get_list(\n lambda server: server.mode,\n lambda server: server.mode_name_long\n )",
"def modes(self, avg=False):\n if not self.fp_init:\n self._init_full_props\n if avg:\n return self._modes_avg, self._num_modes\n return self._modes, self._num_modes",
"def compute_modes(self, mode_indices, mode_handles, vec_handles=None):\n if vec_handles is not None:\n self.vec_handles = util.make_iterable(vec_handles)\n build_coeffs = np.dot(self.eigvecs, np.diag(self.eigvals**-0.5))\n self.vec_space.lin_combine(\n mode_handles, self.vec_handles, build_coeffs,\n coeff_array_col_indices=mode_indices)",
"def modes(self, exp_id: int) -> List[str]:\n return list(self.state[exp_id].keys())",
"def change_modes(self, change_list):\n\t\tprint \"CHG_MODE START\"\n\t\tfor mode_ix in range(0,len(change_list),2):\n\t\t\tsetid_and_index = self.__mode_modesetid(change_list[mode_ix])\n\t\t\tif setid_and_index is not None:\n\t\t\t\tif change_list[mode_ix+1] == True:\n\t\t\t\t\tprint \"Setting Active Set:{0} Index:{1}\".format(setid_and_index[0], setid_and_index[1])\n\t\t\t\t\tself.ms_all[setid_and_index[0]].activate(setid_and_index[1])\n\t\t\t\telif change_list[mode_ix+1] == False:\n\t\t\t\t\tprint \"Setting DEactive Set:{0} Index:{1}\".format(setid_and_index[0], setid_and_index[1])\n\t\t\t\t\tself.ms_all[setid_and_index[0]].deactivate(setid_and_index[1])\n\t\t\t\telse:\n\t\t\t\t\tprint \"Invalid State\"\n\t\tif 'volume' in self.ms_all:\n\t\t\tprint self.ms_all['volume'].active()\n\t\tif 'modecycle1' in self.ms_all:\n\t\t\tprint self.ms_all['modecycle1'].active()\n\t\tprint \"CHG_MODE STOP\"",
"def temp_mode_as(self, mode):\n previous_mode = self._mode\n self._mode = mode\n yield\n self._mode = previous_mode",
"def cycle_mode(self):\n n_modes = len(self.sensors) + N_EXTRA_MODES\n self.mode += 1\n self.mode %= n_modes\n logger.info(f'Switched to mode {self.mode}')",
"def getDisplayModes(self, obj):\n modes = []\n return modes"
] | [
"0.7799798",
"0.63018954",
"0.6299035",
"0.62619734",
"0.62619734",
"0.6169084",
"0.61189497",
"0.60541093",
"0.5993781",
"0.5993781",
"0.59646934",
"0.58745813",
"0.58432204",
"0.58386713",
"0.5829693",
"0.57918906",
"0.57898873",
"0.57819664",
"0.5591517",
"0.5588966",
"0.55590606",
"0.55087155",
"0.550612",
"0.54356",
"0.5426513",
"0.54214007",
"0.5403037",
"0.53745687",
"0.5368656",
"0.5362822"
] | 0.78386045 | 0 |
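The `modes` generator in the record only works while a companion iterator has set `self._current_order`. A minimal sketch of how such a synchronized pair might be driven is shown below; the `orders` method and `max_order` attribute are assumptions for illustration, not part of the record.

```python
class ModeIterator:
    def __init__(self, max_order):
        self.max_order = max_order  # assumed attribute for this sketch

    def orders(self):
        # Sets the shared state that modes() reads, one order at a time.
        for order in range(self.max_order + 1):
            self._current_order = order
            yield order

    def modes(self):
        try:
            order = self._current_order
        except AttributeError:
            raise AttributeError('Cannot iterate over modes without iterating over orders!') from None
        mode = -order
        while mode <= order:
            yield mode
            mode += 1

it = ModeIterator(max_order=2)
for order in it.orders():
    print(order, list(it.modes()))
# 0 [0]
# 1 [-1, 0, 1]
# 2 [-2, -1, 0, 1, 2]
```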
Find the approximate location of a levitation trap. Find an approximate position of an acoustic levitation trap close to a starting point. This is done by following the radiation force in the sound field using a differential equation solver. The differential equation is the unphysical equation | def find_trap(array, start_position, complex_transducer_amplitudes, tolerance=10e-6, time_interval=50, path_points=1, **kwargs):
from scipy.integrate import solve_ivp
from numpy.linalg import lstsq
if 'radius' in kwargs:
from .fields import SphericalHarmonicsForce as Force, SphericalHarmonicsForceGradient as ForceGradient
else:
from .fields import RadiationForce as Force, RadiationForceGradient as ForceGradient
evaluator = Force(array, **kwargs) + ForceGradient(array, **kwargs)
mg = evaluator.fields[0].field.mg
def f(t, x):
F = evaluator(complex_transducer_amplitudes, x)[0]
F[2] -= mg
return F
def bead_close(t, x):
F, dF = evaluator(complex_transducer_amplitudes, x)
F[2] -= mg
dx = lstsq(dF, F, rcond=None)[0]
distance = np.sum(dx**2, axis=0)**0.5
return np.clip(distance - tolerance, 0, None)
bead_close.terminal = True
outs = solve_ivp(f, (0, time_interval), np.asarray(start_position), events=bead_close, vectorized=True, dense_output=path_points > 1)
if outs.message != 'A termination event occurred.':
print('End criterion not met. Final path position might not be close to trap location.')
if path_points > 1:
return outs.sol(np.linspace(0, outs.sol.t_max, path_points))
else:
return outs.y[:, -1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'",
"def find_lower_tangent(l_x_as, l_y_as, r_x_as, r_y_as, r_yp_as):\n # logg = logging.getLogger(f\"c.{__name__}.find_lower_tangent\")\n # logg.debug(f\"Start find_lower_tangent\")\n\n # compute the second derivative\n r_ypp = r_yp_as[1:] - r_yp_as[:-1]\n mean_r_ypp = np.mean(r_ypp)\n\n # logg.debug(f\"r_yp_as: {r_yp_as}\")\n # logg.debug(f\"r_ypp: {r_ypp}\")\n\n if mean_r_ypp >= 0:\n # logg.debug(f\"ypp positive\")\n range_xid = range(r_x_as.shape[0])\n else:\n # logg.debug(f\"ypp negative\")\n range_xid = range(r_x_as.shape[0])[::-1]\n\n tangent_start = timer()\n for xid in range_xid:\n # point tangent to the *right* segment\n tang_op = OrientedPoint(r_x_as[xid], r_y_as[xid], slope2deg(r_yp_as[xid]))\n tang_coeff = tang_op.to_ab_line()\n\n # sample it on the *left* segment sample\n l_tang_y_as = poly_model(l_x_as, tang_coeff, flip_coeff=True)\n # ax.plot(l_x_as, l_tang_y_as, color=\"b\", ls=\"-\", marker=\"\")\n # ax.plot(l_x_as, l_tang_y_as, color=\"b\", ls=\"\", marker=\".\")\n\n # find if the left segment has some points lower than the tangent\n lower = l_y_as < l_tang_y_as\n # logg.debug(f\"lower: {lower} {np.sum(lower)}\")\n if np.sum(lower) == 0:\n # logg.debug(f\"Breaking at xid: {xid}\")\n break\n\n tangent_end = timer()\n tangent_time = tangent_end - tangent_start\n # logg.debug(f\"Time to find tangent: {tangent_end - tangent_start:.6f}\")\n\n # find distance from left segment to tangent\n dist_left_tangent = l_y_as - l_tang_y_as\n min_dist_left_tangent = np.min(dist_left_tangent)\n argmin_dist_left_tangent = np.argmin(dist_left_tangent)\n recap = f\"min_dist_left_tangent: {min_dist_left_tangent:.6f}\"\n recap += \" argmin_dist_left_tangent: {argmin_dist_left_tangent}\"\n # logg.debug(recap)\n\n if min_dist_left_tangent < 0:\n # logg.debug(f\"Tangent not found\")\n return -1, -1, None, tangent_time\n\n l_xid = argmin_dist_left_tangent\n r_xid = xid\n\n return l_xid, r_xid, l_tang_y_as, tangent_time",
"def analyticalLinearSol(self, t):\n return self.c*t + self.I",
"def LineSearch(Pos, Dir, dx, EFracTol, M, L, Cut,\n Accel = 1.5, MaxInc = 10., MaxIter = 10000):\n #start the iteration counter\n Iter = 0\n\n #find the normalized direction\n NormDir = Dir / np.sqrt(np.sum(Dir * Dir))\n\n #take the first two steps and compute energies\n Dists = [0., dx]\n PEs = [mdlib.calcenergy(Pos + NormDir * x, M, L, Cut) for x in Dists]\n\n #if the second point is not downhill in energy, back\n #off and take a shorter step until we find one\n while PEs[1] > PEs[0]:\n Iter += 1\n dx = dx * 0.5\n Dists[1] = dx\n PEs[1] = mdlib.calcenergy(Pos + NormDir * dx, M, L, Cut)\n\n #find a third point\n Dists = Dists + [2. * dx]\n PEs = PEs + [mdlib.calcenergy(Pos + NormDir * 2. * dx, M, L, Cut)]\n\n #keep stepping forward until the third point is higher\n #in energy; then we have bracketed a minimum\n while PEs[2] < PEs[1]:\n Iter += 1\n\n #find a fourth point and evaluate energy\n Dists = Dists + [Dists[-1] + dx]\n PEs = PEs + [mdlib.calcenergy(Pos + NormDir * Dists[-1], M, L, Cut)]\n\n #check if we increased too much in energy; if so, back off\n if (PEs[3] - PEs[0]) > MaxInc * (PEs[0] - PEs[2]):\n PEs = PEs[:3]\n Dists = Dists[:3]\n dx = dx * 0.5\n else:\n #shift all of the points over\n PEs = PEs[-3:]\n Dists = Dists[-3:]\n dx = dx * Accel\n\n #we've bracketed a minimum; now we want to find it to high\n #accuracy\n OldPE3 = 1.e300\n while True:\n Iter += 1\n if Iter > MaxIter:\n print(\"Warning: maximum number of iterations reached in line search.\")\n break\n\n #store distances for ease of code-reading\n d0, d1, d2 = Dists\n PE0, PE1, PE2 = PEs\n\n #use a parobolic approximation to estimate the location\n #of the minimum\n d10 = d0 - d1\n d12 = d2 - d1\n Num = d12*d12*(PE0-PE1) - d10*d10*(PE2-PE1)\n Dem = d12*(PE0-PE1) - d10*(PE2-PE1)\n if Dem == 0:\n #parabolic extrapolation won't work; set new dist = 0\n d3 = 0\n else:\n #location of parabolic minimum\n d3 = d1 + 0.5 * Num / Dem\n\n #compute the new potential energy\n PE3 = mdlib.calcenergy(Pos + NormDir * d3, M, L, Cut)\n\n #sometimes the parabolic approximation can fail;\n #check if d3 is out of range < d0 or > d2 or the new energy is higher\n if d3 < d0 or d3 > d2 or PE3 > PE0 or PE3 > PE1 or PE3 > PE2:\n #instead, just compute the new distance by bisecting two\n #of the existing points along the line search\n if abs(d2 - d1) > abs(d0 - d1):\n d3 = 0.5 * (d2 + d1)\n else:\n d3 = 0.5 * (d0 + d1)\n PE3 = mdlib.calcenergy(Pos + NormDir * d3, M, L, Cut)\n\n #decide which three points to keep; we want to keep\n #the three that are closest to the minimum\n if d3 < d1:\n if PE3 < PE1:\n #get rid of point 2\n Dists, PEs = [d0, d3, d1], [PE0, PE3, PE1]\n else:\n #get rid of point 0\n Dists, PEs = [d3, d1, d2], [PE3, PE1, PE2]\n else:\n if PE3 < PE1:\n #get rid of point 0\n Dists, PEs = [d1, d3, d2], [PE1, PE3, PE2]\n else:\n #get rid of point 2\n Dists, PEs = [d0, d1, d3], [PE0, PE1, PE3]\n\n #check how much we've changed\n if abs(OldPE3 - PE3) < EFracTol * abs(PE3):\n #the fractional change is less than the tolerance,\n #so we are done and can exit the loop\n break\n OldPE3 = PE3\n\n #return the position array at the minimum (point 1)\n PosMin = Pos + NormDir * Dists[1]\n PEMin = PEs[1]\n\n #if using visualization, update the display\n if UseVisual:\n if atomvis.Initialized:\n #update the positions\n atomvis.Update(PosMin)\n else:\n #initialize the visualization window\n atomvis.Init(PosMin)\n\n return PEMin, PosMin",
"def _get_closest_light_ahead(self, pose):\n #\n # we have very few lights in either the simulation or the live test,\n # so it is easiest just to loop thru them rather than use KDTree\n #\n pos = pose.position\n x = pos.x\n y = pos.y\n closest_idx = -1\n closest_dist2 = None\n idx = 0\n for light in self.lights:\n xl = light.pose.pose.position.x\n yl = light.pose.pose.position.y\n\n #\n # make sure light is ahead, otherwise ignore it\n # we can only do this if the car velocity is nonzero\n #\n skip_light = False\n if self.velocity_unit_vector:\n dx = xl - x\n dy = yl - y\n car_to_light = [dx,dy]\n val = self.dot2d(car_to_light,self.velocity_unit_vector)\n if val < 0.0:\n #\n # light is behind us so continue\n #\n skip_light = True\n\n if not skip_light:\n if closest_dist2 is None:\n closest_idx = idx\n closest_dist2 = (x-xl)*(x-xl) + (y-yl)*(y-yl)\n else:\n dist2 = (x-xl)*(x-xl) + (y-yl)*(y-yl)\n if dist2 < closest_dist2:\n closest_idx = idx\n closest_dist2 = dist2\n idx+=1\n \n return closest_idx",
"def lidar_relative(self):\n return self.distance",
"def calcPos(self,newpol):\n\n\t\tdetlatoff=(self.offset9()-self.offset10())*cosd(newpol)+self.offset10()\n\t\tnewoffcry = (self.offset2()-self.offset3())*cosd(newpol)+self.offset3()\n\t\tnewdetoff = (self.offset4()-self.offset8())*cosd(newpol)+self.offset8() +self.offset5()\n\n\t\twl = BLi.getWavelength()\n\t\tself.thbragg = 180/pi*asin(wl/(2*self.dspace))\n\t\tnewthp=self.sign()*self.thbragg+newoffcry\n\t\tnewtthp=2*self.sign()*self.thbragg+newdetoff\n\t\tprint \"stokes=%1.2f thp=%1.2f tthp=%1.2f detlatoff=%1.2f\"%(newpol,newthp,newtthp,detlatoff)",
"def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n\n \"*** YOUR CODE HERE ***\"\n return search.bfs(problem)",
"def LAT(self):\n # The maximum update amount for these element\n LateralFraction_DELTA = self.dt * (self.LateralFraction_LIMITS[1] -\n self.LateralFraction_LIMITS[0]) / (\n 2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n LateralFraction_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.LateralFraction += LateralFraction_DIRECTION * LateralFraction_DELTA\n self.LateralFraction = np.clip(self.LateralFraction,\n self.LateralFraction_LIMITS[0],\n self.LateralFraction_LIMITS[1])",
"def FindClosestPoint(self, ):\n ...",
"def mrv_leadterm(e, x):\n Omega = SubsSet()\n if not e.has(x):\n return (e, S.Zero)\n if Omega == SubsSet():\n Omega, exps = mrv(e, x)\n if not Omega:\n # e really does not depend on x after simplification\n return exps, S.Zero\n if x in Omega:\n # move the whole omega up (exponentiate each term):\n Omega_up = moveup2(Omega, x)\n exps_up = moveup([exps], x)[0]\n # NOTE: there is no need to move this down!\n Omega = Omega_up\n exps = exps_up\n #\n # The positive dummy, w, is used here so log(w*2) etc. will expand;\n # a unique dummy is needed in this algorithm\n #\n # For limits of complex functions, the algorithm would have to be\n # improved, or just find limits of Re and Im components separately.\n #\n w = Dummy(\"w\", positive=True)\n f, logw = rewrite(exps, Omega, x, w)\n try:\n lt = f.leadterm(w, logx=logw)\n except (NotImplementedError, PoleError, ValueError):\n n0 = 1\n _series = Order(1)\n incr = S.One\n while _series.is_Order:\n _series = f._eval_nseries(w, n=n0+incr, logx=logw)\n incr *= 2\n series = _series.expand().removeO()\n try:\n lt = series.leadterm(w, logx=logw)\n except (NotImplementedError, PoleError, ValueError):\n lt = f.as_coeff_exponent(w)\n if lt[0].has(w):\n base = f.as_base_exp()[0].as_coeff_exponent(w)\n ex = f.as_base_exp()[1]\n lt = (base[0]**ex, base[1]*ex)\n return (lt[0].subs(log(w), logw), lt[1])",
"def lib_pt_loc(sys_chars_vals, tolerance=1e-12):\r\n mu = sys_chars_vals.mu\r\n\r\n lib_loc = np.zeros((5, 3))\r\n lib_loc[3, :] = [\r\n 0.5 - mu,\r\n 3**0.5 / 2,\r\n 0,\r\n ] # L4, analytical_guessal solution known\r\n lib_loc[4, :] = [\r\n 0.5 - mu,\r\n -(3**0.5) / 2,\r\n 0,\r\n ] # L5, analytical solution known\r\n\r\n # 5th degree polynomial of L1, L2 and L3\r\n f_lib = np.array(\r\n [\r\n [1, mu - 3, 3 - 2 * mu, -mu, 2 * mu, -mu],\r\n [1, 3 - mu, 3 - 2 * mu, -mu, -2 * mu, -mu],\r\n [1, 2 + mu, 1 + 2 * mu, mu - 1, 2 * mu - 2, -1 + mu],\r\n ]\r\n )\r\n\r\n # First-order derivative of the polyomial defined in f_lib\r\n fd_lib = np.array(\r\n [\r\n [0, 5, 4 * (mu - 3), 3 * (3 - 2 * mu), 2 * -mu, 2 * mu],\r\n [0, 5, 4 * (3 - mu), 3 * (3 - 2 * mu), 2 * -mu, -2 * mu],\r\n [0, 5, 4 * (2 + mu), 3 * (1 + 2 * mu), 2 * (mu - 1), 2 * mu - 2],\r\n ]\r\n )\r\n\r\n initial_guess = np.array([0.9, 1.1, -1])\r\n\r\n for i in range(3):\r\n val = np.vander([initial_guess[i]], 6)\r\n h = np.dot(val, f_lib[i, :]) / np.dot(val, fd_lib[i, :])\r\n while abs(h) >= tolerance:\r\n val = np.vander([initial_guess[i]], 6)\r\n h = np.dot(val, f_lib[i, :]) / np.dot(val, fd_lib[i, :])\r\n lib_loc[i, 0] = initial_guess[i] - h\r\n\r\n initial_guess[i] = lib_loc[i, 0]\r\n\r\n if i == 0:\r\n lib_loc[i, 0] = 1 - mu - lib_loc[i, 0]\r\n elif i == 1:\r\n lib_loc[i, 0] = 1 - mu + lib_loc[i, 0]\r\n elif i == 2:\r\n lib_loc[i, 0] = -mu - lib_loc[i, 0]\r\n\r\n return lib_loc",
"def get_tissue_coords(self, xdva, ydva):\n if self.eye == 'LE':\n raise NotImplementedError\n # Convert x, y (dva) into polar coordinates:\n theta, rho_dva = utils.cart2pol(xdva, ydva)\n # Add RGC displacement:\n meridian = np.where(xdva < 0, 'temporal', 'nasal')\n rho_dva += self._watson_displacement(rho_dva, meridian=meridian)\n # Convert back to x, y (dva):\n x, y = utils.pol2cart(theta, rho_dva)\n # Convert to retinal coords:\n return dva2ret(x), dva2ret(y)",
"def estimated_distance(self, log=False):\n\t\t\n\t\tx0 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 0.0)\n\t\tx1 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 1.0)\n\n\t\t# Derivative at x=0 and x=1\n\t\tu = x0\n\t\tv = x1\n\t\t# Initial height at x=0\n\t\ty = abs(self.velocity)\n\n\t\tif log:\n\t\t\tprint(f'u: {u}, v: {v}, y: {y}\\nEstimated distance: {get_positive_area(u, v, y)}\\n')\n\t\t\n\t\treturn get_positive_area(u, v, y)",
"def closest_cruising_altitude(altitude):\n return 1000 * ((altitude + 500) // 1000)",
"def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition()\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState)\n\n \"*** YOUR CODE HERE ***\"\n return breadthFirstSearch(problem)\n # util.raiseNotDefined()",
"def calc_nearest_ind(self, robot_pose):\n pass",
"def get_lookahead_point(self):\n lookahead_target_dist = self.lookahead_dist #+ (1 + self.curr_v)\n\n if self.path_point_idx == len(self.current_path) - 1 or self.path_point_idx == -1:\n #End of path, no more lookahead\n return self.path_point\n\n prev_pt = self.current_path[self.path_point_idx]\n curr_pt = self.current_path[self.path_point_idx + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist = pt_dist\n c = self.path_point_idx\n while curr_dist < lookahead_target_dist and c < len(self.current_path) - 1:\n prev_pt = self.current_path[c]\n curr_pt = self.current_path[c + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist += pt_dist\n c += 1\n\n if curr_dist < lookahead_target_dist:\n return self.current_path[-1]\n else:\n #Interpolate to get the actual lookahead point\n frac = (curr_dist - lookahead_target_dist) / pt_dist\n pt = frac * prev_pt + (1-frac) * curr_pt\n return pt",
"def FindClosestInsertedPoint(self, ):\n ...",
"def calibrate(self):\n\t\twl = BLi.getWavelength()\n\t\tif abs(self.stokes()) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset2(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset4(yyy)\n\t\t\tself.offset9(self.dettrans())\n\t\telif abs(self.stokes()-90.) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset3(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset8(yyy)\n\t\t\tself.offset10(self.dettrans())\n\t\telse:\n\t\t\tprint \"Can't calibrate at stokes=\",self.stokes()\n\t\treturn [self.sign(),self.offset2(), self.offset3(),self.offset4(),self.offset5(),self.offset8(),self.offset9(),self.offset10()]",
"def approach_gps(g_lat,g_lon,emily_lat_start, emily_lon_start, pose_rad, Parameters): #approach a gps position using potential fields\r\n\tx_goal,y_goal = latlongtoxy(g_lat,g_lon,g_lat)\r\n\tx_e_start,y_e_start = latlongtoxy(emily_lat_start,emily_lon_start,g_lat)\r\n\r\n\tprint (\"\\n HERE I AM\\n\\n\")\r\n\r\n\tdist = haver_distance(g_lat, g_lon, emily_lat_start, emily_lon_start)\r\n\tinitial_dist = dist\r\n\r\n\tprint ('Distance: ',dist)\r\n\theading = get_heading(emily_lat_start, emily_lon_start, g_lat, g_lon)\r\n print ('After get heading')\r\n\t# Eric: I'm not sure if turn_towards is necessary for a successful run.\r\n\t#turn_towards(heading)\r\n\tprint ('After Turn towards')\r\n\t#turn towards the goal initially\r\n\r\n\tstart_time = time.time()\r\n\tcurrent_time = 0\r\n\tdstore = []\r\n\thstore = []\r\n\twhile(dist >= goal_radius):\r\n\r\n\t\t#------------ code for reading gps location of emily and its orientation ------\r\n\t\te_lat = vehicle.location.global_frame.lat\r\n\t\te_lon = vehicle.location.global_frame.lon\r\n\t\te_heading = vehicle.heading * pi/180\t\t# convert heading to radians\r\n\t\t#------------------ get e_lat,e_lon, e_orient ---------------------\r\n\r\n\r\n\t\tx_e,y_e = latlongtoxy(e_lat,e_lon,g_lat)\t\t\t#change latitude and longitude to xy\r\n\r\n\t\t#x,y are given to approach victim function as y,x to algin the north heading and direction in x,y\r\n\r\n\t\tdx,dy = approach_victim_behaviour(y_goal,x_goal, y_e,x_e, pose_rad, Parameters)\t#get potential field vector\r\n\t\trc1, rc3 = dxdytorc(dx,dy, e_heading,g_lon)\t\t\t\t\t#get rc parameters\r\n\t\tdist = haver_distance(g_lat, g_lon, e_lat, e_lon)\t\t\t\t#haversine distance\r\n\r\n\t\tcurrent_time = time.time() - start_time\r\n\t\tprint (\"Time, Heading, Distance\")\r\n\t\tprint (current_time, e_heading*180/pi, dist)\r\n\t\tdstore.append(dist)\r\n\t\thstore.append(e_heading*180/pi)\r\n\t\t#code for sending the writing the rc commands\r\n\t\t# 3 is the thrust control\r\n\t\t#vehicle.channels.overrides = {'3':rc3}\r\n\t\tsendThrottleCommand(rc3, enableThrottle)\r\n\t\ttime.sleep(0.5)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\tprint (\"Rudder: \",rc1)\r\n\t\tprint (\"Throttle: \",rc3)\r\n\t\tsaveToLog(e_lat, e_lon,dist,rc1,rc3)\r\n\t\ttime.sleep(0.5)\r\n\tprint(initial_dist)\r\n\tprint(\"intial \", emily_lat_start,emily_lon_start)\r\n\tprint(\"final \",e_lat,e_lon)\r\n\tplt.plot(dstore)\r\n\t#plt.title('Distance form home vs time')\r\n\tplt.xlabel(\"Time\")\r\n\tplt.ylabel('Distance')\r\n\tplt.show()\r\n\tplt.plot(hstore)\r\n\tplt.show()",
"def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx",
"def exactsolution(x, t, u):\n if 0 <= (x - u*t) and (x - u*t) <= 0.2:\n temp = 1 - (10 * (x - u*t) -1)**2\n else:\n temp = 0\n return temp",
"def solve(self, theta):\n ndim = len(theta)\n nspots = int((ndim - 3) / 3)\n y = np.ones_like(self.x)\n for i in range(1, nspots + 1):\n y += -1 + eker(self.x, np.append(theta[:3], theta[3 * i:3 * (i + 1)]), l1=self.l1, l2=self.l2, ir=self.ir)\n return y",
"def _hill_diff(self, position):\n if position < 0:\n return 2 * position + 1\n else:\n return (1/math.sqrt(1 + 5 * position ** 2)\n - 5 * position ** 2 * (1 + 5 * position ** 2)**-1.5)",
"def leff(self):\n with Vega() as v:\n s = self.reinterp(v.wavelength)\n w = s._wavelength\n if s.transmit.max() > 0:\n leff = np.trapz(w * s.transmit * v.flux.value, w, axis=-1)\n leff /= np.trapz(s.transmit * v.flux.value, w, axis=-1)\n else:\n leff = float('nan')\n if s.wavelength_unit is not None:\n leff = leff * Unit(s.wavelength_unit)\n if self.wavelength_unit is not None:\n return leff.to(self.wavelength_unit)\n return leff\n else:\n return leff",
"def find_relative_time_reference(self, search_direction, tags, timex, timesIndex):\n \n if search_direction == 'Backward':\n ts = [r[2] for r in self.timexImpactZones if r[0]<=timex.getStartPos() and r[1]>=timex.getEndPos()]\n if ts:\n return ts[-1]\n else:\n return None\n \n parts = tags[timesIndex+1:]\n \n anchorTagSet = set(['Vaccine', 'Drug', 'Vaccination', 'Injection', 'Hospitalization', 'Administration']) \n doseNum = 0\n doseTag = [tg for tg in parts if tg[1]=='DoseIndicator']\n counts = []\n doseTagRange = 5\n if doseTag:\n counts = [(i, tg[0]) for i, tg in enumerate(parts) if tg[1]=='Count'] \n for i, tag in enumerate(parts):\n if tag[1]=='Drug' or tag[1]=='Vaccine':\n if counts:\n dist = 10000\n doseword = None\n for k, w in counts:\n if abs(k-i) < dist:\n dist = abs(k-i)\n doseword = w\n if doseword and dist<doseTagRange:\n doseNum = util.text2num.convertOrdinal(doseword) \n \n t = self.find_time_reference_with_tag(tag[1], tag[0], timex.getSentNum(), doseNum)\n if t:\n return t\n \n if tag[1] in ['Hospitalization', 'Administration']:\n t = self.find_time_reference_with_tag(tag[1], tag[0], timex.getSentNum())\n if t:\n return t\n \n if tag[1] in ['Vaccination', 'Injection']:\n if i+2<len(parts) and parts[i+1][0] in ['with', 'of', 'to'] and parts[i+2][1] in ['Drug', 'Vaccine']:\n continue\n t = self.find_time_reference_with_tag(tag[1], tag[0], timex.getSentNum())\n if t:\n return t\n \n ##: No reference tag is found, search backward for a valid time range\n ##: In ideal case, this should \"return None\" directly. However, considering that the current lexicon is not \n ##: complete enough, it's very likely some Vaccines or drugs are not tagged, we return the previous time\n ##: in the current development stage.\n ts = [r[2] for r in self.timexImpactZones if r[0]<=timex.getStartPos() and r[1]>=timex.getEndPos()]\n if ts:\n return ts[-1]\n \n return None",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.position.x,\n pose.position.y))\n else:\n return 0",
"def integrateModel(self, tend, initial=[1.0, 1.0, 0.0]):\n\n dt = 0.1\n self.ts = np.arange(0.0, tend + dt, dt)\n\n r = sp.integrate.solve_ivp(self.derv, (0, tend), initial, t_eval=self.ts, method='Radau') # uses RK45\n self.results = np.transpose(r.y)\n\n ent_angle = 1.0 * atan2(self.results[-1, 1], self.results[\n -1, 0]); # times negative one because VDP runs clockwise versus counterclockwise\n if (ent_angle < 0.0):\n ent_angle += 2 * sp.pi;\n\n ent_angle = ent_angle * 24.0 / (2.0 * sp.pi);\n return (ent_angle)",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.x,\n pose.y))\n else:\n return 0"
] | [
"0.5917251",
"0.5454485",
"0.53869474",
"0.53047276",
"0.5189361",
"0.51892936",
"0.5161829",
"0.5157585",
"0.51523453",
"0.51253486",
"0.51195055",
"0.5104085",
"0.5075305",
"0.5061984",
"0.50457203",
"0.50427437",
"0.50212985",
"0.49925196",
"0.498679",
"0.49814695",
"0.49780074",
"0.49516854",
"0.49278033",
"0.4910076",
"0.49099723",
"0.48954636",
"0.48867476",
"0.48846233",
"0.48824978",
"0.48753715"
] | 0.63866895 | 0 |
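The heart of `find_trap` is integrating an unphysical equation of motion in which the force field directly sets the velocity, with a terminal event that stops the solver once a force-balance point is reached. A toy, self-contained sketch of that pattern follows; the quadratic restoring force and the numbers are made up and stand in for the acoustic radiation force, which requires the full transducer-array model.

```python
import numpy as np
from scipy.integrate import solve_ivp

trap_center = np.array([0.0, 0.0, 5e-3])  # assumed toy trap location

def force(t, x):
    # Toy restoring force pointing towards trap_center; stands in for the
    # radiation force (minus gravity) evaluated by the field model.
    return -(x - trap_center)

def converged(t, x):
    # Terminal event: stop once we are within 10 µm of the equilibrium.
    return np.linalg.norm(x - trap_center) - 10e-6
converged.terminal = True

out = solve_ivp(force, (0, 50), np.array([1e-3, -2e-3, 0.0]), events=converged)
print(out.y[:, -1])  # approximate trap position, close to trap_center
```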
Play a song based on its path. | def play_song(self):
path = input('Give path to wanted song: ') # Request path to song
path = path.replace('\\', '/')
if not self.path_storage_re.match(path): # Check if the wanted song is from the storage directory
print("Give a valid path")
else:
p = vlc.MediaPlayer(path) # Create VLC instance and play the song
p.play()
self.playSong.append(p)
self.isPlaying = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def play(path):\n sound = AudioSegment.from_mp3(path)\n playback.play(sound)",
"def play(self, songpos=None):\n # TODO: implement songpos !\n if songpos is None:\n resp = yield from self.command('play')\n return True",
"def play(song):\n # Show the metadata\n if (verbose==True):\n for s in song.keys():\n print s, \":\", \n print song[s]\n else:\n print \"Title:\", song[\"title\"]\n print \"Artisit:\", song[\"artist\"]\n print \"Album:\", song[\"albumtitle\"]\n print \"Year\", song[\"public_time\"]\n print \"Company:\", song[\"company\"]\n print \"Length\", song[\"length\"]\n print \"Playing...\"\n mp3_url = song[\"url\"]\n song_length = song[\"length\"]\n p = subprocess.Popen([\"mplayer\", \"-msglevel\", \"all=0\", mp3_url])\n\n # At the same time, download the song:\n u = urllib2.urlopen(mp3_url)\n local_mp3 = open(song[\"title\"] + \"-\" + song[\"artist\"] + \".mp3\", \"w\")\n local_mp3.write(u.read())\n local_mp3.close()\n # time.sleep(song_length)\n i = 0\n while(True):\n time.sleep(1)\n i += 1\n if i == song_length:\n # Kill the process when the song is finished.\n p.terminate()\n print \"#\" * 80\n break",
"def play(sound):\n if SOUNDDIR != \"\":\n call([\"aplay\", SOUNDDIR + sound])",
"def _play_audio(self, path_or_location):\n url = path_or_location.replace('https', 'http')\n audi_commd = self._vlc_audio_command + [url]\n logger.info('VLC command: {}'.format(audi_commd))\n process = subprocess.Popen(audi_commd)\n self._player_pid = process.pid\n logger.info(\"vlc pid \" + str(process.pid))\n\n # add pid to child_pids\n self._child_pids[process.pid] = True",
"def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()",
"def play_async(path):\n p = Process(target=play, args=(path,))\n p.start()",
"async def play(self, msg, *, song):\n if msg.guild.id in self.player:\n if msg.voice_client.is_playing() is True: # NOTE: SONG CURRENTLY PLAYING\n return await self.queue(msg, song)\n\n if self.player[msg.guild.id]['queue']:\n return await self.queue(msg, song)\n\n if msg.voice_client.is_playing() is False and not self.player[msg.guild.id]['queue']:\n return await self.start_song(msg, song)\n\n else:\n # IMPORTANT: THE ONLY PLACE WHERE NEW `self.player[msg.guild.id]={}` IS CREATED\n self.player[msg.guild.id] = {\n 'player': None,\n 'queue': [],\n 'author': msg,\n 'name': None,\n \"reset\": False,\n 'repeat': False,\n 'volume': 0.5\n }\n return await self.start_song(msg, song)",
"def play_sound(self):\n # http://soundbible.com/2103-1-Person-Cheering.html\n my_path = os.path.dirname(__file__)\n sound_path = os.path.join(my_path, 'yay.mp3')\n sound = SoundLoader.load(sound_path)\n sound.play()",
"def load(self, song):\n self.currentSongName = song\n self.currentSong = pygame.mixer.music.load(song)",
"def play_music(self):\n song_index = -1\n if self.num_songs == 0:\n sys.stdout.write(\"No songs found\\n\")\n sys.exit(0)\n \n # FIXME: spacebar/pause is an mplayer-specific command\n sys.stdout.write(\"Press spacebar to pause songs\\n\")\n sys.stdout.write(\"Press ctrl+c once to skip a song\\n\")\n sys.stdout.write(\"Hold ctrl+c to exit\\n\")\n sys.stdout.write(\"%d files found.\\n\" % self.num_songs)\n while True:\n try:\n song_index = self._get_song_index(song_index)\n if song_index == None:\n sys.exit(0)\n song = self.songs[song_index]\n sys.stdout.write(\"%s\\n\" % song)\n \n # Disabled the following as it got pretty annoying seeing a \n # torrent of notifications for non-music files (mplayer \n # gracefully skips these). \n #try:\n # notify_cmd=\"notify-send -t 1000 '%s'\" % \\\n # song.split(\"/\")[-1]\n # subprocess.check_call(notify_cmd, shell=True)\n #except:\n # pass\n #FIXME: escape quotes in songs\n play_cmd = '\"%s\" \"%s\" > /dev/null 2>&1 ' % \\\n (self.music_client, song) \n subprocess.check_call(play_cmd, shell=True)\n except KeyboardInterrupt:\n try:\n # HACK to allow repeated ctrl+c to exit outright\n time.sleep(0.1) \n except KeyboardInterrupt:\n sys.stderr.write(\"\\nExiting...\\n\")\n sys.exit(0)",
"async def play(self, ctx, *, filename: str):\r\n if not ctx.voice_client:\r\n await self.connect(ctx)\r\n if filename not in self.audio_files:\r\n await ctx.send(\"File {0} not found\".format(filename))\r\n await self.audiofiles(ctx)\r\n else:\r\n ctx.voice_client.play(discord.FFmpegPCMAudio(source=\"{0}{1}.mp3\".format(self.audio_base_dir, filename)))\r\n await ctx.message.delete()",
"async def play(self, ctx, *, song: str):\n state = self.get_voice_state(ctx.message.server)\n opts = {\n 'default_search': 'ytsearch',\n 'quiet': True,\n }\n\n if state.voice is None:\n success = await ctx.invoke(self.summon)\n if not success:\n return\n if state.voice.channel != ctx.message.author.voice_channel:\n await self.bot.say('You can only modify the queue if you\\'re in the same channel as me!')\n return\n if len(state.songs._queue) >= 6:\n await self.bot.say('There can only be up to 6 items in queue!')\n return\n\n status = await self.bot.say('Loading... 🌚')\n pg_task = self.loop.create_task(self.progress(status, 'Loading'))\n state.voice.encoder_options(sample_rate=48000, channels=2)\n try:\n player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next)\n except Exception as e:\n if type(e).__name__.endswith('DownloadError') or type(e).__name__.endswith('IndexError'):\n pg_task.cancel()\n await self.bot.delete_message(status)\n await self.bot.say('**That video couldn\\'t be found!**')\n return False\n else:\n raise e\n\n player.volume = 0.7\n entry = VoiceEntry(ctx.message, player, False)\n was_empty = state.songs.empty()\n await state.songs.put(entry)\n if state.current:\n await self.bot.say('Queued ' + str(entry))\n pg_task.cancel()\n await self.bot.delete_message(status)",
"def play_local(self, music, device):\n # Look at all the files in the specified directory and add their URIs.\n mp3s = []\n try:\n files = os.listdir(music)\n except OSError, ex:\n logging.warning(\"OS Error: %s\", ex)\n return\n for filename in files:\n if filename.endswith(\".mp3\"):\n mp3s.append(os.path.join(self.webserver, music,\n urllib.pathname2url(filename)))\n\n device.play(sorted(mp3s))",
"def play(filename):\n SoundClient(blocking=True).playWave(filename)",
"def play_music(sid):\n # Get the parameters for the get_song_id request\n artist = None\n album = None\n title = None\n if not request.json:\n # If no JSON parameters were given, just resume playing the song\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.resume(host['ip'])\n return jsonify({})\n else:\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n artist = None\n album = None\n track = None\n if request.json.has_key('track') and request.json.get('track'):\n track = request.json.get('track')\n elif request.json.has_key('album') and request.json.get('album'):\n album = request.json.get('album')\n elif request.json.has_key('artist') and request.json.get('artist'):\n artist = request.json.get('artist')\n else:\n spotify.resume(host['ip'])\n return jsonify({})\n spotify.compound_play(host['ip'], artist=artist, album=album, song=track)\n return jsonify({})\n except:\n abort(400)",
"async def play(self, ctx, *, query):\n\n source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))\n ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)\n\n await ctx.send('Now playing: {}'.format(query))",
"async def play(self, ctx, *, query):\n\n source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))\n ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)\n\n await ctx.send('Now playing: {}'.format(query))",
"def load_song(self, path):\n self._menu_select('File->Open')\n self._open_file(path)\n try:\n # Get the annoying Comments window out of the way\n self._app.Comments.minimize()\n except MatchError:\n pass",
"def play(self, position=None):\n if position!=None and (0 <= position < len(self.playlist)):\n self.playlist_id = position\n self.stop()\n if not self.is_playing:\n self.logger.debug(\"play: {}\".format(os.path.join(self.path_songs, self.playlist[self.playlist_id])))\n self.tstamp_play = time.time()\n self.is_playing = True\n self.thread = CommandPlayer(\n command=self.player_command.format(\n file=shlex.quote(\n os.path.join(self.path_songs, self.playlist[self.playlist_id])\n )\n ),\n callback=self._callback\n )\n self.thread.start()",
"def playSound(self,sound):\n sound.play()",
"def playsong(song, failcount=0, override=False):\n # pylint: disable=R0911,R0912\n if not Config.PLAYER.get or not has_exefile(Config.PLAYER.get):\n g.message = \"Player not configured! Enter %sset player <player_app> \"\\\n \"%s to set a player\" % (c.g, c.w)\n return\n\n if Config.NOTIFIER.get:\n subprocess.Popen(shlex.split(Config.NOTIFIER.get) + [song.title])\n\n # don't interrupt preloading:\n while song.ytid in g.preloading:\n writestatus(\"fetching item..\")\n time.sleep(0.1)\n\n try:\n streams.get(song, force=failcount, callback=writestatus)\n\n except (IOError, URLError, HTTPError, socket.timeout) as e:\n dbg(\"--ioerror in playsong call to streams.get %s\", str(e))\n\n if \"Youtube says\" in str(e):\n g.message = F('cant get track') % (song.title + \" \" + str(e))\n return\n\n elif failcount < g.max_retries:\n dbg(\"--ioerror - trying next stream\")\n failcount += 1\n return playsong(song, failcount=failcount, override=override)\n\n elif \"pafy\" in str(e):\n g.message = str(e) + \" - \" + song.ytid\n return\n\n except ValueError:\n g.message = F('track unresolved')\n dbg(\"----valueerror in playsong call to streams.get\")\n return\n\n try:\n cmd, songdata = generate_real_playerargs(song, override, failcount)\n\n except (HTTPError) as e:\n\n # Fix for invalid streams (gh-65)\n dbg(\"----htterror in playsong call to gen_real_args %s\", str(e))\n if failcount < g.max_retries:\n failcount += 1\n return playsong(song, failcount=failcount, override=override)\n else:\n g.message = str(e)\n return\n\n except IOError as e:\n # this may be cause by attempting to play a https stream with\n # mplayer\n # ====\n errmsg = e.message if hasattr(e, \"message\") else str(e)\n g.message = c.r + str(errmsg) + c.w\n return\n\n songdata = \"%s; %s; %s Mb\" % songdata\n writestatus(songdata)\n dbg(\"%splaying %s (%s)%s\", c.b, song.title, failcount, c.w)\n dbg(\"calling %s\", \" \".join(cmd))\n returncode = launch_player(song, songdata, cmd)\n failed = returncode not in (0, 42, 43)\n\n if failed and failcount < g.max_retries:\n dbg(c.r + \"stream failed to open\" + c.w)\n dbg(\"%strying again (attempt %s)%s\", c.r, (2 + failcount), c.w)\n writestatus(\"error: retrying\")\n time.sleep(1.2)\n failcount += 1\n return playsong(song, failcount=failcount, override=override)\n\n return returncode",
"def play_sound():\r\n try:\r\n\r\n file_name = r\"sound.mp3\"\r\n playsound.playsound(file_name)\r\n except OSError:\r\n AudioSegment.converter=r\"/Users/russelllamb/Downloads/ffmpeg\"\r\n sound=AudioSegment.from_mp3(r\"sound.mp3\")\r\n play(sound)",
"async def _play(self, bot, server_id):\n srv = self.get_server_dict(server_id)\n srv['song'] = self.dequeue(server_id)\n if not srv['song']:\n await self._finish_playback(bot, server_id)\n return\n try:\n srv['player'] = srv['voice'].create_ffmpeg_player(srv['song'][0], stderr=subprocess.PIPE, before_options=self._ffmpeg_options, after=lambda: self._after(bot, server_id))\n await bot.change_presence(game = Game(name=srv['song'][1]))\n except Exception as ex:\n #shit's fucked\n self.logger.exception(ex, exc_info=True)\n await self._finish_playback(bot, server_id)\n return\n srv['player'].volume = srv['volume']\n srv['player'].start()",
"def play_sound(self, sound) -> None:\n pass",
"def play_sound(file_path: Path) -> None:\n threading.Thread(target=playsound.playsound, args=(file_path,), daemon=True).start()",
"async def play(self, ctx, *, query):\n source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))\n ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)",
"def do_play(*_args):\n print(last_wav_path)\n if last_wav_path and last_wav_path.is_file():\n threading.Thread(\n target=lambda: subprocess.check_call(\n [\"aplay\", \"-q\", str(last_wav_path)]\n )\n ).start()",
"def play_podcast(url, name):\n\n player.play(url, name)",
"def on_play(self, event, song):\n\n if event.guild.id not in self.players:\n raise CommandFail('Not in a voice channel here.')\n\n playables = list(YoutubeDLPlayable.from_url(song))\n for playable in playables:\n self.players[event.guild.id].play(playable, event)"
] | [
"0.79737085",
"0.68196875",
"0.66903174",
"0.66628766",
"0.65618736",
"0.6544999",
"0.648962",
"0.64856863",
"0.6419521",
"0.6384046",
"0.6374167",
"0.633997",
"0.63393456",
"0.63371986",
"0.63166153",
"0.6316028",
"0.6295789",
"0.6295789",
"0.62778527",
"0.6240952",
"0.62402165",
"0.6212399",
"0.6185112",
"0.61553085",
"0.6151835",
"0.613386",
"0.6110401",
"0.6086122",
"0.6072462",
"0.60707605"
] | 0.8189756 | 0 |
Stop the current playing/paused song. | def stop_song(self):
if self.isPlaying:
self.playSong[0].stop()
self.playSong.clear()
self.isPlaying = False
print("Music stopped")
else:
print("Play a song first...") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _stop(self, ctx: commands.Context):\n ctx.voice_state.songs.clear()\n\n if ctx.voice_state.is_playing:\n ctx.voice_state.voice.stop()\n return await ctx.send(embed=embed_msg(description=\"🛑 Stopped the music\"))\n\n else:\n return await ctx.send('Cannot stop. Not playing any song right now.')",
"def stop(self):\n if self.player and self.player.is_playing():\n self.player.pause()\n super().stop()",
"def stop(self):\n if self.is_playing:\n self.is_playing = False\n self.tstamp_play = None\n self.thread.stop()",
"def stop_soundtrack(self):\n SoundManager.remove_music(self.source, self)",
"def stop_soundtrack(self):\n SoundManager.remove_music(self.source, self)",
"async def _stop(self, ctx: commands.Context):\n\n ctx.voice_state.songs.clear()\n\n if not ctx.voice_state.is_playing:\n ctx.voice_state.voice.stop()\n await ctx.message.add_reaction('⏹')",
"async def _stop(self, ctx: commands.Context):\n\n ctx.voice_state.songs.clear()\n\n if ctx.voice_state.is_playing:\n ctx.voice_state.voice.stop()\n await ctx.message.add_reaction('⏹')",
"def stop():\n if playlist_box.size() > 0:\n # Displaying Status\n track_status.set(\"(Stopped)\")\n play_pause_btn.configure(image=play_img)\n track_pos_slider.configure(state=\"disabled\")\n reset_track()\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n # Stopped Song\n # pygame.mixer.music.stop()\n # BUG Using rewind and pause instead of stop,\n # Reason: after stoping track and playing the same track,\n # an \"End of track\" event is generated, BUGGG???\n pygame.mixer.music.rewind()\n pygame.mixer.music.pause()",
"def stop(self):\n self.stopped = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?",
"async def stop(self, ctx):\n server = ctx.message.server\n state = self.get_voice_state(server)\n\n if state.is_playing():\n player = state.player\n player.stop()\n\n try:\n state.speech_player.cancel()\n state.audio_player.cancel()\n del self.voice_states[server.id]\n await state.voice.disconnect()\n await self.bot.say('Stopped.')\n except:\n await self.bot.say('Couldn\\'t stop.')\n pass",
"def stop_music(self):\n self.load_music(None)",
"def _control_stop(self):\n self.player.stop()",
"async def stop(self, ctx):\n if not await self.control_checks(ctx):\n return\n server_id = ctx.message.server.id\n srv = self.get_server_dict(server_id)\n srv['queue'].clear()\n if self.is_playing(server_id):\n srv['player'].stop()",
"def stopping_sound(self):\n logger.warning('current sound play is being stopped')\n self.stop_sound = True",
"async def stop_(client, event):\n player = get_player_or_abort(client, event)\n\n await player.stop()\n return 'Stopped playing'",
"def stop(self):\n if logging.getLogger().getEffectiveLevel() != 10:\n try:\n self._player.terminate()\n except AttributeError as e: # Make things a bit more user friendly and allow a stop command even if not playing\n if str(e) == \"'Player' object has no attribute '_player'\":\n return\n else:\n raise AttributeError(str(e)) # Only catch the known error and raise any others to pass them through\n logging.debug(\"Stopping Playback\")",
"def stop_video(self):\n\n # Enabling all the buttons, the speedCombo and the checkbox\n self.enable_btns()\n\n if self.mediaPlayer.state() == QMediaPlayer.PlayingState or self.mediaPlayer.state() == QMediaPlayer.PausedState:\n self.mediaPlayer.stop()\n else:\n pass",
"def stop(self):\n self.set_state_null()\n self.player = None",
"def toggle_play(self):\n\t\tif self._playing:\n\t\t\tself.stop()\n\t\t\tself._playing = False\n\t\telse:\n\t\t\tself.play()\n\t\t\tself._playing = True",
"def stop_video(self):\n if self.now_playing_videoid:\n # remove the current video id from the record\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n print(f\"Stopping video: {video_playing.title}\")\n self.now_playing_videoid = ''\n self.pause = False\n else: \n print(f\"Cannot stop video: No video is currently playing\")\n\n # print(\"stop_video needs implementation\")",
"def stop_video(self):\n if self._current_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n return\n print(f\"Stopping video: {self._current_video.title}\")\n self._current_video = None\n self._paused = False",
"async def stop(self, ctx):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n voice.stop()\n await ctx.send(\"Playing stopped.\") \n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")",
"def pause_song(self):\r\n if self.isPlaying:\r\n self.playSong[0].pause()\r\n print(\"Song paused. To continue type Play.\")\r\n else:\r\n print(\"Play a song first...\")",
"def stop(self,event=None):\r\n # If no video data\r\n if self.isEmpty():\r\n return\r\n if self.hasAudio:\r\n mixer.music.stop()\r\n self.state = VideoPlayer.State.STOPPED\r\n self.progress = 0\r\n self.startTimestamp = time.time()",
"def stop_state(self):\n\n QMetaObject.invokeMethod(self.video_player, \"stop\", Qt.QueuedConnection)\n self.video_playing = False",
"async def stop(self, ctx: commands.Context) -> None:\n player = self.get_player(ctx.guild)\n\n if not player.queue.empty:\n await player.stop()\n player.queue.clear_queue()\n embed = Embeds.status(\n success=True, desc=\"Playback stopped and queue cleared.\"\n )\n await ctx.send(embed=embed)",
"async def stop(self):\n _LOGGER.debug(f\"[Foobar2k] In Stop. Current state is [{self._state}]\")\n if (self._power == POWER_ON and self._state == STATE_PLAYING):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_STOP, data=None)\n self._state = STATE_STOPPED\n _LOGGER.debug(f\"[Foobar2k] State now is [{self._state}]\")",
"def stop_video(self):\n if self.is_playing:\n print(f\"Stopping video: {self.playing_now}\")\n self.is_playing = False\n else:\n print(\"Cannot stop video: No video is currently playing\")",
"def StopTrack(self):\n handler = self.get_command_object(\"StopTrack\")\n handler()",
"async def stop(self):\n await self._bot.lavalink.ws.send(op='stop', guildId=self.guild_id)\n self.current = None"
] | [
"0.7680431",
"0.76113164",
"0.759389",
"0.75338066",
"0.75338066",
"0.7464192",
"0.74512273",
"0.7417875",
"0.7278527",
"0.71976995",
"0.71916264",
"0.7135208",
"0.710452",
"0.69504833",
"0.6907551",
"0.68905944",
"0.6856153",
"0.6830112",
"0.68043816",
"0.6791049",
"0.67570907",
"0.6738051",
"0.6732128",
"0.6717595",
"0.6671323",
"0.66347104",
"0.6624268",
"0.6592487",
"0.65596366",
"0.6428721"
] | 0.81801236 | 0 |
Pause the current playing song. | def pause_song(self):
if self.isPlaying:
self.playSong[0].pause()
print("Song paused. To continue type Play.")
else:
print("Play a song first...") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pause(self):\n self.paused = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?",
"def pause(self):\n if not self.paused:\n pygame.mixer.music.pause()\n self.paused = True\n else:\n pygame.mixer.music.unpause()\n self.paused = False",
"async def _pause(self, ctx: commands.Context):\n try:\n if ctx.voice_state.voice.is_playing:\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')\n\n except AttributeError:\n await ctx.send(\"Can't pause. No song is being played!\")",
"def pause_play(self):\n\n if self.estado == gst.STATE_PAUSED \\\n or self.estado == gst.STATE_NULL \\\n or self.estado == gst.STATE_READY:\n self.__play()\n\n elif self.estado == gst.STATE_PLAYING:\n self.__pause()",
"def media_play_pause(self) -> None:\n if self.state == MediaPlayerState.PLAYING:\n self.media_pause()\n else:\n self.media_play()",
"def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()",
"def pause(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackPause())",
"def media_play_pause(self):\n if self._state == STATE_PLAYING:\n self._state = STATE_PAUSED\n else:\n self._state = STATE_PLAYING",
"async def pause(self, ctx):\n state = self.get_voice_state(ctx.message.server)\n if state.is_playing():\n player = state.player\n player.pause()\n await self.bot.say('Paused.')",
"def pause(self,event=None):\r\n # If pause -> pause or stop -> pause, ignore, or if no video\r\n if not self.isPlaying():\r\n return\r\n # If play -> pause\r\n self.progress = time.time() - self.startTimestamp\r\n if self.hasAudio:\r\n mixer.music.pause()\r\n self.state = VideoPlayer.State.PAUSED",
"def _control_pause(self):\n self.player.pause()",
"async def _pause(self, ctx: commands.Context):\n\n if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')",
"async def _pause(self, ctx: commands.Context):\n\n if not ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')",
"def pause(self):\n self.paused_time = time.time()\n self.paused = True",
"def media_pause(self) -> None:\n self._attr_state = MediaPlayerState.PAUSED\n self._client.pause()",
"def media_pause(self):\n self._state = STATE_PAUSED",
"def media_pause(self):\n self._state = STATE_PAUSED",
"async def async_media_pause(self):\n if not self._slave_mode:\n if self._playing_stream and not (self._playing_mediabrowser or self._playing_mass):\n # Pausing a live stream will cause a buffer overrun in hardware. Stop is the correct procedure in this case.\n # If the stream is configured as an input source, when pressing Play after this, it will be started again (using self._prev_source).\n await self.async_media_stop()\n return\n\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:pause\", None)\n if value == \"OK\":\n self._position_updated_at = utcnow()\n self._idletime_updated_at = self._position_updated_at\n if self._playing_spotify:\n self._spotify_paused_at = utcnow()\n self._state = STATE_PAUSED\n if self._slave_list is not None:\n for slave in self._slave_list:\n await slave.async_set_state(self._state)\n await slave.async_set_position_updated_at(self.media_position_updated_at)\n# self.async_schedule_update_ha_state(True)\n else:\n _LOGGER.warning(\"Failed to pause playback. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self._master.async_media_pause()",
"async def pause(self, ctx):\n if not await self.control_checks(ctx):\n return\n server_id = ctx.message.server.id\n srv = self.get_server_dict(server_id)\n if self.is_playing(server_id):\n srv['player'].pause()\n else:\n srv['player'].resume()",
"def play_pause(self):\n return self._call_player_proxy('PlayPause', None)",
"def __pause(self):\n\n self.set_state(gst.STATE_PAUSED)",
"def pause_play(self):\n\n try:\n if self.entrada:\n if self.estado == \"playing\": # pausa\n self.__pause()\n\n elif self.estado == \"paused\":\n self.__pause(True)\n self.estado = \"playing\"\n self.emit(\"estado\", \"playing\")\n\n else:\n #if self.uri: self.load(self.uri)\n pass\n\n except Exception, e:\n print \"HA OCURRIDO UN ERROR EN PAUSE_PLAY DEL REPRODUCTOR\", e",
"async def toggle_play_pause(self):\n _LOGGER.debug(\"[Foobar2k] In Play / Pause\")\n if (self._power == POWER_ON):\n if (self._state == STATE_STOPPED):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_PLAY_PLAYLIST.format(self._current_playlist_id, self._current_index), data=None)\n else: \n await self.prep_fetch(HTTP_POST, POST_PLAYER_PAUSE_TOGGLE, data=None)",
"async def pause(self, msg):\n if msg.author.voice is not None and msg.voice_client is not None:\n if msg.voice_client.is_paused() is True:\n return await msg.send(\"Song is already paused\")\n\n if msg.voice_client.is_paused() is False:\n msg.voice_client.pause()\n await msg.message.add_reaction(emoji='✅')",
"def pause(self):\n self.block.mobile = not self.block.mobile\n if not self.paused:\n self.paused = True\n # Also print paused message\n self.screen.print(\"PAUSED\")\n else:\n self.paused = False\n self.screen.print(\"\")\n # Also reset tick time\n self.t = time.time()",
"async def play_pause(self) -> None:\n return await self.relay(\"play_pause\")()",
"async def pause(self, ctx):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n voice.pause()\n await ctx.send(\"Playing paused.\")\n else:\n await ctx.message.add_reaction('\\U0001F615');\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")",
"def _toggle_paused(self, paused=None):\n #automatically start the first wave\n if self._wave == 0:\n self.next_wave()\n\n if paused is None:\n paused = not self._paused\n\n #Task 1.5 (Play Controls): Reconfigure the pause button here\n \n if paused:\n self.pause()\n self._play_button_text.set(\"play\")\n else:\n self.start()\n self._play_button_text.set(\"pause\")\n\n self._paused = paused",
"async def set_paused(self, value: bool):\n await self._pytheos.api.player.set_play_state(self.id, models.player.PlayState.Paused if value else models.player.PlayState.Playing)",
"def pause(self):\n self.sendCommand(\"pause\")"
] | [
"0.7713319",
"0.74910986",
"0.74175787",
"0.7405902",
"0.7326798",
"0.72035414",
"0.71642816",
"0.71472704",
"0.7133736",
"0.7114518",
"0.70905143",
"0.7087032",
"0.7062801",
"0.70473045",
"0.70375997",
"0.70167387",
"0.70167387",
"0.6942558",
"0.6881331",
"0.6881242",
"0.68398225",
"0.68370444",
"0.6831393",
"0.68272746",
"0.6692693",
"0.6680627",
"0.66648364",
"0.658427",
"0.65313506",
"0.6499516"
] | 0.82099915 | 0 |
Add song to the storage directory and to the database. Return ID of the new song / error message. | def add_song(self):
path = input("Give file path:\t") # Request file path
path = path.replace('\\', '/')
if self.path_song_re.match(path) and not self.path_storage_re.match(
path): # Check that the path leads to a song that is not already found in Storage
copy(path, self.p_storage) # Copy the song to the storage directory
file_title, form = path.split("/")[-1].split(".") # Save file title and format from the path
sql = "SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s" # Check the existence of a song
# with the same title and format in the database
self.cursor.execute(sql, (file_title, form))
r = self.cursor.fetchall()
if r[0][0] != 0:
return "A song with this file name and format already exists!"
song_title = input("Song title:\t")
artist = input("Artist:\t")
data = input("Release date:\t")
tags = input("Associated tags:\t")
sql = "INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, " \
"%s) " # Insert song into database
columns = (file_title, song_title, artist, form, data, tags)
self.cursor.execute(sql, columns)
self.cnx.commit()
self.cursor.execute(
"SELECT MAX(ID) FROM songs")
result = self.cursor.fetchall()
return "New song ID: " + str(result[0][0])
else:
return "Give valid path" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_song():\n options = queue.instantiate_options()\n raw_queue = queue.instantiate_queue()\n track_id = request.args.get('song')\n\n for song in raw_queue:\n if song['track_id'] == track_id[14:]:\n return json.dumps({'error': 'Cannot add a song already in the queue'})\n\n num_songs_added = 0\n for song in raw_queue:\n if song['added_by'] == session['id']:\n num_songs_added += 1\n\n if num_songs_added >= int(options['max_individual_songs']):\n print('user reached max songs')\n return json.dumps({'error': \"You are not allowed to add any more songs until one plays\"})\n\n song_obj = create_song(track_id, added_by=session['id'])\n queue.addSong(song_obj)\n queue_change()\n return json.dumps({'success': 'added ' + track_id})",
"def insert_song(self, song_name: str, title: str, artist: str, file_hash: str, total_hashes: int) -> int:\n id = random.randint(1, 1000000000000)\n song = Songs(meta={'id': id}, song_name=song_name, song_title=title, artist=artist, file_sha1=file_hash, total_hashes=total_hashes)\n song.save()\n return id",
"def add_song(_name_of_the_song, _duration_in_number_of_seconds):\r\n # creating an instance of our Song constructor\r\n new_song = Song(name_of_the_song=_name_of_the_song,\r\n duration_in_number_of_seconds=_duration_in_number_of_seconds)\r\n db.session.add(new_song) # add new song to database session\r\n db.session.commit() # commit changes to session\r",
"def add_song(self, song):\n self.songs.append(song)",
"def new_song():\n song_id = int(request.args['song_id'])\n track_info = shiva.get_tracks([song_id])[song_id]\n vlc.add_song(track_info['path'])\n return 'ok'",
"def add_song_to_database(artist, name, db):\n if exists(db):\n f = open(db, 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, name);\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print str(current_entry) + \" already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open(db, 'w')\n song_list = [Song_data(artist, name)]\n f.seek(0,0)\n pickle.dump(song_list, f)",
"def add_song(self, song: Song):\n self.playlist.append(song)",
"def newsong(self, filename):\n datas = None\n try:\n unicode(filename)\n except UnicodeDecodeError:\n return\n\n cur = self.conn.cursor()\n\n try:\n datas = mutagen.File(filename, easy=True)\n except:\n query = \"\"\"INSERT INTO caro_logs (filename, message, date_import) VALUES (%s, 'ERROR 01', now());\"\"\"\n cur.execute(query, (filename,))\n\n\n if datas is not None:\n artist = None\n album = None\n title = None\n genre = None\n\n try:\n artist = datas['artist'][0]\n album = datas['album'][0]\n title = datas['title'][0]\n genre = datas['genre'][0]\n except KeyError as e:\n msg = str(sys.exc_type), \":\", \"%s is not in the list.\" % sys.exc_value\n\n query = \"\"\"INSERT INTO caro_logs (filename, message, date_import) VALUES (%s, %s, now());\"\"\"\n cur.execute(query, (filename, msg, ))\n\n if artist and album and genre and title:\n fsig = hashfile(filename)\n chk = self.checkfile(fsig)\n if chk == 0:\n self.insertfile([filename, artist, album, title, genre, fsig])\n else:\n self.update_path(filename, fsig)\n else:\n print \"Missing tag\"\n self.conn.commit()",
"def add_song(name, duration):\n song = Song(\n name=name,\n duration=duration,\n )\n db.session.add(song)\n db.session.commit()\n\n return song",
"def add_song(self, song, position=None):\n\n # Use find_object to see if the song exist already.\n song_found = find_object(song, self.tracks) # look for song.tracks to see if it exist in the list\n if song_found is None: # if song is not found\n song_found = Song(song, self.artist) # We create new song using \"Song\" function and assign it to song_found\n if position is None: # If there are no songs in this track\n self.tracks.append(song_found) # Add this song_found in the first position\n else: # else if there are already some songs in the track\n self.tracks.insert(position, song_found) # inserts the position and song in self.tracks list",
"def add_song(self, song, position=None):\n\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)",
"def insert_song(self, song_name: str, file_hash: str, total_hashes: int) -> int:\n try:\n record = {FIELD_SONGNAME:song_name,FIELD_FILE_SHA1:file_hash,FIELD_TOTAL_HASHES:total_hashes,FIELD_FINGERPRINTED:False}\n outcome = self.cursor.index(index=SONGS_INDEXNAME, body=record)\n except Exception as ex:\n print('Error indexing data')\n print(str(ex))\n return outcome['_id']",
"def add_song(self, song: Song) -> None:\n\n self.songs.append(song)\n self.set_song_count(len(self.songs))",
"def add_song(self):\n # Error check for blank inputs\n if \"\" in (self.root.ids.input_title.text, self.root.ids.input_artist.text, self.root.ids.input_year.text):\n self.root.ids.status_text.text = \"All fields must be completed\"\n return\n # Error check for negative numbers\n try:\n if int(self.root.ids.input_year.text) < 0:\n self.root.ids.status_text.text = \"Year must be >= 0\"\n return\n # Error check for invalid numbers\n except ValueError:\n self.root.ids.status_text.text = \"Please enter a valid number\"\n return\n # Song add, clear inputs, sort songlist\n song_to_add = Song(self.root.ids.input_title.text, self.root.ids.input_artist.text,\n int(self.root.ids.input_year.text))\n self.songs.add_song(song_to_add)\n SongsToLearnApp.clear_inputs(self)\n self.sort_songs(self.root.ids.sort_options.text)",
"def add_song(self, song, position=None):\n if position:\n self.tracks.insert(position, song)\n else:\n self.tracks.append(song)",
"def add_song(self, song, position=None):\n if position is None:\n self.tracks.append(song)\n else:\n self.tracks.insert(position, song)",
"def add_music(request, music_id: int) -> HttpResponse:\n music_item = get_object_or_404(Music, id=music_id)\n\n if music_item in request.user.profile.playlist.all():\n return HttpResponse('Success')\n\n playpos = PlayPosition(\n position=music_item,\n plist=request.user.profile\n )\n playpos.add_order()\n playpos.save()\n\n return HttpResponse('Success')",
"def import_song(self, song, playlist):\n\n try:\n song_uri = self.find_song_uri(song)\n except SongNotFoundError as e:\n print(f\"could not find song {song} to add to playlist '{playlist['name']}'\")\n else:\n self.add_song_to_playlist(song_uri, playlist[\"id\"])",
"def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)",
"def associate_song(self, song):\n self.songs.append(song)",
"def add_new_song(self):\n return \"New Song Added\"",
"def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)",
"def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)",
"def __add_song(self, song, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(song['id']),\n\t\t\t'title' : song['title'],\n\t\t\t'primary_artist' : {\n\t\t\t\t'id' : song['primary_artist']['id'],\n\t\t\t\t'name' : str(song['primary_artist']['name']).lower(),\n\t\t\t\t'url' : song['primary_artist']['url'],\n\t\t\t\t'is_verified' : song['primary_artist']['is_verified'],\n\t\t\t\t},\n\t\t\t'url' : song['url'],\n\t\t\t'lyrics' : genius_api.get_lyrics(song['id'], song['url'])\n\t\t\t}\n\t\tif song['album']:\n\t\t\tentry['album'] = {\n\t\t\t\t'id': song['album']['id'], \n\t\t\t\t'full_title': song['album']['full_title'], \n\t\t\t\t'name': song['album']['name'], \n\t\t\t\t'artist': song['album']['artist']['id']\n\t\t\t\t}\n\t\tif song['release_date']:\n\t\t\tentry['release_date'] = song['release_date']\n\t\tif len(song['featured_artists']) > 0:\n\t\t\tfeatured_artists = list()\n\t\t\tfor artist in song['featured_artists']:\n\t\t\t\tart = {\n\t\t\t\t\t'id' : artist['id'],\n\t\t\t\t\t'name' : artist['name'].lower()\n\t\t\t\t\t}\n\t\t\t\tfeatured_artists.append(art)\n\t\t\tentry['featured_artists'] = featured_artists\n\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.songs.insert_one(entry)",
"def add_entry():\n username = util.remove_commas_from_string(request.form[\"name\"])\n link = util.remove_commas_from_string(request.form[\"ytLink\"])\n song = util.remove_commas_from_string(request.form[\"songName\"])\n\n festive = CHRISTMAS_MODE and \"christmasSong\" in request.form\n\n with database.connect_to_database() as db:\n user_id = database.get_userid(db, username)\n database.add_song(db, link, song, user_id, month=12 if festive else None)\n\n return redirect(url_for('main'))",
"def addSong(self, title, filename):\n #make sure that the filename is valid? or does this happen outside?\n self.__songDictionary[title]=filename\n return True",
"def add_album(self, artist='', album='', totaltracks=0, totalseconds=0,\n altype='album', commit=True):\n self.app.curs.execute(\"\"\"insert into album(\n alartist, alalbum, totaltracks, totalseconds, altype\n ) values ( %s, %s, %s, %s, %s)\"\"\", (\n artist, album, totaltracks, totalseconds, altype\n ))\n if commit:\n self.app.db.commit()\n return self.app.curs.lastrowid",
"def add_song(self):\n settings = dict(initialdir=pathlib.Path().absolute(), title=\"Choose songs\", filetypes=(\n (\"flac files\", \"*.flac\"),\n (\"mp3 files\", \"*.mp3\"),\n (\"all files\", \"*\")))\n\n song = filedialog.askopenfilename(**settings)\n\n self.update_playlist(song)\n self.listbox.insert(\"end\", self.song_list[-1]['name'])",
"async def add(self, ctx, url_string : str):\n logger.info(\"add command issued by {0} with {1}\".format(ctx.message.author.name, url_string))\n if self.spotify_device is None:\n await ctx.send(\"No device playing\")\n elif ctx.voice_client is None:\n await ctx.send(\"No voice to skip\")\n else:\n try:\n url_parsed = urllib.parse.urlparse(url_string)\n except:\n await ctx.send(\"invalid spotify url\")\n return\n url_split = url_parsed.path\n url_split, url_id = os.path.split(url_split)\n url_split, url_type = os.path.split(url_split)\n logger.info(\"type is {0} and id is {1}\".format(url_type, url_id))\n if url_type == 'track':\n self.song_list.append(url_id)\n await ctx.send(\"Added song\")\n else:\n await ctx.send(\"Only single tracks for now\")",
"def addSong(self, song):\n queue = self.instantiate_queue()\n history = self.instantiate_history()\n options = self.instantiate_options()\n\n queue = [song for song in queue if song['explicit']]\n queue.append(song.to_dict())\n\n if len(queue) < 5:\n self.addImplicit(queue, history, fallback_song=song.to_dict())\n \n queue = self.sortSongs(queue)\n self.cache.set('queue', queue)"
] | [
"0.72021145",
"0.7178171",
"0.68124354",
"0.67616093",
"0.6730884",
"0.661579",
"0.6544573",
"0.6535995",
"0.65308034",
"0.6529165",
"0.6515031",
"0.64952904",
"0.64827377",
"0.6465418",
"0.6454924",
"0.64515936",
"0.6450223",
"0.64397067",
"0.64342123",
"0.63851374",
"0.63463825",
"0.6323187",
"0.6295637",
"0.6265829",
"0.6242704",
"0.6231726",
"0.6189661",
"0.6178191",
"0.6153588",
"0.6153416"
] | 0.8322641 | 0 |
Remove song from database and from the storage directory based on ID | def delete_song(self):
song_id = tuple(input("Give the melody id to be deleted:\t"))
sql = "SELECT file_title, form FROM songs WHERE id = %s" # Check existence of song with given ID
self.cursor.execute(sql, song_id)
result = self.cursor.fetchall()
if len(result) > 0:
path = self.p_storage + "/" + result[0][0] + "." + result[0][
1] # Find path of song by appending the name and format to the storage directory path
os.remove(path) # Remove song from directory
sql = "DELETE FROM songs WHERE id = %s" # Delete song from database
self.cursor.execute(sql, song_id)
self.cnx.commit()
print(self.cursor.rowcount, "record(s) deleted")
else:
print("Give a valid id...") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_song(_id):\r\n Song.query.filter_by(id=_id).delete()\r\n # filter song by id and delete\r\n db.session.commit() # commiting the new change to our database\r",
"def delete_music():\n track_id = request.vars.track_id\n if track_id is None:\n raise HTTP(500)\n db(db.track_data.track_id == track_id).delete()\n return \"ok\"",
"def delete_song(song):\n logging.debug('{CRUD_operations} BEGIN function delete_song()')\n logging.debug('{CRUD_operations} Data received: song: %s', song)\n song.is_deleted = True\n logging.debug('{CRUD_operations} END function delete_song()')",
"def markfile(self, song_id):\n cur = self.conn.cursor()\n query = \"\"\"UPDATE caro_song SET score = -1000 WHERE id=%s\"\"\"\n cur.execute(query, (song_id, ))\n\n self.memcache.delete(\":1:song_%d\" % song_id)\n\n query = \"\"\"DELETE FROM caro_playlistentry WHERE song_id=%s\"\"\"\n cur.execute(query, (song_id, ))",
"def delete_song(id):\n # check if the song exists, if not return a 404 with a helpful message\n song = session.query(models.Song).get(id)\n if not song:\n message = \"Could not find song with id {}\".format(id)\n data = json.dumps({\"message\": message})\n return Response(data, 404, mimetype=\"application/json\")\n \n session.delete(song)\n session.commit\n \n message = \"deleted song {}\".format(id)\n data = json.dumps({\"message\": message})\n return Response(data, 404, mimetype=\"application/json\")",
"def remove(request, music_id: int) -> HttpResponseRedirect:\n music_item = get_object_or_404(Music, id=music_id)\n request.user.profile.playlist.remove(music_item)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))",
"def disassociate_song(self, song):\n self.songs.remove(song)",
"def Delete(self):\n\n self.db.ExecuteSql('delete from tracks where id=%d;'\n % self.persistant['id'])\n self.db.ExecuteSql('commit;')",
"def delete(self, cls, id):\n\n del FileStorage.__objects[key(cls, id)]",
"def remove_song(self, song):\n # code omitted\n self.playlist.remove(song)",
"def DeletePlaylist(self):\n os.remove(self.path)",
"def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")",
"def remove(id):\n upload = Upload.query.get_or_404(id)\n\n if upload.publisher == session['username']:\n db.session.delete(upload)\n db.session.commit()\n else:\n return 'you do not have right perms'\n\n return redirect(url_for('dashboard'))",
"def delete(self, show_id):\r\n song = Shows.query.filter_by(ShowID=show_id).first_or_404()\r\n db.session.delete(song)\r\n db.session.commit()\r\n return make_response(\"\", 204)",
"def remove_song(self):\n self.stop()\n self.listbox.delete(\"anchor\")\n pygame.mixer.music.stop()",
"def delete_leftovers(self):\n for each_file, artist in self.past_songs_db_data:\n if os.path.isfile(each_file): \n os.remove(each_file)\n print \"Deleted \" + each_file\n\n for each_file in os.listdir(\".\"):\n if each_file.endswith(\".jpg\"):\n os.remove(each_file)",
"async def clear_data(self, msg):\n name = self.player[msg.guild.id]['name']\n os.remove(name)\n self.player['audio_files'].remove(name)",
"def current_remove(self):\n storage.close()",
"def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')",
"def delete_video(self, video_ID): # WORKS\n try:\n self.cur.execute(\"DELETE FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n self.db.commit()\n os.remove('static/videos/' + str(video_ID) + '.mp4')\n os.remove('static/images/' + str(video_ID) + '.jpg')\n except:\n self.db.rollback()",
"def delete():",
"def delete(self, request, pk=None):\n song = get_object_or_404(Song, pk=pk)\n\n self.check_object_permissions(request, song.creator)\n\n song.delete()\n return Response({}, status.HTTP_204_NO_CONTENT)",
"def remove(self, path):\n path = path.decode('utf8')\n cursor = self._dbcon.cursor()\n filename = os.path.basename(path)\n dirname = os.path.dirname(path)\n t = (dirname, filename)\n sql = u\"delete from books where path = ? and filename = ?\"\n cursor.execute(sql, t)\n self._dbcon.commit()\n cursor.close()",
"def delete_audiobook(_id):\r\n Audiobook.query.filter_by(id=_id).delete()\r\n # filter audio book by id and delete\r\n db.session.commit() # commiting the new change to our database\r",
"def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)",
"def remove(table, id_):\n\n # your code\n\n common.toremoveid(\"store/games.csv\",data_manager.get_table_from_file(\"store/games.csv\"),id_)",
"def remove():\n\n db_remove()",
"def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)",
"def delete(self):\r\n delete_tracks(self.project, [self])",
"def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]"
] | [
"0.724472",
"0.68608385",
"0.68424237",
"0.6805456",
"0.66874766",
"0.6629392",
"0.65805244",
"0.65743804",
"0.6480073",
"0.6455035",
"0.64183515",
"0.6390148",
"0.63755953",
"0.6350693",
"0.63068855",
"0.6271331",
"0.62588185",
"0.6248313",
"0.62059045",
"0.6190672",
"0.61641735",
"0.6159572",
"0.6143351",
"0.6108577",
"0.60998833",
"0.609651",
"0.6076376",
"0.6048415",
"0.60313904",
"0.60286045"
] | 0.76542425 | 0 |
Modifies song info in the database | def modify_data(self):
song_id = tuple(input("Give the id of the song to be modified:\t")) # Request song ID
sql = "SELECT song_title, artist, data, tag FROM songs WHERE id = %s" # Find song with given ID
self.cursor.execute(sql, song_id)
res = self.cursor.fetchall()
if len(res) > 0:
while True:
sql = "SELECT song_title, artist, data, tag FROM songs WHERE id = %s" # Save current info
self.cursor.execute(sql, song_id)
result = self.cursor.fetchall()
modify = input(
"What do you want to modify? [title/artist/(release )date/tags/none]\t") # Request data to be
# modified
if modify == 'title': # Modify title
print('Current title is ' + result[0][0])
new = (input('Give new title:\t'), song_id[0])
sql = "UPDATE songs SET song_title = %s WHERE id = %s"
self.cursor.execute(sql, new)
self.cnx.commit()
print("New title assigned")
if modify == 'artist': # Modify artist
print('Current artist is ' + result[0][1])
new = (input('Give new artist:\t'), song_id[0])
sql = "UPDATE songs SET artist = %s WHERE id = %s"
self.cursor.execute(sql, new)
self.cnx.commit()
print("New artist assigned")
if modify == 'date': # Modify release date
print('Current date is ' + result[0][2])
new = (input('Give new date:\t'), song_id[0])
sql = "UPDATE songs SET data = %s WHERE id = %s"
self.cursor.execute(sql, new)
self.cnx.commit()
print("New date assigned")
if modify == 'tags': # Modify tags
print('Current tags are ' + result[0][3])
new = (input('Give new tags:\t'), song_id[0])
sql = "UPDATE songs SET tag = %s WHERE id = %s"
self.cursor.execute(sql, new)
self.cnx.commit()
print("New tags assigned")
if modify == 'none': # Do not modify anything, print the current song info
sql = "SELECT song_title, artist, data, tag FROM songs WHERE id = %s"
self.cursor.execute(sql, song_id)
result = self.cursor.fetchall()
print(
"Current data for the song with id" + song_id[0] + "are:\ntitle:" + result[0][0] + "\nartist:" +
result[0][1] + "\nrelease date:" + result[0][2] + "\ntags:" + result[0][3])
break
else:
print("Give a valid id...") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)",
"def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"",
"def updatesong(song, fpath):\n song.filename = fpath\n song.save()\n return \"[U] %s\\n\" % song.title",
"def update_song(_id, _name_of_the_song, _duration_in_number_of_seconds):\r\n song_to_update = Song.query.filter_by(id=_id).first()\r\n song_to_update.name_of_the_song = _name_of_the_song\r\n song_to_update.duration_in_number_of_seconds = _duration_in_number_of_seconds\r\n db.session.commit()",
"def update_title_song(title_song, mess_chat_id):\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"UPDATE song_data SET title_song = %s \"\n \"WHERE user_id = %s;\", (title_song, mess_chat_id)\n )\n\n connection.commit()",
"def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)",
"def update():\n\tglobal songList\n\tglobal songs\n\tsongList=os.listdir(\"./music/\")\n\tsongs=['```']\n\tfor song in songList:\n\t\tif len(songs[-1])>1800:\n\t\t\tsongs[-1]+='```'\n\t\t\tsongs.append('```')\n\t\tif '.mp3' in song:\n\t\t\tsongs[-1]+=song.replace('.mp3','')\n\t\t\tsongs[-1]+='\\n'\n\tsongs[-1]+='```'",
"def markfile(self, song_id):\n cur = self.conn.cursor()\n query = \"\"\"UPDATE caro_song SET score = -1000 WHERE id=%s\"\"\"\n cur.execute(query, (song_id, ))\n\n self.memcache.delete(\":1:song_%d\" % song_id)\n\n query = \"\"\"DELETE FROM caro_playlistentry WHERE song_id=%s\"\"\"\n cur.execute(query, (song_id, ))",
"def enterSong(song):\n\tc, conn = connect()\n\tsql = []\n\n\t# checks if the song is already in the database by hash\n\tif checkHash(song):\n\t\tsql2 = appendSong(song)\n\t\tsql += sql2\n\t\t\n\t\t# checks if the song has an artist\n\t\tif song.artist:\n\t\t\tsql2 = appendArtist(song)\n\t\t\tsql += sql2\n\t\n\t\t# checks if the song has an album\n\t\tif song.album:\n\t\t\tsql2 = appendAlbum(song)\n\t\t\tsql += sql2\n\t\n\t# execute all the queries\n\tfor query in sql:\n\t\tc.execute(query)\n\t\t\n\tconn.commit()\n\treturn sql",
"def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()",
"def songUpdate(song,cindex):\r\n if cindex == 0:\r\n song[MpMusic.SONGINDEX] = songGetAlbumIndex(song);\r\n return 0;",
"def __insert_song_data(cur, df):\n song_data = (\n df.song_id.values[0],\n df.title.values[0],\n df.artist_id.values[0],\n (df.year.values[0]).item(),\n (df.duration.values[0]).item()\n )\n cur.execute(song_table_insert, song_data)",
"def add_song_to_database(artist, name, db):\n if exists(db):\n f = open(db, 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, name);\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print str(current_entry) + \" already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open(db, 'w')\n song_list = [Song_data(artist, name)]\n f.seek(0,0)\n pickle.dump(song_list, f)",
"def associate_song(self, song):\n self.songs.append(song)",
"def add_song(self):\r\n path = input(\"Give file path:\\t\") # Request file path\r\n path = path.replace('\\\\', '/')\r\n if self.path_song_re.match(path) and not self.path_storage_re.match(\r\n path): # Check that the path leads to a song that is not already found in Storage\r\n copy(path, self.p_storage) # Copy the song to the storage directory\r\n file_title, form = path.split(\"/\")[-1].split(\".\") # Save file title and format from the path\r\n sql = \"SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s\" # Check the existence of a song\r\n # with the same title and format in the database\r\n self.cursor.execute(sql, (file_title, form))\r\n r = self.cursor.fetchall()\r\n if r[0][0] != 0:\r\n return \"A song with this file name and format already exists!\"\r\n song_title = input(\"Song title:\\t\")\r\n artist = input(\"Artist:\\t\")\r\n data = input(\"Release date:\\t\")\r\n tags = input(\"Associated tags:\\t\")\r\n sql = \"INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, \" \\\r\n \"%s) \" # Insert song into database\r\n columns = (file_title, song_title, artist, form, data, tags)\r\n self.cursor.execute(sql, columns)\r\n self.cnx.commit()\r\n self.cursor.execute(\r\n \"SELECT MAX(ID) FROM songs\")\r\n result = self.cursor.fetchall()\r\n return \"New song ID: \" + str(result[0][0])\r\n else:\r\n return \"Give valid path\"",
"def appendSong(song):\n\tsql = []\n\tsql.append(\"INSERT INTO SONGS (filename, path, hash, length, track, \"\n\t\t+ \"genre, date, title, base) VALUES ('\" + song.filename + \"', '\" + song.path \n\t\t+ \"', '\" + str(song.hash) + \"', '\" + str(song.length) + \"', '\" \n\t\t+ '/'.join(song.track) + \"', '\" + '/'.join(song.genre) \n\t\t+ \"', '\" + str(song.year) + \"', '\" + '/'.join(song.title) + \"', '\"\n\t\t+ song.base + \"');\")\n\treturn sql",
"def __add_song(self, song, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(song['id']),\n\t\t\t'title' : song['title'],\n\t\t\t'primary_artist' : {\n\t\t\t\t'id' : song['primary_artist']['id'],\n\t\t\t\t'name' : str(song['primary_artist']['name']).lower(),\n\t\t\t\t'url' : song['primary_artist']['url'],\n\t\t\t\t'is_verified' : song['primary_artist']['is_verified'],\n\t\t\t\t},\n\t\t\t'url' : song['url'],\n\t\t\t'lyrics' : genius_api.get_lyrics(song['id'], song['url'])\n\t\t\t}\n\t\tif song['album']:\n\t\t\tentry['album'] = {\n\t\t\t\t'id': song['album']['id'], \n\t\t\t\t'full_title': song['album']['full_title'], \n\t\t\t\t'name': song['album']['name'], \n\t\t\t\t'artist': song['album']['artist']['id']\n\t\t\t\t}\n\t\tif song['release_date']:\n\t\t\tentry['release_date'] = song['release_date']\n\t\tif len(song['featured_artists']) > 0:\n\t\t\tfeatured_artists = list()\n\t\t\tfor artist in song['featured_artists']:\n\t\t\t\tart = {\n\t\t\t\t\t'id' : artist['id'],\n\t\t\t\t\t'name' : artist['name'].lower()\n\t\t\t\t\t}\n\t\t\t\tfeatured_artists.append(art)\n\t\t\tentry['featured_artists'] = featured_artists\n\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.songs.insert_one(entry)",
"def set_artist_song_entry(self, artist, song):\n self.artist_name.set_text(artist)\n self.song_name.set_text(song)",
"def process_song_file(cur, filepath):\n # open song file\n df = pd.read_json(filepath,lines=True)\n\n # insert song record\n __insert_song_data(cur, df)\n \n # insert artist record\n __insert_artist_data(cur, df)",
"def test_update_song():\n\n target_song = {\n \"artist\": \"Heng\",\n \"song\": \"I can do all things\",\n \"genre\": \"Hip-Hop\",\n \"lyrics\": \"Like Steph said I can do all things In...\",\n \"year\": get_timestamp_year(),\n \"timestamp\": get_timestamp()\n }\n\n update_song(target_song)\n\n updated_song_lyrics = \"Like Steph said I can do all things In...\"\n\n given_artist = \"Heng\"\n given_song_title = \"I can do all things\"\n\n song_data = get_one_song(given_artist, given_song_title)\n\n print(\"updated_song_data['lyrics']: \", song_data['lyrics'])\n\n assert song_data['lyrics'] == updated_song_lyrics",
"def song_changed(self, song):\n if song == NOTPLAYING:\n print(\"Not playing\")\n else:\n print(\"Changed to: {} - {}\". format(song.get('artist', 'Unknown artist'), song.get('title', 'Unknown title')))\n self._publish({TAGS[tag]: value for (tag, value) in song.items() if tag in TAGS})",
"def process_song_file(cur, filepath):\n # open song file\n df = get_file_df(filepath)\n\n # insert song record\n song_data = songs_data = [df.loc[0].song_id, df.loc[0].title, df.loc[0].artist_id, int(df.loc[0].year), int(df.loc[0].duration)]\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = [df.loc[0].artist_id, df.loc[0].artist_name, df.loc[0].artist_location, df.loc[0].artist_latitude, df.loc[0].artist_longitude] \n\n cur.execute(artist_table_insert, artist_data)",
"def update_author_song(author_song, mess_chat_id):\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"UPDATE song_data SET author_song = %s \"\n \"WHERE user_id = %s;\", (author_song, mess_chat_id)\n )\n\n connection.commit()",
"def add_song(self, song):\n self.songs.append(song)",
"def set_artists(audio: EasyID3, artists):\r\n audio['artist'] = artists\r\n audio.save()",
"def process_song_file(cur, filepath):\r\n\r\n \"\"\" open song file\r\n drop duplicates\r\n set NAs to Zero \"\"\"\r\n df = pd.read_json(filepath, lines=True)\r\n df.drop_duplicates(subset=['song_id','artist_id'], keep = 'first')\r\n df['artist_latitude'] = df['artist_latitude'].fillna(0)\r\n df['artist_longitude'] = df['artist_longitude'].fillna(0)\r\n\r\n\r\n \"\"\" Extract columns for dataframe for song table\r\n drop duplicates before performing insert\r\n convert dataframe to a list for insert \"\"\"\r\n\r\n song_data = (df[['song_id','title','artist_id','year','duration']])\r\n song_data.drop_duplicates(subset='song_id',keep ='first',inplace = True)\r\n song_data = (song_data.values).tolist()\r\n song_data = song_data[0]\r\n # insert song record\r\n cur.execute(song_table_insert,song_data)\r\n\r\n \"\"\" Extract columns for dataframe for artist table,\r\n drop duplicates before performing insert\r\n convert dataframe to a list for insert \"\"\"\r\n\r\n artist_data = (df[['artist_id','artist_name','artist_location','artist_latitude','artist_longitude']])\r\n artist_data.drop_duplicates(subset='artist_id',keep ='first',inplace = True)\r\n artist_data = (artist_data.values).tolist()\r\n artist_data = artist_data[0]\r\n # insert artist record\r\n cur.execute(artist_table_insert, artist_data)",
"def process_song_file(cur, filepath):\n \n # open song file\n \n df = pd.read_json(filepath,lines=True)\n \n # insert song record\n song_data = df[['song_id', 'title', 'artist_id','year',\n 'duration']].values[0].tolist()\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = df[['artist_id','artist_name',\n 'artist_location', 'artist_latitude',\n 'artist_longitude']].values[0].tolist()\n cur.execute(artist_table_insert, artist_data)",
"def process_song_file(cur, filepath):\n # open song file\n data_frame = pd.read_json(filepath, lines=True)\n\n # insert song record\n song_data = list(data_frame[['song_id', 'title', 'artist_id', 'year', 'duration']].values[0])\n cur.execute(song_table_insert, song_data)\n\n # insert artist record\n artist_data = list(\n data_frame[['artist_id', 'artist_name', 'artist_location',\n 'artist_latitude', 'artist_longitude']].values[0])\n cur.execute(artist_table_insert, artist_data)",
"def set_nowplaying_metadata(self, track, album, artist):\n\n\t\tparts = [artist[:30], album[:30], track[:30]]\n\t\tself._send_message(\"MUSIC_CONTROL\", self._pack_message_data(16, parts))",
"def set_nowplaying_metadata(self, track, album, artist):\n\n\t\tparts = [artist[:30], album[:30], track[:30]]\n\t\tself._send_message(\"MUSIC_CONTROL\", self._pack_message_data(16, parts))"
] | [
"0.7413717",
"0.6995219",
"0.68380946",
"0.6823178",
"0.66802067",
"0.6608994",
"0.6446211",
"0.6343561",
"0.6318853",
"0.6218813",
"0.6192342",
"0.6175601",
"0.6141071",
"0.61293864",
"0.61132926",
"0.60554576",
"0.6035266",
"0.6034275",
"0.5926394",
"0.592424",
"0.591346",
"0.5883479",
"0.5878672",
"0.5841953",
"0.5828499",
"0.5824835",
"0.580802",
"0.5783843",
"0.57731354",
"0.57731354"
] | 0.70110184 | 1 |
Create a Batch from an existing batch id. Notes | def from_batch_id(batch_id: int, *args, **kwargs):
b = Batch(*args, **kwargs)
assert isinstance(b._backend, _backend.ServiceBackend)
b._batch_handle = b._backend._batch_client.get_batch(batch_id)
return b | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_batch(self, batch_name, priority = 0, comments = '', notifications = []):\n\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n batch = {\n \"batch_name\": batch_name,\n \"priority\": priority,\n \"comments\": comments,\n \"notifications\": [\n ]\n }\n data = json.dumps(batch)\n response = apiCall.post(self._get_token(), url,self._proxy,data, 30)\n logging.debug(response['id'])\n return response['id']",
"def create_batch(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"create_batch\"), kwargs)",
"def create_batch(self, batch_index, *args, **kwargs):\n batch = self.dataset.create_batch(batch_index, *args, **kwargs)\n batch_res = self._exec(batch)\n return batch_res",
"def batch_id(self, batch_id):\n\n self._batch_id = batch_id",
"def add_to_batch(self, created_job_ids):\n batch_folder = BatchFolder(path=self.current_dir)\n if batch_folder.has_batch():\n batch: JobBatch = batch_folder.load()\n else:\n batch = JobBatch(job_ids=[], server=self.get_active_server())\n if batch.server.url != self.get_active_server().url:\n logger.info(\n \"A batch exists in this folder, but for a different server. \"\n \"Not saving job ids in batch\"\n )\n else:\n logger.info(\"Saving job ids in batch in current folder\")\n batch.job_ids = sorted(\n list(set(batch.job_ids) | set(created_job_ids))\n ) # add only unique new ids\n batch_folder.save(batch)",
"def test_batch(self):\n batch = batch_test_utils.create_batch()\n self.job1.batch_id = batch.id\n self.job1.save()\n\n url = '/%s/jobs/?batch_id=%d' % (self.api, batch.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['id'], self.job1.id)",
"def get_batch(self, batch_id):\n #fmt = lambda x: join(self.path, self.simulation_paths[x])\n fmt = lambda x: self.simulation_paths[x]\n simulation_paths = [fmt(i) for i in self.batch_indices[batch_id]]\n return Batch(simulation_paths, root=self.path)",
"def sfdcCreateBatch(query, chunk_size=10000, **kwargs):\n api_ver = kwargs.get('api_ver', '')\n session_id = kwargs.get('session_id', '')\n instance = kwargs.get('instance', '')\n job_id = kwargs.get('job_id', '')\n sfdcXml = kwargs.get('sfdcXml', {})\n\n bodyXml = sfdcXml.get('batch', {}).get('body')\n url = sfdcXml.get('batch', {}).get('url')\n headers = sfdcXml.get('batch', {}).get('headers')\n\n bodyXml = unicode(query, \"UTF-8\")\n url = url.format(instance=instance, api_ver=api_ver,\\\n job_id=job_id)\n headers['Content-Type'] = headers.get('Content-Type', '')\\\n .format(chunk_size=chunk_size)\n headers['X-SFDC-Session'] = session_id\n\n resp = requests.post(url=url, headers=headers, data=bodyXml)\n dictResp = xmltodict.parse(resp.text)\n batch_id = str(dictResp['batchInfo']['id'])\n\n return batch_id",
"def nextBatch(self, batch_to_get_id=None):\n\n # batch id to get\n if batch_to_get_id is None:\n batch_to_get_id = self.current_batch_id\n\n # batch to get\n batch_to_get = self.batches[batch_to_get_id]\n\n # check if batch is available in memory / disk\n if batch_to_get.is_stored:\n # get batch data\n X_data, y_data = batch_to_get.getBatchData()\n # return X np array, label array\n return X_data, y_data\n\n # get data of current batch\n urls = list()\n\n for key in batch_to_get.ids:\n value = self.data_dict.data_dict[key]\n batch_to_get.batch_subjects[key] = value\n batch_to_get.y_data.append(value['label'])\n urls.append(value['path'])\n\n # get images using Image Loader class\n binary_images = self.imageLoader.getImages(urls)\n\n # convert images to array\n X_data = self._listOfImagesToNumpy(images=binary_images)\n y_data = np.array(batch_to_get.y_data)\n\n # decide where to store batch\n system_memory_usage_percent = psutil.virtual_memory()[2]\n if (system_memory_usage_percent < 90):\n save_to = \"memory\"\n elif self.disk_scratch is not None:\n save_to = \"disk\"\n elif self.disk_scratch is not None:\n save_to = \"disk_raw\"\n else:\n save_to = \"none\"\n\n # store batch\n batch_to_get.storeBatch(storage=save_to, X_data=X_data,\n y_data=y_data)\n\n # increment current batch\n if self.current_batch_id < (self.n_batches-1):\n self.current_batch_id += 1\n else:\n self.current_batch_id = 0\n\n # return X np array, label array\n return X_data, y_data",
"def post(self, batch):\n num_jobs = len(batch)\n plural = \"\" if num_jobs == 1 else \"s\"\n log.info(\"> Sending batch request with %s job%s\", num_jobs, plural)\n data = []\n for i, job in enumerate(batch):\n if job.finished:\n raise Finished(job)\n else:\n job.finished = True\n log.info(\"> {%s} %s\", i, job)\n data.append(dict(job, id=i))\n response = self.resource.post(data)\n log.info(\"< Received batch response for %s job%s\", num_jobs, plural)\n return response",
"def add_plant_batch(db_path: str, plant_batch: PlantBatch) -> None:\n plant, location, tray = parse_plant_location_tray_to_dict(plant_batch)\n\n query = f'INSERT INTO batches (Plant, Location, Tray, n_trays, planting_time) VALUES (\"{plant}\", \"{location}\", \"{tray}\", {plant_batch.n_tray}, \"{plant_batch.planting_time.isoformat()}\")'\n\n conn: Connection = sqlite3.connect(path.join(db_path, 'batches.db'))\n curr: Cursor = conn.cursor()\n try:\n curr.execute(query)\n except sqlite3.IntegrityError:\n raise ValueError(\"Error occured\")\n\n conn.commit()\n curr.close()\n conn.close()",
"def get_batch(self, name):\n batches = self._meta['sets'].get('batches', {})\n if batches.get(name):\n b = name\n elif batches.get(name):\n b = name\n else:\n raise KeyError('No Batch found named {}.'.format(name))\n return qp.Batch(self, b)",
"def submit_job(self, batch_id):\n\n job_name = self.bot_id + \"_\" + batch_id\n job_queue = self.jobQueueName\n job_definition = self.job_def\n command = self.bot_cmd\n\n kwargs = {'jobName': job_name,\n 'jobQueue': job_queue,\n 'jobDefinition': job_definition,\n 'containerOverrides': {'command': [command]}}\n print(\">>> Going to create job: \" + str(kwargs))\n submit_job_response = self.batch_client.submit_job(jobName=job_name,\n jobQueue=job_queue,\n jobDefinition=job_definition,\n # containerOverrides={'command': [command]}\n )\n\n print(\">>> submit job response is :\" + str(submit_job_response))\n job_id = submit_job_response['jobId']\n print('Submitted job [%s - %s] to the job queue [%s]' % (job_name, job_id, job_queue))",
"def add(\n self,\n batch: RolloutBatchProtocol,\n buffer_ids: Optional[Union[np.ndarray, list[int]]] = None,\n ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n # preprocess batch\n new_batch = Batch()\n for key in set(self._reserved_keys).intersection(batch.keys()):\n new_batch.__dict__[key] = batch[key]\n batch = new_batch\n batch.__dict__[\"done\"] = np.logical_or(batch.terminated, batch.truncated)\n assert {\"obs\", \"act\", \"rew\", \"terminated\", \"truncated\", \"done\"}.issubset(batch.keys())\n if self._save_only_last_obs:\n batch.obs = batch.obs[:, -1]\n if not self._save_obs_next:\n batch.pop(\"obs_next\", None)\n elif self._save_only_last_obs:\n batch.obs_next = batch.obs_next[:, -1]\n # get index\n if buffer_ids is None:\n buffer_ids = np.arange(self.buffer_num)\n ptrs, ep_lens, ep_rews, ep_idxs = [], [], [], []\n for batch_idx, buffer_id in enumerate(buffer_ids):\n ptr, ep_rew, ep_len, ep_idx = self.buffers[buffer_id]._add_index(\n batch.rew[batch_idx],\n batch.done[batch_idx],\n )\n ptrs.append(ptr + self._offset[buffer_id])\n ep_lens.append(ep_len)\n ep_rews.append(ep_rew)\n ep_idxs.append(ep_idx + self._offset[buffer_id])\n self.last_index[buffer_id] = ptr + self._offset[buffer_id]\n self._lengths[buffer_id] = len(self.buffers[buffer_id])\n ptrs = np.array(ptrs)\n try:\n self._meta[ptrs] = batch\n except ValueError:\n batch.rew = batch.rew.astype(float)\n batch.done = batch.done.astype(bool)\n batch.terminated = batch.terminated.astype(bool)\n batch.truncated = batch.truncated.astype(bool)\n if self._meta.is_empty():\n self._meta = create_value(batch, self.maxsize, stack=False) # type: ignore\n else: # dynamic key pops up in batch\n alloc_by_keys_diff(self._meta, batch, self.maxsize, False)\n self._set_batch_for_children()\n self._meta[ptrs] = batch\n return ptrs, np.array(ep_rews), np.array(ep_lens), np.array(ep_idxs)",
"def helper_create_batch_item(\n *,\n amount,\n bank_code,\n bank_account_name,\n bank_account_number,\n description,\n external_id,\n email_to=None,\n email_cc=None,\n email_bcc=None,\n **kwargs,\n ):\n params = locals()\n del params[\"kwargs\"]\n\n return BatchDisbursementItem.Query(**params)",
"def test_adding_a_batch(created_job, bulk_request):\n bulk_request.reset_mock()\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>BATCHONE</id>\n <jobId>THEJOBID</jobId>\n <state>Queued</state>\n </batchInfo>\n '''\n\n fake_data = [('1', '2'), ('3', '4')]\n created_job.add_batch(['Id', 'Name'], iter(fake_data))\n\n assert created_job.pending_batches == ['BATCHONE']\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job/THEJOBID/batch',\n content_type='text/csv; charset=UTF-8',\n data=mock.ANY\n )\n\n data = bulk_request.call_args[1]['data']\n assert b''.join(data) == b'Id,Name\\r\\n1,2\\r\\n3,4\\r\\n'",
"def make_batch_request(self, batch):\n args = {}\n args['access_token'] = self.access_token\n args['batch'] = json.dumps(batch)\n args = {k.encode('utf-8'): unicode(v).encode('utf-8')\n for k, v in args.items()}\n logger.info('Making a batched request with %s' % args)\n try:\n f = urllib2.urlopen(self.api_root, urllib.urlencode(args))\n data = json.load(f)\n # For debugging\n self.data = data\n for idx, val in enumerate(data):\n data[idx] = json.loads(val['body'])\n return data\n except urllib2.HTTPError as e:\n logger.info('%s' % e)\n return json.load(e)\n except urllib2.URLError as e:\n logger.warn('URLError: %s' % e.reason)",
"def create_batch(count):\n\n if count < 1:\n raise click.BadParameter('count needs to be > 0')\n\n factory = V2ProfileFactory()\n output = factory.create_batch(count, export_json=True)\n click.echo(output)",
"def add_delete_batch() -> str:\r\n batches = app.config[\"batches\"]\r\n # Contains HTML form data inputted by the user submitted using POST.\r\n response = request.form\r\n batch_id_input = response.get(\"id_input\")\r\n batch_volume_input = response.get(\"volume_input\")\r\n batch_beer_type_input = response.get(\"beer_type_input\")\r\n delete_batch_input = response.get(\"delete_batch_input\")\r\n # True if user submits batch id, volume, and beer_type (part of one form).\r\n if batch_id_input is not None:\r\n # Removes blanks (whitespace characters) from batch id.\r\n batch_id_input = batch_id_input.replace(\" \", \"\")\r\n # Volume is always a number as str; HTML is set to only allow num input\r\n batch_volume_input = int(batch_volume_input)\r\n # Creates and adds Batch to batches dict. if ID isn't in batches dict.\r\n if batches.get(batch_id_input) is None:\r\n handle = {\"inventory\": app.config[\"inventory\"],\r\n \"tanks\": app.config[\"tanks\"]}\r\n batches[batch_id_input] = Batch(batch_id_input,\r\n batch_beer_type_input,\r\n batch_volume_input,\r\n handle)\r\n log_message = (\"Batch {} with beer type {} and {} L volume \"\r\n + \"was added.\").format(batch_id_input,\r\n batch_beer_type_input,\r\n batch_volume_input)\r\n app.config[\"logger\"].info(log_message)\r\n # Elif user wants to del. a batch and batch id exists, deletes this batch.\r\n elif delete_batch_input is not None and delete_batch_input in batches:\r\n del batches[delete_batch_input]\r\n log_message = \"Batch {} was deleted.\".format(delete_batch_input)\r\n app.config[\"logger\"].info(log_message)\r\n # Creates HTML table containing all batches.\r\n html_batch_table = update_batch_table(batches)\r\n return \"\"\"<style>\r\n h1, h2, h3 {\r\n font-family: arial, sans-serif;\r\n }\r\n table {\r\n font-family: arial, sans-serif;\r\n border-collapse: collapse;\r\n width: 100%;\r\n }\r\n td, th {\r\n border: 1px solid #dddddd;\r\n text-align: left;\r\n padding: 8px;\r\n }\r\n tr:nth-child(even) {\r\n background-color: #dddddd;\r\n }\r\n </style>\r\n <h2>Add batch</h2>\r\n <form action=\"/add_delete_batch\" method=\"POST\">\r\n Batch ID:<br>\r\n <input type=\"text\" name=\"id_input\" required=\"required\">\r\n <br>\r\n Volume (in litres):<br>\r\n <input type=\"number\" name=\"volume_input\" min=\"0\"\r\n required=\"required\">\r\n <br>\r\n Beer type:<br>\r\n <select name=\"beer_type_input\">\r\n <option value=\"dunkers\">Dunkers</option>\r\n <option value=\"pilsner\">Pilsner</option>\r\n <option value=\"red_helles\">Red Helles</option>\r\n </select>\r\n <br><br>\r\n <input type=\"submit\" value=\"Add batch\">\r\n </form>\r\n <h2>Delete batch</h2>\r\n <form action=\"/add_delete_batch\" method=\"POST\">\r\n Batch ID:<br>\r\n <input type=\"text\" name=\"delete_batch_input\" \r\n required=\"required\">\r\n <br><br>\r\n <input type=\"submit\" value=\"Delete batch\">\r\n </form>\r\n <form action=\"/\" method=\"POST\">\r\n <input type=\"hidden\">\r\n <br>\r\n <input type=\"submit\" value=\"Go back to tracking screen\">\r\n </form>\r\n <h2>Batches</h2>\r\n <table>\r\n <tr>\r\n <th>Batch ID</th>\r\n <th>Beer type</th>\r\n <th>Volume (L)</th>\r\n <th>Current production phase</th>\r\n <th>Current tank</th>\r\n <th>Current phase finishes</th>\r\n <th>Last completed phase</th>\r\n <th>Bottles put in inventory</th>\r\n </tr>\"\"\" + html_batch_table + \"</table>\"",
"def _defineBatches(self):\n # extract all ids\n all_keys = list(self.data_dict.unique_ids)\n\n # randomly shuffle keys\n if self.random_shuffle_batches:\n random.shuffle(all_keys)\n\n # create batches based on number of batches\n if self.n_big_batches is not None:\n self.n_big_batches += 1\n # define cuts for batches\n cuts = np.linspace(0, self.n_observations,\n self.n_big_batches).round()\n # create batches based on batch size\n elif self.batch_size is not None:\n cuts = [x for x in range(0, self.n_observations,\n int(self.batch_size))]\n if cuts[-1] < self.n_observations:\n cuts.append(self.n_observations)\n\n # convert batch sizes to integers\n cuts = [int(x) for x in cuts]\n\n # save batches into dictionary\n batches = dict()\n for i in range(0, (len(cuts) - 1)):\n # create DataBatch object\n current_batch = DataBatch(ids=all_keys[cuts[i]:cuts[i+1]],\n batch_id=i)\n current_batch.setDiskStoragePath(self.disk_scratch)\n batches[i] = current_batch\n\n # save batches\n self.n_batches = len(batches.keys())\n self.batches = batches",
"def post(self, request, work_batch_id):\n\n from sentry.models.workbatch import WorkBatch\n\n try:\n work_batch = WorkBatch.objects.get(pk=int(work_batch_id))\n except WorkBatch.DoesNotExist:\n raise ResourceDoesNotExist\n\n logger = logging.getLogger('clims.files')\n logger.info('workbatchfile.start')\n\n if 'file' not in request.data:\n return Response({'detail': 'Missing uploaded file'}, status=400)\n\n fileobj = request.data['file']\n\n full_name = request.data.get('name', fileobj.name)\n if not full_name or full_name == 'file':\n return Response({'detail': 'File name must be specified'}, status=400)\n\n name = full_name.rsplit('/', 1)[-1]\n\n if _filename_re.search(name):\n return Response(\n {\n 'detail': 'File name must not contain special whitespace characters'\n }, status=400\n )\n\n headers = {\n 'Content-Type': fileobj.content_type,\n }\n for headerval in request.data.getlist('header') or ():\n try:\n k, v = headerval.split(':', 1)\n except ValueError:\n return Response({'detail': 'header value was not formatted correctly'}, status=400)\n else:\n if _filename_re.search(v):\n return Response(\n {\n 'detail': 'header value must not contain special whitespace characters'\n },\n status=400\n )\n headers[k] = v.strip()\n\n file = File.objects.create(\n name=name,\n type='work_batch.file',\n headers=headers,\n )\n file.putfile(fileobj, logger=logger)\n\n try:\n with transaction.atomic():\n # TODO: Remove the organization id from the user task file\n work_batch_file = WorkBatchFile.objects.create(\n organization_id=work_batch.organization_id,\n file=file,\n name=full_name,\n work_batch_id=work_batch.id\n )\n except IOError:\n file.delete()\n return Response({'detail': ERR_FILE_EXISTS}, status=409)\n\n return Response(serialize(work_batch_file, request.user), status=201)",
"def created_job(new_job, bulk_request):\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>THEJOBID</id>\n <operation>update</operation>\n <object>Lead</object>\n </jobInfo>\n '''\n new_job.create()\n return new_job",
"def create(self, batch_outs):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def batch(self, batch):\n\n self._batch = batch",
"def insert_job(sess, filetype, status, type_id, submission, job_id=None, filename=None,\n file_size=None, num_rows=None):\n job = Job(\n file_type_id=filetype,\n job_status_id=status,\n job_type_id=type_id,\n submission_id=submission,\n original_filename=filename,\n file_size=file_size,\n number_of_rows=num_rows\n )\n if job_id:\n job.job_id = job_id\n sess.add(job)\n sess.commit()\n return job",
"def get_batch(self, batch_kwargs, batch_parameters=None) -> None:\n raise NotImplementedError",
"def insert(self, json_data, batch=None):\n headers = {'Content-type': 'application/json;charset=UTF-8'}\n url = '/'.join([self.url, self.db])\n time.sleep(0.01)\n if batch:\n r = requests.post(url, data=json_data, headers=headers, params={'batch': 'ok'})\n else:\n r = requests.post(url, data=json_data, headers=headers) \n time.sleep(0.01)\n if not r.status_code in (201, 202):\n raise Exception(\"HTTP \" + str(r.status_code))",
"def add_batch(self, batch, env_ids=None):\n batch_size = get_nest_batch_size(batch, tf.int32)\n with tf.device(self._device):\n if env_ids is None:\n env_ids = tf.range(self._num_envs)\n\n assert len(\n env_ids.shape.as_list()) == 1, \"env_ids should be an 1D tensor\"\n tf.Assert(batch_size == tf.shape(env_ids)[0], [\n \"batch and env_ids do not have same length\", batch_size, \"vs.\",\n tf.shape(env_ids)[0]\n ])\n\n # Make sure that there is no duplicate in `env_id`\n _, _, env_id_count = tf.unique_with_counts(tf.sort(env_ids))\n tf.Assert(\n tf.reduce_max(env_id_count) == 1,\n [\"There are duplicated ids in env_ids\", env_ids])\n current_pos = tf.gather(self._current_pos, env_ids, axis=0)\n indices = tf.concat([\n tf.cast(tf.expand_dims(env_ids, -1), tf.int64),\n tf.expand_dims(current_pos, -1)\n ],\n axis=-1)\n\n tf.nest.map_structure(\n lambda buf, bat: buf.scatter_nd_update(indices, bat),\n self._buffer, batch)\n\n self._current_pos.scatter_nd_update(\n tf.expand_dims(env_ids, -1),\n (current_pos + 1) % self._max_length)\n current_size = tf.gather(self._current_size, env_ids, axis=0)\n self._current_size.scatter_nd_update(\n tf.expand_dims(env_ids, -1),\n tf.minimum(current_size + 1, self._max_length))",
"def BatchCreate(self, request, global_params=None):\n config = self.GetMethodConfig('BatchCreate')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def GetBatchJob(client, batch_job_id):\n batch_job_service = client.GetService('BatchJobService')\n\n selector = {\n 'fields': ['Id', 'Status', 'DownloadUrl'],\n 'predicates': [\n {\n 'field': 'Id',\n 'operator': 'EQUALS',\n 'values': [batch_job_id]\n }\n ]\n }\n\n return batch_job_service.get(selector)['entries'][0]"
] | [
"0.69767964",
"0.69643956",
"0.6652114",
"0.6631825",
"0.61280215",
"0.6110446",
"0.60883904",
"0.60016143",
"0.59871805",
"0.5886255",
"0.5873244",
"0.58249927",
"0.5809669",
"0.57384014",
"0.5699515",
"0.56257725",
"0.56246674",
"0.5616465",
"0.551616",
"0.53761625",
"0.537489",
"0.5363888",
"0.53520834",
"0.5332634",
"0.53145397",
"0.53142214",
"0.53026015",
"0.5297402",
"0.52970856",
"0.5290663"
] | 0.7599396 | 0 |
Create a new input resource file object representing a single file. | def read_input(self, path: str) -> _resource.InputResourceFile:
irf = self._new_input_resource_file(path)
return irf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_file_object(inputfile=None):\n if type(inputfile) == str:\n return open(inputfile, 'r')\n return inputfile",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n content_type: Optional[pulumi.Input[Union[str, 'FileImportContentType']]] = None,\n file_import_id: Optional[pulumi.Input[str]] = None,\n import_file: Optional[pulumi.Input[pulumi.InputType['FileMetadataArgs']]] = None,\n ingestion_mode: Optional[pulumi.Input[Union[str, 'IngestionMode']]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n source: Optional[pulumi.Input[str]] = None,\n workspace_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def from_file(cls, path_src):\n cp_cond = [os.path.exists(path_src), os.path.isfile(path_src),\n len(path_new) != 0]\n content = \"\"\n\n # read input from file\n if cp_cond[0] and cp_cond[1]:\n with open(path_src) as f:\n content = f.read()\n\n # connect object with file content\n return cls(path_src, inp_string=content, to_file=False)",
"def __init__(__self__,\n resource_name: str,\n args: FileImportArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def file(self):\n file = BytesIO(b\"the_file\")\n file.name = \"the file name\"\n return File(file)",
"def file(self):\n file = BytesIO(b\"the_file\")\n file.name = self.name\n return File(file)",
"def __init__(__self__, *,\n content_type: pulumi.Input[Union[str, 'FileImportContentType']],\n import_file: pulumi.Input['FileMetadataArgs'],\n ingestion_mode: pulumi.Input[Union[str, 'IngestionMode']],\n resource_group_name: pulumi.Input[str],\n source: pulumi.Input[str],\n workspace_name: pulumi.Input[str],\n file_import_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"content_type\", content_type)\n pulumi.set(__self__, \"import_file\", import_file)\n pulumi.set(__self__, \"ingestion_mode\", ingestion_mode)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"source\", source)\n pulumi.set(__self__, \"workspace_name\", workspace_name)\n if file_import_id is not None:\n pulumi.set(__self__, \"file_import_id\", file_import_id)",
"def input_file(ddir, file_prefix):\n name = autofile.name.input_file(file_prefix)\n return factory.DataFile(ddir=ddir, name=name)",
"def make_instance(self, include_optional):\n # model = yapily.models.resource.Resource() # noqa: E501\n if include_optional :\n return Resource(\n description = '0', \n file = yapily.models.file.File(\n absolute = True, \n absolute_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n parent_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n parent_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n absolute_path = '0', \n canonical_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n parent_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n filename = '0', \n input_stream = None, \n open = True, \n readable = True, \n uri = yapily.models.uri.URI(\n absolute = True, \n authority = '0', \n fragment = '0', \n host = '0', \n opaque = True, \n path = '0', \n port = 56, \n query = '0', \n raw_authority = '0', \n raw_fragment = '0', \n raw_path = '0', \n raw_query = '0', \n raw_scheme_specific_part = '0', \n raw_user_info = '0', \n scheme = '0', \n scheme_specific_part = '0', \n user_info = '0', ), \n url = yapily.models.url.URL(\n authority = '0', \n content = yapily.models.content.content(), \n default_port = 56, \n file = '0', \n host = '0', \n path = '0', \n port = 56, \n protocol = '0', \n query = '0', \n ref = '0', \n user_info = '0', )\n )\n else :\n return Resource(\n )",
"def create_initialised_input(self):\n input_creator = InputCreator(self.times, self.inputs,\n params=self.params, outputs=self.outputs)\n f_out = input_creator.initialised_creation(self.burn_in)\n\n if self.debug:\n print(f_out.getvalue(), file=sys.stderr)\n f_out.seek(0)\n\n self.input_file = f_out.getvalue()\n pprint.pprint(self.input_file)\n return self.input_file",
"def __init__(self, input_filename='input.txt', output_filename='output.txt'):\n self._input = input_filename\n self._output = output_filename\n self._fin = open(self._input, 'r')\n self._fout = open(self._output, 'w')",
"def _get_file_object(infilename):\n\n _, extension = os.path.splitext(infilename)\n if extension.lower() == '.spe':\n return parsers.SpeFile(infilename)\n elif extension.lower() == '.spc':\n return parsers.SpcFile(infilename)\n elif extension.lower() == '.cnf':\n return parsers.CnfFile(infilename)\n else:\n raise NotImplementedError(\n 'File type {} can not be read'.format(extension))",
"def makeFileResource(path):\n\n path = unwrapStr(path)\n segments = [segment.encode(\"utf-8\") for segment in path.split(u'/')]\n if not path.startswith(u'/'):\n # Relative path.\n segments = os.getcwd().split('/') + segments\n log.log([\"fs\"], u\"makeFileResource.run/1: Relative path '%s'\" % path)\n return FileResource(segments)",
"def __init__(self, file: IO, mode: str = \"single\"):\n self.file = file\n super().__init__(mode=mode)",
"def getResource(self, file_name):\n path = os.path.join(os.path.dirname(__file__), \"resource\", file_name)\n return open(path)",
"def create_from_file(cls, path):\n\n with open(path, 'r') as file:\n # Possible FileNotFound.\n text = file.read()\n return cls.create_from_string(text)",
"def inputfile(self, path):\n if isinstance(path, Path):\n path = str(path)\n if self.default_remote_provider is not None:\n path = self.modifier.modify_path(path)\n return IOFile(path)",
"def __init__(self, owner, resourceFile):\n log.debug(u\"init resourceFile=%s\" % resourceFile)\n self._storageName = self._fn2ascii(resourceFile)\n self._userName = resourceFile.encode('utf-8')\n self._originalFile = resourceFile\n try:\n self.checksum = resourceFile.md5\n from exe.engine.idevice import Idevice\n if isinstance(owner, Idevice):\n self._idevice = owner\n if owner.parentNode:\n self.package = owner.parentNode.package\n else:\n self.package = None\n else:\n self._idevice = None\n self.package = owner\n finally:\n del self._originalFile",
"def create(self):\n self.file = open(self.filename, \"xb\", buffering=self.bufferSize)",
"def __new__(cls, name, bases, attrs):\n new_class = super(DasFileMetaclass, cls).__new__(cls, name, bases,\n attrs)\n opts = getattr(new_class, 'Meta', None)\n new_class._meta = DasResourceOptions(opts)\n # Note that ResourceOptions and DasResourceOptions both get called.\n filename = getattr(new_class._meta, \"filename\")\n filetype = getattr(new_class._meta, \"filetype\", None)\n\n if not filetype or filetype == '' and name != 'DasResource':\n global FILETYPES\n try:\n extension = filename.split(\".\")[1]\n if extension in FILETYPES:\n filetype = extension\n setattr(new_class._meta, \"filetype\", filetype)\n else:\n raise KeyError(\"Bleg No extension of filename found\")\n\n except IndexError:\n raise KeyError(\"No extension of filename found\")\n else:\n # Check if it is a valid filetype\n pass\n return new_class",
"def __init__(self, file: str):\n self._file = file",
"def __init__(self, file_name: Optional[str] = None):\n self.entries = OrderedDict() # Dict[str, PathElement]\n self.file_name = file_name # input file for logging\n self.jsonf = None # json image of input file\n self.namespaces = Namespaces('http://hl7.org/fhir/StructureDefinition/')\n self.path_map = {} # map from path to name (Dict[str, str])",
"def __init__(self, input_file=None, output_file=None, gt_file=None):\n\n self.input_file = input_file\n self.output_file = output_file\n self.gt_file = gt_file\n\n #print('Created object: ', musicxml_file)",
"def from_file(cls, f, **kwargs):\n if isinstance(f, string_types):\n with open(f, 'rb') as f:\n return cls(value=f.read(), **kwargs)\n else:\n if 'format' not in kwargs:\n ext = os.path.splitext(f)[1]\n if ext:\n kwargs['format'] = ext[1:] # remove the .\n return cls(value=f.read(), **kwargs)",
"def _constructInstance(self, container, id, *args, **kw):\n file, title = None, ''\n id = container.manage_addProduct['OFSP'].manage_addFile(id, file, title)\n return container.get(id, None)",
"def create_default_input(self):\n input_creator = InputCreator(self.times, self.inputs)\n self.input_file = input_creator.default_creation().getvalue()\n\n return self.input_file",
"def from_file(cls, slots, fileobj, offset = 0):\n return cls.from_fileno(slots, fileobj.fileno(), offset)",
"def __init__(self, filename):\n self.filename = filename\n self.file = open(filename, \"w\")",
"def test_single_file_resource(self):\n year = random.randint(2001, 2020)\n name = \"eia923-%d.zip\" % year\n size = random.randint(500000, 800000)\n\n md5_hash = random.choice([\n \"4bd7e1025c91c00b50b6cef87cb9bfad\",\n \"883895453cb3144b97d0095472f6136e\",\n \"c271dfc0ca452b6582f0e592f57351ef\"])\n\n url = \"https://zenodo.org/api/deposit/depositions/%d/files/%s\" % (\n random.randint(10000, 99999), uuid.uuid4())\n\n fake_resource = {\n \"filename\": name,\n \"links\": {\"download\": url},\n \"filesize\": size,\n \"checksum\": md5_hash\n }\n\n package = eia923_raw.datapackager([fake_resource])\n res = package[\"resources\"][0]\n\n assert(res[\"name\"] == name)\n assert(res[\"title\"] == \"eia923-%d\" % year)\n assert(res[\"path\"] == url)\n assert(res[\"parts\"][\"year\"] == year)\n assert(res[\"remote_url\"] == url)\n\n assert(res[\"mediatype\"] == \"application/zip\")\n assert(res[\"format\"] == \"zip\")\n\n assert(res[\"bytes\"] == size)\n assert(res[\"hash\"] == md5_hash)",
"def from_file(self, path, **kwargs):\n\t\twith codecs.open(path, 'r', encoding='utf-8') as file_h:\n\t\t\tsource = file_h.read()\n\t\treturn self.from_string(source, **kwargs)"
] | [
"0.6874475",
"0.6812216",
"0.65446967",
"0.65262955",
"0.6509462",
"0.6438844",
"0.6272451",
"0.62469476",
"0.61195093",
"0.60929656",
"0.6091407",
"0.60815406",
"0.60800767",
"0.6025725",
"0.5954721",
"0.59426826",
"0.59278876",
"0.59035605",
"0.5878973",
"0.5866811",
"0.58572775",
"0.58512354",
"0.5825164",
"0.58176243",
"0.58088",
"0.5808197",
"0.57923186",
"0.5787447",
"0.57869726",
"0.5781285"
] | 0.70438915 | 0 |
Create a new resource group representing a mapping of identifier to input resource files. | def read_input_group(self, **kwargs: str) -> _resource.ResourceGroup:
root = secret_alnum_string(5)
new_resources = {name: self._new_input_resource_file(file, root) for name, file in kwargs.items()}
rg = _resource.ResourceGroup(None, root, **new_resources)
self._resource_map.update({rg._uid: rg})
return rg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_resource_group(self):\n pass",
"def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()",
"def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n 
self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs",
"def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)",
"def MakeResource(resource_list, output_list=None):\n content = {'resources': resource_list}\n if output_list:\n content['outputs'] = output_list\n return yaml.dump(content)",
"def createGroup(root, group, fileList):\n topGroupElem = ElementTree.SubElement(root, ELEM_GROUP, {ATTR_NAME: group})\n headerGroupElem = None\n sourceGroupElem = None\n pathElem = None\n for fl in fileList:\n if fl.endswith(\".h\"):\n if headerGroupElem == None:\n headerGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_HEADER})\n pathElem = ElementTree.SubElement(headerGroupElem, ELEM_PATH)\n else:\n if sourceGroupElem == None:\n sourceGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_SRC})\n pathElem = ElementTree.SubElement(sourceGroupElem, ELEM_PATH)\n pathElem.text = fl",
"def create_raster_resources(self, file_path):\n extension = os.path.splitext(os.path.normpath(file_path))[1]\n fomart_x = extension[1:]\n file_name = os.path.basename(file_path)\n base = os.path.splitext(file_name)[0]\n resource_pk = []\n if os.path.isfile(file_path) and fomart_x in self.pk_formats:\n sub_dataset_name = file_path\n src_ds = self.get_source(sub_dataset_name)\n if not self.name:\n self.name = os.path.basename(src_ds.GetDescription())\n self.set_global(src_ds)\n\n resource_pk = []\n for band_num in range(1, src_ds.RasterCount + 1):\n bands = OrderedDict()\n srcband = src_ds.GetRasterBand(band_num)\n bands[\"extensions\"] = [fomart_x]\n bands[\"other_paths\"] = \"\"\n bands[\"format\"] = \"raster\"\n bands[\"name\"] = clean_table_name(base)\n bands[\"path\"] = os.path.basename(src_ds.GetDescription())\n bands[\"band_name\"] = base + \"_\" + str(band_num)\n bands[\"no_data_value\"] = srcband.GetNoDataValue()\n bands[\"scale\"] = srcband.GetScale()\n bands[\"color_table\"] = (None\n if not srcband.GetRasterColorTable() else True)\n bands[\"url\"] = None\n bands[\"statistics\"] = OrderedDict(\n zip(\n [\"minimum\", \"maximum\", \"mean\", \"stddev\"],\n srcband.GetStatistics(True, False),\n ))\n resource_pk.append(bands)\n return resource_pk[0]",
"def build_groupings(idir: str) -> dict:\n bkg_group = {key: [ifile for ifile in glob(f'{idir}/*_{key}_*.root')] for key in bkgs}\n pw_group = {key: [ifile for ifile in glob(f'{idir}/{key}*.root')] for key in powhegs}\n wh_pw_group = [ifile for name in wh_powhegs for ifile in glob(f'{idir}/{name}*.root')]\n ungrouped = [ifile for ifile in glob(f'{idir}/*.root') if 'madgraph' in ifile or 'JHU' in ifile]\n\n group = {}\n for key, files in bkg_group.items():\n if len(files) > 0:\n group[key] = files\n\n for key, files in pw_group.items():\n if len(files) > 0:\n group[key] = files\n\n for ifile in ungrouped:\n name = ifile.split('/')[-1].replace('.root', '')\n name = name.split('_SYST')[0].replace('-', '_')\n name = name.replace('_ggH125', '').replace('_VBF125', '').replace('_WH125', '').replace('_ZH125', '')\n group[name] = [ifile]\n\n if len(wh_pw_group) > 0:\n group['wh125_powheg'] = wh_pw_group\n\n return group",
"def gen_inventory(self):\n if isinstance(self.resource, list):\n self.my_add_group(self.resource, 'default_group')\n elif isinstance(self.resource, dict):\n for groupname, hosts_and_vars in self.resource.iteritems():\n self.my_add_group(hosts_and_vars.get(\"hosts\"), groupname, hosts_and_vars.get(\"vars\"))",
"def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n entry_group_id: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n __props__=None,\n __name__=None,\n __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['description'] = description\n __props__['display_name'] = display_name\n if entry_group_id is None:\n raise TypeError(\"Missing required property 'entry_group_id'\")\n __props__['entry_group_id'] = entry_group_id\n __props__['project'] = project\n __props__['region'] = region\n __props__['name'] = None\n super(EntryGroup, __self__).__init__(\n 'gcp:datacatalog/entryGroup:EntryGroup',\n resource_name,\n __props__,\n opts)",
"def __init__(__self__,\n resource_name: str,\n args: GroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp",
"def create_rg_dict(self, bam_in, rg_dict_out_files, rg_prefix=False):\n if rg_prefix:\n sampleID = os.path.basename(bam_in).rstrip(\".input.bam\")\n prefix_string = \"--prefix %s\" % (sampleID)\n else:\n prefix_string = \"\"\n\n self.cmd(\"python {readgroup_mover} create\\\n {prefix_string} \\\n --input {bam_in}\\\n --output {dict_out}\"\n .format(\n readgroup_mover=self.cmds[\"readgroup_mover\"],\n prefix_string=prefix_string,\n bam_in=bam_in,\n dict_out=rg_dict_out_files[0]\n ),\n on_error=lambda: self.create_error_file(rg_dict_out_files[0]),\n shell=True)\n\n self.checkpoint(rg_dict_out_files[0])\n self.checkpoint(rg_dict_out_files[1])\n self.checkpoint(rg_dict_out_files[2])",
"def from_structure(self, structure):\n session = meta.Session()\n \n try:\n for resource_s in structure['resources']:\n log.debug(\"Importing: {0!r}\".format(resource_s))\n \n # First build up a list of group_ids for this resource that will correspond to groups\n # in *this* database.\n group_ids = []\n for gname in resource_s['groups']:\n group = groups.get_by_name(gname, assert_exists=False)\n if not group:\n group = groups.create(gname)\n log.info(\"Created group: {0!r}\".format(group))\n else:\n log.info(\"Found existing group: {0!r}\".format(group))\n \n group_ids.append(group.id)\n \n # First we should see if there is a match for the id and name; we can't rely on name alone since\n # there is no guarantee of name uniqueness (even with a group)\n resource = None\n resource_candidate = resources.get(resource_s['id'], assert_exists=False)\n if resource_candidate and resource_candidate.name == resource_s['name']:\n resource = resource_candidate \n else:\n # If we find a matching resource (by name) and there is only one then we'll use that.\n try:\n resource = resources.get_by_name(resource_s['name'], assert_single=True, assert_exists=True)\n except MultipleResultsFound:\n log.info(\"Multiple resource matched name {0!r}, will create a new one.\".format(resource_s['name']))\n except exc.NoSuchEntity:\n log.debug(\"No resource found matching name: {0!r}\".format(resource_s['name']))\n pass\n \n resource_attribs = ('name', 'addr', 'description', 'notes', 'tags')\n resource_attribs_update = dict([(k,v) for (k,v) in resource_s.items() if k in resource_attribs])\n \n if resource:\n (resource, modified) = resources.modify(resource.id, group_ids=group_ids, **resource_attribs_update)\n # (yes, we are overwriting 'resource' var with new copy returned from this method)\n log.info(\"Updating existing resource: {0!r} (modified: {1!r})\".format(resource, modified))\n if modified and modified != ['group_ids']:\n if not self.force:\n raise RuntimeError(\"Refusing to modify existing resource attributes {0!r} on {1!r} (use 'force' to override this).\".format(modified, resource))\n else:\n log.warning(\"Overwriting resource attributes {0!r} on {1!r}\".format(modified, resource))\n else:\n # We will just assume that we need to create the resource. Yes, it's possible it'll match an existing\n # one, but better to build a merge tool than end up silently merging things that are not the same.\n resource = resources.create(group_ids=group_ids, **resource_attribs_update)\n log.info(\"Created new resource: {0!r}\".format(resource))\n \n # Add the passwords\n for password_s in resource_s['passwords']:\n \n password_attribs = ('username', 'description', 'password', 'tags')\n password_attribs_update = dict([(k,v) for (k,v) in password_s.items() if k in password_attribs])\n \n # Look for a matching password. 
We do know that this is unique.\n password = passwords.get_for_resource(password_s['username'], password_s['resource_id'], assert_exists=False)\n if password:\n (password, modified) = passwords.modify(password_id=password.id, **password_attribs_update)\n # (Yeah, we overwrite password object.)\n log.info(\"Updating existing password: {0!r} (modified: {1!r})\".format(password, modified))\n \n non_pw_modified = set(modified) - set(['password'])\n if not modified:\n log.debug(\"Password row not modified.\")\n else:\n log.debug(\"Password modified: {0!r}\".format(modified))\n \n # If anything changed other than password, we need to ensure that force=true\n if non_pw_modified:\n if not self.force:\n raise RuntimeError(\"Refusing to modify existing password attributes {0!r} on {1!r} (use 'force' to override this).\".format(non_pw_modified, password))\n else:\n log.warning(\"Overwriting password attributes {0!r} on {1!r}\".format(non_pw_modified, password))\n else:\n password = passwords.create(resource_id=resource.id, **password_attribs_update)\n log.info(\"Creating new password: {0!r}\".format(password))\n \n \n # This probably isn't necessary as all the DAO methods should also flush session, but might as well.\n session.flush()\n \n except:\n session.rollback()\n raise",
"def create_input_files(self, datasets_dict):\n ifname = self.keywords['inputfile']\n dirstem = os.path.dirname(ifname)\n basename = os.path.basename(ifname).split('.')[0]\n createdfiles=list()\n if dirstem == \"\":\n dirstem = os.getcwd()\n dkeys = datasets_dict.keys()\n dkeys.sort()\n dct=1\n for didx in dkeys:\n newfile = MASTFile()\n newfile.data = list(datasets_dict[didx])\n newname=\"%s/loop_%s_%s.inp\" % (dirstem, basename, str(dct).zfill(2))\n newfile.to_file(newname)\n #createdfiles.append(os.path.basename(newname))\n createdfiles.append(newname)\n dct=dct+1\n return createdfiles",
"def create(self):\n\n if len(self.filenames) != len(self.download_links):\n print(\"Must have the same amount off file names than download links\", file=sys.stderr)\n return None\n\n resources = []\n\n #Creating the resource dict\n for i in range(len(self.filenames)):\n resources.append(\n {\n \"id\": self.ids[i],\n \"description\":\"\",\n \"filename\":self.filenames[i],\n \"download_link\":self.download_links[i]\n }\n )\n\n\n #The JSON\n data = {\n \"dataset\":{\n \"project\":self.project,\n \"version\":self.version,\n \"description\":self.description,\n \"project_link\":self.project_link,\n \"data_path\": self.data_path,\n \"metadata\": self.metadata,\n \"files_type\":self.file_type,\n \"protocole\":self.protocole,\n \"resources\":resources,\n \"data_representation\":self.data_representation\n }\n }\n with open(self.dataset_path, \"w\") as json_file:\n json_file.write(json.dumps(data))",
"def assemble(metadata_file):\n\n def read(file):\n with open(file) as yaml:\n return load(yaml.read())\n\n def add_name(info):\n info['name'] = slugify(info['title'], separator='_')\n return info\n\n def get_files(filetype):\n filename = metadata_file.replace('metadata', filetype)\n folder = dirname(metadata_file)\n schema_files_pattern = join(folder, filename)\n return glob(schema_files_pattern)\n\n descriptor = add_name(read(metadata_file))\n resources = [add_name(read(file)) for file in get_files('resource')]\n model = get_files('model')\n\n descriptor['resources'] = resources\n if model and len(model) == 1:\n descriptor['model'] = model.pop()\n\n return DataPackage(descriptor)",
"def _build_ec2_mapping_from_resources(resource_to_analyse, result_dict, session):\n for instance, security_group in _generate_ec2_instance_and_sg(resource_to_analyse):\n resource_dict = _check_if_in_list(result_dict, instance.id, \"resource_id\")\n if resource_dict is not None:\n resource_dict[\"sg_attached\"].append({\n \"sg_id\": security_group[\"GroupId\"],\n \"sg_name\": security_group[\"GroupName\"]\n })\n else:\n result_dict.append({\n \"resource_id\": instance.id,\n \"resource_type\": \"ec2\",\n \"resource_name\": \"\" if _check_if_in_list(instance.tags, \"Name\", \"Key\") is None else _check_if_in_list(instance.tags, \"Name\", \"Key\").get(\"Value\", \"\"),\n \"sg_attached\": [{\n \"sg_id\": security_group[\"GroupId\"],\n \"sg_name\": security_group[\"GroupName\"]\n }]\n })\n return result_dict",
"def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))",
"def create_entry_group(self, location_id, entry_group_id):\n entry_group = self.__datacatalog.create_entry_group(\n parent=f'projects/{self.__project_id}/locations/{location_id}',\n entry_group_id=entry_group_id,\n entry_group=datacatalog.EntryGroup())\n logging.info('Entry Group created: %s', entry_group.name)\n return entry_group",
"def create_input_file(self, polymer_identifier, format, outpath):\n\n\t\tsmiles = self.get_smiles_from_identifier(polymer_identifier)\n\t\t\n\t\tresult = generate_input_files(smiles, format)\n\t\twith open(outpath, 'w+') as f:\n\t\t\tf.write(result)",
"def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )",
"def load_ids_to_groups(self):\n self.groups = set([])\n self.h_group_ids = defaultdict(lambda: set([]))\n self.h_id_to_group = defaultdict(lambda: set([]))\n for i,g in zip(self.df.sample_id, self.df.group_name):\n self.h_group_ids[g].add(i)\n self.h_id_to_group[i] = g\n self.groups.add(g)",
"def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)",
"def pre_security_group_create(self, resource_dict):\n pass",
"def createDictionaryFromFile(inputfile):\n logger.info('loading file: %s' % inputfile)\n dic = {}\n with open(inputfile) as fin:\n for n, line in enumerate(fin, start=1):\n arr = line.strip().split()\n path = arr[0]\n\n labels = []\n for label in arr[1:]:\n labels.append(ast.literal_eval(label))\n\n cpath = path.split('/')\n id_img = int(cpath[-1].replace('.jpg', ''))\n size_img = cpath[-2]\n activity = cpath[-3]\n id_data = int((cpath[-4])[-1])\n home = '/'.join(cpath[:-4])\n\n if dic.has_key(id_data):\n if dic[id_data].has_key(activity):\n if dic[id_data][activity].has_key(size_img):\n dic[id_data][activity][size_img][id_img] = labels\n else:\n dic[id_data][activity][size_img] = {id_img: labels}\n else:\n dic[id_data][activity] = {size_img: {id_img: labels}}\n else:\n dic[id_data] = {activity: {size_img: {id_img: labels}}}\n return n, home, dic",
"def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group",
"def __init__(self):\n groups = [\n os.path.splitext(f)[0] for f in os.listdir(data_dir) if f.endswith(\".json\")\n ]\n\n self._data = {\n group: IndicatorGroup.parse_file(os.path.join(data_dir, f\"{group}.json\"))\n for group in groups\n }",
"def __init__(__self__, resource_name, opts=None, attributes=None, name=None, parent_id=None, realm_id=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['attributes'] = attributes\n __props__['name'] = name\n __props__['parent_id'] = parent_id\n if realm_id is None:\n raise TypeError(\"Missing required property 'realm_id'\")\n __props__['realm_id'] = realm_id\n __props__['path'] = None\n super(Group, __self__).__init__(\n 'keycloak:index/group:Group',\n resource_name,\n __props__,\n opts)"
] | [
"0.58438635",
"0.5761084",
"0.55505747",
"0.5540623",
"0.5514604",
"0.55128634",
"0.5483182",
"0.54395956",
"0.5409617",
"0.5397805",
"0.5392991",
"0.53768927",
"0.5376017",
"0.5359928",
"0.5356735",
"0.5327738",
"0.52933335",
"0.52924156",
"0.5286034",
"0.52811337",
"0.52422285",
"0.5239233",
"0.5233111",
"0.52098596",
"0.520034",
"0.5199278",
"0.5168087",
"0.5167269",
"0.51671344",
"0.51649696"
] | 0.6814466 | 0 |
Write resource file or resource file group to an output destination. Examples | def write_output(self, resource: _resource.Resource, dest: str):
if not isinstance(resource, _resource.Resource):
raise BatchException(f"'write_output' only accepts Resource inputs. Found '{type(resource)}'.")
if (isinstance(resource, _resource.JobResourceFile)
and isinstance(resource._source, job.BashJob)
and resource not in resource._source._mentioned):
name = resource._source._resources_inverse[resource]
raise BatchException(f"undefined resource '{name}'\n"
f"Hint: resources must be defined within the "
f"job methods 'command' or 'declare_resource_group'")
if (isinstance(resource, _resource.PythonResult)
and isinstance(resource._source, job.PythonJob)
and resource not in resource._source._mentioned):
name = resource._source._resources_inverse[resource]
raise BatchException(f"undefined resource '{name}'\n"
f"Hint: resources must be bound as a result "
f"using the PythonJob 'call' method")
if isinstance(self._backend, _backend.LocalBackend):
dest_scheme = url_scheme(dest)
if dest_scheme == '':
dest = os.path.abspath(os.path.expanduser(dest))
resource._add_output_path(dest) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self, output, resources, **kw):\n\n raise NotImplementedError()",
"def write_resources(self, resources):\n for filename, data in list(resources.get('outputs', {}).items()):\n # Determine where to write the file to\n dest = os.path.join(self.output_dir, filename)\n path = os.path.dirname(dest)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n # Write file\n with open(dest, 'wb') as f:\n f.write(data)",
"def WriteDataPack(resources, output_file, encoding):\n content = WriteDataPackToString(resources, encoding)\n with open(output_file, \"wb\") as file:\n file.write(content)",
"def WriteDataPack(resources, output_file, encoding):\n content = WriteDataPackToString(resources, encoding)\n with open(output_file, 'wb') as file:\n file.write(content)",
"def WriteToFile(output_file, security_policy, file_format):\n resource_printer.Print(\n security_policy, print_format=file_format, out=output_file)",
"def WriteToFile(output_file, security_policy, file_format):\n resource_printer.Print(\n security_policy, print_format=file_format, out=output_file)",
"def testWriteResourceFiles(self):\n resource_files = ['test.rc']\n\n file_writer = writers.VS2010ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer._WriteResourceFiles(resource_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b' <ItemGroup>\\r\\n'\n b' <ResourceCompile Include=\"test.rc\" />\\r\\n'\n b' </ItemGroup>\\r\\n')\n self.assertEqual(output_data, expected_output_data)",
"def write(name, keyword, domain, citation, author, description, species, version, contact, license, values, output):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n file=output,\n )",
"def write(self, destination):\n if not self._errors and not self._warnings:\n return\n destination.write('%s :\\n' % self._asset_name)\n for error in self._errors:\n destination.write(' * ERROR : %s\\n' % error)\n for warning in self._warnings:\n destination.write(' * WARNING : %s\\n' % warning)\n\n destination.write('\\n')",
"def writeAPI(res_api, dst_file):\n with open(dst_file, 'w') as api_dst:\n api_dst.write(res_api)",
"def write(name, keyword, domain, citation, author, description, species, version, contact, license, values,\n functions, output, value_prefix):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n functions=functions,\n file=output,\n value_prefix=value_prefix\n )",
"def collect_helper(output_dir, cmd, file_name, resource_name, namespace=None):\n return_code, out = run_shell_command(cmd)\n if return_code:\n logger.warning(\"Error when running %s: %s\", cmd, out)\n return\n path = os.path.join(output_dir, file_name)\n with open(path, \"w+\", encoding='UTF-8') as file_handle:\n file_handle.write(out)\n logger.info(\"Namespace '%s': Collected %s\", namespace, resource_name)",
"def testWriteResourceFiles(self):\n resource_files = ['test.rc']\n\n file_writer = writers.VS2008ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer._WriteResourceFiles(resource_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b'\\t\\t<Filter\\r\\n'\n b'\\t\\t\\tName=\"Resource Files\"\\r\\n'\n b'\\t\\t\\tFilter=\"rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;'\n b'resx;tiff;tif;png;wav\"\\r\\n'\n b'\\t\\t\\tUniqueIdentifier=\"{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}\"\\r\\n'\n b'\\t\\t\\t>\\r\\n'\n b'\\t\\t\\t<File\\r\\n'\n b'\\t\\t\\t\\tRelativePath=\"test.rc\"\\r\\n'\n b'\\t\\t\\t\\t>\\r\\n'\n b'\\t\\t\\t</File>\\r\\n'\n b'\\t\\t</Filter>\\r\\n')\n self.assertEqual(output_data, expected_output_data)",
"def writeToFile(self, targetFolder):\r\n pass",
"def testWriteFiles(self):\n header_files = ['test.h']\n resource_files = ['test.rc']\n source_files = ['test.c']\n\n file_writer = writers.VS2010ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteFiles(source_files, header_files, resource_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n self.assertTrue(output_data.startswith(b' <ItemGroup>\\r\\n'))\n self.assertTrue(output_data.endswith(b' </ItemGroup>\\r\\n'))",
"def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()",
"def write(task_spec: TaskSpec, destination_dir: Path, force: bool = False):\n\n file_path: Path = destination_dir / task_spec.filename\n file_path.touch(exist_ok=force)\n\n writable_task_spec: Dict = clean(task_spec)\n\n yaml.dump(writable_task_spec, file_path)",
"def write_to_file(inventory):\n env = Environment(loader=FileSystemLoader('templates'), trim_blocks=True)\n output_template = env.get_template('output.j2')\n # create and clean an 'outputs' folder\n path = \"./outputs\"\n try:\n shutil.rmtree(path, ignore_errors = True, onerror = None)\n except:\n print('Error while deleting directory')\n os.mkdir(path)\n os.chdir(path)\n for node, node_data in inventory.items():\n if 'outputs' in node_data:\n os.mkdir(node)\n for command, output in node_data['outputs'].items():\n # when creating filenames based on command, swap 'spaces' with 'underscores':\n command = re.sub(r\"\\s\", r\"_\", command)\n open(f\"{node}/{command}.txt\", 'a').write(\n output_template.render(node=node, data=output))\n print(\"\\n\" + f\"Job complete. If data gathering was successful, see 'outputs' directory.\")\n return inventory",
"def write_to_files(section, csv_path, srt_path):\n write_to_csv(section, csv_path)\n write_to_srt(section, srt_path)",
"def write(self, data, dst, label=None, mode='wb'):\n\n self._tag(dst, label)\n self._mkdir_for(dst)\n with open(os.path.join(self.chroot, dst), mode) as wp:\n wp.write(data)",
"def write(self, data, dst, label=None, mode='wb'):\r\n\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n with open(os.path.join(self.chroot, dst), mode) as wp:\r\n wp.write(data)",
"def write(self, data, dst, label=None, mode='wb'):\r\n\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n with open(os.path.join(self.chroot, dst), mode) as wp:\r\n wp.write(data)",
"def _write_file(template, localcontext, output_path, name):\n output = template.render(localcontext)\n filename = os.sep.join((output_path, name))\n try:\n os.makedirs(os.path.dirname(filename))\n except Exception:\n pass\n with open(filename, 'w', encoding='utf-8') as f:\n f.write(output)\n print u' [ok] writing %s' % filename",
"def write_output(content, dir_to_file):\n\n if not dir_to_file:\n dir_to_file = '{0}output-{1}'.format(dir_to_file, uuid.uuid4())\n\n f = open(dir_to_file, 'a')\n f.write(content)\n f.close()\n\n log.info('function: {} dir_to_file: {}'.format('write_output', dir_to_file))\n\n return dir_to_file",
"async def dump(self, resource: ResourceType, location: PhysicalResourceLocation):\n try:\n # Serialize the resource.\n raw = self.serializer(resource)\n # Make sure the target directory exists.\n location.path.parent.mkdir(parents=True, exist_ok=True)\n # Dump the raw data to file.\n await self._dump_raw(raw, location)\n except ResourceDumperError:\n raise\n except Exception as ex:\n raise FailedToDumpResourceError(location.path) from ex",
"def write(self, iface, dest_folder, feedback=None):\n return WriterResult()",
"def write_file(rel_path, text, *args, **kwargs):\n path = os.path.join(os.path.dirname(__file__), \"resources\", rel_path)\n with open(path, 'w+', *args, **kwargs) as _file:\n _file.write(text)",
"def write(self):\n self.output_directory.mkdir(parents=True, exist_ok=True)\n parameter_set_files = [pathlib.Path(set_name) for set_name in\n self.parameter_study.coords[_set_coordinate_key].values]\n if self.write_meta and self.provided_output_file_template:\n self._write_meta(parameter_set_files)\n if self.output_file_type == 'h5':\n self._write_dataset()\n elif self.output_file_type == 'yaml':\n self._write_yaml(parameter_set_files)\n else:\n raise ValueError(f\"Unsupported output file type '{self.output_file_type}'\")",
"def _write_output_file(output: str, file_name: str):\n\tfile1 = open(file_name, 'w')\n\tfile1.write(output)\n\tfile1.close()",
"def collect_resources(namespace, output_dir, api_resources, k8s_cli_input=\"\", selector=\"\"):\n set_file_logger(output_dir)\n k8s_cli = detect_k8s_cli(k8s_cli_input)\n ns_output_dir = os.path.join(output_dir, namespace)\n make_dir(ns_output_dir)\n collect_api_resources(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_api_resources_description(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_pods_logs(namespace, ns_output_dir, k8s_cli, logs_from_all_pods=True)"
] | [
"0.7189583",
"0.67922026",
"0.6485851",
"0.6471249",
"0.5966388",
"0.5966388",
"0.5919293",
"0.58600813",
"0.5827604",
"0.5729985",
"0.5707116",
"0.5665977",
"0.55832523",
"0.557501",
"0.55459183",
"0.54772556",
"0.5467842",
"0.5445758",
"0.54371053",
"0.54174685",
"0.5403804",
"0.5403804",
"0.5396148",
"0.5392162",
"0.5386541",
"0.5378357",
"0.5377417",
"0.535219",
"0.5340851",
"0.52905977"
] | 0.68054146 | 1 |
Select all jobs in the batch whose name matches `pattern`. Examples | def select_jobs(self, pattern: str) -> List[job.Job]:
return [job for job in self._jobs if job.name is not None and re.match(pattern, job.name) is not None] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_jobs(jobs, keyword):\n for job in jobs:\n if keyword == \"all\":\n yield job\n elif job[\"name\"].find(keyword) != -1:\n yield job",
"def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)",
"def do_builds(self, pattern):\n if not pattern:\n print('\\n'.join(self._qm.get_available_stc_builds()))\n return\n\n for build in self._qm.get_available_stc_builds():\n if fnmatch.fnmatch(build, pattern):\n print(build)",
"def _get_wild_tasks(self, pattern):\n wild_list = []\n for t_name in self._def_order:\n if fnmatch.fnmatch(t_name, pattern):\n wild_list.append(t_name)\n return wild_list",
"def grep(requestContext, seriesList, pattern):\n regex = re.compile(pattern)\n return [s for s in seriesList if regex.search(s.name)]",
"def name_search(self, search):\n if isinstance(search, str):\n name_re = re.compile(search)\n else:\n name_re = search\n matches = [\n entry\n for entry in self\n if entry is not None and name_re.search(entry.name)\n ]\n return matches",
"def all_by_actor(actor) -> co.Parallel:\n df = _load_data()\n titles = df[df.cast.str.contains(actor) | False].title\n\n output = co.Parallel()\n for title in titles:\n output[title] = co.Exec(\n f\"python pipeline.py for_title {repr(title)}\"\n )\n return output",
"def WhereMatches(self, pattern):\n regex = re.compile(match_util.ExpandRegexIdentifierPlaceholder(pattern))\n return self.Filter(lambda s: (\n regex.search(s.source_path) or\n regex.search(s.object_path) or\n regex.search(s.full_name) or\n s.full_name is not s.template_name and regex.search(s.template_name) or\n s.full_name is not s.name and regex.search(s.name)))",
"def search(self, pattern):\n raise NotImplementedError()",
"def do_ls(self, pattern=\"\"):\n if pattern:\n print \"The available jobs with substring %s are:\" % pattern\n else:\n print \"The available jobs are:\"\n \n app_order = self.router.app_order\n app_path = self.router.app_path\n n = len(self.router.app_order)\n j = 0\n for i in range(n):\n path = app_order[i]\n if path.find(pattern) != -1:\n j += 1\n app, type = app_path[path]\n if type == \"func\":\n print \" %d. %-12s [%4s] --> %s\" % (i, path, type, Job.get_func_help(app))\n elif type in (\"Job\", \"MJob\", \"PJob\"):\n print \" %d. %-12s [%4s] --> %s\" % (i, path, type, app.get_line_help())\n else:\n raise Exception(\"unknown Object type = %s of %s\" % (type, app) )\n if pattern:\n print \"There are %d/%d including '%s'\" % (j, n, pattern)",
"def search(self, pattern=\"*\", mode=\"both\"):\n pattern = self._glob_to_sql(pattern)\n\n COND = \"(keyword.name like ? OR keyword.doc like ?)\"\n args = [pattern, pattern]\n if mode == \"name\":\n COND = \"(keyword.name like ?)\"\n args = [pattern,]\n\n sql = \"\"\"SELECT collection.collection_id, collection.name, keyword.name, keyword.doc\n FROM collection_table as collection\n JOIN keyword_table as keyword\n WHERE collection.collection_id == keyword.collection_id\n AND %s\n ORDER by collection.collection_id, collection.name, keyword.name\n \"\"\" % COND\n\n cursor = self._execute(sql, args)\n result = [(row[0], row[1], row[2], row[3].strip().split(\"\\n\")[0])\n for row in cursor.fetchall()]\n return list(set(result))",
"def tasks(self, pattern, **kwargs):\n\n config = self.config.make(**kwargs)\n # yield a task for every py file in selection\n base = Path(config['base_dir'])\n excluded = set([base.joinpath(e) for e in config['exclude_paths']])\n for src in base.glob(pattern):\n if src in excluded:\n continue\n for exclude_pattern in config['exclude_patterns']:\n if src.match(exclude_pattern):\n break\n else:\n yield self(str(src))",
"def list(self, pattern='*'):\n if self._group_dict is None:\n self._group_dict = collections.OrderedDict(\n (group.name, group) for group in self._client.list_groups())\n\n return [group for group in self._group_dict.values()\n if fnmatch.fnmatch(group.display_name, pattern)]",
"def match_name(pattern, rows):\n matching = []\n for row in rows:\n # Use regex matching to check whether first name or last name contains the pattern\n if re.search(r'%s' % pattern.lower(), row[0].lower()) != None or re.search(r'%s' % pattern.lower(), row[1].lower()) != None:\n matching.append(row)\n\n # print the matched records\n print_records(matching)",
"def tok_by_reg(pattern, list_of_toks):\n try:\n comped = re.compile(pattern)\n except:\n import traceback\n import sys\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value,\n exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print '%s: Query %s' % (thetime, error_message)\n return 'Bad query'\n\n matches = [m for m in list_of_toks if re.search(comped, m)]\n\n return matches",
"def pattern_filter(patterns, name):\n return [pat for pat in patterns if fnmatch.fnmatchcase(name, pat)]",
"def _matching_jobs(buildername, all_jobs):\n LOG.debug(\"Find jobs matching '%s'\" % buildername)\n matching_jobs = []\n for j in all_jobs:\n if j[\"buildername\"] == buildername:\n matching_jobs.append(j)\n\n LOG.debug(\"We have found %d job(s) of '%s'.\" %\n (len(matching_jobs), buildername))\n return matching_jobs",
"def getitemsbypattern(self, pattern):\n\n return True",
"def tok_by_list(pattern, list_of_toks):\n if type(pattern) == str:\n pattern = [pattern]\n result = []\n matches = [m for m in list_of_toks if m in pattern]\n for m in matches:\n result.append(m)\n return result",
"def find(pattern):\n files = config.index.files(path_glob=\"*%s*\" % pattern)\n print_files(files)",
"def list_command_filter(self, testsuite_pattern, testcase_pattern):\n return None # Hobbes-test does not support listing by filter",
"def grep(pattern, *files_or_paths):\n matches = []\n\n for fop in files_or_paths:\n with fileobj(fop) as fo:\n matches.extend((line for line in fo if re.match(pattern, line)))\n\n return matches",
"def match(\n self,\n pattern: str\n ) -> Iterable[AbstractPage]:\n paths: List[str] = [page.path for page in self._page_table.keys()]\n for match in sorted(fn_filter(paths, pattern)):\n yield self[match]",
"def search_by_pattern(self, pattern, key=lambda data: data['meta']):\n result = []\n for node, data in self.traverse():\n if re.search(pattern, key(data), flags=re.VERBOSE):\n result.append([node, data])\n return result",
"def search_leaf_nodes_by_pattern(self, pattern, scope_pattern=False):\n is_match = lambda x, y: x.lower().startswith(y) if scope_pattern else y in x.lower()\n if pattern is not None:\n pattern = pattern.lower()\n searched_nodes = [\n node for name, node in self._leaf_nodes.items()\n if is_match(name, pattern)\n ]\n else:\n searched_nodes = [node for node in self._leaf_nodes.values()]\n return searched_nodes",
"def search_jobs(self, bill_id: int = 0, limit: int = 0) -> List[Job]:\n pass",
"def fetch_s3_keys_by_regex_pattern(s3_bucket, s3_directory, pattern):\n bucket_contents = s3_bucket.list(s3_directory)\n return [key for key in bucket_contents if pattern.search(key.name)]",
"def SearchRe(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n matches = re.findall(pattern, arg)\n proc = context.processor\n matches_nodeset = []\n for groups in matches:\n proc.pushResult()\n proc.writers[-1].startElement('Match', EMPTY_NAMESPACE)\n if type(groups) != type(()):\n groups = (groups,)\n for group in groups:\n proc.writers[-1].startElement('Group', EMPTY_NAMESPACE)\n proc.writers[-1].text(group)\n proc.writers[-1].endElement('Group')\n proc.writers[-1].endElement('Match')\n frag = proc.popResult()\n context.rtfs.append(frag)\n matches_nodeset.append(frag.childNodes[0])\n return matches_nodeset",
"def find_batch(self, arguments, start_with):\n if len(arguments) < 1:\n raise Exception(\"error, not valid number of arguments, need 2\")\n if not arguments[0].startswith(start_with):\n raise Exception(f\"error, tha name of batch need to start with {start_with}\")\n return arguments[0]",
"def search_nodes_by_pattern(self, pattern):\n searched_nodes = []\n if pattern and pattern != '/':\n pattern = pattern.lower()\n for name, node in self._normal_node_map.items():\n name = name.lower()\n pattern_index = name.rfind(pattern)\n if pattern_index >= 0 and name.find('/', pattern_index + len(pattern)) == -1:\n searched_nodes.append(node)\n return searched_nodes"
] | [
"0.6330027",
"0.57241905",
"0.5717007",
"0.5662955",
"0.5661944",
"0.5415436",
"0.5401124",
"0.53878415",
"0.5367021",
"0.5349991",
"0.53413725",
"0.53162223",
"0.5308199",
"0.5304073",
"0.5292824",
"0.5277535",
"0.5271562",
"0.52473783",
"0.52251303",
"0.5207287",
"0.5199997",
"0.5077761",
"0.5067722",
"0.50676364",
"0.50552154",
"0.50485784",
"0.5037991",
"0.50257653",
"0.5004253",
"0.4994167"
] | 0.7555124 | 0 |
Initializes querysets for keyword and headlinekeyword | def __init__(self):
self.keyword_queryset = Keyword.objects.all()
self.headlinekeyword_queryset = Headlinekeyword.objects.all()
self.headline_queryset = Headline.objects.all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d",
"def get_queryset(self):\r\n return Keyword.objects.all()",
"def setup_eager_loading(cls, queryset):\n queryset = queryset.prefetch_related('keywords_str')\n queryset = queryset.prefetch_related('tags_str')\n # queryset = queryset.prefetch_related('keywords')\n # queryset = queryset.prefetch_related('tags')\n return queryset",
"def SetupKeywords(self):\n kwlist = u\" \".join(self._keywords)\n self.SetKeyWords(0, kwlist)",
"def __init__(self, orKeywords=True, baseurl=None, version=\"1.0\"):\n if not baseurl: baseurl = RegistryService._STSCI_REGISTRY_BASEURL\n dalquery.DALQuery.__init__(self, baseurl, \"vaoreg\", version)\n self._kw = [] # list of individual keyword phrases\n self._preds = [] # list of SQL predicates\n self._svctype = None\n self._band = None\n self._orKw = orKeywords\n self._doSort = True\n self._dalonly = False",
"def set_keywords(self):\n\n if len(self.get_keywords()) == 0 and len(self.get_files()) > 0:\n self.keywords = self.files[0].get_parent()[\"title\"].split(\" \")\n for keyword in self.keywords:\n if str(keyword) in str(self.text):\n self.keywords = []",
"def init_by_keys(cls, **query):\n raise NotImplementedError()",
"def fetchRelatedkeywords(self, keyword, meta_keyword):\n prefix = [\"how\", \"which\", \"why\", \"where\", \"who\", \"when\", \"are\", \"what\"]\n suffix = [\"\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\",\n \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\" \"x\", \"y\", \"z\"]\n suffix_arr = list(map(lambda x: keyword+\" \"+x, suffix))\n prefix_arr = list(map(lambda x: x+\" \"+keyword, prefix))\n suffix_arr.extend(prefix_arr)\n # removes duplicates for a seed keyword\n duplicates = set()\n for word in suffix_arr:\n suggestion = self.fetchSuggestion(word, keyword, meta_keyword)\n if suggestion == False:\n return False\n self.api_rate_limit+=1\n for query in suggestion:\n if query['keyword'] not in duplicates:\n duplicates.add(query['keyword'])\n # allows same keywords with multiple keywords\n # self.results.append(query)\n if query['keyword'] not in self.already_fetched:\n # does not allow same keyword with multiple keywords\n # this line is temporary need to remove after fetching 10 categories\n self.results.append(query)\n self.queue.add(query['keyword']) \n self.keywords_count += len(self.results)",
"def __prepare_query(self, query, stopwords=[], stemming_func=None):\n pass",
"def get_queryset(self):\n\n # Get the keyword URL parameter value. Return empty string if the keyword is empty.\n # Filter the queryset based on the value of keyword and the queryset object's title.\n keyword = self.request.query_params.get('keyword', '')\n queryset = self.queryset.filter(title__icontains=keyword)\n\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n\n #return queryset\n return queryset.order_by('-first_published_at')",
"def __init__(self, tags=None, keyphrases=None, links=None):\n\n self.categories = set()\n\n if keyphrases:\n keyphrases = [keyphrase for keyphrase in keyphrases]\n self.keyphrases = keyphrases\n\n if tags:\n self.set_tags(tags)\n\n if links:\n self.set_links(links)",
"def init_queryset(self, qs):\n nsize = qs.count() # we assume each will be a line\n self._npages = nsize // self.height + (0 if nsize % self.height == 0 else 1)\n self._data = qs",
"def pre_search(self, qs):\n return qs",
"def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)",
"def _build_queryset(self, request, term):\n context = {}\n self.graphs_count = 0\n\n undefined = u''\n\n scribe = request.GET.get('scribe', undefined)\n # alternative names are for backward compatibility with old-style graph\n # search page\n script = request.GET.get('script', undefined)\n chartype = request.GET.get('chartype', undefined)\n character = request.GET.get('character', undefined)\n allograph = request.GET.get('allograph', undefined)\n component = request.GET.get('component', undefined)\n feature = request.GET.get('feature', undefined)\n repository = request.GET.get('repository', undefined)\n index = request.GET.get('index', undefined)\n\n excluded_images = None\n from digipal.utils import is_staff\n if not is_staff(request):\n excluded_images = Image.filter_permissions(\n Image.objects.all(), [MediaPermission.PERM_PRIVATE])\n\n none = u'-1'\n one_or_more = u'-2'\n\n from datetime import datetime\n\n t0 = datetime.now()\n t4 = datetime.now()\n\n wheres = []\n\n if self.search_hands:\n graphs = Graph.objects.filter(\n hand__id__in=self.search_hands.queryset)\n else:\n\n # .order_by('item_part__current_item__repository__name', 'item_part__current_item__shelfmark', 'descriptions__description','id')\n # Although we are listing hands on the front-end, we search for graphs and not for hand.\n # Two reasons:\n # searching for character and allograh at the same time through a Hand model would generate two separate joins to graph\n # this would bring potentially invalid results and it is also much slower\n # it is faster than excluding all the hands without a graph (yet another expensive join)\n #\n if term:\n term = term.replace('\"', '')\n graphs = Graph.objects.filter(\n Q(hand__descriptions__description__icontains=term) |\n Q(hand__scribe__name__icontains=term) |\n Q(hand__assigned_place__name__icontains=term) |\n Q(hand__assigned_date__date__icontains=term) |\n Q(hand__item_part__current_item__shelfmark__icontains=term) |\n Q(hand__item_part__current_item__repository__name__icontains=term) |\n Q(hand__item_part__current_item__repository__place__name__icontains=term) |\n Q(hand__item_part__historical_items__catalogue_number__icontains=term) | \\\n # JIRA 423\n Q(hand__item_part__historical_items__name__icontains=term) | \\\n Q(hand__item_part__group__historical_items__name__icontains=term) | \\\n Q(hand__item_part__display_label__icontains=term) | \\\n Q(hand__item_part__group__display_label__icontains=term)\n )\n else:\n graphs = Graph.objects.all()\n\n t1 = datetime.now()\n\n if index:\n graphs = graphs.filter(\n hand__item_part__historical_items__catalogue_number__iexact=index)\n if repository:\n matches = re.match(ur'^([^,]+?),([^,]+)$', repository)\n if matches:\n graphs = graphs.filter(Q(hand__item_part__current_item__repository__place__name__iexact=matches.group(\n 1).strip()) & Q(hand__item_part__current_item__repository__name__iexact=matches.group(2).strip()))\n if scribe:\n graphs = graphs.filter(hand__scribe__name__icontains=scribe)\n if script:\n graphs = graphs.filter(hand__script__name=script)\n\n if chartype:\n graphs = graphs.filter(\n idiograph__allograph__character__ontograph__ontograph_type__name=chartype)\n if character:\n graphs = graphs.filter(\n idiograph__allograph__character__name=character)\n if allograph:\n graphs = graphs.filter(idiograph__allograph__name=allograph)\n\n # we discard freak graph records (i.e. 
without annotation) to prevent\n # errors further down the line.\n graphs = graphs.filter(annotation__isnull=False)\n\n # if the user is not logged in we exclude graphs where the allograph is\n # hidden\n from digipal.models import has_edit_permission\n if not has_edit_permission(request, self.get_model()):\n graphs = graphs.exclude(idiograph__allograph__hidden=True)\n\n # exclude private images\n if excluded_images and excluded_images.count():\n graphs = graphs.exclude(annotation__image__in=excluded_images)\n\n # condition on component\n if component:\n component_where = Q(graph_components__component__name=component)\n if feature in [undefined, none]:\n # If no feature is specified we find all the graph which are supposed to have a component\n # according to their idiograph\n component_where = component_where | Q(\n idiograph__allograph__allograph_components__component__name=component)\n wheres.append(component_where)\n\n # condition on feature\n if feature not in [undefined, none, one_or_more]:\n wheres.append(Q(graph_components__features__name=feature))\n if feature in [one_or_more]:\n wheres.append(Q(graph_components__features__id__isnull=False))\n\n # ANDs all the Q() where clauses together\n if wheres:\n where_and = wheres.pop(0)\n for where in wheres:\n where_and = where_and & where\n\n graphs = graphs.filter(where_and)\n\n # Treat the feature=none case\n if feature == none:\n excluded_q = Q(graph_components__features__id__isnull=False)\n if component:\n excluded_q = excluded_q & Q(\n graph_components__component__name=component)\n excluded_graphs = Graph.objects.filter(excluded_q)\n graphs = graphs.exclude(\n id__in=excluded_graphs.values_list('id', flat=True))\n\n from digipal.utils import set_left_joins_in_queryset, get_str_from_queryset\n set_left_joins_in_queryset(graphs)\n # print get_str_from_queryset(graphs)\n\n t2 = datetime.now()\n\n # Get the graphs then id of all the related Hands\n # We use values_list because it is much faster, we don't need to fetch all the Hands at this stage\n # That will be done after pagination in the template\n # Distinct is needed here.\n #graphs = graphs.distinct().order_by('hand__scribe__name', 'hand__id', 'idiograph__allograph__character__ontograph__sort_order')\n chrono('graph filter:')\n graphs = graphs.distinct().order_by('hand__scribe__name', 'hand__id')\n chrono(':graph filter')\n\n # print graphs.query\n chrono('graph values_list:')\n graph_ids = graphs.values_list('id', 'hand_id')\n chrono(':graph values_list')\n\n# chrono('len:')\n# l = len(graph_ids)\n# print graph_ids.query\n# chrono(':len')\n\n # Build a structure that groups all the graph ids by hand id\n # context['hand_ids'] = [[1, 101, 102], [2, 103, 104]]\n # In the above we have two hands: 1 and 2. For hand 1 we have Graph 101\n # and 102.\n chrono('hand_ids:')\n context['hand_ids'] = [[0]]\n last = 0\n for g in graph_ids:\n if g[1] != context['hand_ids'][-1][0]:\n context['hand_ids'].append([g[1]])\n context['hand_ids'][-1].append(g[0])\n del(context['hand_ids'][0])\n chrono(':hand_ids')\n\n t3 = datetime.now()\n\n self.graphs_count = len(graph_ids)\n\n t4 = datetime.now()\n\n # print 'search %s; hands query: %s + graph count: %s' % (t4 - t0, t3 -\n # t2, t4 - t3)\n\n t5 = datetime.now()\n self._queryset = context['hand_ids']\n\n return self._queryset",
"def __init__(self, *args, **kwargs):\r\n\r\n super(DynamicMixin, self).__init__(*args, **kwargs)\r\n\r\n if not self._meta.queryset is None:\r\n self._meta.queryset = self._meta.queryset.all()",
"def init():\n execute(query=_query['cr_tweet'])\n execute(query=_query['cr_sentiment'])",
"def keywords(self, keywords):\n self._keywords = keywords",
"def get_queryset(self):\n queryset = Article.objects.all()\n username = self.request.query_params.get('username', None)\n if username is not None:\n queryset = queryset.filter(author__username__iexact=username)\n tag = self.request.query_params.get('tag', None)\n if tag is not None:\n queryset = queryset.filter(tags__tag_name__iexact=tag)\n search = self.request.query_params.get('search', None)\n if search is not None:\n queryset = queryset.filter(\n Q(title__icontains=search) |\n Q(slug__icontains=search) |\n Q(description__icontains=search) |\n Q(body__contains=search)\n )\n\n return queryset",
"def keyword_list(request):\n if request.method == 'GET':\n keywords = get_list_or_404(Keyword, is_active=True)\n if request.GET.get('pagination'):\n pagination = request.GET.get('pagination')\n if pagination == 'true':\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(keywords, request)\n serializer = KeywordSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n else:\n serializer = KeywordSerializer(keywords, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)",
"def keywords(self, keywords):\n\n self._keywords = keywords",
"def defaultKeywords(self, kwSet):\n return QsciLexerJava.keywords(self, kwSet)",
"def set_queries(self, **kwargs):\n for k, v in kwargs.items():\n self._query_dict[k] = v",
"def initialise():\n _initialiseGlobals()\n for pop in AnadPartOfPerspectiveDb.Iterator():\n _addToKnowledge(pop)\n return",
"def keywords(self):\n from hubspot3.keywords import KeywordsClient\n\n return KeywordsClient(**self.auth, **self.options)",
"def clearkeywords(self):\n self._kw = []",
"def __init__(self, query, title, link, subtext, searchterms, scripts):\n self.search_query = query\n self.title = title\n self.link = link\n self.subtext = subtext\n self.searchterms = searchterms\n self.link_scripts = scripts",
"def __init__(self):\n\t\tself.relevances = None",
"def initialize_survey(self, **kwargs):",
"def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n category = self.request.GET.get(\"category\")\n title = self.request.GET.get(\"title\")\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(category__category_name__icontains=query) |\n Q(title__icontains=query) \n ), Q.OR)\n\n else:\n if category:\n q_object.add(\n Q(category=category),\n Q.AND)\n if title:\n q_object.add(\n Q(title__icontains=title),\n Q.AND)\n\n return q_object"
] | [
"0.62303746",
"0.62133765",
"0.5979311",
"0.59030783",
"0.5808249",
"0.5781533",
"0.5741475",
"0.57359666",
"0.5595646",
"0.55684435",
"0.5490495",
"0.54874605",
"0.54597276",
"0.5437996",
"0.5390633",
"0.5304389",
"0.5293679",
"0.5290758",
"0.5277677",
"0.5255377",
"0.52434176",
"0.52301097",
"0.52124393",
"0.5198709",
"0.51841795",
"0.5179254",
"0.51700854",
"0.51626796",
"0.51400644",
"0.5125439"
] | 0.7972529 | 0 |
Returns a dictionary of the keywords and the list of corresponding headlines (ids only) | def keyword_headlines(self):
d = {}
for q in self.keyword_queryset:
d[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)
return d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())",
"def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)",
"def get_all_headlines(self):\r\n\t\tlist_vals = list(self.keyword_headlines().values())\r\n\t\tuniq_headlines = set()\r\n\t\tfor list_val in list_vals:\r\n\t\t\tfor headlineobj in list_val:\r\n\t\t\t\tuniq_headlines.add(headlineobj.headlineid.content)\r\n\r\n\t\treturn list(uniq_headlines)",
"def get_headlines(self, kw = None):\r\n\t\tif kw:\r\n\t\t\treturn self.get_headlines_with_keyword(kw)\r\n\t\telse:\r\n\t\t\treturn self.get_all_headlines()",
"def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords",
"def Keywords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('keywords', default)\n return [HEP.KeywordObject(i) for i in tmp]",
"def get_keywords(self, sectioned_text):\n \n keywords = []\n \n if 'full text' in list(sectioned_text.keys()):\n \n for word in self.keyword_list:\n if word in sectioned_text['full text']:\n keywords.append(word)\n \n else: \n fulltext = self.restitch_text(sectioned_text)\n for word in self.keyword_list:\n if word in fulltext:\n keywords.append(word)\n \n return keywords",
"def keywords(self):\n return {\n \"unary\": {\n k: v[0] for k, v in self.unary_commands.items()\n },\n \"terminal\": {\n k: v[0] for k, v in self.terminal_commands.items()\n },\n \"binary\": {\n k: v[0] for k, v in self.binary_commands.items()\n },\n }",
"def keyword_extraction(file_content):\n\n # [question, question....]\n for key, value in file_content.items():\n seg, hidden = ltp.seg([key])\n # ner: [[('Nh', 2, 2)]]\n ner = ltp.ner(hidden)\n # keywords: [('PERSON', \"吴轩\")], tuple_item: ('Nh', 2, 2)\n keywords = [(tag_to_name[tuple_item[0]], to_string(seg[0][tuple_item[1]: tuple_item[2]+1])) for tuple_item in ner[0]]\n file_content[key].keywords = keywords\n\n return file_content",
"def getKeywords(self):\n return",
"def chunk(keywords, lines):\n chunks = dict()\n chunk = []\n \n # Create an empty dictionary using all the keywords\n for keyword in keywords:\n chunks[keyword] = []\n \n # Populate dictionary with lists of chunks associated\n # with the keywords in the list \n for line in lines:\n if line.strip():\n token = line.split()[0]\n if token in keywords:\n chunk = [line] \n chunks[token].append(chunk) \n else:\n chunk.append(line)\n\n return chunks",
"def parse_keywords(medline):\n keyword_list = medline.find(\"KeywordList\")\n keywords = list()\n if keyword_list is not None:\n for k in keyword_list.findall(\"Keyword\"):\n if k.text is not None:\n keywords.append(k.text)\n keywords = \"; \".join(keywords)\n else:\n keywords = \"\"\n return keywords",
"def extract_keywords(raw_text,id):\n\n print(\"Extracting keywords for \"+id)\n\n stemmer = nltk.PorterStemmer()\n\n # Construct text\n\n # Tokens\n tokens = nltk.word_tokenize(raw_text)\n # filter undesirable words and format\n words = [w.replace('\\'','') for w in tokens if len(w)>=3]\n text = nltk.Text(words)\n\n tagged_text = nltk.pos_tag(text)\n #nouns = [tg[0] for tg in tagged_text if tg[1]=='NN' or tg[1]=='NNP' ]\n #print(nouns)\n\n # multi-term\n multiterms = set()\n stem_dico = {}\n for i in range(len(tagged_text)) :\n # max length 4 for multi-terms ==> 3\n for l in range(1,4) :\n if i+l < len(tagged_text) :\n tags = [tagged_text[k] for k in range(i,i+l)]\n if potential_multi_term(tags) :\n multistemlist = [str.lower(stemmer.stem(tagged_text[k][0])) for k in range(i,i+l)]\n #multistem.sort(key=str.lower)\n\t\t #python 3 : remove .encode('ascii','ignore')\n multistem = functools.reduce(lambda s1,s2 : s1+' '+s2,multistemlist)\n rawtext = functools.reduce(lambda s1,s2 : s1+' '+s2,[str.lower(tagged_text[k][0]) for k in range(i,i+l)])\n multiterms.add(multistem)\n if multistem in stem_dico :\n stem_dico[multistem].add(rawtext)\n else :\n stem_dico[multistem] = set([rawtext])\n\n return [list(multiterms),stem_dico]",
"def determine_keywords(self):\n\n split = dict()\n split['email_cc'] = re.compile(\"^\\s*CC[-_]?MAIL[:=]\\s*(.*)\")\n split['email_cc2'] = re.compile(\"^\\s*C[Cc][:=]\\s*(.*)\")\n split['fixed_in'] = re.compile(\"^\\s*FIXED[-_]?IN[:=]\\s*(.*)\")\n\n numeric = dict()\n numeric['bug_fixed'] = re.compile(\"^\\s*(?:BUGS?|FEATURE)[:=]\\s*(.+)\")\n numeric['bug_cc'] = re.compile(\"^\\s*CCBUGS?[:=]\\s*(.+)\")\n\n presence = dict()\n presence['email_gui'] = re.compile(\"^\\s*GUI:\")\n presence['silent'] = re.compile(\"(?:CVS|SVN|GIT|SCM).?SILENT\")\n presence['notes'] = re.compile(\"(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')\")\n\n results = defaultdict(list)\n for line in self.commit.message.split(\"\\n\"):\n # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off\n # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them\n line = re.sub(\"^Summary: (.+)\", \"\\g<1>\", line)\n\n # Start processing our keywords...\n for (name, regex) in split.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += [result.strip() for result in match.group(1).split(\",\")]\n\n for (name, regex) in numeric.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += re.findall(\"(\\d{1,10})\", match.group(1))\n\n for (name, regex) in presence.iteritems():\n if re.match( regex, line ):\n results[name] = True\n\n self.keywords = results",
"def get_meta_keywords(self):\n return self.get_meta_content(self.article.doc, \"meta[name=keywords]\")",
"def get_keywords(self):\n keys = []\n for post in self:\n keys.extend(post.Keywords)\n return list(sorted(set(keys)))",
"def add_keywords(self, response: Response) -> list:\n return response.xpath(\"//ul[@class='term']/li/a/text()\").getall()",
"def get_entities_dict(p_str):\n nlp = en_core_web_sm.load()\n doc = nlp(p_str)\n entities = {}\n relevant_keywords = []\n list_of_types = ['NORP', 'ORG', 'GPE', 'LAW', 'LANGUAGE']\n for X in doc.ents:\n if not(X.label_ in entities):\n entities[X.label_] = []\n entities[X.label_].append(X.text)\n if X.label_ in list_of_types:\n relevant_keywords.append(X.text)\n print(entities)\n print(\"HERE\")\n print(relevant_keywords)\n return entities, relevant_keywords",
"def articles_id_headwords (_id):\n\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE no = :id\n ORDER BY sortkeyword\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'id' : _id, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)",
"def _get_keywords(self, title: str):\n # Prepare data\n keywords = set()\n stops = set(nltk.corpus.stopwords.words(\"english\"))\n stemmer = nltk.stem.SnowballStemmer(\"english\")\n ent_types = [\n \"PERSON\", \"ORGANIZATION\", \"FACILITY\", \"LOCATION\", \"DATE\",\n \"TIME\", \"GPE\", \"MONEY\",\n ]\n excluded_word_types = [\"RB\", \"IN\", \"PRP\"]\n\n # Tokenize and chunk words using NLTK\n tokens = nltk.tokenize.word_tokenize(title)\n positions = nltk.pos_tag(tokens)\n chunk = nltk.ne_chunk(positions)\n\n # Make a word list of keywords we want to add, that\n # are not part of our excluded word types.\n words = set()\n for pos in positions:\n word, word_type = pos\n if word.isalnum() and word_type not in excluded_word_types:\n words.add(word)\n\n # Add all entities to keyword list and remove them from\n # our remaining word set so they don't get added again\n # and stemmed later.\n for subtree in chunk.subtrees(filter=lambda t: t.label() in ent_types):\n for leaf in subtree.leaves():\n keywords.add(leaf[0])\n if leaf[0] in words:\n words.remove(leaf[0])\n\n # Add remaining words in list and stem them to base form,\n # stemming means we change words from e.g. \"eating\" to \"eat\".\n for word in words:\n if word not in stops:\n keywords.add(stemmer.stem(word))\n\n return sorted([keyword.lower() for keyword in keywords])",
"def get_dictionary_file_lines_for_keywords(self):\n keywords_iter = iter(self.keywords)\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n\n self.dictionary_file.open_handle()\n result_lines = list()\n while next_keyword:\n line = self.dictionary_file.read_line_to_obj()\n if not line:\n print(\"Reached end of dictionary file\")\n break\n\n if line.term < next_keyword:\n continue\n elif line.term == next_keyword:\n print(\"Found postings list for term {}\".format(next_keyword))\n result_lines.append(line)\n\n try:\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n except StopIteration:\n print(\"Finished searching for all keywords\")\n break\n\n return result_lines",
"def headwords_id_context (_id):\n\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, \"\"\"\n SELECT keyword, sortkeyword\n FROM keyword\n WHERE id = :id\n \"\"\", { 'id' : _id })\n keyword, sortkeyword = res.fetchone ()\n\n res1 = execute (conn, \"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE sortkeyword < :sortkeyword\n ORDER BY sortkeyword DESC, n DESC, no DESC\n LIMIT :limit\n \"\"\", { 'sortkeyword' : sortkeyword, 'limit' : limit })\n\n res2 = execute (conn, \"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE sortkeyword >= :sortkeyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n \"\"\", { 'sortkeyword' : sortkeyword, 'limit' : limit + 1 })\n\n res = []\n\n for row in reversed (res1.fetchall ()):\n res.append (row[:3])\n for row in res2:\n res.append (row[:3])\n\n return make_headwords_response (res, limit)",
"def get_full_data(self):\n to_return = {}\n keywords = Keywords()\n for word in self.keywords:\n to_return[word] = keywords[word].get_info()\n return to_return",
"def find_keywords(anchor, keywords=['']):\n rel_keywords = []\n href, content = parse_anchor(anchor)\n \n for keyword in keywords:\n kw = keyword.lower()\n if kw in href.lower() or kw in content.lower():\n rel_keywords.append(keyword)\n \n return rel_keywords",
"def getMetaKeywords(self, article):\n return self.getMetaContent(article.doc, \"meta[name=keywords]\")",
"def get_all_keywords(resource):\n keywords = []\n resource.populate()\n for res in [i for i in resource.imports.data if isinstance(i, robot.parsing.settings.Resource)]:\n keyword_file = os.path.abspath('{}/{}'.format(res.directory, res.name))\n if keyword_file not in processed:\n res_obj = ResourceFile(keyword_file)\n processed[keyword_file] = res_obj\n keywords += get_all_keywords(res_obj)\n for keyword in resource.keywords:\n print(keyword.name)\n keywords.append(tuple((keyword.source, keyword.name, keyword.args.value if keyword.args.value else [])))\n return keywords",
"def keywords(self):\n return self.__keywords",
"def get_article_keywords(article,\n keywords,\n preprocess_type=PreprocessWordType.LEMMATIZE):\n matches = set()\n for word in article.words:\n preprocessed_word = query_utils.preprocess_word(word,\n preprocess_type)\n if preprocessed_word in keywords:\n matches.add(preprocessed_word)\n return sorted(list(matches))",
"def get_required_keywords_from_original():\n required_keywords = {}\n f = open('required_keywords.txt', 'r')\n curr_instrument = \"\"\n for line in f:\n if line[-2:] == \":\\n\":\n instrument = line[:-2]\n curr_instrument = instrument\n if instrument not in required_keywords.keys():\n required_keywords[instrument] = {}\n #print (line[:-2])\n elif line == \"\\n\":\n pass\n else:\n line = re.sub('[(),\\'|]', '', line)\n line = re.sub('\\.', ' ', line)\n new_line = line.split(' ')\n final_line = []\n final_line.append(new_line[0])\n for l in range(1,len(new_line)):\n temp_word = str(new_line[l][:8])\n temp_word = re.sub('\\n','',temp_word)\n if temp_word not in final_line:\n final_line.append(temp_word)\n required_keywords[curr_instrument][final_line[0]] = final_line[1:]\n more_required = ['REFTYPE', 'DESCRIP', 'AUTHOR', 'PEDIGREE', 'HISTORY']\n for k,v in required_keywords.iteritems():\n path = 'required_keywords/' + k + '_required_keywords.csv'\n with open(path, 'wb') as csvfile:\n keywriter = csv.writer(csvfile, delimiter=' ', quotechar='|',quoting=csv.QUOTE_MINIMAL)\n for key,value in v.iteritems():\n keywriter.writerow([key]+value + more_required)",
"def GetKeywords(self):\n if wx.VERSION >= (2, 9, 0, 0, ''):\n return [(0, R_KEYWORDS), (1, R_KEYWORDS2), (2, R_KEYWORDS3)]\n else:\n return [(1, KEYWORDS)]"
] | [
"0.763168",
"0.7235824",
"0.71703315",
"0.65968394",
"0.61900103",
"0.6189536",
"0.609543",
"0.60951686",
"0.60745686",
"0.6041486",
"0.6003403",
"0.5991974",
"0.5966871",
"0.5924465",
"0.5924083",
"0.5896881",
"0.5850133",
"0.58487964",
"0.58361",
"0.58255136",
"0.5808301",
"0.58039963",
"0.57934415",
"0.5756343",
"0.5741688",
"0.57205516",
"0.5694092",
"0.5693882",
"0.5686357",
"0.568364"
] | 0.81225014 | 0 |
Returns a list of keywords | def get_keywords(self):
return list(self.keyword_headlines().keys()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keywords(self):\n return list(self._kw)",
"def keywords(self):\n return self._keywords",
"def keywords(self):\n return self._keywords",
"def getKeywords(self):\n return",
"def keywords(self):\n return self.__keywords",
"def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords",
"def get_keywords(self):\n keys = []\n for post in self:\n keys.extend(post.Keywords)\n return list(sorted(set(keys)))",
"def GetKeywords(self):\n return self._code['keywords']",
"def keywords(self) -> Set[str]:\n return self._keywords",
"def GetKeywords(self):\n if wx.VERSION >= (2, 9, 0, 0, ''):\n return [(0, R_KEYWORDS), (1, R_KEYWORDS2), (2, R_KEYWORDS3)]\n else:\n return [(1, KEYWORDS)]",
"def get_keywords(self):\n\n if str(self.keywords) == \"unset\": return []\n # if self.keywords: return self.keywords\n if len(self.keywords) > 0: return self.keywords\n # retrieve from args and return if exists\n keywords = Settings.get_keywords() or []\n if len(keywords) > 0: return keywords\n if not Settings.prompt(\"keywords\"):\n self.keywords = \"unset\" # used to skip prompting for value in future\n return []\n question = {\n 'type': 'input',\n 'name': 'keywords',\n 'message': 'Keywords:',\n 'validate': ListValidator\n }\n keywords = prompt(question)[\"keywords\"]\n keywords = [n.strip() for n in keywords.split(\",\")]\n # confirm keywords\n if not Settings.confirm(keywords): return self.get_keywords()\n self.keywords = keywords\n return self.keywords",
"def __get_keywords(self, text_list):\r\n specialKW = [\r\n 'run keyword',\r\n 'run keyword and continue on failure',\r\n 'run keyword and expect error',\r\n 'run keyword and ignore error',\r\n 'run keyword and return'\r\n 'run keyword and return if',\r\n 'run keyword and return status',\r\n 'run keyword if',\r\n 'run keyword if all critical tests passed',\r\n 'run keyword if all tests passed',\r\n 'run keyword if any critical tests failed',\r\n 'run keyword if any tests failed',\r\n 'run keyword if test failed',\r\n 'run keyword if test passed',\r\n 'run keyword if timeout occurred',\r\n 'run keyword unless',\r\n 'run keywords',\r\n 'wait until keyword succeeds',\r\n 'repeat keyword',\r\n 'else'\r\n ]\r\n specialSettings = [\r\n '[Arguments]',\r\n '[Documentation]'\r\n ]\r\n L = []\r\n if text_list[0] in specialSettings:\r\n return L\r\n for item in text_list:\r\n if self.__is_keyword(item):\r\n L.append(item)\r\n if not item.replace('_', ' ').replace('-', ' ').lower() in specialKW:\r\n break\r\n return L",
"def GetKeywords(self):\n return [FS_COMMANDS, FS_STDLIB, FS_FUNC, FS_CLASS]",
"def get_keywords(keyword_list: List[Tuple[str, str]], keyword_type: str) -> List[str]:\n keywords = [x[0] for x in keyword_list if x[1].startswith(keyword_type)]\n\n return keywords",
"def Keywords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('keywords', default)\n return [HEP.KeywordObject(i) for i in tmp]",
"def extract_keywords(query):\n nlp_server_response = __post_request_nlpserver(extract_special_characters(query))\n keywords = []\n\n for sentence in nlp_server_response['sentences']:\n for token in sentence['tokens']:\n if token['pos'] in {'NN', 'JJ', 'NNP', 'NNS', 'NNPS', 'VB', 'VBN', 'VBZ', 'VBP', 'VBG'}:\n if not token[\"lemma\"].lower() in english_stopwords:\n if not token['lemma'] in {'be', 'have'}:\n keywords.append(token['lemma'])\n return keywords",
"def keywords(text:str) -> list:\n return sorted(set(text.split(' ')), key=frequency, reverse=True)[0:5]",
"def keywords(self, **kwargs):\n\n path = self._get_movie_id_path('keywords')\n resp = self._get_method(path, kwargs)\n return resp",
"def get_keywords(self, sectioned_text):\n \n keywords = []\n \n if 'full text' in list(sectioned_text.keys()):\n \n for word in self.keyword_list:\n if word in sectioned_text['full text']:\n keywords.append(word)\n \n else: \n fulltext = self.restitch_text(sectioned_text)\n for word in self.keyword_list:\n if word in fulltext:\n keywords.append(word)\n \n return keywords",
"def getKeywords(tmdbKeywords):\n \n words = []\n if \"keywords\" in tmdbKeywords:\n for keyword in tmdbKeywords[\"keywords\"]:\n words += _format(keyword[\"name\"]).split()\n else:\n raise AttributeError(\"%s instance has no attribute keywords\" % tmdbKeywords) \n return words",
"def keywords(self):\n defined_keywords = [\n ('allowempty_map', 'allowempty_map'),\n ('assertion', 'assertion'),\n ('default', 'default'),\n ('class', 'class'),\n ('desc', 'desc'),\n ('enum', 'enum'),\n ('example', 'example'),\n ('extensions', 'extensions'),\n ('format', 'format'),\n ('func', 'func'),\n ('ident', 'ident'),\n ('include_name', 'include'),\n ('length', 'length'),\n ('map_regex_rule', 'map_regex_rule'),\n ('mapping', 'mapping'),\n ('matching', 'matching'),\n ('matching_rule', 'matching_rule'),\n ('name', 'name'),\n ('nullable', 'nullable'),\n ('parent', 'parent'),\n ('pattern', 'pattern'),\n ('pattern_regexp', 'pattern_regexp'),\n ('range', 'range'),\n ('regex_mappings', 'regex_mappings'),\n ('required', 'required'),\n ('schema', 'schema'),\n ('schema_str', 'schema_str'),\n ('sequence', 'sequence'),\n ('type', 'type'),\n ('type_class', 'type_class'),\n ('unique', 'unique'),\n ('version', 'version'),\n ]\n found_keywords = []\n\n for var_name, keyword_name in defined_keywords:\n if getattr(self, var_name, None):\n found_keywords.append(keyword_name)\n\n return found_keywords",
"def get_keywords(source_or_file):\n tree = get_ast(source_or_file)\n lister = KeywordLister().visit(tree)\n return lister.data",
"def add_keywords(self, response: Response) -> list:\n return response.xpath(\"//ul[@class='term']/li/a/text()\").getall()",
"def get_all_keywords(resource):\n keywords = []\n resource.populate()\n for res in [i for i in resource.imports.data if isinstance(i, robot.parsing.settings.Resource)]:\n keyword_file = os.path.abspath('{}/{}'.format(res.directory, res.name))\n if keyword_file not in processed:\n res_obj = ResourceFile(keyword_file)\n processed[keyword_file] = res_obj\n keywords += get_all_keywords(res_obj)\n for keyword in resource.keywords:\n print(keyword.name)\n keywords.append(tuple((keyword.source, keyword.name, keyword.args.value if keyword.args.value else [])))\n return keywords",
"def get_meta_keywords(self):\n return self.get_meta_content(self.article.doc, \"meta[name=keywords]\")",
"def get_keywords(self):\n all_keywords = []\n z_index = 0\n for zettel in self.lemma_tokens:\n keywords = []\n w_index = 0\n cur_zettel_dict = {}\n for word in zettel:\n cur_zettel_dict.setdefault(word[0], 0)\n cur_word_total_score = self.all_scores[z_index][w_index]\n if cur_zettel_dict[word[0]] > cur_word_total_score:\n w_index += 1\n continue\n else:\n cur_zettel_dict[word[0]] = cur_word_total_score\n w_index += 1\n cur_sorted = sorted(cur_zettel_dict.items(), key=lambda kv: kv[1], reverse=True)\n for i in range(self.keyword_n):\n keywords.append(str(cur_sorted[i]))\n z_index += 1\n all_keywords.append(keywords)\n return all_keywords",
"def get_keywords(self, pattern=\"*\"):\n\n sql = \"\"\"SELECT collection.collection_id, collection.name,\n keyword.name, keyword.doc, keyword.args\n FROM collection_table as collection\n JOIN keyword_table as keyword\n WHERE collection.collection_id == keyword.collection_id\n AND keyword.name like ?\n ORDER by collection.name, keyword.name\n \"\"\"\n pattern = self._glob_to_sql(pattern)\n cursor = self._execute(sql, (pattern,))\n result = [(row[0], row[1], row[2], row[3], row[4])\n for row in cursor.fetchall()]\n return list(sorted(set(result), key=itemgetter(2)))",
"def target_words(self) -> List[str]:\n return list(map(\n lambda w: self.spaces[w.lower()] \n if w.lower() in self.spaces else w.lower(), \n self.keywords\n ))",
"def get_paper_keywords(tree):\n\tpath = '//table/tr/th[text() = \"Keywords:\"]/following-sibling::td/text()'\n\tkeywords = tree.xpath(path)\n\t# xpath returns a list with the keywords as a single string element separated by new lines, commas or semi-colons\n\t# Make this into a list of keywords\n\tif keywords:\n\t\t# Split on new lines, commas and semi-colons\n\t\tkeywords = re.split('[\\\\n,;]', keywords[0])\n\t\t# Remove trailing white space and empty strings\n\t\tkeywords = [kw.strip() for kw in keywords if kw]\n\n\treturn keywords",
"def GetKeywords(self):\n kwlist = [CSS1_KEYWORDS , CSS_PSUEDO_CLASS]\n # 2.9 supports CSS3 so for 2.8 just add CSS3 keywords to the css2 list \n if wx.VERSION < (2, 9, 0, 0, ''):\n css2_kw = (CSS2_KEYWORDS[0], \" \".join((CSS2_KEYWORDS[1], CSS3_KEYWORDS[1])))\n kwlist.append(css2_kw)\n else:\n kwlist.append(CSS2_KEYWORDS)\n kwlist.append(CSS3_KEYWORDS)\n kwlist.append(PSEUDO_ELEMENTS)\n return kwlist"
] | [
"0.82068",
"0.80030465",
"0.80030465",
"0.79815817",
"0.7965522",
"0.795927",
"0.79124683",
"0.7827855",
"0.7802334",
"0.7794272",
"0.77290803",
"0.7597001",
"0.7496878",
"0.74123514",
"0.7399484",
"0.73888963",
"0.737007",
"0.7306008",
"0.7305014",
"0.73011196",
"0.7273646",
"0.72724897",
"0.7268493",
"0.72314644",
"0.7217067",
"0.7145265",
"0.7125484",
"0.7044258",
"0.701768",
"0.7009621"
] | 0.8675625 | 0 |
Returns a list of headlines if given a keyword | def get_headlines(self, kw = None):
if kw:
return self.get_headlines_with_keyword(kw)
else:
return self.get_all_headlines() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)",
"def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d",
"def get_all_headlines(self):\r\n\t\tlist_vals = list(self.keyword_headlines().values())\r\n\t\tuniq_headlines = set()\r\n\t\tfor list_val in list_vals:\r\n\t\t\tfor headlineobj in list_val:\r\n\t\t\t\tuniq_headlines.add(headlineobj.headlineid.content)\r\n\r\n\t\treturn list(uniq_headlines)",
"def get_headline_search(query):\n query = query.replace(' ',\"\")\n category=\"\"\n get_headlines_url = 'https://newsapi.org/v2/top-headlines?category={}&query={}&language=en&apiKey={}'.format(category,query,api_key)\n headlines_results = []\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response = json.loads(get_headlines_data)\n if get_headlines_response['articles']:\n headlines_result_list=get_headlines_response['articles']\n for headline in headlines_result_list:\n headlines_results.append(headline)\n return headlines_results",
"def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())",
"def all_headlines_from(url):\n pass",
"def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines",
"def all_headlines(html_root_node):\n pass",
"def gather_headlines(urls):\n pass",
"def is_headline(node):\n pass",
"def get_negative_headlines(headlines,words):\r\n negative_headlines = []\r\n for headline in headlines:\r\n for word in words:\r\n if headline.lower().find(word) != -1: #If particular word is found in lowercased headline.\r\n negative_headlines.append(headline)\r\n break #Stop iterating through words when we have found one negative word.\r\n return negative_headlines",
"def test_headlines_predefined(self) -> None:\n for headline in self.report.headlines:\n if not self.rules.get_headline_rules(headline.name):\n headlines = [headline.name for headline in self.rules.headlines]\n suggestion, _ = process.extractOne(\n headline.name, headlines, scorer=fuzz.partial_ratio\n )\n self.add_error(\n f\"{headline.name} är inte en valid rubrik. \"\n f\"Rättningsförlsag: {suggestion}.\",\n headline=headline,\n )\n elif re.search(\"\\\\W{1,}\", headline.name, re.I):\n self.add_error(\n f\"Rubriken {headline.name} innehåller tecken som inte är \"\n \"alfanumeriska vilket inte är tillåtet för en rubrik.\",\n headline=headline,\n )",
"def colorize_headlines_visitor(c, p, item):\n if p.h.startswith(\"!= \"):\n f = item.font(0)\n f.setBold(True)\n item.setFont(0, f)\n raise leoPlugins.TryNext",
"def parse_headlines(self):\n headlines = re.findall(r\"^\\.\\.\\.(.*?)\\.\\.\\.[ ]?\\n\\n\", self.unixtext,\n re.M | re.S)\n headlines = [\" \".join(h.replace(\"...\",\n \", \").replace(\"\\n\", \" \").split())\n for h in headlines]\n return headlines",
"def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)",
"def get_headlines(id):\n get_headlines_url = secondary_url.format(id,api_key)\n\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response= json.loads(get_headlines_data)\n\n headlines_results = None\n\n if get_headlines_response['articles']:\n headlines_results_list = get_headlines_response['articles']\n headlines_results = process_headlines(headlines_results_list)\n\n return headlines_results",
"def split_head(line, is_head=lambda line: line.startswith('>')):\n if is_head(line):\n return True\n else:\n return False",
"def find_keywords(anchor, keywords=['']):\n rel_keywords = []\n href, content = parse_anchor(anchor)\n \n for keyword in keywords:\n kw = keyword.lower()\n if kw in href.lower() or kw in content.lower():\n rel_keywords.append(keyword)\n \n return rel_keywords",
"def get_wiki_lines(wt, predicate=None):\n return [line for line in wt.contents.split('\\n') if not callable(predicate) or predicate(line)]",
"def onHeadlineClick(self, tag, keywords):\n self.handleEvent(\"headclick1\", tag, keywords)",
"def get_dictionary_file_lines_for_keywords(self):\n keywords_iter = iter(self.keywords)\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n\n self.dictionary_file.open_handle()\n result_lines = list()\n while next_keyword:\n line = self.dictionary_file.read_line_to_obj()\n if not line:\n print(\"Reached end of dictionary file\")\n break\n\n if line.term < next_keyword:\n continue\n elif line.term == next_keyword:\n print(\"Found postings list for term {}\".format(next_keyword))\n result_lines.append(line)\n\n try:\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n except StopIteration:\n print(\"Finished searching for all keywords\")\n break\n\n return result_lines",
"def lemma_headwords(self):\n new_var = 'lemma_headword'\n lemma_heads = [clx._lemmas[i]['Head'] for i in xrange(len(clx._lemmas))]\n has_item = self.compare_items(lemma_heads)\n new_column = []\n if False in has_item:\n self._warning_msg('lemma_headword', lemma_heads)\n for record, exists in zip(self._dict, has_item):\n if exists:\n lemma_id = clx.wordform_lookup(record)[0].IdNumLemma\n lemma_head = clx.lemma_by_id(lemma_id).Head\n else:\n lemma_head = None\n new_column.append(lemma_head)\n self._append_column(new_column, new_var)",
"def articles_id_headwords (_id):\n\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE no = :id\n ORDER BY sortkeyword\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'id' : _id, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)",
"def get_headline_data(website_url, source):\n\tpage = requests.get(website_url)\n\tpage.raise_for_status()\n\tall_headlines = []\n\tbs_obj = bs4.BeautifulSoup(page.text, 'html.parser')\n\titem_list = bs_obj.select('item')\n\tprintable = set(string.printable)\n\tfor curr_item in item_list:\n\t\titem_title = curr_item.title.string\n\t\tfollowup_link = curr_item.select('link')[0].string\n\t\tdatestamp = curr_item.select('pubdate')[0].string\n\t\titem_title = item_title.replace(\"'\", \"'\")\n\t\tfollowup_link = followup_link.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n\t\titem_title = item_title.encode('utf-8', errors='ignore')\n\t\tnew_headline = data_structures.Headline(item_title, followup_link, source, datestamp)\n\t\tall_headlines.append(new_headline)\n\treturn all_headlines",
"def get_headlines():\n country = request.args.get('country', type=str)\n if country is not None:\n data = te.getNews(country=country).dropna()\n return jsonify(data.to_dict(orient='records'))\n data = te.getNews()\n return jsonify(te.getNews().dropna().to_dict(orient='records'))",
"def fetch_headlines(self, retry=False):\n top_headlines_res = None\n try:\n top_headlines_res = self.news_api.get_top_headlines(\n country='in', page_size=100)\n except newsapi.newsapi_exception.NewsAPIException as err:\n print('NewsAPI Exception==', err)\n if not retry:\n print('Retrying with another key...')\n self.api_key = os.getenv('NEWS_API_KEY_BACKUP')\n self.configure_news_api()\n top_headlines_res = self.fetch_headlines(retry=True)\n else:\n return None\n except Exception as err:\n print('Exception occurred==', err)\n return None\n headlines = {}\n if top_headlines_res and top_headlines_res['status'] == 'ok':\n headlines = top_headlines_res\n else:\n headlines = None\n return headlines",
"def get_all_headline_data():\n\twebsites = database.get_website_URLs()\n\tall_headlines_arr = []\n\tfor curr_elt in websites:\n\t\tcurr_website = curr_elt[0]\n\t\tsource = curr_elt[1]\n\t\tcurr_headline_arr = get_headline_data(curr_website, source)\n\t\tall_headlines_arr.append(curr_headline_arr)\n\treturn all_headlines_arr",
"def all_words(self, min_word_length=0):\n return [word for headline in self.headlines for word in\n headline.words(min_word_length=min_word_length)]",
"def get_keywords(self, sectioned_text):\n \n keywords = []\n \n if 'full text' in list(sectioned_text.keys()):\n \n for word in self.keyword_list:\n if word in sectioned_text['full text']:\n keywords.append(word)\n \n else: \n fulltext = self.restitch_text(sectioned_text)\n for word in self.keyword_list:\n if word in fulltext:\n keywords.append(word)\n \n return keywords",
"def getHeadParts(self):\n return self.headParts"
] | [
"0.8183849",
"0.7191412",
"0.7000567",
"0.66961074",
"0.66491723",
"0.66434336",
"0.6356157",
"0.6131674",
"0.61187565",
"0.59622735",
"0.59542644",
"0.5890983",
"0.5875384",
"0.57926047",
"0.57580566",
"0.5697247",
"0.56422436",
"0.5626344",
"0.55865705",
"0.5584343",
"0.5565354",
"0.5558015",
"0.5479626",
"0.5474127",
"0.54610187",
"0.5453939",
"0.5449592",
"0.5445621",
"0.5369793",
"0.53384167"
] | 0.8182654 | 1 |
Returns a list of the headlines with the corresponding keyword | def get_headlines_with_keyword(self, kw):
key_head = self.keyword_headlines()
headlines = set()
for headlinekw in key_head[kw]:
content = headlinekw.headlineid.content
headlines.add(content)
return list(headlines) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_headlines(self, kw = None):\r\n\t\tif kw:\r\n\t\t\treturn self.get_headlines_with_keyword(kw)\r\n\t\telse:\r\n\t\t\treturn self.get_all_headlines()",
"def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d",
"def get_all_headlines(self):\r\n\t\tlist_vals = list(self.keyword_headlines().values())\r\n\t\tuniq_headlines = set()\r\n\t\tfor list_val in list_vals:\r\n\t\t\tfor headlineobj in list_val:\r\n\t\t\t\tuniq_headlines.add(headlineobj.headlineid.content)\r\n\r\n\t\treturn list(uniq_headlines)",
"def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())",
"def get_headline_search(query):\n query = query.replace(' ',\"\")\n category=\"\"\n get_headlines_url = 'https://newsapi.org/v2/top-headlines?category={}&query={}&language=en&apiKey={}'.format(category,query,api_key)\n headlines_results = []\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response = json.loads(get_headlines_data)\n if get_headlines_response['articles']:\n headlines_result_list=get_headlines_response['articles']\n for headline in headlines_result_list:\n headlines_results.append(headline)\n return headlines_results",
"def all_headlines_from(url):\n pass",
"def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines",
"def parse_headlines(self):\n headlines = re.findall(r\"^\\.\\.\\.(.*?)\\.\\.\\.[ ]?\\n\\n\", self.unixtext,\n re.M | re.S)\n headlines = [\" \".join(h.replace(\"...\",\n \", \").replace(\"\\n\", \" \").split())\n for h in headlines]\n return headlines",
"def all_headlines(html_root_node):\n pass",
"def gather_headlines(urls):\n pass",
"def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)",
"def lemma_headwords(self):\n new_var = 'lemma_headword'\n lemma_heads = [clx._lemmas[i]['Head'] for i in xrange(len(clx._lemmas))]\n has_item = self.compare_items(lemma_heads)\n new_column = []\n if False in has_item:\n self._warning_msg('lemma_headword', lemma_heads)\n for record, exists in zip(self._dict, has_item):\n if exists:\n lemma_id = clx.wordform_lookup(record)[0].IdNumLemma\n lemma_head = clx.lemma_by_id(lemma_id).Head\n else:\n lemma_head = None\n new_column.append(lemma_head)\n self._append_column(new_column, new_var)",
"def test_headlines_predefined(self) -> None:\n for headline in self.report.headlines:\n if not self.rules.get_headline_rules(headline.name):\n headlines = [headline.name for headline in self.rules.headlines]\n suggestion, _ = process.extractOne(\n headline.name, headlines, scorer=fuzz.partial_ratio\n )\n self.add_error(\n f\"{headline.name} är inte en valid rubrik. \"\n f\"Rättningsförlsag: {suggestion}.\",\n headline=headline,\n )\n elif re.search(\"\\\\W{1,}\", headline.name, re.I):\n self.add_error(\n f\"Rubriken {headline.name} innehåller tecken som inte är \"\n \"alfanumeriska vilket inte är tillåtet för en rubrik.\",\n headline=headline,\n )",
"def get_all_headline_data():\n\twebsites = database.get_website_URLs()\n\tall_headlines_arr = []\n\tfor curr_elt in websites:\n\t\tcurr_website = curr_elt[0]\n\t\tsource = curr_elt[1]\n\t\tcurr_headline_arr = get_headline_data(curr_website, source)\n\t\tall_headlines_arr.append(curr_headline_arr)\n\treturn all_headlines_arr",
"def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords",
"def column_headlines(self):\n elements = self._selenium.find_elements_by_xpath(\n '//div[@id=\"content\"]/table/thead/tr/th/a')\n return [x.text for x in elements]",
"def get_headlines(id):\n get_headlines_url = secondary_url.format(id,api_key)\n\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response= json.loads(get_headlines_data)\n\n headlines_results = None\n\n if get_headlines_response['articles']:\n headlines_results_list = get_headlines_response['articles']\n headlines_results = process_headlines(headlines_results_list)\n\n return headlines_results",
"def add_keywords(self, response: Response) -> list:\n return response.xpath(\"//ul[@class='term']/li/a/text()\").getall()",
"def get_dictionary_file_lines_for_keywords(self):\n keywords_iter = iter(self.keywords)\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n\n self.dictionary_file.open_handle()\n result_lines = list()\n while next_keyword:\n line = self.dictionary_file.read_line_to_obj()\n if not line:\n print(\"Reached end of dictionary file\")\n break\n\n if line.term < next_keyword:\n continue\n elif line.term == next_keyword:\n print(\"Found postings list for term {}\".format(next_keyword))\n result_lines.append(line)\n\n try:\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n except StopIteration:\n print(\"Finished searching for all keywords\")\n break\n\n return result_lines",
"def colorize_headlines_visitor(c, p, item):\n if p.h.startswith(\"!= \"):\n f = item.font(0)\n f.setBold(True)\n item.setFont(0, f)\n raise leoPlugins.TryNext",
"def all_words(self, min_word_length=0):\n return [word for headline in self.headlines for word in\n headline.words(min_word_length=min_word_length)]",
"def getHeadParts(self):\n return self.headParts",
"def articles_id_headwords (_id):\n\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE no = :id\n ORDER BY sortkeyword\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'id' : _id, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)",
"def onHeadlineClick(self, tag, keywords):\n self.handleEvent(\"headclick1\", tag, keywords)",
"def get_negative_headlines(headlines,words):\r\n negative_headlines = []\r\n for headline in headlines:\r\n for word in words:\r\n if headline.lower().find(word) != -1: #If particular word is found in lowercased headline.\r\n negative_headlines.append(headline)\r\n break #Stop iterating through words when we have found one negative word.\r\n return negative_headlines",
"def get_headline_data(website_url, source):\n\tpage = requests.get(website_url)\n\tpage.raise_for_status()\n\tall_headlines = []\n\tbs_obj = bs4.BeautifulSoup(page.text, 'html.parser')\n\titem_list = bs_obj.select('item')\n\tprintable = set(string.printable)\n\tfor curr_item in item_list:\n\t\titem_title = curr_item.title.string\n\t\tfollowup_link = curr_item.select('link')[0].string\n\t\tdatestamp = curr_item.select('pubdate')[0].string\n\t\titem_title = item_title.replace(\"'\", \"'\")\n\t\tfollowup_link = followup_link.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n\t\titem_title = item_title.encode('utf-8', errors='ignore')\n\t\tnew_headline = data_structures.Headline(item_title, followup_link, source, datestamp)\n\t\tall_headlines.append(new_headline)\n\treturn all_headlines",
"def get_headings(self):\n return self.headings",
"def GetListHead(self, *args, **kwargs):\n pass",
"def reddit_headlines(reddit):\n\n # Set metadata to make request:\n url = \"https://www.reddit.com/r/{}/.json?limit=10\".format(reddit)\n headers = {'User-Agent': '{} Reddit headlines'.format(reddit)}\n\n # Consume Reddit's API to gather info:\n html = requests.get(url, headers=headers)\n\n # If status code is OK:\n if html.status_code == requests.codes.ok:\n # Parse resonse:\n info = json.loads(html.content.decode('utf-8'))\n # pprint(info)\n\n # Get relevant info:\n child = info['data']['children']\n titles = [unidecode(elem['data']['title']) for elem in child]\n titles = \"... \".join([title for title in titles])\n else:\n titles = None\n\n return titles",
"def get_headlines():\n country = request.args.get('country', type=str)\n if country is not None:\n data = te.getNews(country=country).dropna()\n return jsonify(data.to_dict(orient='records'))\n data = te.getNews()\n return jsonify(te.getNews().dropna().to_dict(orient='records'))"
] | [
"0.8323906",
"0.77197367",
"0.746953",
"0.7303126",
"0.68182117",
"0.6805935",
"0.66141784",
"0.64018786",
"0.6364445",
"0.63333815",
"0.59744793",
"0.59658384",
"0.59578186",
"0.5909768",
"0.5894226",
"0.58745706",
"0.5857797",
"0.5850383",
"0.58470386",
"0.58389264",
"0.58179617",
"0.58088344",
"0.57964855",
"0.5795066",
"0.57684404",
"0.5751211",
"0.5724461",
"0.56948507",
"0.5630085",
"0.5623782"
] | 0.8318916 | 1 |
Calculate the min number of refills to reach 'distance'. You start with a full tank. | def compute_min_refills(distance: int, tank: int, stops: List[int]):
location: int = 0
n_stops = 0
last_stop = 0
max_drive = location + tank
while max_drive < distance:
counter = 0
# Handle the case that stops are depleted before we reach distance
if len(stops) == 0:
return -1
for s in stops:
if s <= max_drive:
counter += 1
last_stop = s
max_drive = last_stop + tank
# Handle the case that wi did not reach the next stop
if counter == 0:
return -1
else:
del stops[0:counter]
n_stops += 1
return n_stops | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_min_refills(distance, tank, stops):\n\n num_refills = 0\n current_refill = 0\n\n all_stops = []\n all_stops.append(0)\n for stop in stops:\n \tall_stops.append(stop)\n all_stops.append(distance)\n\n num_stops = len(all_stops)\n\n while current_refill < num_stops:\n \tlast_refill = current_refill\n\n \twhile (current_refill < num_stops and all_stops[current_refill+1]\n \t\t - all_stops[last_refill] <= tank):\n \t\tcurrent_refill += 1\n \t\tif current_refill == num_stops-1:\n \t\t\treturn num_refills\n\n \tif current_refill == last_refill:\n \t\treturn -1\n \tif current_refill < num_stops:\n \t\tnum_refills += 1\n\n return num_refills",
"def distance(self) -> int:\n return 0",
"def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance",
"def num_points_in_distance(d):\n return 1 + 3 * d * (d + 1)",
"def _distance_acc(distances, thr=0.5):\n distance_valid = distances != -1\n num_distance_valid = distance_valid.sum()\n if num_distance_valid > 0:\n return (distances[distance_valid] < thr).sum() / num_distance_valid\n return -1",
"def getMinimumDistancePacmanLand(self, pos):\n minD = 10000\n for p in self.ghostLandPositions:\n minD = min(minD, self.getMazeDistance(pos, p))\n return minD",
"def getMinimumDistanceOpponent(self, idx, pos, defense=False):\n minD = 10000\n if defense:\n for p in self.beliefs[idx]:\n minD = min(minD, self.getMazeDistanceDefense(pos, p))\n else:\n for p in self.beliefs[idx]:\n minD = min(minD, self.getMazeDistance(pos, p))\n return minD",
"def _edit_distance(prediction_tokens: List[str], reference_tokens: List[str]) ->int:\n dp = [([0] * (len(reference_tokens) + 1)) for _ in range(len(prediction_tokens) + 1)]\n for i in range(len(prediction_tokens) + 1):\n dp[i][0] = i\n for j in range(len(reference_tokens) + 1):\n dp[0][j] = j\n for i in range(1, len(prediction_tokens) + 1):\n for j in range(1, len(reference_tokens) + 1):\n if prediction_tokens[i - 1] == reference_tokens[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1\n return dp[-1][-1]",
"def part_1(distances: Distances) -> int:\n\n result, _ = min(generate_routes(distances))\n print(f\"part 1: shortest route has distance {result}\")\n return result",
"def get_distance(self, other, max_distance) -> int:\n if self == other:\n return 0\n # Iteratively check larger neighbourhoods, until other is in it\n distance = 1\n while True:\n if other in self.get_neighbourhood(distance):\n return distance\n distance += 1\n if distance > max_distance:\n break",
"def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)",
"def _calc_min_distance(self, walker):\n\n cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n\n t2 = time.time()\n # make a traj out of it so we can calculate distances through\n # the periodic boundary conditions\n walker_traj = mdj.Trajectory(walker.state['positions'],\n topology=self._mdj_top,\n unitcell_lengths=cell_lengths,\n unitcell_angles=cell_angles)\n\n t3 = time.time()\n # calculate the distances through periodic boundary conditions\n # and get hte minimum distance\n min_distance = np.min(mdj.compute_distances(walker_traj,\n it.product(self.ligand_idxs,\n self.receptor_idxs),\n periodic=self._periodic)\n )\n t4 = time.time()\n logging.info(\"Make a traj: {0}; Calc dists: {1}\".format(t3-t2,t4-t3))\n\n return min_distance",
"def score_waypoint(distance):\n return max(0,\n float(settings.SATISFIED_WAYPOINT_DIST_MAX_FT - distance) /\n settings.SATISFIED_WAYPOINT_DIST_MAX_FT)",
"def distance_tolerance(distance: float) -> float:\n ret = 10.0\n if distance < 0:\n ret += distance * (100 - ret) / -2500.0\n return ret",
"def make_exceeding_distance_cost(allowed_distance):\n\tdef exceeding_distance_cost(node, caller_relative_probabilities, caller_distances):\n\t\treturn np.sum(caller_relative_probabilities[caller_distances > allowed_distance])\n\n\treturn exceeding_distance_cost",
"async def distance(self):\n return round(await self._rpc.distance(), 2)",
"def drive(self, distance=0):\n if random.uniform(1, 100) <= self.reliability:\n distance_driven = super().drive(distance)\n return distance_driven\n return 0",
"def apply_penalty(self, distance):\n\n self.cities_hit += 1 # Adds to the counter of cities without visiting a prime.\n\n if self.cities_hit > 10: # If Santa has visted more than 10 cities ...\n penalty_distance = (\n distance * 0.1) + distance # ...Applies the penalty for not showing up to a prime city...\n return penalty_distance # ...and returns the value based on the penalty\n else:\n return distance # Else return the distance.",
"def distance_reward(action, distances): \n\n def index_by_size(distances):\n # bigger element gets a bigger number\n idxs = np.zeros_like(distances)\n arr = np.copy(distances)\n c = distances.size\n for i in range(distances.size):\n min_curr_idx = np.argmin(arr)\n arr[min_curr_idx] = 1000\n idxs[min_curr_idx] = c\n c -= 1\n return idxs\n\n idxs_array = index_by_size(distances)\n rews = np.array([-10, -2, 0, 2, 10]) # BUG: pozor pri temu arraju nared f(no_robots), interpolacija\n idxs_array = idxs_array - 1 # 0-4\n chosen_idx = int(idxs_array[action])\n return rews[chosen_idx]",
"def min_distance(self, target):\n difference = self.pivot - target\n return max(math.sqrt(np.dot(difference, difference)) - self.radius, 0)",
"def calculate_move_fast_reward(self, packet):\r\n return get_distance_location(packet.gamecars[self.index].Location, self.previous_car_location)",
"def get_min_distance(self):\n return round(min(self.combined_euclidian_distance))",
"def min_distance(ref_curve,curve,acc=1):\n euc_length = lambda a,b: pow(sum(pow(a-b,2)),0.5)\n ref_curve_c = Curve(ref_curve)\n #print(\"comparing distance\")\n calc_dist = lambda r: sum((euc_length(r.find_nearest_point(point),point) for point in curve))\n #ref_distance = calc_dist(ref_curve_c)\n #ref_curve_c.set_points(ref_curve_c.gen_num_points(ref_curve_c.points.shape[0]*2))\n distance = calc_dist(ref_curve_c)\n #print(ref_distance,distance)\n return pow(distance,0.5)",
"def heuristic(self):\n if self._dist < 0:\n self._dist = 0\n for pos, idx in enumerate(self.config):\n if idx != 0: # Skip blank\n self._dist += manhattan_dist(idx, pos, self.n)\n return self._dist",
"def min_distance(distance, spt_set, self_nodes):\n minimum = sys.maxsize\n minimum_node = None\n for curr_node in self_nodes.values():\n if distance[curr_node.id] < minimum and not spt_set[curr_node.id]:\n minimum = distance[curr_node.id]\n minimum_node = curr_node\n return minimum_node",
"def gap_to_next_car(self):\n c = self.next_car()\n if c.x > self.x:\n return c.x - c.length_in_cells - self.x\n elif c.x < self.x:\n return (self.road.N - self.x) + (c.x - c.length_in_cells)\n elif c.x == self.x:\n return self.road.N",
"def calculate_zoom(self):\n distances = [geopy.distance.geodesic(self.centre_location, centroid).km for centroid in self.centroids]\n a = 4 / 20000\n distances = [1 + 4 - a * distance for distance in distances]\n print(min(distances))\n return min(distances)",
"def minimumDominationCount(leaf):\n minimumDominationCount = np.nanmin(leaf.calDominationCount())\n return minimumDominationCount",
"def heuristic_1(node):\n x_node, y_node = node.state.location()\n goals = node.state.grid.components.white_walkers\n goals.append(node.state.grid.components.dragon_stone)\n distance = [np.sqrt((x_node - x)**2 + (y_node - y)**2) for x, y in goals]\n return distance[np.argmin(distance)]",
"def _distance_covered(self):\n\n # Calculation of distance traveled compared to the previous point\n self.gap = math.sqrt((self.x - self.x_ant)**2\n + (self.y - self.y_ant)**2)\n\n self.x_ant = self.x\n self.y_ant = self.y\n\n return self.gap"
] | [
"0.8302289",
"0.6621628",
"0.645036",
"0.6314632",
"0.6262712",
"0.61484736",
"0.60885",
"0.603681",
"0.59223753",
"0.58915836",
"0.5876038",
"0.5869804",
"0.58609515",
"0.58593315",
"0.58491707",
"0.5815681",
"0.5806749",
"0.57953686",
"0.57733434",
"0.5757533",
"0.57270324",
"0.57073605",
"0.56928164",
"0.56865036",
"0.5681687",
"0.5680497",
"0.5679108",
"0.56686556",
"0.562771",
"0.56275225"
] | 0.7821262 | 1 |
Place the vertex v at position, and apply transformation T. Return the grid points that are occupied by the piece. | def place( self, position, v, T):
geo = (self.geo - self.geo[v]).dot( T)
return position + geo | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def translate(self, v):\n return Position.fromnp(translate(self.tonp(), v))",
"def project_vector(u, v):\n u_np = np.array([u.get_x(), u.get_y()])\n v_np = np.array([v.get_x(), v.get_y()])\n proj = (np.dot(u_np, v_np) / np.dot(v_np, v_np)) * v_np\n return Point(proj[0], proj[1])",
"def translate(self, tr):\n c = self.c -self.a*tr[0] -self.b*tr[1]\n self.c =c\n self.pointN = self.pointN+tr\n self.point1 = self.point1+tr\n self.points +=tr",
"def translate(self, tr):\n self.points = self.points + tr",
"def point_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = self.radius * cos(u) * sin(v)\n y = self.radius * sin(u) * sin(v)\n z = self.radius * cos(v)\n point = Point(x, y, z)\n if world:\n point.transform(self.transformation)\n return point",
"def proyZm1(u, v, t1):\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)",
"def move_vertex(self, p, v, x, y, z):\n self._move_vertex(p, v, x, y, z)",
"def geochart(u, v):\n cv = torch.cos(v)\n cu = torch.cos(u)\n sv = torch.sin(v)\n su = torch.sin(u)\n return torch.stack((cv*su, sv*su, cu)).T",
"def changePoint(self, P, V):\n if (not (isinstance(P, list) and isinstance(V, list))) or \\\n not (len(P) == 2 and len(V) == 4 and V[0] != 0):\n raise ValueError(\"(P,V) must be ([px, py], [u, r, s, t]) with u != 0.\")\n\n if self.ch == 0:\n Q0 = rational.IntegerIfIntOrLong(P[0]-V[1])/rational.IntegerIfIntOrLong(V[0]**2)\n Q1 = rational.IntegerIfIntOrLong(P[1]-V[2]*(P[0]-V[1])-V[3])/rational.IntegerIfIntOrLong(V[0]**3)\n else:\n v = self.basefield.createElement(V[0]).inverse()\n Q0 = ((P[0]-V[1])*v**2)\n Q1 = ((P[1]-V[2]*(P[0]-V[1])-V[3])*v**3)\n Q = [Q0, Q1]\n return Q",
"def transform_ip(self, H): # or update()\n self.vh = H @ self.vertices.T\n self.vh = self.vh.T \n self.va = self.vh[:,:2]",
"def vector_trans(self, v, T, V0):\n v = np.array(v)\n newv = np.add(v[0:2].dot(T), V0)\n self.log.debug(\"Transformation of vector {}, with transformation matrix {} nad V0 {}, to: {}\".format(v, T, V0, newv))\n return newv",
"def rigid_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A += w[i]*p_adj.transpose_multiply(q_adj)\r\n A_arr = np.array(A.matrix).reshape(3, 3)\r\n U, S, V = np.linalg.svd(A_arr)\r\n M_arr = np.matmul(np.transpose(V), np.transpose(U))\r\n M = mat3(M_arr.ravel().tolist())\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out",
"def translate(self, vect):\n self.apply(lambda c: (vector(c) + vect).coords())",
"def position(self, t):\n return vector_add(self.origin, self.direction.scale(t))",
"def rigid_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A_fac = mat2([v.x - p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj\r\n r = math.sqrt(v_out.dot(v_out))\r\n v_out /= r\r\n v_sub = v - p_wgt\r\n r = math.sqrt(v_sub.dot(v_sub))\r\n v_out *= r\r\n v_out += q_wgt\r\n return v_out",
"def eval_2pts(self, vector, t):\n if t < 0 or t > 1:\n raise Exception(\"Cannot Eval \", e, \" with t=\", t, \": t Should Satisfy 0<=t<=1.\")\n else:\n return Vector((1-t)*self.x + t*vector.x, (1-t)*self.y + t*vector.y, (1-t)*self.z + t*vector.z)",
"def vector_proj(v, w):\n w_hat = vector_hat(w)\n return vector_dot(v, w_hat) * w_hat",
"def project(self, win_width, win_height, fov, viewer_distance):\r\n factor = fov / (viewer_distance + self.z)\r\n x = self.x * factor + win_width / 2\r\n y = -self.y * factor + win_height / 2\r\n return Point3D(x, y, 1)",
"def locate_on_surface(sx, sy, sz, t, x, y, zz, v=1500):\n # Check sizes\n assert (len(sx) == len(sy)) and (len(sx) == len(sz))\\\n and (len(sx) == len(t)),\\\n 'Source position and time arrays must all be of the same size.'\n assert np.shape(zz) == (len(x), len(y)),\\\n 'shape(zz) must equal (len(x), len(y)).'\n # Calculate RMS for each grid node\n rms = [] \n pos = []\n for ix, _x in enumerate(x):\n for iy, _y in enumerate(y):\n _t = slant_time(sx, sy, sz, _x, _y, zz[ix, iy], v=v)\n rms.append(np.sqrt(np.sum(np.power(_t - t, 2))))\n pos.append([_x, _y, zz[ix, iy]])\n i = np.argmin(rms)\n return pos[i] + [rms[i]]",
"def get_vertex(self, p, v, x, y, z):\n x.value, y.value, z.value = self._get_vertex(p, v, x.value, y.value, z.value)",
"def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)",
"def get_element_for_location(self, points):\n verts = np.zeros((points.shape[0], 4, 3))\n bc = np.zeros((points.shape[0], 4))\n tetras = np.zeros(points.shape[0], dtype=\"int64\")\n inside = np.zeros(points.shape[0], dtype=bool)\n npts = 0\n npts_step = int(1e4)\n # break into blocks of 10k points\n while npts < points.shape[0]:\n\n cell_index = np.array(\n self.aabb_grid.position_to_cell_index(points[: npts + npts_step, :])\n )\n inside = self.aabb_grid.inside(points[: npts + npts_step, :])\n global_index = (\n cell_index[:, 0]\n + self.aabb_grid.nsteps_cells[None, 0] * cell_index[:, 1]\n + self.aabb_grid.nsteps_cells[None, 0]\n * self.aabb_grid.nsteps_cells[None, 1]\n * cell_index[:, 2]\n )\n\n tetra_indices = self.aabb_table[global_index[inside], :].tocoo()\n # tetra_indices[:] = -1\n row = tetra_indices.row\n col = tetra_indices.col\n # using returned indexes calculate barycentric coords to determine which tetra the points are in\n vertices = self.nodes[self.elements[col, :4]]\n pos = points[row, :]\n vap = pos[:, :] - vertices[:, 0, :]\n vbp = pos[:, :] - vertices[:, 1, :]\n # # vcp = p - points[:, 2, :]\n # # vdp = p - points[:, 3, :]\n vab = vertices[:, 1, :] - vertices[:, 0, :]\n vac = vertices[:, 2, :] - vertices[:, 0, :]\n vad = vertices[:, 3, :] - vertices[:, 0, :]\n vbc = vertices[:, 2, :] - vertices[:, 1, :]\n vbd = vertices[:, 3, :] - vertices[:, 1, :]\n\n va = np.einsum(\"ij, ij->i\", vbp, np.cross(vbd, vbc, axisa=1, axisb=1)) / 6.0\n vb = np.einsum(\"ij, ij->i\", vap, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n vc = np.einsum(\"ij, ij->i\", vap, np.cross(vad, vab, axisa=1, axisb=1)) / 6.0\n vd = np.einsum(\"ij, ij->i\", vap, np.cross(vab, vac, axisa=1, axisb=1)) / 6.0\n v = np.einsum(\"ij, ij->i\", vab, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n c = np.zeros((va.shape[0], 4))\n c[:, 0] = va / v\n c[:, 1] = vb / v\n c[:, 2] = vc / v\n c[:, 3] = vd / v\n # inside = np.ones(c.shape[0],dtype=bool)\n mask = np.all(c >= 0, axis=1)\n\n verts[: npts + npts_step, :, :][row[mask], :, :] = vertices[mask, :, :]\n bc[: npts + npts_step, :][row[mask], :] = c[mask, :]\n tetras[: npts + npts_step][row[mask]] = col[mask]\n inside[: npts + npts_step][row[mask]] = True\n npts += npts_step\n return verts, bc, tetras, inside",
"def project(self, win_width, win_height, fov, viewer_distance):\n\t\tfactor = fov / (viewer_distance + self.z)\n\t\tx = self.x * factor + win_width / 2\n\t\ty = -self.y * factor + win_height / 2\n\t\treturn Point3D(x, y, 1)",
"def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)",
"def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)",
"def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)",
"def protrudes((u,v)):\r\n return ((u,v,W), (u,v,S), (u,v-1,W), (u-1,v,S))",
"def transform_coordinates(self, p):\n if type(p) == PhysicalObject:\n return self.transform_coordinates(p.position)\n elif type(p) == Vector:\n return tuple(map(\n lambda x: int(x),\n (p / SCALE_FACTOR - self.pos_shift))\n )",
"def position(self, u, v):\n raise NotImplementedError",
"def project(v, w):\n projection_length = dot(v, w)\n return scalar_multiply(projection_length, w)"
] | [
"0.61589414",
"0.5887289",
"0.56152284",
"0.55743974",
"0.5553705",
"0.55252033",
"0.55161303",
"0.54971194",
"0.5430372",
"0.54015726",
"0.5349081",
"0.5345369",
"0.53373307",
"0.53254366",
"0.5311833",
"0.5305778",
"0.5302551",
"0.53003585",
"0.52812153",
"0.52796966",
"0.5275249",
"0.52329",
"0.52311933",
"0.52299464",
"0.52299464",
"0.52299464",
"0.5227934",
"0.52165365",
"0.52145654",
"0.5205811"
] | 0.65050745 | 0 |
Generate all nondegenerate placements, with one of the vertices placed at (0,0). Return the placements as [ (v, T) ], where v is the vertex to be placed at (0,0), and T the 2x2 transformation matrix that place the piece according to self.geo[v] + T.dot(self.geo - self.geo[v]) | def findNondegeneratePlacements( self):
# Rotate counterclockwise by 90 degrees around the v'th vertex.
r90 = np.array( [ [0,1], [-1,0] ], dtype=int)
# Flip the piece along the vertical axis through the v'th vertex.
fv = np.array( [ [1,0], [0,-1] ], dtype=int)
self.placements = []
uniques = set() # Unique placements generated so far
identity = np.array( [ [1,0], [0,1] ], dtype=int)
T = identity[:,:]
for i in xrange(self.nVertices):
geo = self.geo[:,:]
geo -= geo[i] # Place i'th vertex at (0,0)
for r in xrange(4):
T = T.dot( r90)
for f in xrange(2):
T = T.dot( fv)
pk = placementKey( geo.dot(T))
if (not pk in uniques):
uniques.add( pk)
self.placements.append( (i, T))
# After four rotations and two flips, we should be back to
# the original position.
assert( np.array_equal( T, identity))
return self.placements | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_all_locations(grid, shape):",
"def test_create_new_placements(self):\n subv = SimpleMachineVertex(None, \"\")\n pl = Placement(subv, 0, 0, 1)\n Placements([pl])",
"def generate_nearby_cells(self):\n for y in range(len(self.island_map)):\n for x in range(len(self.island_map[y])):\n list_of_nearby_cells = []\n\n if y != 0:\n self.generate_cell_above(x, y, list_of_nearby_cells)\n\n if x != 0:\n self.generate_cell_left(x, y, list_of_nearby_cells)\n\n if y != len(self.island_map)-1:\n self.generate_cell_below(x, y, list_of_nearby_cells)\n\n if x != len(self.island_map[y])-1:\n self.generate_cell_right(x, y, list_of_nearby_cells)\n\n self.island_map[y][x].nearby_cells = list_of_nearby_cells",
"def __init__( self, geo, index=None):\n # the column vector self.geo[:,i] gives the i'th vertex's positions\n self.geo = np.array( geo, dtype=int)\n self.findNondegeneratePlacements()\n self.id = index",
"def g_xy(self):\n for x in range(self.size.x):\n for y in range(self.size.y):\n yield self.p[0] + Vect(x, y)",
"def gen_placecells(self, min_spread=0.2):\r\n\r\n N = None\r\n num_tries = 1000 # a limit on the number of attempts to place a new placecell\r\n\r\n # assign random x,y locations to each neuron\r\n locations = [self.random_location()]\r\n while True:\r\n # generate a random new point\r\n new_loc = self.random_location()\r\n\r\n # check that the point isn't too close to previous points\r\n count = 0\r\n while min([self.calc_dist(new_loc, l) for l in locations]) < min_spread and count < num_tries:\r\n new_loc = self.random_location()\r\n count += 1\r\n\r\n # add the new point\r\n locations += [new_loc]\r\n\r\n if (N == None and count >= num_tries) or len(locations) == N:\r\n # stop when required number of place cells built (if N specified),\r\n # or when world has been decently filled\r\n break\r\n\r\n return locations",
"def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert dagRoot\n\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # Copy the contents of the list (but keep the originals) and shuffle them.\n # shuffledMotes = list(self.motes)\n # random.shuffle(shuffledMotes)\n # print shuffledMotes\n\n #### GRID PREPRATIONS.\n dagRootX, dagRootY = dagRoot.getLocation()\n # determine the number of 'square levels'\n numberOfMotes = len(self.motes)\n currentLvl = 0\n sumMotes = 0\n while (sumMotes < numberOfMotes):\n if currentLvl == 0:\n sumMotes += 1\n else:\n sumMotes += currentLvl * 8\n currentLvl += 1\n maxLvl = currentLvl - 1\n # print sumMotes\n coordinatesPerLvl = []\n for lvl in range(0, maxLvl + 1):\n coordinatesThisLvl = []\n if lvl == 0:\n coordinatesThisLvl = [(dagRootX, dagRootY)]\n elif lvl == 1:\n coordinatesThisLvl = self.getSquareCoordinates((dagRootX, dagRootY), self.DISTANCE)\n elif lvl > 1:\n coordinatesPrevLvl = coordinatesPerLvl[lvl - 1]\n coordinatesPrevPrevLvl = coordinatesPerLvl[lvl - 2]\n for coordinatePrevLvl in coordinatesPrevLvl:\n squareCoordinates = self.getSquareCoordinates(coordinatePrevLvl, self.DISTANCE)\n for squareCoordinate in squareCoordinates:\n if not self.isInCoordinates(squareCoordinate,\n coordinatesPrevPrevLvl) and not self.isInCoordinates(\n squareCoordinate, coordinatesPrevLvl) and not self.isInCoordinates(squareCoordinate,\n coordinatesThisLvl):\n coordinatesThisLvl.append(squareCoordinate)\n coordinatesPerLvl.append(coordinatesThisLvl)\n # print 'Level %d: # motes = %d' % (lvl, len(coordinatesThisLvl))\n # print coordinatesThisLvl\n assert len(coordinatesThisLvl) == 1 or len(coordinatesThisLvl) == lvl * 8\n\n allCoordinates = [j for i in coordinatesPerLvl for j in i]\n # print allCoordinates\n\n # reposition each mote until it is connected\n countMote = 1 # root 0 already has coordinates\n connectedMotes = [dagRoot]\n for mote in self.motes:\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n\n newX = None\n newY = None\n # if no topology is not given, build the topology yourself\n if SimEngine.SimEngine().ilp_topology is None:\n newX = np.random.normal(allCoordinates[countMote][0], self.DISTANCE / 8, 1)[0]\n newY = np.random.normal(allCoordinates[countMote][1], self.DISTANCE / 8, 1)[0]\n else:\n # if no topology is given, use that topology\n newX = SimEngine.SimEngine().ilp_topology[str(mote.id)]['x']\n newY = SimEngine.SimEngine().ilp_topology[str(mote.id)]['y']\n\n mote.setLocation(\n x=newX,\n y=newY\n )\n\n numStableNeighbors = 0\n\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if self.settings.individualModulations == 1:\n if rssi > Modulation.Modulation().modulationStableRSSI[Modulation.Modulation().minimalCellModulation[SimSettings.SimSettings().modulationConfig]]:\n # print rssi\n numStableNeighbors += 1\n else:\n if rssi > self.STABLE_RSSI:\n # print rssi\n numStableNeighbors += 1\n\n # make sure it is connected to at least STABLE_NEIGHBORS motes\n # or connected to all the currently deployed motes when the number of deployed motes\n # are smaller than STABLE_NEIGHBORS\n if 
numStableNeighbors >= self.STABLE_NEIGHBORS or numStableNeighbors == len(connectedMotes):\n connected = True\n\n connectedMotes += [mote]\n countMote += 1\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n shortestDistance = None\n for m in self.motes:\n if mote == m:\n continue\n if self.settings.individualModulations == 1:\n rssi_value = mote.getRSSI(m)\n for modulationTmp in Modulation.Modulation().modulations:\n # if the rssi value is higher than the minimal signal value required for this neighbor, take that modulation\n # and compute the PDR using that modulation\n if rssi_value > Modulation.Modulation().modulationStableRSSI[modulationTmp]:\n pdr = self._computePDR(mote, m, modulation=modulationTmp)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)\n mote.setModulation(m, modulationTmp)\n m.setModulation(mote, modulationTmp)\n else:\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)\n # closest distance\n dist = self._computeDistance(mote, m)\n if shortestDistance == None or dist < shortestDistance:\n mote.closestNeighbor = m\n shortestDistance = dist",
"def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert dagRoot\n\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # Copy the contents of the list (but keep the originals) and shuffle them.\n # shuffledMotes = list(self.motes)\n # random.shuffle(shuffledMotes)\n # print shuffledMotes\n\n #### GRID PREPRATIONS.\n dagRootX, dagRootY = dagRoot.getLocation()\n # determine the number of 'square levels'\n numberOfMotes = len(self.motes)\n currentLvl = 0\n sumMotes = 0\n while (sumMotes < numberOfMotes):\n if currentLvl == 0:\n sumMotes += 1\n else:\n sumMotes += currentLvl * 8\n currentLvl += 1\n maxLvl = currentLvl - 1\n # print sumMotes\n coordinatesPerLvl = []\n for lvl in range(0, maxLvl + 1):\n coordinatesThisLvl = []\n if lvl == 0:\n coordinatesThisLvl = [(dagRootX, dagRootY)]\n elif lvl == 1:\n coordinatesThisLvl = self.getSquareCoordinates((dagRootX, dagRootY), self.DISTANCE)\n elif lvl > 1:\n coordinatesPrevLvl = coordinatesPerLvl[lvl - 1]\n coordinatesPrevPrevLvl = coordinatesPerLvl[lvl - 2]\n for coordinatePrevLvl in coordinatesPrevLvl:\n squareCoordinates = self.getSquareCoordinates(coordinatePrevLvl, self.DISTANCE)\n for squareCoordinate in squareCoordinates:\n if not self.isInCoordinates(squareCoordinate,\n coordinatesPrevPrevLvl) and not self.isInCoordinates(\n squareCoordinate, coordinatesPrevLvl) and not self.isInCoordinates(squareCoordinate,\n coordinatesThisLvl):\n coordinatesThisLvl.append(squareCoordinate)\n coordinatesPerLvl.append(coordinatesThisLvl)\n # print 'Level %d: # motes = %d' % (lvl, len(coordinatesThisLvl))\n # print coordinatesThisLvl\n assert len(coordinatesThisLvl) == 1 or len(coordinatesThisLvl) == lvl * 8\n\n allCoordinates = [j for i in coordinatesPerLvl for j in i]\n # print allCoordinates\n\n # reposition each mote until it is connected\n countMote = 1 # root 0 already has coordinates\n connectedMotes = [dagRoot]\n for mote in self.motes:\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n\n newX = np.random.normal(allCoordinates[countMote][0], self.DISTANCE / 8, 1)[0]\n newY = np.random.normal(allCoordinates[countMote][1], self.DISTANCE / 8, 1)[0]\n\n mote.setLocation(\n x=newX,\n y=newY\n )\n\n # mote.setLocation(\n # x = allCoordinates[countMote][0],\n # y = allCoordinates[countMote][1]\n # )\n\n numStableNeighbors = 0\n\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if rssi > self.STABLE_RSSI:\n # print rssi\n numStableNeighbors += 1\n\n # make sure it is connected to at least STABLE_NEIGHBORS motes\n # or connected to all the currently deployed motes when the number of deployed motes\n # are smaller than STABLE_NEIGHBORS\n if numStableNeighbors >= self.STABLE_NEIGHBORS or numStableNeighbors == len(connectedMotes):\n connected = True\n\n connectedMotes += [mote]\n countMote += 1\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n shortestDistance = None\n for m in self.motes:\n if mote == m:\n continue\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)\n # closest distance\n dist = 
self._computeDistance(mote, m)\n if shortestDistance == None or dist < shortestDistance:\n mote.closestNeighbor = m\n shortestDistance = dist",
"def __new_position(self):\n iterables = [range(self.size_x), range(self.size_y)]\n points = [] # Save all points in size.\n for point in itertools.product(*iterables):\n points.append(point)\n\n current_points = [] # Save used points.\n for object in self.objects:\n if (object.x, object.y) not in current_points:\n current_points.append((object.x, object.y))\n\n for point in current_points:\n points.remove(point) # Remove all used points.\n\n location = np.random.choice(a=range(len(points)), replace=False)\n return points[location]",
"def test_get_placement_of_vertex(self):\n subv = list()\n for i in range(5):\n subv.append(SimpleMachineVertex(None, \"\"))\n\n pl = list()\n for i in range(4):\n pl.append(Placement(subv[i], 0, 0, i))\n\n pls = Placements(pl)\n for i in range(4):\n self.assertEqual(pls.get_placement_of_vertex(subv[i]), pl[i])",
"def place_allowed_tower_sites():\n self.coordinates__tower_sites = []\n for tk in xrange(self.N_tower_kinds):\n #Each kind of tower will have the correct number of sites placed\n \n coords = []\n while len(coords)<self.N_tower_sites[tk]:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y) \n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords.append(p)\n self.coordinates__tower_sites.append(coords)",
"def get_element_for_location(self, points):\n verts = np.zeros((points.shape[0], 4, 3))\n bc = np.zeros((points.shape[0], 4))\n tetras = np.zeros(points.shape[0], dtype=\"int64\")\n inside = np.zeros(points.shape[0], dtype=bool)\n npts = 0\n npts_step = int(1e4)\n # break into blocks of 10k points\n while npts < points.shape[0]:\n\n cell_index = np.array(\n self.aabb_grid.position_to_cell_index(points[: npts + npts_step, :])\n )\n inside = self.aabb_grid.inside(points[: npts + npts_step, :])\n global_index = (\n cell_index[:, 0]\n + self.aabb_grid.nsteps_cells[None, 0] * cell_index[:, 1]\n + self.aabb_grid.nsteps_cells[None, 0]\n * self.aabb_grid.nsteps_cells[None, 1]\n * cell_index[:, 2]\n )\n\n tetra_indices = self.aabb_table[global_index[inside], :].tocoo()\n # tetra_indices[:] = -1\n row = tetra_indices.row\n col = tetra_indices.col\n # using returned indexes calculate barycentric coords to determine which tetra the points are in\n vertices = self.nodes[self.elements[col, :4]]\n pos = points[row, :]\n vap = pos[:, :] - vertices[:, 0, :]\n vbp = pos[:, :] - vertices[:, 1, :]\n # # vcp = p - points[:, 2, :]\n # # vdp = p - points[:, 3, :]\n vab = vertices[:, 1, :] - vertices[:, 0, :]\n vac = vertices[:, 2, :] - vertices[:, 0, :]\n vad = vertices[:, 3, :] - vertices[:, 0, :]\n vbc = vertices[:, 2, :] - vertices[:, 1, :]\n vbd = vertices[:, 3, :] - vertices[:, 1, :]\n\n va = np.einsum(\"ij, ij->i\", vbp, np.cross(vbd, vbc, axisa=1, axisb=1)) / 6.0\n vb = np.einsum(\"ij, ij->i\", vap, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n vc = np.einsum(\"ij, ij->i\", vap, np.cross(vad, vab, axisa=1, axisb=1)) / 6.0\n vd = np.einsum(\"ij, ij->i\", vap, np.cross(vab, vac, axisa=1, axisb=1)) / 6.0\n v = np.einsum(\"ij, ij->i\", vab, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n c = np.zeros((va.shape[0], 4))\n c[:, 0] = va / v\n c[:, 1] = vb / v\n c[:, 2] = vc / v\n c[:, 3] = vd / v\n # inside = np.ones(c.shape[0],dtype=bool)\n mask = np.all(c >= 0, axis=1)\n\n verts[: npts + npts_step, :, :][row[mask], :, :] = vertices[mask, :, :]\n bc[: npts + npts_step, :][row[mask], :] = c[mask, :]\n tetras[: npts + npts_step][row[mask]] = col[mask]\n inside[: npts + npts_step][row[mask]] = True\n npts += npts_step\n return verts, bc, tetras, inside",
"def _generate_maze(self):\n grid = [[GridCell(x, y, self._treasure_prob) for x in range(self._map_size)] for y in range(self._map_size)]\n\n center_x = self._map_size // 2\n center_y = self._map_size // 2\n\n for _ in range(self._sparsity):\n current = grid[center_x][center_y]\n stack = list()\n start = True\n while len(stack) or start:\n start = False\n current.visited = True\n children = current.has_children(grid)\n\n if children:\n choice = np.random.choice(children)\n choice.visited = True\n\n stack.append(current)\n\n self._remove_walls(current, choice)\n\n current = choice\n\n elif stack:\n current = stack.pop()\n for row in grid:\n for cell in row:\n cell.visited = False\n\n # edit center area\n grid[center_x][center_y].set_treasury()\n for x in range(center_x - 1, center_x + 2):\n for y in range(center_y - 1, center_y + 2):\n grid[x][y].erase_walls()\n return grid",
"def get_random_coordinates(self):\n array_shape = np.shape(self.cells) # type: tuple\n points_on_island = []\n for i in range(1, array_shape[0] - 1):\n for j in range(1, array_shape[1] - 1):\n points_on_island.append((i, j))\n random.shuffle(points_on_island)\n return points_on_island",
"def test_get_placements(self):\n subv = list()\n for i in range(5):\n subv.append(SimpleMachineVertex(None, \"\"))\n\n pl = list()\n for i in range(4):\n pl.append(Placement(subv[i], 0, 0, i))\n\n pls = Placements(pl)\n container = pls.placements\n for i in range(4):\n self.assertIn(pl[i], container)",
"def iter_grid_tiles(self):\n all_points = self.grid[0].union(self.grid[1], self.grid[2], {self.position})\n min_x = min(p.x for p in all_points)\n min_y = min(p.y for p in all_points)\n\n if min_x < 0:\n xoffset = -min_x\n elif min_x == 0:\n xoffset = 0\n elif min_x > 0:\n xoffset = min_x\n if min_y < 0:\n yoffset = -min_y\n elif min_y == 0:\n yoffset = 0\n elif min_y > 0:\n yoffset = min_y\n origin = Point(0 + xoffset, 0 + yoffset)\n position = Point(self.position.x + xoffset, self.position.y + yoffset)\n for tile_type in (0, 1, 2):\n for point in self.grid[tile_type]:\n newpoint = Point(point.x + xoffset, point.y + yoffset)\n if newpoint not in (origin, position):\n yield newpoint.x, newpoint.y, tile_type\n yield origin.x, origin.y , 4\n yield position.x, position.y, 3",
"def _place_nodes(self, i, j, step, max_nodes):\n points = []\n for k in range(max_nodes):\n while(True):\n t = Point(random.randint(i,i+step), random.randint(j,j+step)) \n if all([point.get_distance(t) > self.min_distance for point in points]):\n points.append(t)\n break\n \n for point in points:\n n=Node(self.counter, point)\n self.nodes.append(n)\n self.counter+=1",
"def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret",
"def generate_possible_moves(self):\r\n\t\t# Moves:\r\n\t\t# 0 - North\r\n\t\t# 1 - East\r\n\t\t# 2 - South\r\n\t\t# 3 - West\r\n\r\n\t\tmoves = []\r\n\r\n\t\tif self.x != 0:\r\n\t\t\tmoves.append(0)\r\n\t\tif self.y != self.n-1:\r\n\t\t\tmoves.append(1)\r\n\t\tif self.x != self.n-1:\r\n\t\t\tmoves.append(2)\r\n\t\tif self.y != 0:\r\n\t\t\tmoves.append(3)\r\n\r\n\t\treturn moves",
"def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")",
"def get_candidate_tiles(self) -> List[Point]:\n\t\tempty_tiles = set()\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif not self.tiles[x][y] == 0:\n\t\t\t\t\tfor d in [[0,1], [1,1], [1,0], [1,-1], [0,-1], [-1,-1], [-1,0], [-1,1]]:\n\t\t\t\t\t\tif x+d[0] >= 0 and y+d[1] >= 0 and x+d[0] < self.size and y+d[1] < self.size and self.tiles[x+d[0]][y+d[1]] == 0:\n\t\t\t\t\t\t\tempty_tiles.add(Point(x+d[0],y+d[1]))\n\t\treturn list(empty_tiles)",
"def create_coords_medium(ph):\n # Min: 8, max 12\n for start_row in xrange(ph.pizza.shape[0]):\n for start_col in xrange(ph.pizza.shape[1]-2*ph.min_ing_per_slice+1):\n # First scenario\n for i in xrange(ph.min_ing_per_slice*2, ph.max_cells_per_slice+1):\n end_row = start_row + 1\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0]-1):\n for start_col in xrange(ph.pizza.shape[1]-3):\n # Second scenario\n for i in xrange(ph.min_ing_per_slice, ph.min_ing_per_slice+3):\n end_row = start_row + 2\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0] - 2):\n for start_col in xrange(ph.pizza.shape[1] - 2):\n # Third scenario\n for i in xrange(3, 5):\n end_row = start_row + 3\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)",
"def generatePos(self):\n self.pos = np.zeros((self.num_points, 2), dtype='int32')\n self.pos[:, 1] = np.repeat(list(reversed(np.arange(1, self.x*2, 2))), self.y)\n self.pos[:, 0] = np.tile(np.arange(1, self.x*2, 2), self.y)",
"def init_place(self):\n for i in range(self.numCells):\n x = randint(0,self.nx)\n y = randint(0,self.ny)\n while not self.is_empty(x,y):\n x = randint(0, self.nx)\n y = randint(0, self.ny)\n assert self.put_cell(x, y, i) is True\n self.cells.append(Cell(x,y))\n\n assert self.calc_cost() is True",
"def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles",
"def generate_positions(self):\n raise NotImplementedError(\"Should implement generate_positions()!\")",
"def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n points.append((v1, v1, v4)) # 22\n\n return points",
"def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End of method\")",
"def fill(self, products):\n unplaced = []\n self.x = 0\n for p in products:\n if self.x + p.width < Cage.width:\n p.location = self.x, self.y, self.z\n self.placed_products.append(p)\n self.x += p.width\n else:\n unplaced.append(p)\n return unplaced",
"def __generate_spawn_points(self):\n while True:\n p1x = random.randint(0, self.width - 1)\n p1y = random.randint(0, self.height - 1)\n p2x, p2y = self.__mirror(p1x, p1y)\n d_sq = (p1x - p2x)**2 + (p1y - p2y)**2\n if d_sq >= (self.width / 2)**2:\n break\n return (p1x, p1y), (p2x, p2y)"
] | [
"0.6300398",
"0.59959567",
"0.5966825",
"0.5925378",
"0.58771193",
"0.5777176",
"0.57712615",
"0.57712615",
"0.5769345",
"0.5725288",
"0.5673758",
"0.5602161",
"0.56021327",
"0.56002945",
"0.5596927",
"0.5585428",
"0.5523877",
"0.5508298",
"0.5461161",
"0.546076",
"0.5455531",
"0.54486144",
"0.5427992",
"0.5425584",
"0.5418832",
"0.5412836",
"0.5397276",
"0.53711367",
"0.5332808",
"0.5325766"
] | 0.7902054 | 0 |
Count unoccupied neighbors of a point. | def countFreeNeighbors( p, board, occupation):
n = 0
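    # Examine the four axis-aligned neighbours: m selects the axis (0 = x, 1 = y), d the step direction.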
for m in [0, 1]:
for d in [-1, 1]:
pn = [p[0], p[1]]
pn[m] += d
j = board.grids.get( tuple(pn), None)
            if j is None: continue        # Not a board point
            if j in occupation: continue  # Occupied
n += 1
return n | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_neighboors(self, x: int, y: int) -> int :\n\n cpt : int = 0\n min_x : int = max(0, x - 1)\n max_x : int = min(x + 1, self.width-1)\n min_y : int = max(0, y - 1)\n max_y : int = min(y + 1, self.height-1)\n\n x_tmp : int\n y_tmp : int\n for x_tmp in range(min_x, max_x+1):\n for y_tmp in range(min_y, max_y+1):\n if self.is_alive(x_tmp, y_tmp) and not (x_tmp == x and y_tmp == y):\n cpt += 1\n return cpt",
"def count_neighbors(self, row, col):\n neighbors = 0\n neighbors += self.get_cell_value(row - 1, col - 1)\n neighbors += self.get_cell_value(row - 1, col)\n neighbors += self.get_cell_value(row - 1, col + 1)\n neighbors += self.get_cell_value(row, col - 1)\n neighbors += self.get_cell_value(row, col + 1)\n neighbors += self.get_cell_value(row + 1, col - 1)\n neighbors += self.get_cell_value(row + 1, col)\n neighbors += self.get_cell_value(row + 1, col + 1)\n\n return neighbors",
"def n_neighbors(self,n):\n return sum(1 for x in self.hex.get_neighbors_ring(n) if x is not None and x.is_occupied == 1)",
"def count_neighbors(self, x, y):\n # IMPLEMENT ME\n # HINT: You do not have to use a for-loop for this method; just\n # if-statements will suffice. Also, you do not need to indent further\n # than two levels further than this comment.\n neighbours = 0\n if x > 0 and y > 0:\n if self.board[x-1][y-1] == \"x\":\n neighbours += 1\n if x > 0:\n if self.board[x-1][y] == \"x\":\n neighbours += 1\n if x > 0 and y < self.width - 1:\n if self.board[x-1][y+1] == \"x\":\n neighbours += 1\n if y > 0:\n if self.board[x][y-1] == \"x\":\n neighbours += 1\n if y < self.width - 1:\n if self.board[x][y+1] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y > 0:\n if self.board[x+1][y-1] == \"x\":\n neighbours += 1\n if x < self.height - 1:\n if self.board[x+1][y] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y < self.width - 1:\n if self.board[x+1][y+1] == \"x\":\n neighbours += 1\n return neighbours",
"def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count",
"def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]",
"def neighbors(self, point):\n # Sanity checks\n # Check that point has same number of dimensions as graph\n if not len(point) == len(self.dimensions):\n raise Exception(\"Point has \" + str(len(point)) + \" dimensions, Coordination Space has \" + \\\n str(len(self.dimensions)) + \" dimensions.\")\n\n point_and_neighbors = self.point_neighbors_recursion(point) # All neighbors, including point\n point_and_neighbors_set = set()\n for i in point_and_neighbors:\n point_and_neighbors_set.add(tuple(i))\n\n point_and_neighbors_set.remove(point) # Remove point\n neighbors = point_and_neighbors_set # Renaming for readability\n\n neighbors = filter(self.in_bounds, neighbors) # Remove points that are out-of-bounds\n neighbors = filter(self.passable, neighbors) # Remove points that are not neighbors\n\n return neighbors",
"def neighbors(self, x):\n pass",
"def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count",
"def get_neighbours_sum(self, x, y, current_point) -> int:\n return np.sum(self.grid_array[x-1:x+2, y-1:y+2]) - current_point",
"def count_island(row, col, island):\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count",
"def count_neighbor_mines(self, x, y):\n\t\treturn sum(self.mines[n][m] for (n, m) in self.get_valid_neighbors(x, y))",
"def count_neighbour_mines(self, x, y):\n neighbour_mines = 0\n for _x in range(x - 1, x + 2):\n for _y in range(y - 1, y + 2):\n if is_valid(_x, _y):\n if is_mine(self.board, _x, _y):\n neighbour_mines += 1\n return neighbour_mines",
"def count_neighbour_mines(self, x, y):\n neighbour_mines = 0\n for _x in range(x - 1, x + 2):\n for _y in range(y - 1, y + 2):\n if is_valid(_x, _y):\n if is_mine(self.board, _x, _y):\n neighbour_mines += 1\n return neighbour_mines",
"def num_neighbors(self):\n return self._num_neighbors",
"def checkNumNeighbors():",
"def neighbor_count(A):\n sum2 = lambda A, B: map2(add, A, B)\n neighbors = ((-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1))\n return reduce(sum2,\n map(lambda d: rotate2(A, d[0], d[1]),\n neighbors))",
"def get_count_life_neighbor(arr, x, y, max_x, max_y):\n\tres_count = 0\n\n\tif x > 0 and y > 0:\n\t\tif arr[y-1][x-1]:\n\t\t\tres_count += 1\n\n\tif y > 0:\n\t\tif arr[y-1][x]:\n\t\t\tres_count += 1\n\n\tif y > 0 and x < max_x:\n\t\tif arr[y-1][x+1]:\n\t\t\tres_count += 1\n\n\tif x > 0:\n\t\tif arr[y][x-1]:\n\t\t\tres_count += 1;\n\n\tif x < max_x:\n\t\tif arr[y][x+1]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x > 0:\n\t\tif arr[y+1][x-1]:\n\t\t\tres_count += 1\n\n\tif y < max_y:\n\t\tif arr[y+1][x]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x < max_x:\n\t\tif arr[y+1][x+1]:\n\t\t\tres_count += 1\n\n\treturn res_count",
"def countNeighbors(row, col, A):\n h = len(A)\n w = len(A[0])\n count = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n if abs(x) + abs(y) != 0:\n count += A[row+x][col+y]\n return count",
"def neighbor(board, x, y, n, m):\n deltas = (\n (-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1),\n )\n count = 0\n for dx, dy in deltas:\n xx = x + dx\n yy = y + dy\n if xx >= 0 and xx < n and yy >= 0 and yy < m and board[xx][yy] % 2 == 1:\n count += 1\n\n return count",
"def get_neighbors(self, line, col):\n neighbors = 0\n for line_shift in [-1, 0, 1]:\n for col_shift in [-1, 0, 1]:\n if line_shift == 0 and col_shift == 0:\n continue # Do not count given cell\n # % connects left/right and up/down\n i = (line + line_shift) % self.lines\n j = (col + col_shift) % self.cols\n if self[i][j] == self.cell_state['alive']:\n neighbors += 1\n return neighbors",
"def get_number_neighbours_of_cell(self, x_cell, y_cell):\n alive_neighbours = 0\n \n # neighbour indices\n x_indices = [x_cell-1, x_cell, x_cell+1]\n y_indices = [y_cell-1, y_cell, y_cell+1]\n\n\n #TODO: use functional programming ^^^^^^\n #x_indices = list(filter(lambda x: x < 0 and x > self.size[0], x_indices))\n #y_indices = list(filter(lambda y: y < 0 and y > self.size[1], y_indices))\n \n # correct indices for cell neighbours based on wrap_around_borders\n #TODO: this so far only works for x,y same size..\n if self.wrap_around_borders:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n indices.append(self.board_size[0] - 1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n indices.append(0)\n else:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n\n # check each neighbour status and add to counter\n for x in x_indices:\n for y in y_indices:\n alive_neighbours = alive_neighbours + self.board_state[x][y]\n\n # dont count own value\n alive_neighbours = alive_neighbours - self.board_state[x_cell][y_cell]\n\n return alive_neighbours",
"def _get_neighbors(size, point):\n i, j = point\n\n neighbors = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]\n _valid_neighbor = lambda neighbor: all(0 <= x < size for x in neighbor)\n neighbors = list(filter(_valid_neighbor, neighbors))\n \n return neighbors",
"def get_neighbors_count(self, atom):\n return self._graph.get_connected_vertices_count(atom)",
"def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count",
"def countAdjacentFloorNodes(self, x, y):\n\t\treturn self.isFloor(x - 1, y) + self.isFloor(x + 1, y) + self.isFloor(x, y - 1) + self.isFloor(x, y + 1)",
"def _get_neighbours(point):\n # Pull coords out of point.\n x = point[0]\n y = point[1]\n z = point[2]\n return ((x-1, y, z), (x+1, y, z), (x, y-1, z), (x, y+1, z), (x, y, z-1), (x, y, z+1))",
"def _count_subset_neighbors(v, X):\n return len(set(v.neighbors).intersection(X))",
"def get_neighbors(self, row, col):\n neighbors = set()\n for d in [-1,1]:\n if row+d >= 0 and row+d < self._height and \\\n (row+d,col) in self._empty_spaces:\n neighbors.add((row+d,col))\n if col+d >= 0 and col+d < self._width and \\\n (row,col+d) in self._empty_spaces:\n neighbors.add((row,col+d))\n return neighbors",
"def checkDimension(neighbour, current_point):\n for i in range(3):\n delta = abs(neighbour[i] - current_point[i])\n if delta > 0:\n return i"
] | [
"0.7307373",
"0.7237684",
"0.7203775",
"0.71407646",
"0.6955501",
"0.68983823",
"0.6883159",
"0.6826486",
"0.6824896",
"0.6804748",
"0.6788591",
"0.67754936",
"0.67619663",
"0.67619663",
"0.67333233",
"0.6709194",
"0.66639596",
"0.66181695",
"0.657233",
"0.65499747",
"0.65250754",
"0.64985716",
"0.64570016",
"0.6450929",
"0.6436147",
"0.64351785",
"0.6386303",
"0.6379967",
"0.6366813",
"0.6363727"
] | 0.7302581 | 1 |
Find unoccupied positions on the board. | def findUnoccupied( board, occupation):
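    # A position index is free when it does not appear as a key in the occupation mapping.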
    return [j for j in range(len(board.positions))
            if j not in occupation]
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def free_positions(self):\n positions = []\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n if self.grid[i][j] == 0:\n positions.append((i, j))\n if positions == []:\n raise GameException('Game Over. No free position left.')\n return positions",
"def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)",
"def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn",
"def filled_positions(self):\n return [x for x in assignable_positions if self.grid[x][0]]",
"def find_empty(self):\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n\n for i in range(num_rows):\n for j in range(num_cols):\n if self.board[i][j] == 0:\n return (i, j)",
"def findEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return [x,y]",
"def available_positions(self):\n available_positions = []\n for i in range(self.positions_count):\n if self.board[i] == 0:\n available_positions.append(i+1)\n return available_positions",
"def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret",
"def available_positions(self):\n if len([x for x in self.grid.values() if x[0] != None]) < 13:\n return [x for x in assignable_positions if self.grid[x][1] == \"---\"]\n else:\n return []",
"def remove_filled_positions(self, positions, board):\n\n new_positions = []\n for p in positions:\n if board.check_move(p[0], p[1]):\n new_positions.append(p)\n return new_positions",
"def get_positions(self):\r\n null_pos, black_pos, white_pos = set(), set(), set()\r\n for pos in BOARD_POSITIONS:\r\n if self.state[pos[0]][pos[1]] == 0:\r\n null_pos.add(pos)\r\n elif self.state[pos[0]][pos[1]] == 1:\r\n black_pos.add(pos)\r\n else:\r\n white_pos.add(pos)\r\n return null_pos, black_pos, white_pos",
"def get_empty_board_indecies(self):\n empty_indecies = []\n for row_num in range(len(self.board)):\n for col_num in range(len(self.board)):\n if self.board[row_num][col_num] and self.board[row_num][col_num].state == PegState.EMPTY:\n empty_indecies.append((row_num, col_num))\n return empty_indecies",
"def get_empty_positions(self):\n\n empty_positions = []\n\n for i in range(self._dimension):\n for j in range(self._dimension):\n if self._board[i][j] == ' ':\n empty_positions.append((i, j))\n\n return empty_positions",
"def _find_empty_cell(self):\n\n for r, row in enumerate(self._board):\n for c, cell in enumerate(row):\n if cell is None:\n return r, c",
"def get_played_positions(board):\n return np.argwhere(board.state != -1)",
"def get_unknown_neighbours(self, row, col):\n return [cell for cell in self.get_neighbours(row, col) if cell.state == None ]",
"def find_empty_space(self, state):\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n return (i, j)",
"def available_spots(self):\n occupied_tiles = self.board.keys()\n neighbors = lambda x, y: ((x+1, y), (x-1, y), (x, y+1), (y, y-1))\n tiles_near_occupied = set(neighbor for tile in occupied_tiles\n for neighbor in neighbors(*tile))\n unnoccupied_titles_near_occupied = tiles_near_occupied - set(occupied_tiles)\n return unnoccupied_titles_near_occupied",
"def find_empty(self):\n min_num_choices = 10\n ret_x, ret_y = (-1, -1)\n\n for x in range(0, 9):\n for y in range(0, 9):\n if self.field[x][y] != -1:\n continue\n\n if (min_num_choices > len(self.choices[x][y])):\n min_num_choices = len(self.choices[x][y])\n ret_x = x\n ret_y = y\n\n return (ret_x, ret_y)",
"def unoccupied(self):\n self.is_occupied = 0\n for hex in self.fon:\n hex.remove_neighbor()\n hex.set_quality()",
"def find_excited_locations(self):\n return np.asarray(np.where(self._grid == 8)).T",
"def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]",
"def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]",
"def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]",
"def opponentBoarderPosition(self, gameState):\n if self.red:\n i = self.midWidth\n else:\n i = self.midWidth - 1\n boudaries = [(i,j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0],i[1]):\n validPositions.append(i)\n return validPositions",
"def find_empty(puzzle):\r\n empty_squares = []\r\n for y in range(len(puzzle.squares)):\r\n for x in range(len(puzzle.squares[0])):\r\n if puzzle.squares[y][x].is_editable() is True:\r\n empty_squares.append((x, y))\r\n return empty_squares",
"def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours",
"def legalMoves(self):\n return [c for c in range(self.getWidth()) if len([r for r in range(self.getHeight()) if self.cell[c][r]==EMPTY])>0 ]",
"def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())",
"def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())"
] | [
"0.74757147",
"0.73623365",
"0.7305674",
"0.72659814",
"0.7136726",
"0.6983209",
"0.6902076",
"0.69016534",
"0.68626094",
"0.6858928",
"0.67981094",
"0.67022055",
"0.66423976",
"0.65927297",
"0.6580229",
"0.6564762",
"0.65628403",
"0.6559673",
"0.654521",
"0.65293694",
"0.65235007",
"0.6506315",
"0.6506315",
"0.6506315",
"0.64854264",
"0.64846927",
"0.64595026",
"0.64592546",
"0.64553624",
"0.64553624"
] | 0.77521825 | 0 |
Determines whether the model instance has already been selected in a related field (ManyToManyField, OneToOneField). | def available(self):
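        # Inspect every reverse relation on the model; the instance counts as available only while nothing references it.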
fields = self._meta.get_fields()
for field in fields:
if isinstance(field, models.ManyToManyRel):
attr = field.get_accessor_name()
if getattr(self, attr).count() > 0:
return False
elif isinstance(field, models.OneToOneRel):
attr = field.get_accessor_name()
if getattr(self, attr, None):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def relation_exists(cls, model):\n return bool(cls.get_related_field(model)\n or cls.get_reverse_related_field(model))",
"def isRelated(self):\n return len(self.user_storage.all()) > 0",
"def has_field(self, field):\n return field in self.extra_fields",
"def contains(self, obj):\n self._not_support_combined_queries(\"contains\")\n if self._fields is not None:\n raise TypeError(\n \"Cannot call QuerySet.contains() after .values() or .values_list().\"\n )\n try:\n if obj._meta.concrete_model != self.model._meta.concrete_model:\n return False\n except AttributeError:\n raise TypeError(\"'obj' must be a model instance.\")\n if obj.pk is None:\n raise ValueError(\"QuerySet.contains() cannot be used on unsaved objects.\")\n if self._result_cache is not None:\n return obj in self._result_cache\n return self.filter(pk=obj.pk).exists()",
"def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False",
"def exists(self):\n new_qs = self._copy_self()\n new_qs.max_items = 1\n return new_qs.count(page_size=1) > 0",
"def _optimize_field_by_name(self, store: QueryOptimizerStore, model, selection, field_def) -> bool:\n name = self._get_name_from_field_dev(field_def)\n if not (model_field := self._get_model_field_from_name(model, name)):\n return False\n _logger.info('_optimize_field_by_name %r %r', name, model_field)\n if self._is_foreign_key_id(model_field, name):\n # ToDo: check if this works - i write resolvers for this\n store.only(name)\n return True\n if model_field.many_to_one or model_field.one_to_one:\n # ForeignKey or OneToOneField\n field_store = self._optimize_gql_selections(\n selection.selections,\n self._get_type(field_def),\n )\n store.select_related(name, field_store)\n return True\n if model_field.one_to_many or model_field.many_to_many:\n field_store = self._optimize_gql_selections(\n selection.selections,\n self._get_type(field_def),\n )\n if isinstance(model_field, ManyToOneRel):\n field_store.only(model_field.field.name)\n related_queryset = model_field.related_model.objects.all()\n _logger.info('_optimize_field_by_name many relation %r %r', model, name)\n store.prefetch_related(name, field_store, related_queryset)\n return True\n if not model_field.is_relation:\n store.only(name)\n return True\n return False",
"def _filter_m2m(self, field):\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(field.model, field.rel.to, field):\n return field",
"def __contains__(self, key):\n if isinstance(key, Model):\n key = key.get_id()\n return (str(key) in self.get_models())",
"def is_m2m_set(self, int_model, model1, model2):\n for m2m in model1._meta.many_to_many:\n if m2m.rel.to == model2 and m2m.rel.through == int_model:\n return True\n for m2m in model2._meta.many_to_many:\n if m2m.rel.to == model1 and m2m.rel.through == int_model:\n return True\n return False",
"def table_contains_elements(self, model):\n if model.objects.count() > 1:\n return True\n return False",
"def exists(self, value=None):\n try:\n if not value:\n value = self.get()\n except AttributeError:\n # If the instance is deleted, the _pk attribute doesn't exist\n # anymore. So we catch the AttributeError to return False (this pk\n # field doesn't exist anymore) in this specific case\n return False\n else:\n return self.connection.sismember(self.collection_key, value)",
"def has_item(self, item):\n return item in self.set",
"def has_item(self, item):\n return item in self.set",
"def exists(self):\n return self.obj is not None",
"def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False",
"def has_add_permissions(self):\n queryset = self.model.objects.all()\n if hasattr(queryset, 'has_add_permissions'):\n return queryset.has_add_permissions( PyFormsMiddleware.user() )\n else:\n return True",
"def is_one(self) -> bool:\n return self.field.one == self",
"def has_field(cls, field) -> bool:\n try:\n cls._meta.get_field(field)\n return True\n except models.FieldDoesNotExist:\n return False",
"def test_annotation(self):\n queryset = OtherModel.objects.annotate(\n has_related=Exists(QuerySetModel.objects.filter(other_id=OuterRef('pk')))\n )\n self.assertEqual(queryset.count(), 1)\n self.assertFalse(queryset[0].has_related)",
"def _filter_related_m2m(self, rel):\n field = rel.field\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel",
"def include_related(request):\n query_param_value = request.GET.get(\"include_related\")\n return query_param_value in [\"true\", \"True\"]",
"def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'data_collection' or \\\n obj2._meta.app_label == 'data_collection':\n return True\n return None",
"def contains_field(cls, field_id):\n return field_id in cls.__data",
"def IsDocumentRelated(self, *args, **kwargs):\n pass",
"def is_initially_selected(self, value):\n return value in self._get_selected_values_set()",
"def has_siblings(self):\n return self.siblings.exclude(pk=self.pk).exists()",
"def in_(self, other: Any) -> NoReturn:\n raise NotImplementedError(\n \"in_() not yet supported for \"\n \"relationships. For a simple \"\n \"many-to-one, use in_() against \"\n \"the set of foreign key values.\"\n )",
"def __eq__(self, other):\n return (other is not None and\n self.field_name == other.field_name and\n self.field_type is other.field_type and\n dict.__eq__(self.field_attrs, other.field_attrs) and\n self.related_model == other.related_model)",
"def hasField(self) -> bool:\n return bool(self.__field)"
] | [
"0.6742698",
"0.6351571",
"0.6249839",
"0.6177484",
"0.594861",
"0.58511186",
"0.58348316",
"0.5823608",
"0.5715787",
"0.56835854",
"0.5671056",
"0.5618836",
"0.5598657",
"0.5598657",
"0.55803764",
"0.55689776",
"0.5560058",
"0.55562407",
"0.551438",
"0.55038196",
"0.54985",
"0.54815364",
"0.54295796",
"0.5426876",
"0.54227006",
"0.5397911",
"0.5394847",
"0.53613305",
"0.5352232",
"0.5341833"
] | 0.66686696 | 1 |
Output the noise covariance matrix R. | def getCovarianceNoiseMatrix(self):
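        # R = B^T B, assuming getB() returns the noise shaping matrix B.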
return np.dot ( self.getB().T, self.getB() ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def covariance_matrix(self):\n\n self._order_observations()\n self.cov_matrix = self._compute_covariance_matrix(\n self.list_observations, self.list_observations)\n\n self.cov_matrix += np.diag(np.array([self.noise] * self.n_observation))\n\n return self.cov_matrix",
"def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))",
"def process_noise_cov(self, dt=0.0):\n raise NotImplementedError",
"def process_noise_cov(self, dt=0.0):\n raise NotImplementedError",
"def process_noise_cov(self, dt=0.0):\n raise NotImplementedError",
"def covariance_matrix(self):\n\n cov_filename = self.covariance_filename\n cov_press, cov_data = self._co_star_read(cov_filename)\n\n # \"Fix\" covariances that are not positive definite\n if not np.all(np.linalg.eigvals(cov_data) > 0):\n warnings.warn(\"Covariance matrix for species {} is not positive definite, modifying eigenvals\".format(self.species))\n\n # Get eigen values and vector from matrix\n eigval, eigvec = np.linalg.eig(cov_data)\n\n # Find negative eigen values and set to the media\n eigval[np.where(eigval < 0)] = np.median(eigval)\n\n # Reconstruct matrix with modified eigen values\n cov_data = eigvec @ np.diag(eigval) @ np.linalg.inv(eigvec)\n\n return cov_data",
"def rand_cov():\n c = uniform(-1, 1)\n return [[uniform(0, 1), c], [c, uniform(0, 1)]]",
"def _gp_cov_matrix(Nt, snr2, clen2):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * f(np.arange(Nt))\n C[0] += 1 # noise\n return scipy.linalg.toeplitz(C)",
"def cov(self):\n cov_ = np.dot(self.weights * self.demeaned.T, self.demeaned)\n cov_ /= self.sum_weights - self.ddof\n return cov_",
"def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho",
"def process_noise_cov(self, dt=0.0):\n if dt not in self._Q_cache:\n with torch.no_grad():\n d = self._dimension\n dt2 = dt * dt\n dt3 = dt2 * dt\n dt4 = dt2 * dt2\n Q = torch.zeros(d, d, dtype=self.sa2.dtype, device=self.sa2.device)\n Q[: d // 2, : d // 2] = 0.25 * dt4 * eye_like(self.sa2, d // 2)\n Q[: d // 2, d // 2 :] = 0.5 * dt3 * eye_like(self.sa2, d // 2)\n Q[d // 2 :, : d // 2] = 0.5 * dt3 * eye_like(self.sa2, d // 2)\n Q[d // 2 :, d // 2 :] = dt2 * eye_like(self.sa2, d // 2)\n Q = Q * self.sa2\n self._Q_cache[dt] = Q\n\n return self._Q_cache[dt]",
"def get_cov_matrix_outputs(self):\n cov = numpy.diag(numpy.zeros(self.get_num_measured_outputs()))\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n cov[i,i] = o.get_covariance()\n i += 1\n return cov",
"def cov(self):\n E_x = Sample.mean(self)\n Std_x = Sample.std(self)\n cov = Std_x/E_x\n return(cov)",
"def process_noise_cov(self, dt=0.0):\n if dt not in self._Q_cache:\n with torch.no_grad():\n d = self._dimension\n dt2 = dt * dt\n dt3 = dt2 * dt\n Q = torch.zeros(d, d, dtype=self.sa2.dtype, device=self.sa2.device)\n eye = eye_like(self.sa2, d // 2)\n Q[: d // 2, : d // 2] = dt3 * eye / 3.0\n Q[: d // 2, d // 2 :] = dt2 * eye / 2.0\n Q[d // 2 :, : d // 2] = dt2 * eye / 2.0\n Q[d // 2 :, d // 2 :] = dt * eye\n # sa2 * dt is an intensity factor that changes in velocity\n # over a sampling period ``dt``, ideally should be ~``sqrt(q*dt)``.\n Q = Q * (self.sa2 * dt)\n self._Q_cache[dt] = Q\n\n return self._Q_cache[dt]",
"def _mn_cov_ ( self , size = -1 , root = False ) :\n #\n if size <= 0 : size = len ( self )\n size = min ( size , len ( self ) ) \n #\n from array import array\n matrix = array ( 'd' , [ 0 for i in range(0, size * size) ] )\n self.mnemat ( matrix , size )\n #\n import ostap.math.linalg\n from ostap.core.core import Ostap \n mtrx = Ostap.Math.SymMatrix ( size )() \n for i in range ( 0 , size ) :\n for j in range ( i , size ) : \n mtrx [ i , j ] = matrix [ i * size + j ]\n \n return mtrx",
"def process_noise_cov(self, dt=0.0):\n if dt not in self._Q_cache:\n Q = self.sv2 * dt * dt * eye_like(self.sv2, self._dimension)\n self._Q_cache[dt] = Q\n\n return self._Q_cache[dt]",
"def covariance(x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y)) / (n - 1)",
"def covariance (x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y))/(n-1)",
"def get_cov_matrix_states(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n return cov",
"def get_thermal_covariance(self):\n cov = []\n for var in self.noise_power_variance:\n cov.append(np.diag(var * np.ones(len(self.eta))))\n\n return cov",
"def get_covariance(self):\n x = self.particles[:, 0]\n y = self.particles[:, 1]\n X = np.stack((x, y), axis=0)\n return np.cov(X)",
"def covariance(x, y):\n n = len(x)\n return dot(deviations_from_mean(x), deviations_from_mean(y))/ (n - 1)",
"def _noise_matrix(self):\n\n return self._noise * (np.random.rand(self._rows, self._cols)\n - 0.5)",
"def cov_matrix(X, mu):\n m, n = X.shape\n X_minus_mu = X - mu\n sigma = (1 / m) * (X_minus_mu.T).dot(X_minus_mu)\n\n return sigma",
"def compute_cov(self):\n subject = copy.copy(self.signals)\n subject -= subject.mean(axis=0)\n if self.standardize:\n subject = subject / subject.std(axis=0) # copy on purpose\n\n n_samples = subject.shape[0]\n self.cov_ = np.dot(subject.T, subject) / n_samples\n return self",
"def get_cov_matrix_parameters(self):\n cov = numpy.diag(numpy.zeros(self.get_num_parameters()))\n i = 0\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov",
"def getCovMatrix(self, caliStep, weights):\n\n Sigma = np.zeros([self.numObs, self.numObs])\n # scale observation data with normalized variance parameter to get covariance matrix\n for i in range(self.numObs):\n # use smaller weights for higher precision\n if self.scaleCovWithMax:\n Sigma[i, i] = self.sigma * weights[i] * max(self.obsData[:, i]) ** 2\n else:\n Sigma[i, i] = self.sigma * weights[i] * self.obsData[caliStep, i] ** 2\n return Sigma",
"def print_latex_cov(P):\n out = ''\n # Print out header with state variables\n for var in STATE_VARS:\n out += \" & ${0:9s}$ \".format(var)\n\n out += '\\\\\\\\ \\n'\n\n\n # Print out correlation / covariance matrix \n for row in range(18):\n out += \"${0:3s}$ \".format(STATE_VARS[row])\n for col in range(18):\n # Print correlations on lower diagnal\n if col < row:\n out += \" & {0: 2.2f} \".format(float(P[row,col]/(sqrt(P[row,row]) * sqrt(P[col,col]) )))\n # Highlight variances in blue\n elif row == col:\n out += \" & {0: 2.2e} \".format(float(P[row,col]))\n else:\n out += \"& {0: 2.2e} \".format(float(P[row,col]))\n out += '\\\\\\\\ \\n'\n\n print out",
"def covariance(G, variables = [], conditionants = []):\n return parameters(G, variables = variables, \n conditionants = conditionants )[\"cov\"]",
"def get_cosmic_variance(self, signal_power):\n if self.ps_dim == 2:\n cov = []\n \n for ii, sig_eta in enumerate(signal_power):\n cov.append((1 / self.grid_weights[ii] * np.diag(sig_eta)**2))\n\n return cov\n else:\n return self.grid_weights * signal_power"
] | [
"0.71300656",
"0.66976005",
"0.6629202",
"0.6629202",
"0.6629202",
"0.6622315",
"0.6622108",
"0.6622073",
"0.6570352",
"0.6554128",
"0.6520113",
"0.64823085",
"0.64064324",
"0.6393191",
"0.6317923",
"0.62303",
"0.6217225",
"0.6212507",
"0.6195888",
"0.6166094",
"0.612643",
"0.60941535",
"0.6054734",
"0.6047353",
"0.5995319",
"0.59950817",
"0.5979224",
"0.59743917",
"0.59668756",
"0.59310156"
] | 0.7025741 | 1 |
Determine if the object has a parent with the supplied name. | def has_parent(obj, parent_name):
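    # Walk up the parent chain recursively until a matching name is found or the chain runs out.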
if obj.parent is None:
return False
if obj.parent.name is None:
return False
elif obj.parent.name == parent_name:
return True
else:
return has_parent(obj.parent, parent_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_parent(self):\n return self.parent != None",
"def has_parent(self):\n return self._parent_ is not None",
"def _contains_in_self_or_parent(self, name: str) -> bool:\n return name in self",
"def is_parent(self):\n if self.parent is not None:\n return False\n return True",
"def is_parent_of(self):\n return self.hasLabel('parent_of')",
"def has_parent(self):\n return False",
"def isChildExists(self, name):\n return self.getChild(name) != None",
"def is_parent(self) -> bool:\n return AccountEntry.objects.filter(parent=self).exists()",
"def contains_parent(self, pid):\n return pid in self._parent_ids",
"def has_parent(self, index):\n return self.get_parent_index(index) < len(self.heap)",
"def is_parent(self):\n return not self.children",
"def is_parent_of(cls, *args):\n return cls.graph_traversal(None, None, Bytecode()).is_parent_of(*args)",
"def has_parent_key(self):\n if self.is_root():\n return False\n try:\n self.parent_key()\n return True\n except ParseException:\n return False",
"def has_parent(self, term):\n for parent in self.parents:\n if parent.id == term or parent.has_parent(term):\n return True\n return False",
"def has_name(self, name: str) -> bool:\n return name in self.child_tags",
"def is_parent(self, item):\n if len(self.df.loc[self.df['parent_code']==item, :]): \n return True\n return False",
"def has_parents(self):\n return len(self._parents) > 0",
"def is_parent(review_request, commit_data=None):\n commit_data = fetch_commit_data(review_request, commit_data=commit_data)\n\n return str(commit_data.extra_data.get(\n SQUASHED_KEY, False)).lower() == 'true'",
"def is_parent(self, mother, kid):\n mom_node = self.names_to_nodes[mother]\n child_node = self.names_to_nodes[kid]\n return child_node.is_parent(mom_node)",
"def __contains__(self, name):\n return (name in self._defs) or \\\n ((self._parent is not None) and (name in self._parent))",
"def is_parent_ref(\n self,\n schema: s_schema.Schema,\n reference: Object,\n ) -> bool:\n return False",
"def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()",
"def islchild(self):\n\t\tif (self.parent() and self.parent().lchild() is self): #TODO is or == here\n\t\t\treturn True\n\t\treturn False",
"def parent_of(self, eid):\n return self.E(eid).is_parent_of()",
"def is_in(self, other):\n if self.name == other.name:\n return True\n else:\n if self.parent:\n return self.parent.is_in(other)\n else:\n return False",
"def isSetParentSBMLObject(self):\n return _libsbml.ASTNode_isSetParentSBMLObject(self)",
"def _has_parents(self, node: CFNode) -> bool:\n return bool(self._graph._backedges[node])",
"def __contains__(self, item):\n if item in self._parents:\n return True\n else:\n return False",
"def is_parent_of_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.is_parent_of_bin\n return self._get_provider_session('catalog_hierarchy_session').is_parent_of_catalog(*args, **kwargs)",
"def find_parent(self,\r\n index):\r\n\r\n if not index.level() > 1:\r\n return False\r\n go_on = True\r\n while not index.is_top() or index.level() < 1:\r\n\r\n\r\n if str(index.parent()) in self.indexes():\r\n return index.parent()\r\n index = index.parent()\r\n\r\n return False"
] | [
"0.7545343",
"0.7435591",
"0.73406065",
"0.7337848",
"0.7336612",
"0.7255639",
"0.7117234",
"0.7116028",
"0.70370907",
"0.6815638",
"0.6784905",
"0.67783093",
"0.6720416",
"0.6667514",
"0.6614133",
"0.6522801",
"0.6410729",
"0.640435",
"0.63520426",
"0.6320416",
"0.6236038",
"0.61792105",
"0.6169194",
"0.6147922",
"0.6118637",
"0.61061174",
"0.6092991",
"0.60689795",
"0.60651433",
"0.6010433"
] | 0.8476002 | 0 |
Simulate PARALLEL_UNIVERSES_COUNT universes, then return the player's overall multiverse survival rate. | def compute_player_score():
progress_bar = ProgressBar(label="Computing universes")
survivals_count = 0
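    # Each universe is an independent run of simulate_universe(); count the runs in which the player survives.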
for i in range(PARALLEL_UNIVERSES_COUNT):
if simulate_universe():
survivals_count += 1
progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)
progress_bar.end("\n\n")
return survivals_count / PARALLEL_UNIVERSES_COUNT | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_god_score():\n\n survivals_count = 0\n for _ in range(PARALLEL_UNIVERSES_COUNT):\n best_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL)\n for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)):\n treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL)\n if treated_survival > best_survival:\n best_survival = treated_survival\n if random.uniform(0, 1) <= best_survival:\n survivals_count += 1\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def simulate_universe():\n\n # untreated_survival is the probability to survive if not treated\n # this is an exact law of the universe, the player will not have this information\n untreated_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL)\n\n trials: list[Trial] = []\n\n treated_survivals: dict[Trial, float] = {}\n\n for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)):\n group_size = random.randint(MIN_GROUP_SIZE, MAX_GROUP_SIZE)\n\n # treated_survival is the probability to survive if treated\n # this is an exact law of the universe, the player will not have this information\n # therefore it is stored in a separate dict and not in the given-to-player Trial object\n treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL)\n\n trial = Trial(group_size, untreated_survival, treated_survival)\n\n trials.append(trial)\n treated_survivals[trial] = treated_survival\n\n chosen_trial = playground.choose_trial(trials)\n\n if chosen_trial is None: # None means no treatment\n chosen_survival = untreated_survival\n else:\n chosen_survival = treated_survivals[chosen_trial]\n\n return random.uniform(0, 1) <= chosen_survival",
"def calculate_survivors(self, planet=None):\n mage_life = 0\n if self.magos:\n\n # Calculate survivors\n mage_life = sum(self.race.rango_vid_mago) // 2\n survivors = min(self.vida / mage_life, len(self.magos))\n if planet:\n planet.magos = survivors\n\n # Kill off the dead and improve the survivors\n shuffle(self.magos)\n [self.magos.pop() for i in range(len(self.magos) - survivors)]\n for m in self.magos:\n m = mage(m.ataque + 5, m.vida + 10)\n\n if self.soldados:\n\n # Calculate survivors\n soldier_life = sum(self.race.rango_vid_soldado) // 2\n survivors = self.vida - len(self.magos)*mage_life\n survivors //= soldier_life\n if planet:\n planet.soldados = survivors\n\n # Kill off the dead and improve the survivors\n shuffle(self.soldados)\n [self.soldados.pop()\n for i in range(len(self.soldados) - survivors)]\n for s in self.soldados:\n s = soldier(s.ataque + 5, s.vida + 10)",
"def tournament_selection(self, population: List[IndividualType]) -> List[IndividualType]:\n survivors: List[IndividualType] = []\n for _ in range(self.configuration.n_survivors):\n # Choose participants\n rooster: List[IndividualType] = random.sample(population, self.configuration.rooster_size)\n # Select fittest of participants as survivor\n fittest_individual_of_rooster = self.get_best_individual(rooster)\n population.remove(fittest_individual_of_rooster)\n survivors.append(fittest_individual_of_rooster)\n return survivors",
"def college_selectivity():",
"def child_U(self):\n return math.sqrt(self.number_visits) * self.child_priors / (\n 1 + self.child_number_visits)",
"def playersVehicleScoring(self):\n self.__playersDriverNum()\n return self.Rf2Scor.mVehicles[self.__playersDriverNum()]",
"def __population_quality(self) -> float:\n population_identifier = np.zeros(shape=self.Dataset.size)\n subgroup_identifier = np.ones(shape=len(self.get_cover()))\n group = np.concatenate((population_identifier,\n subgroup_identifier))\n\n subgroup_times = self.Dataset.survival[self.get_cover()]\n subgroup_status = self.Dataset.status[self.get_cover()]\n\n time = np.concatenate((self.Dataset.survival, subgroup_times))\n status = np.concatenate((self.Dataset.status, subgroup_status))\n\n _, pvalue = sm.duration.survdiff(time, status, group)\n return 1 - pvalue",
"def survived(self,rates):\n # Juvenile or adult survival\n if self.age > 3:\n prob = rates[1]\n else:\n prob = rates[0]\n if np.random.binomial(1,prob):\n return True\n else:\n return False",
"def simulation(self, n = 42):\n\n self.initialisation()\n i = 0\n while i < n and self.agent.vivant :\n self.step()\n i+= 1\n return self.perfGlobale",
"def vote_of_citizens():\n\tglobal vote_first_candidate\n\tglobal vote_second_candidate\n\tglobal blank_vote\n\t\n\tfor i in range(NUMBER_OF_CITIZENS):\n\t\tvote = random.randint(1,10)\n\n\t\tif(vote <= 3):\n\t\t\tvote_first_candidate+=1\n\t\telif(vote > 3 and vote <= 6):\n\t\t\tvote_second_candidate+=1\n\t\telse:\n\t\t\tblank_vote+=1",
"def p(party, vote_count, s):\n return t(party, vote_count) / d(s)",
"def vi_get_county_with_max_voting(self):\r\n\r\n # fetch voting information from SCB database\r\n self.vi_fetch_voting_info_from_scb()\r\n for voting_year, voting_data in self.voting_information.items():\r\n max_percent = 0.0\r\n county_with_max_percent = ''\r\n for county, percentage in voting_data.items():\r\n if percentage > max_percent:\r\n max_percent = percentage\r\n county_with_max_percent = county\r\n self.vi_log_msg(voting_year, county_with_max_percent, max_percent)",
"def vis_survival_stats(data, outcomes, feature):\n pass",
"def test_repetition_vector():\n cosimulations = [\n example.control.gauss_seidel(1., 5., 1.),\n example.control.gauss_seidel(1., 5., 1., True),\n example.control.gauss_seidel(1., 5., 1., True, True),\n example.control.gauss_seidel(1., 5., 1., False, True),\n example.control.gauss_jacobi(1., 5., 1.),\n example.control.multi_rate(1., 5., 1.),\n ]\n for cosimulation in cosimulations:\n sdfg = cs.convert_to_sdf(cosimulation)\n schedule = sdf.calculate_schedule(sdfg)\n network, hs, _, _ = cosimulation\n _, connections = network\n repetitions = cs.repetition_vector(connections, hs)\n for agent in sdfg[0]:\n assert sum(agent == executed for executed in schedule) == repetitions[agent]",
"def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population",
"def getRandomMinorCiv(self):\n\t\t\n\t\treturn con.iIndependent + gc.getGame().getSorenRandNum(iNumMinorPlayers, 'Random minor civilization')",
"def mi_pressure_vessel(self, u):\n assert len(u) == 4, 'MI Pressure vessel design needs to specify 4 parameters.'\n R = u[0]\n L = u[1]\n ts = u[2]\n th = u[3]\n fitness = 0.6224 * R * ts * L + 1.7781 * R ** 2 * th + 3.1611 * ts ** 2 * L + 19.8621 * R * ts ** 2\n return fitness",
"def survival_score(timeSurvived, duration, winPlace):\n\n\tsurvival = (timeSurvived / duration) * 100\n\tif winPlace == 1:\n\t\twin_place = 100\n\telse:\n\t\twin_place = 100 - winPlace\n\tsurvival_score = int(survival * 0.8 + win_place * 0.2)\n\tif survival_score < 50:\n\t\tsurvival_score = 50\n\n\treturn survival_score",
"def suicide_query(game_mode=0, observability=-1, game_seed=-1, agent=-1):\n\n event_id = \"death\"\n\n # Keep only those games within given configuration\n if game_seed != -1:\n selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability) &\n (data['game_seed'] == game_seed)]\n else:\n selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability)]\n if agent != -1:\n for index, row in selection.iterrows():\n if agent not in row[\"agents\"]:\n selection.drop(index, inplace=True)\n\n # print(selection.size)\n\n team_kill_count = []\n ngames = 0 # Number of games in which this agent dies\n suicides = 0 # Number of games in which this agent commits suicide\n events_per_sample = []\n team_kills = 0\n\n # Iterate through selected game data\n for index, row in selection.iterrows():\n if agent in row[\"agents\"] and row['event_id'] == event_id: # This agent played in the game\n\n # Find its agent ID depending on its position in the agent list. There may be more than 1 agent of this\n # type in the game, so iterate over all and check individually.\n ll = row[\"agents\"]\n indices = [i for i, el in enumerate(ll) if el == agent]\n\n for agent_id in indices:\n # teammate = (agent_id + 2) % 4\n sample_event_counter = 0\n for event in row[\"event_data\"]:\n if event[\"agent_id\"] == agent_id: # This agent dies\n if event[\"killer\"] == agent_id: # Suicide\n sample_event_counter += 1\n # if event[\"killer\"] == teammate: # Killed by teammate\n # team_kills += 1\n # if event[\"agent_id\"] == teammate: # Teammate dies\n # if event[\"killer\"] == agent_id: # Killed by this agent\n # team_kill_count += 1\n ngames += 1\n events_per_sample.append(sample_event_counter)\n suicides += sample_event_counter\n\n # suicide_count.append(100*suicides/ngames) # Showing percentage of game suicides\n # team_kill_count.append(100*team_kills/games)\n\n # percentage = 100 * suicides / ngames\n # mean = ngames * (percentage / 100)\n # variance = mean * (1 - (percentage / 100))\n # std_dev = math.sqrt(variance)\n # std_err = std_dev / math.sqrt(ngames)\n # h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95 confidence interval\n # return percentage, h\n\n # print(events_per_sample)\n mean = suicides/ngames\n variance = sum([pow(x - mean, 2) for x in events_per_sample])/len(events_per_sample)\n std_dev = math.sqrt(variance)\n std_err = std_dev/math.sqrt(len(events_per_sample))\n h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95% confidence interval\n return mean * 100, h * 100 # , team_kill_count",
"def test_josephus_survivor(self):\n\n allure.dynamic.title(\"Testing josephus_survivor function\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>In this kata you have to verify that the function \"\n \"correctly returns who is the \\\"survivor\\\", ie: the \"\n \"last element of a Josephus permutation.</p>\")\n\n test_data = [\n ((7, 3), 4),\n ((11, 19), 10),\n ((1, 300), 1),\n ((14, 2), 13),\n ((100, 1), 100)\n ]\n\n for test_data, expected in test_data:\n n = test_data[0]\n k = test_data[1]\n result = josephus_survivor(n, k)\n\n with allure.step(\"Enter test data (n: {}, k: {}) and verify \"\n \"the output ({}) vs expected ({})\".format(n,\n k,\n result,\n expected)):\n print_log(n=n,\n k=k,\n result=result,\n expected=expected)\n\n self.assertEqual(expected,\n result)",
"def survivalIntegral(self):\n if self.isReplicateGroup():\n # here we average over the underlying replicates\n si=numpy.zeros([len(self.activeChildWellIndices())])\n\n allstatuses=StatusMessage()\n statuses=StatusMessage()\n i=0\n for clstc in self.activeChildWells():\n si[i], sivar, status = clstc.survivalIntegral()\n if status is not None:\n allstatuses.addStatus(status)\n if status is not None and days is not None:\n statuses.addStatus(status)\n i+=1\n\n idcs=numpy.isnan(si)\n if numpy.all(idcs):\n allstatuses.addStatus(self.initDiffStatus)\n return None, None, allstatuses\n\n simean, sivar = maskedArrayToMeanVar(si, ddof=1)\n\n statuses.addStatus(self.initDiffStatus)\n return simean, sivar, statuses\n\n days, viability, viabilityvar, initDiffStatus=self.viability()\n si=None\n if viability is not None and days is not None:\n si=numpy.trapz(viability,x=days)\n return si,None,None",
"def count_vario(dist_param, picker_param):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(5, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n try:\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n except:\n print(\"no data for {}\".format(name_tag))\n continue\n var, norm = variation_of_information(cl_orig, clust2)\n print(\" {} VOI is {}\".format(name_tag, norm))",
"def get_team_results(usrs, sched):\t\n\t\n\ttotal_consistency = 0\n\ttotal_completion = 0\n\tfor user in usrs:\n\t\tresult = get_consistency(user, sched)\n\t\t\n\t\ttotal_consistency += result[\"consistency\"]\n\t\ttotal_completion += result[\"completion\"]\n\t\n\tteam_consistency = 0\n\tteam_completion = 0\n\t\t\n\tif(len(usrs) != 0):\n\t\tteam_consistency = total_consistency / float(len(usrs))\n\t\tteam_completion = total_completion / float(len(usrs))\n\t\t\n\treturn { \"consistency\" : team_consistency, \"completion\" : team_completion }",
"def CRIT_U(p,tv): \n rho=RHO(p,tv)\n # Note: 144 = 2x 72 N; 0.3 = 0.6 drag coef * surface area 0.5 m**2\n uc=np.power(np.divide(144,np.multiply(rho,0.3)),0.5)\n \n return uc",
"def user_interaction_score(uv, recommended_News, ranked=True):\n\n iv = recommended_News[\"topical_vector\"]\n\n product = simple_doct_product(uv, iv)\n\n epsilon = 10e-5\n\n if (product + epsilon) > 1.0:\n vui = 0.99\n else:\n vui = beta_distribution(product)\n\n # Awared preference\n ita = beta_distribution(0.98)\n pui = vui * ita\n\n return pui",
"def compute_objective(Instance: dict):\r\n\r\n print(\"Computing objectives values...\")\r\n # Retrieve usefull infos\r\n T_max = Instance[T_STR]\r\n scenario_numbers = Instance[SCENARIO_NUMBER]\r\n Interventions = Instance[INTERVENTIONS_STR]\r\n quantile = Instance[QUANTILE_STR]\r\n # Retrieve risk final distribution\r\n risk = compute_risk_distribution(Interventions, T_max, scenario_numbers)\r\n # Compute mean risk\r\n mean_risk = compute_mean_risk(risk, T_max, scenario_numbers)\r\n # Compute quantile\r\n q = compute_quantile(risk, T_max, scenario_numbers, quantile)\r\n print(\"Done\")\r\n\r\n return mean_risk, q",
"def _extract_life_sci_relevance(self, bib):\n life_sci_relevant = 0\n for system_key in ('ipc', 'ecla', 'ipcr', 'cpc'):\n try:\n for classif in bib[system_key]:\n if life_sci_relevant == 0 and self.relevant_regex.match(classif):\n life_sci_relevant = 1\n except KeyError:\n # Skip the warning - classifications are processed again below\n pass\n\n return life_sci_relevant",
"def tournament(population, context, n, num_competitors=2):\n result = []\n for i in range(n):\n competitors = np.random.choice(population, num_competitors)\n result.append(max(competitors))\n return result, context",
"def langmuir_occ(p, k):\n\n intermediate = k * p\n\n occupancy = intermediate / (intermediate + 1)\n\n return occupancy"
] | [
"0.66658336",
"0.5772919",
"0.57570714",
"0.5285743",
"0.52517307",
"0.51297843",
"0.5128983",
"0.51264876",
"0.50322986",
"0.50084907",
"0.4935301",
"0.49063638",
"0.48907402",
"0.48814934",
"0.48217684",
"0.48150674",
"0.48144037",
"0.48021117",
"0.47568566",
"0.4754652",
"0.46895778",
"0.4689276",
"0.4685097",
"0.46762037",
"0.46718",
"0.46686104",
"0.46530622",
"0.46495634",
"0.46324232",
"0.4615847"
] | 0.73624575 | 0 |
simulates a universe and uses playground.choose_trial to take a decision; returns True in case of survival in the simulated universe | def simulate_universe():
    # untreated_survival is the probability of surviving if not treated
    # this is an exact law of the universe; the player will not have this information
untreated_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL)
trials: list[Trial] = []
treated_survivals: dict[Trial, float] = {}
for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)):
group_size = random.randint(MIN_GROUP_SIZE, MAX_GROUP_SIZE)
        # treated_survival is the probability of surviving if treated
        # this is an exact law of the universe; the player will not have this information,
        # therefore it is stored in a separate dict and not in the given-to-player Trial object
treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL)
trial = Trial(group_size, untreated_survival, treated_survival)
trials.append(trial)
treated_survivals[trial] = treated_survival
chosen_trial = playground.choose_trial(trials)
if chosen_trial is None: # None means no treatment
chosen_survival = untreated_survival
else:
chosen_survival = treated_survivals[chosen_trial]
return random.uniform(0, 1) <= chosen_survival | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)",
"def run_trial():\n env = gym.make('CartPole-v0')\n obs_dim = env.observation_space.shape[0]\n n_actions = env.action_space.n\n\n qnet = QNet(obs_dim, n_actions)\n agent = Sarsa(qnet, n_actions, 0.99, 1.0, 0.05, 1e4)\n optim = torch.optim.RMSprop(qnet.parameters(), lr=0.01)\n memory = Memory()\n\n return_hist = []\n timestep = 1\n\n while timestep < 1e5:\n state = env.reset()\n done = False\n while not done:\n # Pick action and run a single environment step\n action = agent.act(state, timestep).item()\n next_state, reward, done, _ = env.step(action)\n # Add experience to memory for training\n memory.add_experience(state, action, reward, next_state, done)\n\n state = next_state\n\n # Run a single training step every 32 timesteps\n if timestep % 32 == 0:\n batch = memory.sample()\n agent.train(batch, optim)\n\n # Evaluate the current agent every 1000 agents\n if timestep % 1000 == 0:\n eval_return = evaluate(agent)\n return_hist.append(eval_return)\n\n timestep += 1\n\n return np.array(return_hist)",
"def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line",
"def _do_trial(self, i, j, permutation_vector, energies, adaptor):\n delta = energies[i, i] - energies[j, i] + energies[j, j] - energies[i, j]\n accepted = False\n\n if delta >= 0:\n accepted = True\n else:\n metrop = math.exp(delta)\n rand = random.random()\n if rand < metrop:\n accepted = True\n\n if accepted:\n self._swap_permutation(i, j, permutation_vector)\n self._swap_energies(i, j, energies)\n adaptor.update(i, True)\n else:\n adaptor.update(i, False)",
"def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)",
"def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)",
"def test_conjecture():\n print(\"Executing test_conjecture:\")\n\n theory=[]\n\n print(language.program_string(theory))\n for i in range(10):\n theory=conjecture.vary([theory], 0, [], steps=1)\n print(f\"Theory after {i+1} stages of variation:\")\n print(language.program_string(theory))",
"def main():\n\t# GET THE DIALOG CONTENT\n\tpolicyFlag = int(sys.argv[3])\n\tif policyFlag == 0:\n\t\texistedIterNum = 15000\n\t\texistedThetaFileName = 'algorithms/theta/cycle_tree/of0w1.0g5v0.0625l0.05'\n\telif policyFlag == 1:\n\t\texistedIterNum = 10000\n\t\texistedThetaFileName = 'algorithms/theta/cycle_tree/of0w1.0g5v0.0625l0.05Retroflex'\n\n\tqLearnOpts = {'gamma': 1.0, \n\t\t\t\t 'alpha': 0.0, \n\t\t\t\t 'epsilon': 0.0}\n\tnumOfTurn = util.returnConvertedIndexListCount('b','cycle_tree')\n\tnumofgauss = 5\n\tvar = 0.0625\n\tlamda = 0.05\n\tunitNum = 101\n\ta = qlearningAgents.FittedQLearningAgent(numOfTurn,numofgauss,var,lamda,unitNum, **qLearnOpts)\t\t\n\ta.openThetaFile(existedThetaFileName,existedIterNum)\n\n\tturnNum = int(sys.argv[1])\n\tuserUnitScore = []\n\tuserUnitScoreVector = sys.argv[2].split(',')\n\tfor i in userUnitScoreVector:\n\t\t\tuserUnitScore.append(float(i)/100.0)\n\n\tstate = State.State(turnNum, userUnitScore)\n\tprint a.getAction(state)",
"def run(n_trials, params):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n a.set_Qtable(params['epsilon'], params['gamma'], params['learn_rate'], params['lrconst'],\n params['learn_rate_decay'], params['epsconst'], params['epsilon_decay'])\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=n_trials) # run for a specified number of trials\n\n return a.reached_destination",
"def scenario3(sim):\n\n init_logging(logfile=None, debug=True)\n second = 1000.0\n duration = 10\n tau_m = 20 # ms\n cm = 1.0 # nF\n v_reset = -60\n cell_parameters = dict(\n tau_m=tau_m,\n cm=cm,\n v_rest=-70,\n e_rev_E=0,\n e_rev_I=-70,\n v_thresh=-54,\n v_reset=v_reset,\n tau_syn_E=5,\n tau_syn_I=5,\n )\n g_leak = cm / tau_m # µS\n\n w_min = 0.0 * g_leak\n w_max = 0.05 * g_leak\n\n r1 = 5.0\n r2 = 40.0\n\n sim.setup()\n pre = sim.Population(100, sim.SpikeSourcePoisson())\n post = sim.Population(10, sim.IF_cond_exp())\n\n pre.set(duration=duration * second)\n pre.set(start=0.0)\n pre[:50].set(rate=r1)\n pre[50:].set(rate=r2)\n assert_equal(pre[49].rate, r1)\n assert_equal(pre[50].rate, r2)\n post.set(**cell_parameters)\n post.initialize(v=RandomDistribution('normal', mu=v_reset, sigma=5.0))\n\n stdp = sim.STDPMechanism(\n sim.SpikePairRule(tau_plus=20.0, tau_minus=20.0,\n A_plus=0.01, A_minus=0.01),\n sim.AdditiveWeightDependence(w_min=w_min, w_max=w_max),\n #dendritic_delay_fraction=0.5))\n dendritic_delay_fraction=1)\n\n connections = sim.Projection(pre, post, sim.AllToAllConnector(),\n synapse_type=stdp,\n receptor_type='excitatory')\n\n initial_weight_distr = RandomDistribution('uniform', low=w_min, high=w_max)\n connections.randomizeWeights(initial_weight_distr)\n initial_weights = connections.get('weight', format='array', gather=False)\n # assert initial_weights.min() >= w_min\n # assert initial_weights.max() < w_max\n # assert initial_weights[0, 0] != initial_weights[1, 0]\n\n pre.record('spikes')\n post.record('spikes')\n post[0:1].record('v')\n\n sim.run(duration * second)\n\n actual_rate = pre.mean_spike_count() / duration\n expected_rate = (r1 + r2) / 2\n errmsg = \"actual rate: %g expected rate: %g\" % (actual_rate, expected_rate)\n assert abs(actual_rate - expected_rate) < 1, errmsg\n #assert abs(pre[:50].mean_spike_count()/duration - r1) < 1\n #assert abs(pre[50:].mean_spike_count()/duration- r2) < 1\n final_weights = connections.get('weight', format='array', gather=False)\n assert initial_weights[0, 0] != final_weights[0, 0]\n\n try:\n import scipy.stats\n except ImportError:\n raise SkipTest\n t, p = scipy.stats.ttest_ind(initial_weights[:50, :].flat, initial_weights[50:, :].flat)\n assert p > 0.05, p\n t, p = scipy.stats.ttest_ind(final_weights[:50, :].flat, final_weights[50:, :].flat)\n assert p < 0.01, p\n assert final_weights[:50, :].mean() < final_weights[50:, :].mean()\n sim.end()\n return initial_weights, final_weights, pre, post, connections",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')",
"def solve(num_wizards, num_constraints, wizards, constraints):\n print(\"starting time\")\n start = time.time()\n print(\"hello\")\n end = time.time()\n print(\"time taken\",end - start)\n random.shuffle(wizards)\n assignment = wizards\n loop_counter = 0\n temperature = 10.0\n while not numSat(constraints, assignment) == len(constraints):\n loop_counter += 1\n if loop_counter % 500 == 0:\n print(\"numSat\", numSat(constraints, assignment))\n print(\"loops\",loop_counter)\n print(\"time taken\", time.time()-start)\n a = random.sample([i for i in range(len(wizards))], 1)\n b = random.sample([i for i in range(len(wizards))], 1)\n new_assignment = swap(assignment, a[0], b[0])\n delta = numSat(constraints, assignment) - numSat(constraints, new_assignment)\n if delta < 0:\n print(\"delta negative\")\n assignment = new_assignment\n else: \n # print(\"sample greater than expo\")\n sample = random.uniform(0.0,1.0)\n expo = np.exp(-1*float(delta) / temperature)\n if sample < expo:\n # print(\"sample less than expo\")\n assignment = new_assignment\n temperature -= 0.2\n\n print(\"Solved BITCH! numSat:\", numSat(constraints, assignment))\n return assignment",
"def One_Decision(self):\n\n # EP.Population.one_year(self) # performs statistics\n\n agent = self.selectIndividual() # agent who will play the game \n # print agent.ID, 'about to move'\n self.CallsSinceLastMove += 1\n if agent.decisionToMove() and agent.moves():\n self.Moves += 1\n self.CallsSinceLastMove = 0\n # if self.popSize: self.Observer.season(self.Moves // self.popSize) # sets StepId\n self.Observer.season() # sets StepId\n # print(self.Observer.StepId)\n if self.Observer.Visible(): # time for display\n Satisfactions = self.satisfaction()\n for (Colour, Satisfaction) in Satisfactions:\n self.Observer.curve(Name='%s Satisfaction' % str(Colour), Value=Satisfaction)\n # if Satisfactions:\n # self.Observer.curve(Name='Global Satisfaction', Value=sum([S for (C,S) in Satisfactions])/len(Satisfactions))\n \n if self.CallsSinceLastMove > 10 * self.popSize:\n return False # situation is probably stable\n return True # simulation goes on",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards",
"def simulation(self, n = 42):\n\n self.initialisation()\n i = 0\n while i < n and self.agent.vivant :\n self.step()\n i+= 1\n return self.perfGlobale",
"def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right',\n 'Left',\n 'Suck',\n 'NoOp']))",
"def simulation():\n # initialize action set\n action_set = np.zeros(int((s.MAX_INSPECT - s.MIN_INSPECT) / s.DELTA) + 3)\n x, i = s.MIN_INSPECT, 1\n while x <= s.MAX_INSPECT:\n action_set[i] = x\n x += s.DELTA\n i += 1\n action_set[-1] = np.inf\n action_number = len(action_set)\n\n # initialize current state\n current_state = math.floor(np.random.rand(1) * s.NUM_STATES)\n\n # initialize action index\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n\n if current_state != 0 and current_state != s.NUM_STATES - 1:\n action_index = action_number - 2\n\n # initialize policy set\n greedy_policy = np.zeros(s.NUM_STATES)\n greedy_policy[-1] = np.inf\n for i in range(1, s.NUM_STATES - 1):\n greedy_policy[i] = s.MAX_INSPECT\n\n visit_times = np.zeros([s.NUM_STATES, action_number])\n\n # initialization for simulation\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(greedy_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = last_value\n # ave_vector = np.matmul(stable_prob, falpha)\n # ave_estimate = ave_vector.tolist()\n each_transit_cost, each_transit_time, total_reward = (0 for i in range(3))\n\n # initialize DQN model if selected\n dqn = DQN() if MODEL == 1 else None\n # initialize Q-table if Q-learning selected\n q_factor = ql.init_q_factor(action_number) if MODEL == 2 else None\n\n for out_step in range(s.EPOCH):\n epsilon = s.EPSILON_1 if MODEL == 1 else s.EPSILON_2\n\n for inner_step in range(s.EPOCH_LEARN):\n\n visit_times[current_state, action_index] += 1\n current_action = greedy_policy[current_state]\n\n inspect_cost = 0 if current_state == s.NUM_STATES - 1 else s.K5 * current_action\n\n flag, sojourn_T, service_T, next_state = state_transition(current_state, current_action)\n each_transit_time = s.DISCOUNT * each_transit_time + (sojourn_T - each_transit_time) / pow(\n out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP)\n end_sojourn_T = math.exp(- s.ALPHA * sojourn_T)\n end_serve_T = math.exp(- s.ALPHA * service_T)\n\n if s.ALPHA == 0:\n dis_T, dis_serve_T, dis_wait_T = sojourn_T, service_T, sojourn_T - service_T\n else:\n dis_T, dis_serve_T = (1 - end_sojourn_T) / s.ALPHA, (1 - end_serve_T) / s.ALPHA\n dis_wait_T = (end_serve_T - end_sojourn_T) / s.ALPHA\n\n if flag == 0: # no processing, waiting\n cost_real = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * sojourn_T + inspect_cost\n cost_purt = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * dis_T + inspect_cost\n else: # no waiting, processing\n cost_real = s.K1 * (s.NUM_STATES - current_state - 1) * sojourn_T + s.K2 * service_T + s.K3 * (\n sojourn_T - service_T) + s.K4 + inspect_cost\n cost_purt = s.K1 * (s.NUM_STATES - current_state - 1) * dis_T + s.K2 * dis_serve_T + s.K3 * dis_wait_T \\\n + s.K4 * end_serve_T + inspect_cost\n\n each_transit_cost = s.DISCOUNT * each_transit_cost + (cost_real - each_transit_cost) / (\n pow(out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP))\n\n ave_q_cost = each_transit_cost / each_transit_time\n # ave_estimate.append(ave_q_cost)\n cost_dis = cost_purt - ave_q_cost * dis_T\n\n if MODEL == 1:\n reward = - cost_dis\n dqn.store_transition(current_state, action_index, reward, next_state)\n if dqn.memory_counter >= s.MEMORY_CAPACITY:\n dqn.learn(s.EPOCH_LEARN, inner_step, PS)\n else:\n difference = cost_dis + end_sojourn_T * min(q_factor[next_state, :]) \\\n - q_factor[current_state, action_index]\n q_factor 
= ql.update_q_factor(q_factor, current_state, action_index, difference,\n visit_times, inner_step, PS)\n current_state = next_state # transit to next state\n\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n else:\n if MODEL == 1:\n action_index = int(dqn.choose_action(current_state, epsilon))\n if action_set[action_index] <= 1:\n greedy_policy[current_state] = action_set[action_index]\n else:\n greedy_policy[current_state] = 1\n else:\n if np.random.rand(1) < epsilon:\n action_index = int(np.floor(np.random.rand(1) * (action_number - 2)) + 1)\n else:\n # minimal_q_value = np.min(q_factor[current_state, :])\n action_index = np.argmin(q_factor[current_state, :])\n greedy_policy[current_state] = action_set[action_index]\n\n # store the policy learned from the iterations\n optimal_policy = greedy_policy\n\n if MODEL != 1:\n for i in range(1, s.NUM_STATES - 1):\n # minimal_q_value_temp = np.min(q_factor[i, :])\n action_index_temp = np.argmin(q_factor[i, :])\n optimal_policy[i] = action_set[action_index_temp]\n\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(optimal_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = np.concatenate((dis_value, last_value), axis=1)\n total_reward += - np.ndarray.item(last_value[0])\n # new_ave_cost = np.matmul(stable_prob, falpha)\n # ave_vector = np.concatenate((ave_vector, new_ave_cost))\n print(\"epoch: {} , the epoch reward is {}\".format(out_step, round(- np.ndarray.item(last_value[0]), 2)))\n\n # result = np.asarray(dis_value)\n print(\"total reward:\", total_reward)\n\n return dis_value, total_reward",
"def Main():\n numberOfPopulation = 350\n numberOfDays = 60\n \n simulation = Simulation(Covid19(), numberOfPopulation, numberOfDays, \"Covid 19 Simulation\")\n simulation.run() \n simulation = Simulation(Ebola(), numberOfPopulation, numberOfDays, \"Ebola Simulation\")\n simulation.run()",
"def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))",
"def simulationDelayedTreatment(numTrials):\n\n delays = [300,150,75,0]\n results = [[],[],[],[]]\n for place in range(0, 4):\n for trial in range(numTrials):\n viruses = []\n for num in range(100):\n viruses.append(ResistantVirus(0.1,0.05, {'guttagonol': False}, 0.005))\n patient = TreatedPatient(viruses, 1000)\n for delay in range(delays[place]):\n patient.update()\n patient.addPrescription(\"guttagonol\") \n for l in range(150):\n patient.update()\n results[place].append(patient.getTotalPop())\n pylab.hist(results[0])\n pylab.hist(results[1])\n pylab.hist(results[2])\n pylab.hist(results[3])\n pylab.show()\n for x in range(0, 10):",
"def test_result_reproducibility(monkeypatch):\n script = os.path.abspath(\"examples/scikitlearn-iris/main.py\")\n monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))\n config = \"orion_config.yaml\"\n\n orion.core.cli.main(\n [\"hunt\", \"--config\", config, \"python\", script, \"orion~choices([0.1])\"]\n )\n\n experiment = create_experiment(name=\"scikit-iris-tutorial\")\n assert experiment.stats is not None\n assert experiment.stats.best_evaluation == 0.6666666666666667",
"def main(_):\n description = xm.ExperimentDescription(\n 'HIS - trial=%d' % FLAGS.trial, tags=['his'])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.00000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line",
"def three_experiments_with_trials(family_with_trials, single_with_trials):",
"def simulationTwoDrugsDelayedTreatment(numTrials):\n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False, 'grimpex': False}\n mutProb = 0.005\n\n first_drug = 150\n second_drug = 300\n steps = first_drug + second_drug\n total_vs = [0 for i in range(steps)]\n resis_vs = list(total_vs)\n results = list(total_vs)\n\n for trial in range(numTrials):\n viruses = []\n for i in range(numViruses):\n viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n patient = TreatedPatient(viruses, maxPop)\n\n for step in range(steps):\n if step == first_drug:\n patient.addPrescription('guttagonol')\n elif step == second_drug:\n patient.addPrescription('grimpex')\n patient.update()\n total_vs[step] += patient.getTotalPop()\n resis_vs[step] += patient.getResistPop(['guttagonol'])\n resis_vs[step] += patient.getResistPop(['grimpex'])\n\n results.append(patient.getTotalPop())\n\n pylab.hist(results, 9)\n pylab.show()",
"def decision():\n return random.choice(['GoToNormal','GoToSleep'])",
"def trial(self):\n pass",
"def act(self, s, exploration, game, return_pred_opp=False):\n opponent_p = self.compute_opponent_model(s)\n # print(opponent_p)\n opponent_action = np.random.choice(\n opponent_p.size, size=1, p=opponent_p)[0]\n # agent_p = np.exp(self.Q[s][:, opponent_action])\n agent_p = self.compute_marginal_pi(s)\n if exploration and random.random() < self.episilon:\n agent_action = random.randint(0, self.action_num - 1)\n else:\n if self.verbose:\n for s in self.Q.keys():\n print('{}--------------'.format(self.id_))\n print('Q of agent {}: state {}: {}'.format(self.id_, s, str(self.Q[s])))\n # print('QAof agent {}: state {}: {}'.format(self.id_, s, str(self.Q_A[s])))\n # self.Q_A\n print('pi of agent {}: state {}: {}'.format(self.id_, s, self.pi[s]))\n # print('pi of opponent agent {}: state{}: {}'.format(self.id_, s, self.opponent_best_pi[s]))\n print('{}--------------'.format(self.id_))\n agent_action = StationaryAgent.sample(agent_p)\n if return_pred_opp:\n return agent_action, opponent_action\n else:\n return agent_action",
"def decideOnGoal(self):\r\n\r\n\t\tself.goalNode = self.simulationHandle.getMap().getRandomNode()"
] | [
"0.6240639",
"0.6103099",
"0.60925263",
"0.5984078",
"0.5937608",
"0.5916742",
"0.5768528",
"0.5740173",
"0.5682504",
"0.5681896",
"0.56587714",
"0.56468713",
"0.5616107",
"0.56043226",
"0.5596143",
"0.5585067",
"0.558289",
"0.5577875",
"0.5572243",
"0.55598336",
"0.55593103",
"0.5553036",
"0.55502874",
"0.55475104",
"0.55386823",
"0.5531397",
"0.5530812",
"0.5515855",
"0.55136627",
"0.5507623"
] | 0.799685 | 0 |
Probability-based grouping of categorical variables | def probability_categorical(feature, label):
    assert feature.nunique() > 2, 'feature must have more than 2 categories.'
t = pd.DataFrame({'feature':feature, 'label':label})
cat = label.unique()
cat = [(cat[i], cat[i+1]) for i in range(len(cat)-1)]
prob = label.value_counts(1).to_dict()
slope = [prob.get(i[0], 0)-prob.get(i[1], 0) for i in cat]
slope_dict = t.feature.value_counts(1).to_dict()
prob = t.groupby([ 'feature']).label.value_counts(1).to_dict()
slope_dict = {i:{'category_rate':slope_dict[i], 'slope':[prob.get((i,j[0]), 0)-prob.get((i,j[1]), 0) for j in cat]} for i in slope_dict}
for i in slope_dict:
slope_dict[i]['slope_diff'] = sum([abs(slope[j]-slope_dict[i]['slope'][j]) for j in range(len(slope))])
value1 = sorted([[[i], slope_dict[i]['slope_diff'], slope_dict[i]['category_rate']] for i in slope_dict], key=lambda x:x[1], reverse=1)
distance = sorted([value1[i][1]-value1[i+1][1] for i in range(len(value1)-1)])
std = pd.Series([i[1] for i in value1]).std()
coupe = value1
dis = distance[0]
for k in distance:
value = value1
while 1:
for i in range(len(value)-1):
if value[i][1]-k<value[i+1][1]:
value[i+1][0] = value[i][0]+value[i+1][0]
value[i+1][1] = value[i][1]*value[i][2]/(value[i][2]+value[i+1][2])+value[i+1][1]*value[i+1][2]/(value[i][2]+value[i+1][2])
value[i+1][2] = value[i][2]+value[i+1][2]
value.remove(value[i])
break
if i==len(value)-2:
break
if pd.Series([i[1] for i in value]).std()>std:
coupe = value
std = pd.Series([i[1] for i in value]).std()
dis = k
return {'group':{k:i for i,j in enumerate(coupe) for k in j[0]}, 'data':coupe,
'distance':dis, 'distance_index':f'{distance.index(dis)+1}/{len(distance)}', 'std':std} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ppf(self,x):\n return self.categoricalDist.ppf(x)",
"def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)",
"def categorical(pvals: np.ndarray) -> int:\n\n return sample_probabilities(pvals)() # faster than: np.argmax(np.random.multinomial(1, normalize(pvals)))",
"def pdf(self,x):\n return self.categoricalDist.pdf(x)",
"def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):",
"def _class_distribution(y):\n unique, counts = np.unique(y, return_counts = True)\n\n percentages = counts / np.sum(counts)\n\n return unique, counts, percentages",
"def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]",
"def cdf(self,x):\n return self.categoricalDist.cdf(x)",
"def conditional_probability(data, attr, cp_table):\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # number of instances beloning to each class\n nclass0 = cp_table.loc[0, class0].sum()\n nclass1 = cp_table.loc[0, class1].sum()\n total = nclass0 + nclass1\n # all probabilities include a laplace est of 1\n prior0 = (nclass0 + 1) / (total + 2)\n prior1 = (nclass1 + 1) / (total + 2)\n list0 = []\n list1 = []\n for index, row in cp_table.iterrows():\n numattr = len(attr.loc[index, 'vars'])\n numer0 = row[class0] + 1\n numer1 = row[class1] + 1\n denom0 = nclass0 + (1 * numattr)\n denom1 = nclass1 + (1 * numattr)\n cp0 = numer0 / denom0\n cp1 = numer1 / denom1\n list0.append(cp0)\n list1.append(cp1)\n # replacing columns in previous table with cond probs\n del cp_table[class0]\n del cp_table[class1]\n cp_table[class0] = list0\n cp_table[class1] = list1\n \n return cp_table, prior0, prior1",
"def feature_prob(self, f, cat):\n if self.category_count(cat) == 0:\n return 0\n # The total number of times this feature appeared in this \n # category divided by the total number of items in this category\n pfc = self.feature_count(f, cat)\n pc = self.category_count(cat)\n return float(pfc)/pc",
"def age_group_mixing():\n p = []\n for j in np.linspace(1,5,5):\n for k in np.linspace(1,5,5):\n if j == k:\n p.append(1)\n else:\n p.append(0.2**np.abs(j+1-k))\n p /= sum(p)\n return p",
"def probabilities(self):\n raise NotImplementedError",
"def evaluate_probabilities(self, batches):\n total_batches = batches.batches_per_epoch()\n catprobs = []\n for batch in range(total_batches):\n X_batch, y_batch = batches.get_batch()\n feed_dict = {\n self.x: X_batch,\n self.y: y_batch,\n self.keep_prob: 1.0}\n fetch_dict = {\n \"catprobs\": self.categorical_probabilities}\n result = self.session.run(fetch_dict, feed_dict)\n catprobs.append(result[\"catprobs\"])\n catprobs = np.concatenate(catprobs)\n return catprobs",
"def calc_feature_probs(image_type, image_data, smoothing):\n counts = np.array([np.sum(image_data.features[image_data.labels == value], axis=0) + smoothing for value in range(image_type.categories)])\n denoms = np.array([np.count_nonzero(image_data.labels == value) + (smoothing * image_type.feature_kinds) for value in range(image_type.categories)])\n return counts / denoms[:, np.newaxis, np.newaxis]",
"def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no",
"def feature_prob(self, f, category): # Pr(A|B)\r\n if self.get_category_count(category) == 0:\r\n return 0\r\n fp = self.get_feature_count(f, category) / self.get_category_count(category)\r\n print \"Feature: %s | Feature count: %s | Category count: %s | Feature probability: %s\" % (f, self.get_feature_count(f, category), self.get_category_count(category), fp)\r\n return fp",
"def get_probs(self, *vars):\n freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])\n k = np.prod([len(v.values) for v in vars])\n return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k)",
"def proportion_with_cardinals(df, PATH):\n \n df_test = df.copy()\n df_test['cardinal'] = df.title.apply(contains_cardinal)\n\n click = df_test[df_test.target == 1]\n non = df_test[df_test.target == 0]\n click = click.groupby(['cardinal']).target.count()\n non = non.groupby(['cardinal']).target.count()\n \n non = non[1]/non[0] * 100\n click = click[1]/click[0] * 100\n # plot the results\n fig, ax = plt.subplots(figsize=(12,6))\n sns.barplot(x=['Normal', \"Clickbait\"], y=[non, click], ax=ax)\n plt.title(\"Percent of Titles Containing Cardinal Numbers\", size = 24)\n plt.xlabel(\"Article Class\", size=24)\n plt.ylabel(\"Percent %\", size = 24)\n plt.ylim(0, 100)\n plt.xticks([0,1], label=[\"Normal\", \"Clickbait\"], size=24)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n \n return ax",
"def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01",
"def prob(self, doc, cat):\n catprob = self.category_count(cat) / self.total_count() # Pr(Category)\n docprob = self.doc_prob(doc, cat) # Pr(Document | Category)\n return docprob*Decimal(str(catprob)) # Pr(Category | Document)",
"def categorical_sample(prob_n, np_random):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np_random.rand()).argmax()",
"def categorical_sample(prob_n, np_random):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np_random.rand()).argmax()",
"def _predict(self, probabilities):\n child_categories = []\n for i in range(0, self.category_level):\n child_categories.append({})\n for category_label in self.classifiers[i].classes_:\n main_category = self._get_categories(category_label)[0]\n if main_category not in child_categories[i]:\n child_categories[i][main_category] = []\n child_categories[i][main_category].append(category_label)\n\n # find the primary category\n max_score = -1\n primary_category_label = None\n\n for i in range(0, self.category_level):\n for category_label in self.classifiers[i].classes_:\n if probabilities[category_label] < 1e-9:\n continue\n total_score = 0\n main_category = self._get_categories(category_label)[0]\n candidates = child_categories[i][main_category]\n for actual_label in candidates:\n probability = probabilities[actual_label]\n if probability < 1e-9:\n continue\n score = self._cal_score(category_label, None, actual_label, i)\n total_score += score * probability\n if total_score > max_score:\n max_score = total_score\n primary_category_label = category_label\n\n # find the secondary category\n max_score = -1\n secondary_category_label = None\n for i in range(0, self.category_level):\n for category_label in self.classifiers[i].classes_:\n if probabilities[category_label] < 1e-9 and secondary_category_label:\n continue\n if category_label == primary_category_label:\n continue\n total_score = 0\n main_category = self._get_categories(category_label)[0]\n main_category2 = self._get_categories(primary_category_label)[0]\n candidates = list(set(child_categories[i][main_category] + child_categories[i][main_category2]))\n for actual_label in candidates:\n probability = probabilities[actual_label]\n if probability < 1e-9:\n continue\n score = self._cal_score(primary_category_label, category_label, actual_label, i)\n total_score += score * probability\n if total_score > max_score:\n max_score = total_score\n secondary_category_label = category_label\n\n return [self._get_categories(primary_category_label), self._get_categories(secondary_category_label)]",
"def classify_samples(nd, sample_list, cat_spec):\n tmp = []\n\n for key in cat_spec:\n tmp.append([PSD_sym_KL(psd, cat_spec[key]) for psd in nd])\n\n KL = np.array(tmp).T\n\n # This is a confusing formula\n # amounts to: for a given sample, prob of belonging to class k is:\n # (1 / KL_k) / sum_k(KL_i) = sum_k\\i(KL_i) / sum_k(KL_i)\n prob = ((1 / KL).T / (1 / KL).sum(axis=1)).T\n\n row_masks = np.array([row == row.max() for row in prob])\n cats = [cat_spec.columns[mask][0] for mask in row_masks]\n\n items = [('label', cats)] + [('P({})'.format(lab), p) for lab, p in zip(cat_spec.columns, prob.T)]\n df = pd.DataFrame.from_items(items)\n df.index = sample_list\n\n return df",
"def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs",
"def getClassCounts(column, uniqueVal, decision, yes, no , total):\r\n dataDict = {} # a dictionary of labels\r\n for val in uniqueVal:\r\n label1 = val + '/Y'\r\n label2 = val + '/N'\r\n dataDict[label1] = 0; dataDict[label2] = 0\r\n for dec, at in zip(decision, column):\r\n if at == val and dec == 'No':\r\n dataDict[label2] += 1\r\n if at == val and dec == 'Yes':\r\n dataDict[label1] += 1\r\n dataDict[val] = (dataDict[label2]+ dataDict[label1])/ total\r\n dataDict[label2] = dataDict[label2] / no\r\n dataDict[label1] = dataDict[label1] / yes\r\n return dataDict",
"def probability(self, xvalue):\n # Initialize key variables\n probability = {}\n bayesian = {}\n classes = self.classes()\n\n # Calculate the principal components of the individual xvalue\n p1p2 = self.pca_object.pc_of_x(xvalue, self.components)\n\n # Get probability of each class\n for cls in classes:\n # Initialize values for the loop\n sample_count = len(self.pca_object.xvalues(cls))\n\n # Get values for calculating gaussian parameters\n dimensions = len(p1p2)\n x_mu = p1p2 - self.meanvector(cls)\n covariance = self.covariance(cls)\n inverse_cov = np.linalg.inv(covariance)\n determinant_cov = np.linalg.det(covariance)\n\n # Work on the exponent part of the bayesian classifer\n power = -0.5 * np.dot(np.dot(x_mu, inverse_cov), x_mu.T)\n exponent = math.pow(math.e, power)\n\n # Determine the constant value\n pipart = math.pow(2 * math.pi, dimensions / 2)\n constant = pipart * math.sqrt(determinant_cov)\n\n # Determine final bayesian\n bayesian[cls] = (sample_count * exponent) / constant\n\n # Calculate bayesian probability\n denominator = bayesian[classes[0]] + bayesian[classes[1]]\n for cls in classes:\n probability[cls] = bayesian[cls] / denominator\n\n # Return\n return probability",
"def learn_distributions(file_lists_by_category):\n ### TODO: Write your code here\n\n #get word frequncies in each email category\n #key:word, value: number of occurences in this email loader\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n\n #get total length of each email loader\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n\n #get the length of the dictionary: D\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n\n spam_distribution = {}\n ham_distribution = {}\n #get the distributions of two email loaders\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n #create the required tuple\n probabilities_by_category = (spam_distribution, ham_distribution)\n return probabilities_by_category",
"def categorical_sample(prob_n, np_random = None):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np.random.rand()).argmax()",
"def test_categorical():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)"
] | [
"0.6292472",
"0.6147138",
"0.60119295",
"0.59831697",
"0.5894537",
"0.5834089",
"0.5826",
"0.57258606",
"0.5708978",
"0.5685781",
"0.56797826",
"0.5673375",
"0.5663211",
"0.56225026",
"0.55870885",
"0.5568914",
"0.55597067",
"0.5559275",
"0.5555052",
"0.5546693",
"0.55196595",
"0.55196595",
"0.5501669",
"0.5496848",
"0.5495503",
"0.5462183",
"0.54366463",
"0.54021275",
"0.539517",
"0.53868943"
] | 0.70440626 | 0 |
Convert time_offsets to GPS timestamps and nanoseconds | def get_gps_timestamp(file, time_offset):
reference_date = get_reference_datetime(file)
absolute_date = get_absolute_datetime(reference_date, time_offset)
timestamp, nanosecond = datetime_to_gpstimestamp_nanoseconds(absolute_date)
return timestamp, nanosecond | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_gps_time(self):\n reason = \"[!] GPS timestamps are 10 digits\"\n ts_type = self.ts_types['gpstime']\n try:\n if not len(self.gps) == 10 or not self.gps.isdigit():\n self.in_gpstime = indiv_output = combined_output = False\n pass\n else:\n leapseconds = self.leapseconds\n gps_stamp = self.epoch_1980 + timedelta(seconds=(float(self.gps)))\n tai_convert = gps_stamp + timedelta(seconds=19)\n epoch_convert = (tai_convert - self.epoch_1970).total_seconds()\n check_date = dt.utcfromtimestamp(epoch_convert)\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n gps_out = check_date - timedelta(seconds=variance)\n self.in_gpstime = gps_out.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"{} {}\".format(ts_type, self.in_gpstime))\n combined_output = str(\"{}{}\\t\\t\\t{} UTC{}\".format(self.left_color, ts_type, self.in_gpstime, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_gpstime = indiv_output = combined_output = False\n return self.in_gpstime, indiv_output, combined_output, reason",
"def to_gps_time(self):\n ts_type = self.ts_types['gpstime']\n try:\n leapseconds = self.leapseconds\n check_date = duparser.parse(self.timestamp)\n if hasattr(check_date.tzinfo, '_offset'):\n dt_tz = check_date.tzinfo._offset.total_seconds()\n check_date = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n leap_correction = check_date + timedelta(seconds=variance)\n epoch_shift = leap_correction - self.epoch_1970\n gps_stamp = (dt.utcfromtimestamp(epoch_shift.total_seconds()) - self.epoch_1980).total_seconds() - 19\n gps_stamp = int(gps_stamp) - int(dt_tz)\n self.out_gpstime = str(gps_stamp)\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_gpstime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_gpstime = ts_output = False\n return self.out_gpstime, ts_output",
"def datetime_to_gpstimestamp_nanoseconds(date):\n timestamp = gpstime.utc_to_gps(calendar.timegm(date.utctimetuple()))\n nanosecond = date.microsecond * 1000\n\n return timestamp, nanosecond",
"def ms2pts(ms, dt):\n return int(ms/dt)",
"def calOffsets(self, Xi_arr, Vi_arr, hz):\n\n Wi_arr = [round(vi / hz, 6) for vi in Vi_arr] # tcptimestamps in seconds with microsecond precision\n Yi_arr = [(wi - xi) * 1000 for wi, xi in zip(Wi_arr, Xi_arr)] # offset in miliseconds\n offset_arr = [(round(x, 6), round(y, 6)) for x, y in zip(Xi_arr, Yi_arr)]\n return offset_arr",
"def normalize_timestamp(timestamp_series):\n # convert datetime strings into milliseconds from epoch\n times = pd.to_datetime(timestamp_series, format='%Y-%m-%d %H:%M:%S').astype(np.int64) // int(1e6)\n return times",
"def _calc_time(time_lines: list) -> np.ndarray:\n time = [time_to_fraction_hour(line.split()[1]) for line in time_lines]\n return np.array(time)",
"def pcr_delta_time_ms(pcr_t1, pcr_t2, offset = 0):\n return float(pcr_t2-pcr_t1)/90000.0 + offset",
"def ts_to_epoch_seconds(t) -> float:\n return t.astype(int) / 1e9",
"def timestamps():\n timestamps = ( # Index\n 1459516622.1, # 0\n 1459516622.2, # 1\n 1459516622.3, # 2\n 1459516623.0, # 3\n 1459516623.1, # 4\n 1459516623.3, # 5\n 1459516624.0, # 6\n )\n return timestamps",
"def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 :\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n self.swaths[self.component]['Geolocation Fields']['Datetime'] = var",
"def parseLabels(labels):\n\n timestamp = [(parseTimestamp(e['gameTime']), e['label']) for e in labels['annotations']]\n \n return timestamp",
"def normalize_time(full_timestamps, half_timestamp):\n phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])\n return phases",
"def pts2ms(pts, dt):\n return pts*dt",
"def timestamp_processor(timestamps, with_tz=False, check_delta=False):\n if timestamps.count(timestamps[0]) == len(timestamps):\n unified_timestamp = timestamps[0]\n else:\n average_timestamp = sum([dt.timestamp for dt in timestamps])/len(timestamps)\n unified_timestamp = arrow.get(average_timestamp)\n\n if check_delta:\n for ts in timestamps:\n delta = unified_timestamp - arrow.get(ts)\n second_difference = abs(delta.total_seconds())\n\n if second_difference > 3600:\n # more than 1 hour difference\n raise ValueError(\"\"\"South Korea generation data is more than 1 hour apart,\n saw {} hours difference\"\"\".format(second_difference/3600))\n\n if with_tz:\n unified_timestamp = unified_timestamp.replace(tzinfo='Asia/Seoul')\n\n return unified_timestamp",
"def GPSlatlon2XY_time(lat_u, lon_u, theta):\n\n\trho_u = np.sqrt(np.power(lon_u, 2) + np.power(lat_u, 2))\n\ttheta_new_u = np.arctan2(lat_u, lon_u) - theta\n\n\tUx, Uy = rho_u * np.cos(theta_new_u), rho_u * np.sin(theta_new_u)\n\n\treturn Ux, Uy",
"def _TIME2STEPS(time):\n return int(time*1000)",
"def unixTimeConv(timestamps):\n\n\tnewTime = str(datetime.datetime.fromtimestamp(int(timestamps)))\n\tyearDate,timeT = newTime.split(' ')\n\tyear,month,day = str(yearDate).split('-')\n\thour,minutes,sec = timeT.split(':')\n\tsplitTimes = (year,month,day,hour,minutes,sec,timestamps)\n\n\treturn(splitTimes)",
"def transform_timestamps(time_tracker):\n def calculate_timediff(t1, t2):\n return (t2 - t1).seconds + (t2 - t1).microseconds/1000000\n\n durations = dict()\n\n durations[\"Initialization\"] \\\n = round(calculate_timediff(time_tracker[\"time_start\"],\n time_tracker[\"after_init\"]), 3)\n\n durations[\"Configuration\"] \\\n = round(calculate_timediff(time_tracker[\"after_init\"],\n time_tracker[\"after_config\"]), 3)\n\n iter_list = []\n for i, iteration in enumerate(time_tracker[\"iterations\"]):\n if i == 0:\n iter_list\\\n .append(round(calculate_timediff(time_tracker[\"after_config\"],\n iteration), 3))\n else:\n iter_list\\\n .append(round(calculate_timediff(\n time_tracker[\"iterations\"][i-1], iteration), 3))\n durations[\"Iterations\"] = iter_list\n\n durations[\"Finalization\"] \\\n = round(calculate_timediff(time_tracker[\"iterations\"][-1],\n time_tracker[\"finish\"]), 3)\n durations[\"Total\"] \\\n = round(durations[\"Initialization\"] + durations[\"Configuration\"]\n + sum(durations[\"Iterations\"]) + durations[\"Finalization\"], 3)\n\n return durations",
"def time_unwrap(val_timestamps):\n a=val_timestamps.shape[0]\n val_time =val_timestamps.astype('int64')\n for i in range(a-1):\n if val_time[i+1]-val_time[i]<-1*2**25:\n val_time[i+1:]+=2**26\n\n return(val_time)",
"def from_deltatime(self,\n delta_time: np.ndarray,\n epoch: str | tuple | list | np.ndarray,\n standard: str = 'UTC'\n ):\n # assert delta time is an array\n delta_time = np.atleast_1d(delta_time)\n # calculate leap seconds if specified\n if (standard.upper() == 'GPS'):\n GPS_Epoch_Time = convert_delta_time(0, epoch1=epoch,\n epoch2= _gps_epoch, scale=1.0)\n GPS_Time = convert_delta_time(delta_time, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n # calculate difference in leap seconds from start of epoch\n self.leaps = count_leap_seconds(GPS_Time) - \\\n count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))\n elif (standard.upper() == 'LORAN'):\n # LORAN time is ahead of GPS time by 9 seconds\n GPS_Epoch_Time = convert_delta_time(-9.0, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n GPS_Time = convert_delta_time(delta_time - 9.0, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n # calculate difference in leap seconds from start of epoch\n self.leaps = count_leap_seconds(GPS_Time) - \\\n count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))\n elif (standard.upper() == 'TAI'):\n # TAI time is ahead of GPS time by 19 seconds\n GPS_Epoch_Time = convert_delta_time(-19.0, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n GPS_Time = convert_delta_time(delta_time-19.0, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n # calculate difference in leap seconds from start of epoch\n self.leaps = count_leap_seconds(GPS_Time) - \\\n count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))\n else:\n self.leaps = 0.0\n # convert time to days relative to Modified Julian days in UTC\n self.MJD = convert_delta_time(delta_time - self.leaps,\n epoch1=epoch, epoch2=_mjd_epoch, scale=(1.0/self.day))\n return self",
"def epochCalc(timestamps):\n\tsplitTimes = unixTimeConv(timestamps)\n\tepochTimes = []\n\thour=int(splitTimes[3])\n\n\tif (hour >0 and hour <=9) or hour>=23:\n\t\tepoch='night'\n\telse:\n\t\tepoch='not_night'\n\tepochTimes.append((epoch,splitTimes[6]))\n\treturn epochTimes",
"def map_scan_time(time, nmap_store):\n nmap_store[\"start_time\"] = datetime.datetime.fromtimestamp(int(time.get('starttime')))\n nmap_store[\"stop_time\"] = datetime.datetime.fromtimestamp(int(time.get('endtime')))",
"def from_Timestamp(timestamp):\n # type: (timestamp_pb2.Timestamp) -> float\n return timestamp.seconds + float(timestamp.nanos) / 10**9",
"def get_times_from_utterance(utterance: str,\n char_offset_to_token_index: Dict[int, int],\n indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:\n\n pm_linking_dict = _time_regex_match(r'\\d+pm',\n utterance,\n char_offset_to_token_index,\n pm_map_match_to_query_value,\n indices_of_approximate_words)\n\n am_linking_dict = _time_regex_match(r'\\d+am',\n utterance,\n char_offset_to_token_index,\n am_map_match_to_query_value,\n indices_of_approximate_words)\n\n oclock_linking_dict = _time_regex_match(r\"\\d+ o'clock\",\n utterance,\n char_offset_to_token_index,\n lambda match: digit_to_query_time(match.rstrip(\" o'clock\")),\n indices_of_approximate_words)\n\n hours_linking_dict = _time_regex_match(r\"\\d+ hours\",\n utterance,\n char_offset_to_token_index,\n lambda match: [int(match.rstrip(\" hours\"))],\n indices_of_approximate_words)\n\n\n times_linking_dict: Dict[str, List[int]] = defaultdict(list)\n linking_dicts = [pm_linking_dict, am_linking_dict, oclock_linking_dict, hours_linking_dict]\n\n for linking_dict in linking_dicts:\n for key, value in linking_dict.items():\n times_linking_dict[key].extend(value)\n\n return times_linking_dict",
"def _convert_timestamp_2_periodic_time(self, timestamp):\n \n l = \"\"\n\n # daily periodic\n theta = self.two_pi_by_one_day_second * (int(timestamp[0:-3]) % self.one_day_second)\n #x = 1 + np.cos(theta)\n #y = 1 + np.sin(theta)\n x = np.cos(theta)\n y = np.sin(theta)\n l += str(x) + \",\" + str(y)\n l += \",\"\n\n # weekly periodic\n theta = self.two_pi_by_seven_days_second * (int(timestamp[0:-3]) % self.seven_days_second)\n # no need plus one?\n #x = 1 + np.cos(theta)\n #y = 1 + np.sin(theta)\n x = np.cos(theta)\n y = np.sin(theta)\n l += str(x) + \",\" + str(y)\n\n return l",
"def ClockUsToTimestamp(clock_us, reference_clock_us, reference_timestamp):\n\n return reference_timestamp + (clock_us - reference_clock_us) / 1.0e6",
"def to_gps_time(self):\n try:\n iso_time = Time(timestamp, format='iso', scale='utc')\n iso_time.format='gps'\n self.out_gpstime = str(iso_time)\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_gpstime = False\n return self.out_gpstime",
"def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)",
"def get_time_sec(gpx_track):\n start_time = gpx_track.segments[0].points[0].time\n time_in_sec = np.empty([gpx_track.get_points_no(), 1])\n n = 1\n for segment in gpx_track.segments:\n for point in segment.points:\n curr_time = point.time\n time_in_sec[n] = curr_time - start_time\n n = n + 1\n\n return time_in_sec, start_time"
] | [
"0.6133533",
"0.5981217",
"0.5839412",
"0.5720046",
"0.5719699",
"0.5716344",
"0.5671996",
"0.56716424",
"0.56283784",
"0.5613697",
"0.55957836",
"0.55948985",
"0.55543983",
"0.5532485",
"0.55197525",
"0.54900724",
"0.54886615",
"0.54784024",
"0.5435569",
"0.5418071",
"0.54071826",
"0.5405543",
"0.5402045",
"0.5390691",
"0.5362241",
"0.5346637",
"0.53447425",
"0.53435993",
"0.5324789",
"0.5324387"
] | 0.674219 | 0 |
Convert datetime objects to GPS timestamp and nanoseconds | def datetime_to_gpstimestamp_nanoseconds(date):
timestamp = gpstime.utc_to_gps(calendar.timegm(date.utctimetuple()))
nanosecond = date.microsecond * 1000
return timestamp, nanosecond | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_gps_time(self):\n ts_type = self.ts_types['gpstime']\n try:\n leapseconds = self.leapseconds\n check_date = duparser.parse(self.timestamp)\n if hasattr(check_date.tzinfo, '_offset'):\n dt_tz = check_date.tzinfo._offset.total_seconds()\n check_date = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n leap_correction = check_date + timedelta(seconds=variance)\n epoch_shift = leap_correction - self.epoch_1970\n gps_stamp = (dt.utcfromtimestamp(epoch_shift.total_seconds()) - self.epoch_1980).total_seconds() - 19\n gps_stamp = int(gps_stamp) - int(dt_tz)\n self.out_gpstime = str(gps_stamp)\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_gpstime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_gpstime = ts_output = False\n return self.out_gpstime, ts_output",
"def from_gps_time(self):\n reason = \"[!] GPS timestamps are 10 digits\"\n ts_type = self.ts_types['gpstime']\n try:\n if not len(self.gps) == 10 or not self.gps.isdigit():\n self.in_gpstime = indiv_output = combined_output = False\n pass\n else:\n leapseconds = self.leapseconds\n gps_stamp = self.epoch_1980 + timedelta(seconds=(float(self.gps)))\n tai_convert = gps_stamp + timedelta(seconds=19)\n epoch_convert = (tai_convert - self.epoch_1970).total_seconds()\n check_date = dt.utcfromtimestamp(epoch_convert)\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n gps_out = check_date - timedelta(seconds=variance)\n self.in_gpstime = gps_out.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"{} {}\".format(ts_type, self.in_gpstime))\n combined_output = str(\"{}{}\\t\\t\\t{} UTC{}\".format(self.left_color, ts_type, self.in_gpstime, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_gpstime = indiv_output = combined_output = False\n return self.in_gpstime, indiv_output, combined_output, reason",
"def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)",
"def to_timestamp(date_time: datetime, unit: TimeUnit = TimeUnit.SECONDS) -> float:\n return date_time.replace(tzinfo=timezone.utc).timestamp() * (1000 ** int(unit))",
"def to_avro(date_time: datetime.datetime) -> float:\n if date_time.tzinfo:\n ts = (date_time - utils.epoch).total_seconds()\n else:\n ts = (date_time - utils.epoch_naive).total_seconds()\n\n return ts * 1000",
"def from_Timestamp(timestamp):\n # type: (timestamp_pb2.Timestamp) -> float\n return timestamp.seconds + float(timestamp.nanos) / 10**9",
"def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)",
"def date_to_nano(ts):\n return calendar.timegm(ts.utctimetuple()) * int(1e3)",
"def get_gps_timestamp(file, time_offset):\n reference_date = get_reference_datetime(file)\n absolute_date = get_absolute_datetime(reference_date, time_offset)\n timestamp, nanosecond = datetime_to_gpstimestamp_nanoseconds(absolute_date)\n\n return timestamp, nanosecond",
"def to_stamp(datetime_):\r\n try:\r\n return datetime_.timestamp()\r\n except AttributeError:\r\n return time.mktime(datetime_.timetuple()) + datetime_.microsecond / 1e6",
"def convertToEST(timestamp):\n newDateTime = datetime.datetime.fromtimestamp(timestamp/1000)\n return newDateTime.date(), newDateTime.time()",
"def to_gps_time(self):\n try:\n iso_time = Time(timestamp, format='iso', scale='utc')\n iso_time.format='gps'\n self.out_gpstime = str(iso_time)\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_gpstime = False\n return self.out_gpstime",
"def to_unix_milli(self):\n ts_type = self.ts_types['unix_milli']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_unix_milli = str(int(((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))*1000))\n ts_output = str(\"{}\\t\\t{}\".format(ts_type, self.out_unix_milli))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_unix_milli = ts_output = False\n return self.out_unix_milli, ts_output",
"def datetime_to_epoch_microseconds(obj: \"datetime\") -> float:\n td = datetime_to_epoch_timedelta(obj)\n return (td.days * 86400 + td.seconds) * 10**6 + td.microseconds",
"def datetime_to_timestamp(dt):\n return calendar.timegm(dt.timetuple()) * 1000",
"def gps2Time(self):\n self.posting_date = Time(self.posting_gpstime, format=\"gps\")",
"def convert_datetime(\n date: float | np.ndarray,\n epoch: str | tuple | list | np.datetime64 = _unix_epoch\n ):\n # convert epoch to datetime variables\n if isinstance(epoch, (tuple, list)):\n epoch = np.datetime64(datetime.datetime(*epoch))\n elif isinstance(epoch, str):\n epoch = np.datetime64(parse(epoch))\n # convert to delta time\n return (date - epoch) / np.timedelta64(1, 's')",
"def convert_datetime(\n date: float | np.ndarray,\n epoch: str | tuple | list | np.datetime64 = _unix_epoch\n ):\n # convert epoch to datetime variables\n if isinstance(epoch, (tuple, list)):\n epoch = np.datetime64(datetime.datetime(*epoch))\n elif isinstance(epoch, str):\n epoch = np.datetime64(parse(epoch))\n # convert to delta time\n return (date - epoch) / np.timedelta64(1, 's')",
"def ts_to_epoch_seconds(t) -> float:\n return t.astype(int) / 1e9",
"def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)",
"def to_pydatetime(self) -> npt.NDArray[np.object_]:\n return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)",
"def datetime_to_timestamp(obj: \"datetime\") -> \"Timestamp\":\n td = datetime_to_epoch_timedelta(obj)\n ts = Timestamp()\n ts.seconds = td.seconds + td.days * _SECONDS_PER_DAY\n ts.nanos = td.microseconds * _NANOS_PER_MICROSECOND\n return ts",
"def to_ios_time(self):\n ts_type = self.ts_types['iostime']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_iostime = str(int(((dt_obj - self.epoch_2001).total_seconds() - int(dt_tz)) * self.nano_2001))\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_iostime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_iostime = ts_output = False\n return self.out_iostime, ts_output",
"def hydrate_datetime(seconds, nanoseconds, tz=None):\n minutes, seconds = map(int, divmod(seconds, 60))\n hours, minutes = map(int, divmod(minutes, 60))\n days, hours = map(int, divmod(hours, 24))\n seconds = (1000000000 * seconds + nanoseconds) / 1000000000\n t = DateTime.combine(Date.from_ordinal(unix_epoch_date_ordinal + days), Time(hours, minutes, seconds))\n if tz is None:\n return t\n if isinstance(tz, int):\n tz_offset_minutes, tz_offset_seconds = divmod(tz, 60)\n zone = FixedOffset(tz_offset_minutes)\n else:\n zone = timezone(tz)\n return zone.localize(t)",
"def normalize_timestamp(timestamp_series):\n # convert datetime strings into milliseconds from epoch\n times = pd.to_datetime(timestamp_series, format='%Y-%m-%d %H:%M:%S').astype(np.int64) // int(1e6)\n return times",
"def epoch2time(time):\n\tvalue = datetime.datetime.fromtimestamp(time)\n\tNormal = value.strftime('%Y-%m-%d %H:%M:%S')\n\tprint(normal)\n\treturn normal",
"def np_dt_epoch_msec(value):\n return value.astype(long) / 1000",
"def to_prtime(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_prtime = str(int((dt_obj - self.epoch_1970).total_seconds() * 1000000))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_prtime = False\n return self.out_prtime",
"def _GetNormalizedTimestamp(self):\n if self._normalized_timestamp is None:\n if (self._number_of_seconds is not None and\n self.fraction_of_second is not None):\n self._normalized_timestamp = (\n decimal.Decimal(self._number_of_seconds) + self.fraction_of_second)\n\n return self._normalized_timestamp",
"def _datetime_to_timestamp(self, dt):\n return time.mktime(dt.timetuple())"
] | [
"0.64110595",
"0.6309177",
"0.6237445",
"0.61667824",
"0.61475044",
"0.61468357",
"0.6086212",
"0.606989",
"0.6046954",
"0.60396606",
"0.6038311",
"0.60068566",
"0.59911877",
"0.5953865",
"0.59505874",
"0.5947502",
"0.5941145",
"0.5941145",
"0.5921453",
"0.591261",
"0.58763117",
"0.58726496",
"0.58195144",
"0.5815126",
"0.5805712",
"0.57957864",
"0.579256",
"0.57904685",
"0.57684076",
"0.5761309"
] | 0.7270666 | 0 |
Get the reference datetime from the KNMI LGT file as datetime | def get_reference_datetime(file):
date_string = file.root.discharge1._f_getAttr('reference_datetime')[0]
ref_date = datetime.datetime.strptime(date_string, '%d-%b-%Y;%H:%M:%S.%f')
return ref_date | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_ref_time(self):\n from datetime import datetime, timedelta\n\n ref_time = datetime(2010, 1, 1, 0, 0, 0)\n ref_time += timedelta(seconds=int(self.fid['/PRODUCT/time'][0]))\n return ref_time",
"def get_file_date(self, file: str) -> date:",
"def extract_datetime(fpath):\n try:\n handle = open(fpath, 'rb')\n if hexlify(handle.read(2)) != hexlify(u'MZ'):\n handle.close()\n return\n except:\n return\n\n try:\n handle.seek(60, 0)\n offset = handle.read(4)\n offset = hexlify(offset[::-1])\n\n if offset == '':\n handle.close()\n return\n\n offset = int(offset, 16)\n handle.seek(offset+8, 0)\n dword = handle.read(4)\n handle.close()\n\n t = unpack(\">L\", dword[::-1])[0]\n except:\n return\n return datetime.datetime.fromtimestamp(t)",
"def _read_antti_datetime(dt_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if dt_file.split('.')[-1] == 'gz':\n ff = gzip.open(dt_file, 'r')\n else:\n ff = open(dt_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n ymdHMS = np.genfromtxt(sIO, comments=\"%\")\n DT = np.array([dt.datetime(*elem) for elem in ymdHMS.astype('int')])\n sIO.close()\n\n return DT",
"def _get_rec_datetime(self):\n\n rec_datetime = None\n date_string = ''\n time_string = ''\n datetime_string = ''\n\n if 'notes' not in self.info:\n return None\n\n for note_line in self.info['notes'].split('\\n'):\n\n # episodic acquisition mode\n if note_line.startswith('Created on '):\n date_string = note_line.strip('Created on ')\n if note_line.startswith('Start data acquisition at '):\n time_string = note_line.strip('Start data acquisition at ')\n\n # continuous acquisition mode\n if note_line.startswith('Created : '):\n datetime_string = note_line.strip('Created : ')\n\n if date_string and time_string:\n datetime_string = ' '.join([date_string, time_string])\n\n if datetime_string:\n try:\n rec_datetime = datetime.strptime(datetime_string,\n '%a %b %d %Y %H:%M:%S')\n except ValueError:\n pass\n\n return rec_datetime",
"def get_mod_time(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n raw_date_time = file_meta_plist['$objects'][1]['LastModified']\n converted_time = datetime.datetime.fromtimestamp(raw_date_time)\n converted_time = converted_time.timetuple()\n return converted_time\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['modified'].timetuple()",
"def extract_datetime(filename) -> datetime:\n date_part = filename[-26:-7]\n return datetime.strptime(date_part, '%Y-%m-%d_%H-%M-%S')",
"def _get_time(self): \n\t\t# need to variable-ize the version ??? \n\t\ttime = self.root.find('.//{http://www.opengis.net/kml/2.2}when').text\n\t\t## strip off last 5 chars, ie '.135Z in '2015-08-01T00:06:29.135Z'\n\t\tutc = dateutil.tz.tzutc() \n\t\tcentral = dateutil.tz.gettz('America/Chicago')\n\t\ttime = datetime.datetime.strptime(time[:-5], '%Y-%m-%dT%H:%M:%S')\n\t\ttime = time.replace(tzinfo=utc)\n\t\tself.time = time.astimezone(central)",
"def read_datetime(self):\n with GPIOTimingContentManager(self.gpio, start=self._start_tx, end=self._end_tx):\n self._write_byte(self.REG_BURST_READ)\n\n regs = list()\n for _ in range(self.REG_SIZE):\n regs.append(self._read_byte())\n\n # Decode bytes to datetime\n return datetime.datetime.strptime(\" \".join([\"{:x}\".format(x) for x in regs]), self.DT_STR_FMT)",
"def get_time(\n filepath,\n year_ref=2000,\n ):\n with xr.open_dataset(filepath) as fdata:\n # load time\n if 'Time' in fdata.dims:\n if 'xtime' in fdata.data_vars:\n xtime = fdata['xtime'].astype(str)\n elif 'xtime_startMonthly' in fdata.data_vars:\n xtime = fdata['xtime_startMonthly'].astype(str)\n else:\n print('Time variable not found. Using indices instead...')\n return np.arange(fdata.dims['Time'])\n else:\n return None\n time_str = [x.strip() for x in xtime.values]\n if int(time_str[0][:4]) < 1678:\n time_str = ['{:04d}'.format(int(s[:4])+year_ref)+s[4:] for s in time_str]\n time = pd.to_datetime(time_str, format='%Y-%m-%d_%H:%M:%S')\n return time",
"def ref_time(self) -> float:\n return ntp_to_system_time(self.ref_timestamp)",
"def reference_time(self):\n if hasattr(self, '_reference_time') is False:\n self._reference_time = self.midtime\n\n return self._reference_time",
"def _get_mrk_meas_date(mrk):\n info = get_kit_info(mrk, False)[0]\n meas_date = info.get('meas_date', None)\n if isinstance(meas_date, (tuple, list, np.ndarray)):\n meas_date = meas_date[0]\n if isinstance(meas_date, datetime):\n meas_datetime = meas_date\n elif meas_date is not None:\n meas_datetime = datetime.fromtimestamp(meas_date)\n else:\n meas_datetime = datetime.min\n return meas_datetime",
"def datetime(self):\r\n if 'observation_time_rfc822' in self.data \\\r\n and self.data['observation_time_rfc822']:\r\n tstr = self.data['observation_time_rfc822']\r\n tstr = ' '.join(tstr.split(' ')[:-2])\r\n return datetime.strptime(tstr, '%a, %d %b %Y %H:%M:%S')\r\n elif 'observation_time' in self.data:\r\n return datetime.strptime(self.data['observation_time'] \\\r\n +' %s'%datetime.now().year,\r\n 'Last Updated on %b %d, %H:%M %p %Z %Y')\r\n return ''",
"def get_changefile_timestamp(changefile_type, file_sequence_number):\n url = get_url(changefile_type) + \"/\"\n url = url + (\"%03i/%03i/%03i\" % (file_sequence_number / 1000000,\n file_sequence_number / 1000 % 1000,\n file_sequence_number % 1000))\n url = url + \".state.txt\"\n changefile_timestamp = None\n for result in urllib.urlopen(url):\n # get timestamp\n timestamp_p = result.find(\"timestamp=\")\n if timestamp_p != -1:\n # found timestamp line\n timestamp_p += 10 # jump over text\n result = result[timestamp_p:].replace(\"\\\\\", \"\").strip()\n changefile_timestamp = strtodatetime(result)\n\n if not changefile_timestamp:\n logging.info(\"(no timestamp)\")\n if file_sequence_number == 0:\n changefile_timestamp = datetime(1900, 1, 1)\n else:\n AssertionError(\"no timestamp for %s changefile %i.\" %\n (changefile_type, file_sequence_number))\n else:\n logging.info(\"%s, id: %i, timestamp: %s\" %\n (changefile_type, file_sequence_number,\n changefile_timestamp.isoformat()))\n return changefile_timestamp",
"def get_source_stamp(self):",
"def get_source_stamp(self):",
"def import_time(self) -> str:\n return pulumi.get(self, \"import_time\")",
"def get_starttime(self):\n filetime = datetime.datetime.strptime(self.filenametime,\n \"%Y%m%d_%H%M%S\")\n if self.ldat_type != 'acc':\n starttime = filetime\n else:\n starttime = filetime - datetime.timedelta(seconds=512)\n return starttime",
"def file_get_mdatetime(filename):\n return datetime.datetime.utcfromtimestamp(os.path.getmtime(filename))",
"def get_timestamp(file_path):\n mtime = os.stat(file_path).st_mtime\n return datetime.datetime.fromtimestamp(mtime).isoformat()",
"def time(self):\r\n return conf.lib.clang_getFileTime(self)",
"def _get_orbit_start_date(self, filename):\n # if your data comes from the EUMETSAT EO Portal this function can\n if self.eo_portal is True:\n filename_base = os.path.basename(filename)\n fln_spl = filename_base.split('-')[5]\n fln_datetime = fln_spl.split('.')[0]\n return datetime.strptime(fln_datetime, self.datetime_format)\n\n else:\n orbit_start_str = os.path.basename(filename)[\n self.filename_datetime_format[0]:\n self.filename_datetime_format[1]]\n return datetime.strptime(orbit_start_str,\n self.filename_datetime_format[2])",
"def get_datetime_from_model(model):\n hyperparams = get_hyperparams_from_model(model)\n run_datetime = hyperparams['datetime']\n run_datetime = run_datetime.replace('-', '_').replace(' ', '_').replace(':', '_')\n return run_datetime",
"def original_start(self):\n if \"originalStart\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"originalStart\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None",
"def get_time_stamp():\n with open(TIME_STAMP_FILE_NAME, 'r') as f:\n s = f.readline()\n return s",
"def get_datetime_start(self):\n return self.get_t_sect()['datetime_start']",
"def _parse_cvcfile(self, cvcfilepath):\n cvcfilename = os.path.basename(cvcfilepath)\n (Ymd, HMS, cvcextrest) = cvcfilename.split('_', 2)\n datatype, restdat = cvcextrest[0:3], cvcextrest[3:]\n (rest, _datstr) = restdat.split('.')\n _nr512 = 512\n if datatype == 'acc':\n rest = rest.lstrip('_')\n (_nr512, nrrcus0, nrrcus1) = map(int, rest.split('x'))\n filenamedatetime = datetime.datetime.strptime(Ymd + 'T' + HMS,\n '%Y%m%dT%H%M%S')\n # NOTE: For ACC, filename is last obstime, while for XST, it is first.\n if datatype == 'acc':\n filebegindatetime = filenamedatetime - datetime.timedelta(\n seconds=_nr512)\n else:\n filebegindatetime = filenamedatetime\n return datatype, filebegindatetime",
"def etime(self):\n try:\n return self['datetime_2']\n except TypeError:\n return None",
"def last_modified_date_time(self):\n if \"lastModifiedDateTime\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"lastModifiedDateTime\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None"
] | [
"0.6449726",
"0.6254226",
"0.6224529",
"0.6155637",
"0.6119671",
"0.61154586",
"0.6110254",
"0.5968073",
"0.59634435",
"0.5960696",
"0.59445274",
"0.5924622",
"0.59102046",
"0.5850813",
"0.57510024",
"0.574005",
"0.574005",
"0.5737356",
"0.57372004",
"0.5716487",
"0.5684267",
"0.5642409",
"0.56256235",
"0.5590432",
"0.5586844",
"0.5577186",
"0.55681235",
"0.55400085",
"0.5530442",
"0.5522788"
] | 0.7556686 | 0 |
change the order's status to be "cooking" which is selected by the id of order | def cook_order(request):
order_id = request.GET.get('order_id', 0)
cs , status = CookStatus.objects.get_or_create(cook_name=request.user)
if cs.current_order is None:
cs.current_order = Order.objects.get(id=order_id)
cs.current_order.status = 'cooking'
cs.current_order.tikchen = request.user.username
cs.current_order.save()
cs.save()
return HttpResponseRedirect("/staff/cook_order_list/") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def order_ready(request):\n\tcs , status = CookStatus.objects.get_or_create(cook_name=request.user)\n\tif cs.current_order is not None:\n\t\tcs.current_order.status = 'ready-to-serve'\n\t\tcs.current_order.save()\n\t\tcs.current_order = None\n\t\tcs.save()\n\n\treturn HttpResponseRedirect(\"/staff/cook_order_list/\")",
"def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n self.order_id = order_id\n print(\"Order completed\")\n self.save()",
"def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)",
"def update_specific_order(self,status,order_id):\n self.query = \"UPDATE orders SET order_status=%s WHERE order_id=%s\"\n self.input = (status,order_id) #tuple to support indexing\n self.query_1 = \"SELECT order_id FROM orders ORDER BY order_id DESC LIMIT 1.\"\n self.query_2 = \"SELECT * FROM orders WHERE order_id=%s\"\n self.input_2 = (order_id,) #tuple to support indexing\n self.event = \"admin_update_specific_order\"\n self.error = \"Invalid order id\"\n self.message = \"Successfully updated the order.\"\n self.order_id = order_id\n self.db_error = None",
"def put(self, order_id):\n body = request.get_json()\n order = db.session.query(models.Order).filter_by(id=order_id).first()\n if order is None:\n return 'Order id not found', 400\n borrower = body.get('borrower')\n borrower = query_user_by_name(borrower)\n if borrower is None:\n return 'User does not exit in the system', 404\n # if invalid_user(borrower.username):\n # return 'Unauthorized user, please login as a user/borrower', 401\n copy_id = body.get('copy_id')\n print(body)\n print(copy_id)\n copy = db.session.query(models.Copy).filter_by(id=copy_id).first()\n if copy is None:\n return 'Copy ID {} not found in system'.format(copy_id), 409\n elif copy.id != copy_id and copy.status == BOOK_COPY_STATUS_UNAVAILABLE:\n return 'The copy of the book is not available', 400\n copy_owner = body.get('copy_owner')\n owner = query_user_by_name(copy_owner)\n if owner is None:\n return 'Copy owner not found in the system'.format(copy_owner), 409\n # return_date = body.get('return_date')\n # if datetime.strptime(return_date, \"%y%m%d\") < datetime.strptime(datetime.utcnow().strftime(\"%Y-%m-%d\"), \"%y%m%d\"):\n # return 'Return date should be later than today', 400\n status = body.get('order_status')\n if status is not None and status < 0 or status > 4:\n return 'Status should between 0-4', 400\n order.parse_body_status(body)\n copy = db.session.query(models.Copy).filter_by(id=order.copy).first()\n if order.status == ORDER_STATUS_COMPLETED or order.status == ORDER_STATUS_DECLINED:\n copy.status = BOOK_COPY_STATUS_AVAILABLE\n else:\n copy.status = BOOK_COPY_STATUS_UNAVAILABLE\n db.session.commit()\n return order.serialize(), 200",
"def test_manager_change_order_status(self):\n self.client.force_authenticate(self.user)\n cancel = \"CA\"\n url = reverse('order-set_status', args=[self.order.id])\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_status = Order.objects.values(\"status\").get(pk=self.order.id)\n self.assertEqual(new_status[\"status\"], cancel)\n\n with self.subTest('customer can not change order status'):\n self.user.role = get_user_model().CUSTOMER\n self.client.force_authenticate(self.user)\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)",
"def UpdateStatus(self,pid):\n\t\tb1=Rents.objects.filter(paymentid_id=pid).first()\n\t\tamount=Payment.objects.filter(paymentid=pid).values('amount')\n\t\tb=b1.__dict__\n\t\tquant=b['quantity']\n\t\tbookid=b['bookid_id']\n\t\tprice=amount[0]['amount']/quant\n\t\t#price=float(\"{.2f}\".format(amount[0]['amount']))/float(\"{0:.2f}\".format(quant))\n\t\tRents.objects.filter(paymentid_id=pid).update(status='r')\n\t\tBook.objects.filter(bookid=bookid).update(quantity=F('quantity')+quant)\n\t\tStatus.objects.filter(ISBN=b['ISBN'],rentprice=price).update(quantity=F('quantity')+quant)\n\t\tUpload.objects.filter(owner_id_id=b['owner_id_id'],sellprice=price).update(qtyavailable=F('qtyavailable')+quant)\n\t\tself.notifyBuyer(b['ISBN'])",
"def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result",
"def cook_order_list(request):\n\tall_orders = Order.objects.all().order_by(\"-id\")\n\tcss = CookStatus.objects.filter(cook_name=request.user)\n\tcs = None\n\tcurrent_order = None\n\tif len(css) != 0:\n\t\tcs = css[0]\n\t\tif cs.current_order != None :\n\t\t\tcurrent_order = cs.current_order.menu_items.all()\n\n\tnew_orders = []\n\tfor order in all_orders:\n\t\ta = {}\n\t\ta['id'] = order.id\n\t\ta['status'] = order.status\n\n\t\ta['timespan'] = (datetime.datetime.utcnow().replace(tzinfo=utc) - order.timestamp_created).seconds\n\t\tcookofthis = CookStatus.objects.filter(current_order=order)\n\t\tif len(cookofthis) != 0:\n\t\t\ta['cookname'] = cookofthis[0].cook_name.username\n\t\telif order.tikchen != None:\n\t\t\ta['cookname'] = order.tikchen\n\n\t\tnew_orders.append(a)\n\n\n\treturn render_to_response('staff/cook_order_list.html', \n\t\t{'all_orders':new_orders, 'user':request.user, 'current_order':current_order})",
"async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]",
"def Cook(self, env, customer, cooking_time_type = 'fixed', manual_cook_time = None):\n with self.resource.request() as req:\n yield req #resource를 점유 해야 함.\n now_time = round(env.now , 1)\n req.info = [customer.name, now_time]\n if cooking_time_type == 'fixed':\n cooking_time = self.order_ready_time\n elif cooking_time_type == 'random':\n cooking_time = random.randrange(1,self.order_ready_time)\n elif cooking_time_type == 'uncertainty':\n cooking_time = customer.cook_time\n else:\n cooking_time = 0.001\n print('T :{} 가게 {}, {} 분 후 주문 {} 조리 완료'.format(int(env.now),self.name,cooking_time,customer.name))\n if manual_cook_time == None:\n yield env.timeout(cooking_time)\n else:\n yield env.timeout(manual_cook_time)\n print('T :{} 가게 {} 주문 {} 완료'.format(int(env.now),self.name,customer.name))\n customer.food_ready = True\n customer.ready_time = env.now\n self.ready_order.append(customer)",
"def test_updating_item_status(self):\n #test original quantity\n self.assertEqual(self.supply1.quantity, 10)\n self.assertEqual(self.supply2.quantity, 10)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['status'] = 'Received'\n modified_po['items'][0]['id'] = 1\n modified_po['items'][0]['status'] = 'Receieved'\n \n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n \n self.assertEqual(resp.status_code, 200, msg=resp)\n \n po = resp.data\n \n self.assertEqual(Supply.objects.get(pk=1).quantity, 20)",
"def set_order_status(self, status, orderid=0, envtype=0):\n if int(status) not in TRADE_CN.REV_ORDER_STATUS:\n error_str = ERROR_STR_PREFIX + \"the type of status is wrong \"\n return RET_ERROR, error_str\n\n if not TRADE_CN.check_envtype_cn(envtype):\n error_str = ERROR_STR_PREFIX + \"the type of environment param is wrong \"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(SetOrderStatusCN.cn_pack_req,\n SetOrderStatusCN.cn_unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),\n 'orderid': str(orderid), 'status': str(status)}\n\n ret_code, msg, set_order_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = ['envtype', 'orderID']\n set_order_table = pd.DataFrame(set_order_list, columns=col_list)\n\n return RET_OK, set_order_table",
"def status(self, id):",
"def change_status(self, status, application_id):",
"def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. Enjoy!\")",
"def test_update_order(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 201)\n self.assertTrue(\n response_as_json(response)['order']['status_updated_on'])\n self.assertEqual(\n response_as_json(response)['order']['order_status'], 'accepted')",
"def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. Enjoy!\")",
"def set_OrderStatus(self, value):\n super(ListOrdersInputSet, self)._set_input('OrderStatus', value)",
"def return_item(self,reason):\n if reason == \"defective\":\n self.status = \"defective\"\n self.price = 0\n elif reason == \"unopened\":\n self.status = \"for sale\"\n else:\n self.status = \"used\"\n self.price -= (.20 * self.price)\n return self",
"def update_order():",
"def update_order():",
"def onOrderSelected(self, item):\n if self.lstOrders.getMultiSelectedItems() == []:\n self.clearBidData()\n else:\n self.btnCancelOrder.enable()",
"def update_order(self, order):\n order.order_id = self.order_id\n order.average_price = self.avg_execution_price\n order.symbol = self.symbol\n order.side = self.side\n order.type = self.order_type\n order.amount = self.original_amount\n order.price = self.price\n order.filled = self.executed_amount\n order.remaining = self.remaining_amount\n if self.is_cancelled:\n order.status = exchanges.Order.Status.CANCELLED\n elif self.is_live:\n order.status = exchanges.Order.Status.OPEN\n else:\n order.status = exchanges.Order.Status.CLOSED\n return order",
"def test_order_update_status_function(self):\n order = OrderInfo.objects.create(user=self.create_test_user())\n self.assertIsInstance(order.ordered, datetime)\n self.assertIsNone(order.cooked)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsInstance(order.delivered, datetime)",
"def onChange(self, customer, pizza_id):\n \n pizza_qty = self.vars[pizza_id].get()\n customer.ChangePizzaQTY(pizza_id, pizza_qty)\n self.showOrderPrice(customer.my_order)",
"def fill_order(self, order: Order) -> None:\n order = self.get_order_by_id(order.id)\n order.status = OrderStatus.FILL",
"def put(self):\n request_data = json.loads(request.data)\n print(request_data)\n order_id = request_data['order_id']\n status = request_data['status']\n MM.update_order_status(ObjectId(order_id), status)\n return {\"message\": \"Order Status Updated\"}, 200",
"def onSelected(self, item):\n if not item:\n self.clearBidData()\n else:\n self.enableAddOrder()",
"def finish_order(self, order_id):\n request_name = \"get_order_info\"\n\n orders = self.make_request(request_name, url_id=order_id)\n if orders is None:\n print(\"Unsuccessful updating order\")\n return\n order = orders[0]\n update_dict = dict()\n for key in order:\n if str(key)[0] == \"_\":\n continue\n try:\n update_dict[key.encode('utf-8')] = order[key].encode('utf-8')\n except AttributeError:\n update_dict[key.encode('utf-8')] = order[key]\n\n update_dict['status'] = 'Complete'\n resp = self.make_request('set_inventory_order', url_id=order_id, arguments=update_dict)"
] | [
"0.63635933",
"0.586669",
"0.5858225",
"0.58000624",
"0.56946445",
"0.5581705",
"0.55623066",
"0.55381656",
"0.5464021",
"0.5398148",
"0.53747994",
"0.5373412",
"0.53628695",
"0.5324506",
"0.5319295",
"0.52873236",
"0.5282698",
"0.52594453",
"0.5258599",
"0.52334434",
"0.52323323",
"0.52323323",
"0.5220004",
"0.52061313",
"0.5198367",
"0.51858896",
"0.51746446",
"0.51692444",
"0.51510894",
"0.51437265"
] | 0.7182565 | 0 |
change the order's status to be "readytoserve" which is selected by the id of order | def order_ready(request):
cs , status = CookStatus.objects.get_or_create(cook_name=request.user)
if cs.current_order is not None:
cs.current_order.status = 'ready-to-serve'
cs.current_order.save()
cs.current_order = None
cs.save()
return HttpResponseRedirect("/staff/cook_order_list/") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]",
"def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result",
"def api_mark_order_ready(request, id):\n\n close_old_connections()\n \n # Not marking it as served if it isn't even ready yet.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Get the order.\n order = Order.objects.get(id=id)\n \n # Mark the order as served and save it.\n order.ready = not order.ready\n order.save()\n\n close_old_connections()\n\n return HttpResponse('Marked as ready')",
"def update_specific_order(self,status,order_id):\n self.query = \"UPDATE orders SET order_status=%s WHERE order_id=%s\"\n self.input = (status,order_id) #tuple to support indexing\n self.query_1 = \"SELECT order_id FROM orders ORDER BY order_id DESC LIMIT 1.\"\n self.query_2 = \"SELECT * FROM orders WHERE order_id=%s\"\n self.input_2 = (order_id,) #tuple to support indexing\n self.event = \"admin_update_specific_order\"\n self.error = \"Invalid order id\"\n self.message = \"Successfully updated the order.\"\n self.order_id = order_id\n self.db_error = None",
"def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n self.order_id = order_id\n print(\"Order completed\")\n self.save()",
"def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)",
"def status(self, id):",
"def test_manager_change_order_status(self):\n self.client.force_authenticate(self.user)\n cancel = \"CA\"\n url = reverse('order-set_status', args=[self.order.id])\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_status = Order.objects.values(\"status\").get(pk=self.order.id)\n self.assertEqual(new_status[\"status\"], cancel)\n\n with self.subTest('customer can not change order status'):\n self.user.role = get_user_model().CUSTOMER\n self.client.force_authenticate(self.user)\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)",
"def test_update_order(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 201)\n self.assertTrue(\n response_as_json(response)['order']['status_updated_on'])\n self.assertEqual(\n response_as_json(response)['order']['order_status'], 'accepted')",
"def update_order(self, order):\n order.order_id = self.order_id\n order.average_price = self.avg_execution_price\n order.symbol = self.symbol\n order.side = self.side\n order.type = self.order_type\n order.amount = self.original_amount\n order.price = self.price\n order.filled = self.executed_amount\n order.remaining = self.remaining_amount\n if self.is_cancelled:\n order.status = exchanges.Order.Status.CANCELLED\n elif self.is_live:\n order.status = exchanges.Order.Status.OPEN\n else:\n order.status = exchanges.Order.Status.CLOSED\n return order",
"def set_add_dispute_status(self, status):\n self.single_selection_from_kendo_dropdown(self.add_dispute_status_kendo_dropdown_locator, status)\n self.wait_for_ajax_spinner_load()",
"def test_admin_change_order_status(self):\n # Test unregistered id\n # Correct format but not there\n response = self.client.put(\n 'api/v1/parcels/35420', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n data, {'message': 'No Parcel delivery order with that id'})\n # Test invalid format id\n response = self.client.put(\n 'api/v1/parcels/35uh420', headers=self.admin_token_dict) # Incorrect id format\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data, {'message': 'Wrong id format'})",
"def put(self):\n request_data = json.loads(request.data)\n print(request_data)\n order_id = request_data['order_id']\n status = request_data['status']\n MM.update_order_status(ObjectId(order_id), status)\n return {\"message\": \"Order Status Updated\"}, 200",
"def set_order_status(self, status, orderid=0, envtype=0):\n if int(status) not in TRADE_CN.REV_ORDER_STATUS:\n error_str = ERROR_STR_PREFIX + \"the type of status is wrong \"\n return RET_ERROR, error_str\n\n if not TRADE_CN.check_envtype_cn(envtype):\n error_str = ERROR_STR_PREFIX + \"the type of environment param is wrong \"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(SetOrderStatusCN.cn_pack_req,\n SetOrderStatusCN.cn_unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),\n 'orderid': str(orderid), 'status': str(status)}\n\n ret_code, msg, set_order_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = ['envtype', 'orderID']\n set_order_table = pd.DataFrame(set_order_list, columns=col_list)\n\n return RET_OK, set_order_table",
"def UpdateStatus(self,pid):\n\t\tb1=Rents.objects.filter(paymentid_id=pid).first()\n\t\tamount=Payment.objects.filter(paymentid=pid).values('amount')\n\t\tb=b1.__dict__\n\t\tquant=b['quantity']\n\t\tbookid=b['bookid_id']\n\t\tprice=amount[0]['amount']/quant\n\t\t#price=float(\"{.2f}\".format(amount[0]['amount']))/float(\"{0:.2f}\".format(quant))\n\t\tRents.objects.filter(paymentid_id=pid).update(status='r')\n\t\tBook.objects.filter(bookid=bookid).update(quantity=F('quantity')+quant)\n\t\tStatus.objects.filter(ISBN=b['ISBN'],rentprice=price).update(quantity=F('quantity')+quant)\n\t\tUpload.objects.filter(owner_id_id=b['owner_id_id'],sellprice=price).update(qtyavailable=F('qtyavailable')+quant)\n\t\tself.notifyBuyer(b['ISBN'])",
"def change_status(self, status, application_id):",
"def change_status(self):\n if self.status == 'in progress':\n self.status = 'done'\n return self.status\n elif self.status == 'done':\n self.status = 'in progress'\n self.eisenhower_priority()\n return self.status",
"def update_order():",
"def update_order():",
"def api_mark_order_served(request, id):\n\n close_old_connections()\n \n # Not allow unauthenticated users.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Get the order.\n order = Order.objects.get(id=id)\n \n # Not marking it as served if it isn't even ready yet.\n if not order.ready:\n return HttpResponseForbidden(\"This order is not yet ready!\")\n \n # Mark it as served and save it.\n order.served = not order.served\n order.save()\n\n close_old_connections()\n\n return HttpResponse('Marked as served')",
"def test_change_order_status_when_order_does_not_exist(self):\n response = self.api_test_client.put('{}/orders/1000'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Order with id 1000 not found')",
"def _update_status(self):\n self._db_update({'status': self.status})",
"def set_order_done():\n data = select_data_source()\n user = data['user']\n order_id = data['id']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if is valid.\n \n cmd = 'select passed from orders where id==\"{0}\"'.format(order_id)\n order_valid = db.execute(cmd).fetchall()[0][0]\n if order_valid == 0 :\n return finish_invalid_return\n \n ### Check if is done.\n cmd = 'select done from orders where id==\"{0}\"'.format(order_id)\n order_done = db.execute(cmd).fetchall()[0][0]\n if order_done != 0 :\n return finish_done_return\n \n ### All check done.\n ### Set it to done.\n cmd = 'update orders set done=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('user sets order {0} to be done.'.format(user))\n \n return finish_complete_return",
"def test_order_update_status_function(self):\n order = OrderInfo.objects.create(user=self.create_test_user())\n self.assertIsInstance(order.ordered, datetime)\n self.assertIsNone(order.cooked)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsInstance(order.delivered, datetime)",
"def mark_ready_for_review(self, user: User) -> None:\n from .exceptions import OperationForbiddenError, OrderEmptyError\n\n # If order is not in the \"CREATED\" state, raise an\n # OperationForbiddenError\n if not self.is_created:\n raise OperationForbiddenError(\n self.STATE_CHANGE_FORBIDDEN_ERROR_MSG % {\n 'current_state': Order.OrderState.get_choice_display(\n self.state\n ),\n 'new_state': Order.OrderState.PENDING.choice_display\n }\n )\n\n # If the order's item list is empty, raise an OrderEmptyError\n if not self.orderitem_set.exists():\n raise OrderEmptyError(\n self,\n 'An order should contain at least one Order item before it '\n 'can be marked as \"PENDING\".'\n )\n\n # Update the order to \"PENDING\" state\n self.update(user, state=Order.OrderState.PENDING.choice_value)",
"def request_order_status_request(self, req):\n assert req.MsgType == Fix.Tags.MsgType.Values.ORDERSTATUSREQUEST, \\\n \"Order request is not ORDERSTATUSREQUEST\"\n\n params = { \"filter\": \"{\\\"orderID\\\": \\\"%s\\\"}\" % req.OrderID.value }\n return self.api_connector.send_request(\"Order\", RestApiConnector.HTTPMethod.GET, params)",
"def finish_order(self, order_id):\n request_name = \"get_order_info\"\n\n orders = self.make_request(request_name, url_id=order_id)\n if orders is None:\n print(\"Unsuccessful updating order\")\n return\n order = orders[0]\n update_dict = dict()\n for key in order:\n if str(key)[0] == \"_\":\n continue\n try:\n update_dict[key.encode('utf-8')] = order[key].encode('utf-8')\n except AttributeError:\n update_dict[key.encode('utf-8')] = order[key]\n\n update_dict['status'] = 'Complete'\n resp = self.make_request('set_inventory_order', url_id=order_id, arguments=update_dict)",
"def fill_order(self, order: Order) -> None:\n order = self.get_order_by_id(order.id)\n order.status = OrderStatus.FILL",
"async def on_order_completed(self, order_id: str):\n self._orders = list(filter(lambda order: order['id'] != order_id, self._orders))",
"def updateStatus(self, status):\n pass"
] | [
"0.67920375",
"0.670818",
"0.66758204",
"0.6641249",
"0.65196955",
"0.6437769",
"0.6087314",
"0.6031311",
"0.5999321",
"0.59882295",
"0.5978718",
"0.5966084",
"0.59375423",
"0.59296644",
"0.59227234",
"0.5900806",
"0.58614755",
"0.5841006",
"0.5841006",
"0.58058894",
"0.57958835",
"0.5776419",
"0.57573617",
"0.5730916",
"0.57189417",
"0.57016826",
"0.56785095",
"0.5673114",
"0.5636598",
"0.5635252"
] | 0.68117625 | 0 |
Format trajectory into a list of tuples before they are stored in memory. Trajectory is a list of (s,a,r,s,d) tuples | def formatTrajectory(self, trajectory):
return self.RLModel.formatTrajectory(trajectory) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_trajectory(path: str) -> Optional[List[Dict[str, tuple]]]:\n lines = _get_lines_from_file(path)\n\n ess_file = False\n if path.split('.')[-1] != 'xyz':\n try:\n log = ess_factory(fullpath=path, check_for_errors=False)\n ess_file = True\n except (InputError, RMGInputError):\n ess_file = False\n\n if ess_file:\n if not isinstance(log, GaussianLog):\n raise NotImplementedError(f'Currently parse_trajectory only supports Gaussian files, got {type(log)}')\n traj = list()\n done = False\n i = 0\n while not done:\n if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:\n done = True\n elif 'Input orientation:' in lines[i]:\n i += 5\n xyz_str = ''\n while len(lines) and '--------------------------------------------' not in lines[i]:\n splits = lines[i].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n i += 1\n traj.append(str_to_xyz(xyz_str))\n i += 1\n\n else:\n # this is not an ESS output file, probably an XYZ format file with several Cartesian coordinates\n skip_line = False\n num_of_atoms = 0\n traj, xyz_lines = list(), list()\n for line in lines:\n splits = line.strip().split()\n if len(splits) == 1 and all([c.isdigit() for c in splits[0]]):\n if len(xyz_lines):\n if len(xyz_lines) != num_of_atoms:\n raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '\n f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')\n traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))\n num_of_atoms = int(splits[0])\n skip_line = True\n xyz_lines = list()\n elif skip_line:\n # skip the comment line\n skip_line = False\n continue\n else:\n xyz_lines.append(line)\n\n if len(xyz_lines):\n # add the last point in the trajectory\n if len(xyz_lines) != num_of_atoms:\n raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '\n f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')\n traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))\n\n if not len(traj):\n logger.error(f'Could not parse trajectory from {path}')\n return None\n return traj",
"def simulation_to_lines(data: List(Float))->List(Tuple(Int, Float)):\n result = []\n counter = 0\n for payoff in data:\n result = result + [(counter, payoff)]\n counter+=1\n return result\n\n #print(str(result))",
"def trajectory(self):\n return Trajectory.createFromTuples(self.points)",
"def _print_tisserand_lists(self, Trajectory=[]):\n\t\n\timport numpy as np\n\t\n\tn = len(Trajectory);\n\trpl = [];\n\tral = [];\n\tpl = [];\n\tvinfl = [];\n\tfor i in range(n):\n\t\tral.append(Trajectory[i][6]);\n\t\trpl.append(Trajectory[i][5]);\n\t\tpl.append(Trajectory[i][7]);\n\t\tvinfl.append(Trajectory[i][8]);\n\t\n\tprint 'list_ra_python = [',\n\tn = len(ral);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % ral[i],\n\tprint '%f];' % ral[n-1];\n\t\n\tprint 'list_rp_python = [',\n\tn = len(rpl);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % rpl[i],\n\tprint '%f];' % rpl[n-1];\n\t\n\tprint 'list_period_python = [',\n\tn = len(pl);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % pl[i],\n\tprint '%f];' % pl[n-1];\n\t\n\tprint 'list_vinf_python = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % np.linalg.norm(vinfl[i]),\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % np.linalg.norm(vinfl[n-1]);\n\t\n\tprint 'list_vinf_python_x = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][0],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][0];\n\t\n\tprint 'list_vinf_python_y = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][1],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][1];\n\t\n\tprint 'list_vinf_python_z = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][2],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][2];",
"def format_coords(self, coordinates):\n\n coords = []\n for i in range(0, len(coordinates), 3):\n coords.append(tuple(coordinates[i:i+3]))\n\n return coords",
"def convert_shapely_points_to_tuples(list_of_points) -> list:\n return [(p.x, p.y) for p in list_of_points]",
"def get_trajectory(self) -> Tuple:\n # Ensure index is in an allowable range\n assert self.step <= self.num_steps\n # TODO NOTE consider edge case: what happens if this is called right\n # after self.cycle() has been called? does it still work?\n\n # ==\n # Get the trajectory up to current timestep\n o_traj = (self.obs_buffer[:self.step]\n .view(-1, *self.observation_shape)) # (T, *obs_shape)\n h_init = (self.hid_buffer[0]\n .view(-1, self.hidden_state_dim)) # (1, hidden_dim)\n d_traj = (self.don_buffer[:self.step]\n .view(-1, 1)) # (T, 1)\n a_traj = (self.act_buffer[:self.step]\n .view(-1, self.action_dim)) # (T, action_dim)\n\n # Return\n return o_traj, h_init, d_traj, a_traj",
"def build(self, trajectory):\n buf = StringIO()\n\n print >>buf, str(datetime.datetime.now())\n print >>buf, '%5d' % trajectory.n_atoms\n\n linecount = 0\n for atom in range(trajectory.n_atoms):\n for dim in range(3):\n # need to convert from nm to angstroms by multiplying by ten\n fmt = '%12.7f' % (10 * trajectory.xyz[0, atom, dim])\n assert len(fmt) == 12, 'fmt overflowed writing inpcrd. blowup?'\n buf.write(fmt)\n linecount += 1\n if linecount >= 6:\n buf.write(os.linesep)\n linecount = 0\n\n if trajectory.unitcell_lengths != None:\n if linecount != 0:\n buf.write(os.linesep)\n box = (trajectory.unitcell_lengths[0]*10).tolist()\n box.extend(trajectory.unitcell_angles[0].tolist())\n buf.write(('%12.7f' * 6) % tuple(box))\n \n return buf.getvalue()",
"def process_coordinates(void: 'ChargeSystem', steps: int) -> list:\n result = [[] for _ in void]\n\n for _ in range(steps):\n for ind, charge in enumerate(void):\n result[ind].append(list(charge.position))\n void.update()\n\n return result",
"def to_numpy_trajectory(trajectory):\n tx = []\n ty = []\n tyaw = []\n for pose in trajectory:\n tx.append(pose.x)\n ty.append(pose.y)\n tyaw.append(pose.yaw)\n return numpy.asarray([tx, ty, tyaw])",
"def read_frame_trajectory_file( filename ):\n file = open(filename, \"r\")\n\n timestamps = list()\n path = list()\n\n for line in file:\n # eliminate leading spaces\n line = line.strip()\n\n # ignore comments and empty lines\n if len(line) == 0 or line[0] == '#':\n continue\n\n # divide on whitespace and convert to numbers\n nums = [float(x) for x in line.split()]\n \n # separate out components and build lists\n\n timestamps.append( nums[0] )\n\n origin = list( nums[1:4] )\n unitx = list( nums[4:7] )\n unity = list( nums[7:10] )\n unitz = list( nums[10:13] )\n\n path.append( list( (origin, unitx, unity, unitz ) ) )\n\n return path, timestamps",
"def get_trajectory(self, sort_by: str = \"trials\") -> tuple[list[float], list[float]]:\n raise NotImplementedError",
"def dump_ue4_trajectory(name: str, trajectory: typing.Mapping[float, tf.Transform]) -> None:\n with open('unreal_trajectory_{0}.csv'.format(name), 'w') as output_file:\n output_file.write('Name,X,Y,Z,Roll,Pitch,Yaw\\n')\n for idx, timestamp in enumerate(sorted(trajectory.keys())):\n ue_pose = uetf.transform_to_unreal(trajectory[timestamp])\n output_file.write('{name},{x},{y},{z},{roll},{pitch},{yaw}\\n'.format(\n name=idx,\n x=ue_pose.location[0],\n y=ue_pose.location[1],\n z=ue_pose.location[2],\n roll=ue_pose.euler[0],\n pitch=ue_pose.euler[1],\n yaw=ue_pose.euler[2]))",
"def ex_list(data):\n return tuple(data)",
"def toListOfTuple(self, df:pd.core.frame.DataFrame) -> List[Tuple]: \n df['TIME_STAMP'] = df['TIME_STAMP'].astype('str')\n records = df.to_records(index=False)\n listOfTuple = list(records)\n return listOfTuple",
"def get_list(self):\n if self.key == 'L':\n return array_to_list([self.key, self.timing, self.data])\n if self.key == 'T':\n return array_to_list([self.key, self.data, self.timing])\n tmp_data = copy.deepcopy(self.data)\n for i in range(len(self.data)):\n if isinstance(self.data[i], float):\n tmp_data[i] = str('%.3f' % tmp_data[i])\n if tmp_data[i].split('.')[1] == '000':\n tmp_data[i] = tmp_data[i].split('.')[0]\n return array_to_list([self.key, self.easing, self.timing, tmp_data])",
"def trajectory_to_json(trajectory: Trajectory) -> str:\n # numpy arrays need to be converted to normal tuples\n return json.dumps(trajectory, cls=NumpyEncoder)",
"def parseTupleList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"),\",\"*\")\r\n string = string.replace(\"(\", \"\")\r\n string = string.replace(\")\", \"\")\r\n string = string.replace(\"]\", \"\")\r\n string = string.split(\"*\")\r\n for i in xrange(len(string)):\r\n string[i] = string[i].split(\",\")\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n string[i][j] = int(string[i][j])\r\n string[i] = tuple(string[i])\r\n return string",
"def compute_trajectory():\n pass",
"def __str__(self):\n out_str = \"\\n\".join(`\"%.5f, %.5f, %.1f, %s, %s\" % (point[0], point[1], point[2], point[3], point[4])` for point in self.__traectory_list)\n return \"\\'x, y, altitude, capture time, capture date'\\n\"+out_str",
"def parse_1d_scan_coords(path: str) -> List[Dict[str, tuple]]:\n if not os.path.isfile(path):\n raise InputError(f'Could not find file {path}')\n software = identify_ess(path)\n traj = list()\n\n if software == 'xtb':\n scan_path = os.path.join(os.path.dirname(path), 'xtbscan.log')\n if os.path.isfile(scan_path):\n lines = _get_lines_from_file(scan_path)\n xyz_str = ''\n for line in lines:\n splits = line.split()\n if len(splits) == 1:\n if xyz_str:\n traj.append(str_to_xyz(xyz_str))\n xyz_str = ''\n continue\n if 'energy:' in line:\n continue\n xyz_str += f'{qcel.periodictable.to_E(splits[0])} {splits[1]} {splits[2]} {splits[3]}\\n'\n traj.append(str_to_xyz(xyz_str))\n return traj\n\n lines = _get_lines_from_file(path)\n log = ess_factory(fullpath=path, check_for_errors=False)\n if not isinstance(log, GaussianLog):\n raise NotImplementedError(f'Currently parse_1d_scan_coords only supports Gaussian files, got {type(log)}')\n done = False\n i = 0\n while not done:\n if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:\n done = True\n elif 'Optimization completed' in lines[i]:\n while i < len(lines) + 10 and 'Input orientation:' not in lines[i] or 'Forces (Hartrees/Bohr)' in lines [i + 7]:\n i += 1\n if 'Error termination via' in lines[i]:\n return traj\n i += 5\n xyz_str, skip_traj = '', False\n while len(lines) and '--------------------------------------------' not in lines[i]:\n if 'DIIS: error' in lines[i]:\n skip_traj = True\n break\n splits = lines[i].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n i += 1\n if not skip_traj:\n traj.append(str_to_xyz(xyz_str))\n i += 1\n return traj",
"def plot_trajectory(axis, trajectory: typing.Mapping[float, tf.Transform], label: str, style: str = '-') \\\n -> typing.Tuple[float, float]:\n x = []\n y = []\n z = []\n max_point = 0\n min_point = 0\n times = sorted(trajectory.keys())\n first_pose = None\n for timestamp in times:\n pose = trajectory[timestamp]\n if first_pose is None:\n first_pose = pose\n x.append(0)\n y.append(0)\n z.append(0)\n else:\n pose = first_pose.find_relative(pose)\n max_point = max(max_point, pose.location[0], pose.location[1], pose.location[2])\n min_point = min(min_point, pose.location[0], pose.location[1], pose.location[2])\n x.append(pose.location[0])\n y.append(pose.location[1])\n z.append(pose.location[2])\n axis.plot(x, y, z, style, label=label, alpha=0.7)\n return min_point, max_point",
"def tup_list_maker(tup_list):\n final_list = []\n for item in tup_list:\n index = item[0]\n sentences = item[1]\n for sentence in sentences:\n pair = (index, sentence)\n final_list.append(pair)\n return final_list",
"def get_tuples(self):\n pattern = list()\n for gi in self.gradual_items:\n temp = tuple([gi.attribute_col, gi.symbol])\n pattern.append(temp)\n return pattern",
"def coords_to_structure(self) -> None:\n ...",
"def convert_listofrollouts(paths):\n #print([p[\"observation\"] for p in paths[:2]])\n observations = np.concatenate([path[\"observation\"] for path in paths])\n actions = np.concatenate([path[\"action\"] for path in paths])\n next_observations = np.concatenate([path[\"next_observation\"] for path in paths])\n terminals = np.concatenate([path[\"terminal\"] for path in paths])\n concatenated_rewards = np.concatenate([path[\"reward\"] for path in paths])\n unconcatenated_rewards = [path[\"reward\"] for path in paths]\n #print(\"DONE\")\n return observations, actions, next_observations, terminals, concatenated_rewards, unconcatenated_rewards",
"def toArray(parsedList):\n interpretCommand = {\n 'C' : lambda x, prevL : x[-2:], # bezier curve. Ignore the curve.\n 'L' : lambda x, prevL : x[0:2],\n 'M' : lambda x, prevL : x[0:2],\n 'Z' : lambda x, prevL : prevL[0],\n }\n\n points =[]\n for i,(c, arg) in enumerate(parsedList):\n #debug('toArray ', i, c , arg)\n newp = interpretCommand[c](arg, points)\n points.append( newp)\n a=numpy.array( points )\n\n # Some times we have points *very* close to each other\n # these do not bring any meaning full info, so we remove them\n #\n x,y, w,h = computeBox(a)\n sizeC = 0.5*(w+h)\n #deltas = numpy.zeros((len(a),2) )\n deltas = a[1:] - a[:-1] \n #deltas[-1] = a[0] - a[-1]\n deltaD = numpy.sqrt(numpy.sum( deltas**2, 1 ))\n sortedDind = numpy.argsort(deltaD)\n # expand longuest segments\n nexp = int(len(deltaD)*0.9)\n newpoints=[ None ]*len(a)\n medDelta = deltaD[sortedDind[len(deltaD)/2] ]\n for i,ind in enumerate(sortedDind):\n if deltaD[ind]/sizeC<0.005: continue\n if i>nexp:\n np = int(deltaD[ind]/medDelta)\n pL = [a[ind]]\n #print i,'=',ind,'adding ', np,' _ ', deltaD[ind], a[ind], a[ind+1]\n for j in range(np-1):\n f = float(j+1)/np\n #print '------> ', (1-f)*a[ind]+f*a[ind+1]\n pL.append( (1-f)*a[ind]+f*a[ind+1] )\n newpoints[ind] = pL\n else:\n newpoints[ind]=[a[ind]]\n if(D(a[0],a[-1])/sizeC > 0.005 ) :\n newpoints[-1]=[a[-1]]\n\n points = numpy.concatenate([p for p in newpoints if p!=None] )\n ## print ' medDelta ', medDelta, deltaD[sortedDind[-1]]\n ## print len(a) ,' ------> ', len(points)\n\n rel_norms = numpy.sqrt(numpy.sum( deltas**2, 1 )) / sizeC\n keep = numpy.concatenate([numpy.where( rel_norms >0.005 )[0],numpy.array([len(a)-1])])\n\n #return a[keep] , [ parsedList[i] for i in keep]\n #print len(a),' ',len(points)\n return points , []",
"def build(self, trajectory):\n #TODO Implement?",
"def transform_trajectory(self, trajectory, theta):\n origin = np.array([trajectory.s_coordinates[0],\n trajectory.d_coordinates[0]])\n\n for step in range(self.t_steps):\n point = np.array([trajectory.s_coordinates[step],\n trajectory.d_coordinates[step]])\n point = self._rotate_vector(point - origin, theta) + origin\n trajectory.s_coordinates[step] = point[0]\n trajectory.d_coordinates[step] = point[1]\n\n return trajectory",
"def split(self, time: float) -> Tuple['Trajectory','Trajectory']:\n if time <= self.times[0]:\n #split before start of trajectory\n return self.constructor()([time],[self.milestones[0]]),self.constructor()([time]+self.times,[self.milestones[0]]+self.milestones)\n elif time >= self.times[-1]:\n #split after end of trajectory\n return self.constructor()(self.times+[time],self.milestones+[self.milestones[-1]]),self.constructor()([time],[self.milestones[-1]])\n i,u = self.getSegment(time)\n assert i >= 0,\"getSegment returned -1? something must be wrong with the times\"\n #split in middle of trajectory\n splitpt = self.interpolate_state(self.milestones[i],self.milestones[i+1],u,self.times[i+1]-self.times[i])\n front = self.constructor()(self.times[:i+1],self.milestones[:i+1])\n back = self.constructor()(self.times[i+1:],self.milestones[i+1:])\n if u > 0:\n front.times.append(time)\n front.milestones.append(splitpt)\n if u < 1:\n back.times = [time] + back.times\n back.milestones = [splitpt] + back.milestones\n return (front,back)"
] | [
"0.60951096",
"0.6015128",
"0.5985093",
"0.5921288",
"0.5903913",
"0.5848871",
"0.5752752",
"0.5743873",
"0.5734865",
"0.5705948",
"0.56735694",
"0.5643993",
"0.55531675",
"0.5550036",
"0.55357385",
"0.5534374",
"0.550818",
"0.5483543",
"0.5480718",
"0.5441098",
"0.5439881",
"0.5385624",
"0.5363065",
"0.53434515",
"0.52942",
"0.5265581",
"0.52618796",
"0.52603185",
"0.52405906",
"0.5237256"
] | 0.6233229 | 0 |
Whether the environment is batched or not. If the environment supports batched observations and actions, then overwrite this property to True. A batched environment takes in a batched set of actions and returns a batched set of observations. This means for all numpy arrays in the input and output nested structures, the first dimension is the batch size. When batched, the leftmost dimension is not part of the action_spec or the observation_spec and corresponds to the batch dimension. When batched and handle_auto_reset, it checks `np.all(steps.is_last())`. | def batched(self) -> bool:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def batch_size(self) -> Optional[int]:\n if self.batched:\n raise RuntimeError(\n 'Environment %s marked itself as batched but did not override the '\n 'batch_size property'\n % type(self)\n )\n return None",
"def is_batch():\n\n pass",
"def _global_batch_size(self):\n return True",
"def has_full_batch(self) -> bool:",
"def should_handle_all_batches(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"should_handle_all_batches\")",
"def batching_enabled(self) -> bool:\n ...",
"def has_batch(self) -> bool:\n return self._buffer and (self._batch_size is None or sum(\n BlockAccessor.for_block(b).num_rows()\n for b in self._buffer) >= self._batch_size)",
"def dispatch_one_batch(self, iterator):\n\n if self._aborting:\n return False\n\n batch_size = self._get_batch_size()\n\n with self._lock:\n # to ensure an even distribution of the workload between workers,\n # we look ahead in the original iterators more than batch_size\n # tasks - However, we keep consuming only one batch at each\n # dispatch_one_batch call. The extra tasks are stored in a local\n # queue, _ready_batches, that is looked-up prior to re-consuming\n # tasks from the origal iterator.\n try:\n tasks = self._ready_batches.get(block=False)\n except queue.Empty:\n # slice the iterator n_jobs * batchsize items at a time. If the\n # slice returns less than that, then the current batchsize puts\n # too much weight on a subset of workers, while other may end\n # up starving. So in this case, re-scale the batch size\n # accordingly to distribute evenly the last items between all\n # workers.\n n_jobs = self._cached_effective_n_jobs\n big_batch_size = batch_size * n_jobs\n\n islice = list(itertools.islice(iterator, big_batch_size))\n if len(islice) == 0:\n return False\n elif (iterator is self._original_iterator and\n len(islice) < big_batch_size):\n # We reached the end of the original iterator (unless\n # iterator is the ``pre_dispatch``-long initial slice of\n # the original iterator) -- decrease the batch size to\n # account for potential variance in the batches running\n # time.\n final_batch_size = max(1, len(islice) // (10 * n_jobs))\n else:\n final_batch_size = max(1, len(islice) // n_jobs)\n\n # enqueue n_jobs batches in a local queue\n for i in range(0, len(islice), final_batch_size):\n tasks = BatchedCalls(islice[i:i + final_batch_size],\n self._backend.get_nested_backend(),\n self._reducer_callback,\n self._pickle_cache)\n self._ready_batches.put(tasks)\n\n # finally, get one task.\n tasks = self._ready_batches.get(block=False)\n if len(tasks) == 0:\n # No more tasks available in the iterator: tell caller to stop.\n return False\n else:\n self._dispatch(tasks)\n return True",
"def batch_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_size\")",
"def isFull(self):\n if len(self.batch) == self.__batch_size:\n return True\n return False",
"def get_evaluation_batch_size():\n return 1",
"def batch(self):\n return self._batch",
"def _batch(self, batch_size):\n transform_or_spec = self._specs.get(\n 'transform_or_spec', self.transform_or_spec)\n if hasattr(transform_or_spec, '_batch'):\n transform_or_spec = transform_or_spec._batch(batch_size)\n return _DeferredTensorSpec(\n self._get_batched_input_spec(batch_size),\n transform_or_spec=transform_or_spec,\n dtype=self.dtype,\n shape=(None if self.shape is None\n else tf.TensorShape([batch_size]).concatenate(self.shape)),\n name=self.name,\n also_track_spec=self._also_track_spec)",
"def _implements_predict_batch_hooks(self):\n return not is_default(self.on_predict_batch_begin) or not is_default(\n self.on_predict_batch_end\n )",
"def batch_size(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"batch_size\")",
"def _implements_train_batch_hooks(self):\n return not is_default(self.on_train_batch_begin) or not is_default(\n self.on_train_batch_end\n )",
"def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')",
"def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')",
"def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')",
"def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')",
"def compute_batch(self, duplicate_manager=None,context_manager=None):\n from ...acquisitions import AcquisitionTS\n assert isinstance(self.acquisition, AcquisitionTS)\n \n X_batch,_ = self.acquisition.optimize()\n k=1\n \n # --- GET the remaining elements\n while k<self.batch_size:\n new_sample,_ = self.acquisition.optimize()\n X_batch = np.vstack((X_batch,new_sample))\n k +=1\n \n return X_batch",
"def batch_size(self) -> int:\n ...",
"def batch_size(self):\n return self._batch_size",
"def batch_size(self):\n return self._batch_size",
"def batch_size(self):\n return self._batch_size",
"def batch_size(self):\n return self._batch_size",
"def batch_size(self):\n if self._batch_size is not None:\n return self._batch_size # custom batch size defined\n if self.task == 'objdet':\n return 8\n annos_per_img = self._annos_per_img[self.dataset]\n if self.task in {'predcls', 'sgcls'}:\n annos_per_img = annos_per_img['pairs']\n elif self.task == 'objcls':\n annos_per_img = annos_per_img['objects']\n elif self.task == 'preddet' and self.filter_multiple_preds:\n annos_per_img = annos_per_img['predicates_filtered']\n elif self.task == 'preddet' and self.filter_duplicate_rels:\n annos_per_img = annos_per_img['duplicates_filtered']\n elif self.task in {'preddet', 'sggen'}:\n annos_per_img = annos_per_img['relations']\n batch_size = ceil(self._annotations_per_batch / annos_per_img)\n return max(batch_size, 2)",
"def _get_batch_size(self):\n if self.batch_size == 'auto':\n return self._backend.compute_batch_size()\n else:\n # Fixed batch size strategy\n return self.batch_size",
"def _assert_is_batched(self, *arrays):\n shape_list = []\n for array in arrays:\n if isinstance(array, tf.Tensor):\n shape_list.append(array.shape.as_list())\n else:\n shape_list.append(np.shape(array))\n # All arrays should have at least two dimensions.\n assert all([len(shape) >= 2 for shape in shape_list])\n # All arrays should have the same batch size.\n assert len(set([shape[0] for shape in shape_list])) == 1",
"def batch_size(self):\n return self.size"
] | [
"0.6829763",
"0.63745177",
"0.6135504",
"0.6097213",
"0.60270363",
"0.59971696",
"0.5886409",
"0.5588769",
"0.5547144",
"0.55365115",
"0.5511294",
"0.55018145",
"0.5477585",
"0.5467263",
"0.54394144",
"0.5351245",
"0.53225625",
"0.53225625",
"0.53225625",
"0.53225625",
"0.52915865",
"0.52565306",
"0.5244268",
"0.5244268",
"0.5244268",
"0.5244268",
"0.52200854",
"0.5215194",
"0.52132463",
"0.51738775"
] | 0.6470478 | 1 |
Whether the Environment should reset given the current timestep. By default it only resets when all time_steps are `LAST`. | def should_reset(self, current_time_step: ts.TimeStep) -> bool:
handle_auto_reset = getattr(self, '_handle_auto_reset', False)
return handle_auto_reset and np.all(current_time_step.is_last()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_values(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"reset_values\")",
"def reset():\n return True",
"def reset_values(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"reset_values\")",
"def reset(self, **kwargs):\n if self._backend_agent:\n self._backend_agent._on_gym_reset_begin(self, **kwargs)\n\n result = self.env.reset(**kwargs)\n if self.steps_done_in_episode > 0 and not self.is_episode_done:\n self.episodes_done += 1\n self.total.episodes_done_inc()\n self.is_episode_done = False\n self.steps_done_in_episode = 0\n\n if self._backend_agent:\n self._backend_agent._on_gym_reset_end(self, result, **kwargs)\n return result",
"def is_reset(self):\n return self._tag == 'reset'",
"def is_reset_task_states(self, task_state):\r\n return all(self.is_initial_child_state(child) for child in task_state)",
"def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step",
"def should_reset(self):\n # type: () -> bool\n if not self._is_cache_enabled():\n return False\n elapsed = time.time() - self._last_ts\n return elapsed > self._refresh_interval_sec",
"def last_reset(self):\n native_implementation = getattr(self._feature, \"last_reset\", None)\n\n return native_implementation or super().last_reset",
"def test_reset(self, scml_system):\n scml_system._t = 12\n scml_system._k = 33\n state_space = scml_system.state_space\n state_positions = scml_system.state_positions\n initial_state = scml_system.reset()\n target = np.array([0, 0, 0, 0, 0, 0, 560]) / scml_system.limits\n assert np.all(initial_state == target), 'Initial states of the system are incorrect'\n assert scml_system._t == 0, 'Time of the system was not set to zero after reset'\n assert scml_system._k == 0, 'Episode step of the system was not set to zero after reset'\n assert scml_system.converter.reset_counter == scml_system.electrical_motor.reset_counter \\\n == scml_system.mechanical_load.reset_counter == scml_system.supply.reset_counter,\\\n 'The reset was not passed to all components of the SCMLSystem'\n assert scml_system._ode_solver.t == 0, 'The ode solver was not reset correctly'\n assert all(scml_system._ode_solver.y == np.zeros_like(\n scml_system.mechanical_load.state_names + scml_system.electrical_motor.CURRENTS, dtype=float\n )), ' The ode solver was not reset correctly'",
"def IsLocalRerun(self):\n return self.prev_test_run_key is not None",
"def reset(self):\n self._timestep = np.array([0])",
"def reset_next_step(self):\n return self._reset_next_step",
"def get_is_reset(self, timeout = 0):\n response = self.send_command_to_shouter(BP_TOOL.IS_RESET)\n if response == BP_TOOL.ACK:\n return False\n elif response == BP_TOOL.IS_RESET:\n return True\n else:\n return False",
"def __bool__(self) -> bool:\n if self.initial_value == 1 and self.number_of_steps == 0:\n return True\n return False",
"def reset(self):\n self.success = False\n self.i = 0\n if self.monitor:\n self.env = gym.wrappers.Monitor(self.env, \"./mountaincar-monitor\", force=True)\n state = self.env.reset()\n state = self.preprocess_state(state)\n state = np.concatenate([state] * self.action_repeat)\n return state",
"def IsRerun(self):\n return self.prev_test_context is not None",
"def reset(self):\n self.clock.reset()\n self.microgrid.reset();\n self.steps_beyond_done = None\n self.updateState();\n return self.state",
"def is_done(self):\n\n # Robosuite envs always rollout to fixed horizon.\n return False",
"def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs",
"def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs",
"def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs",
"def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs",
"def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs",
"def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs",
"def is_dry_run(self):\n try:\n v = environment.get(\"Run\")\n return v.lower() == \"dry\"\n except KeyError:\n return False",
"def is_restarting(self) -> bool:\r\n return False",
"def reset(self):\n # Initialize the timestep\n self.timestep = 0\n self.state = self.starting_state\n\n if self.from_data:\n self.episode_num += 1\n\n\n return self.starting_state",
"def reset(self, **kwargs):\r\n if self.was_real_done:\r\n obs = self.env.reset(**kwargs)\r\n else:\r\n # no-op step to advance from terminal/lost life state\r\n obs, _, _, _ = self.env.step(0)\r\n self.lives = self.env.unwrapped.ale.lives()\r\n return obs",
"def get_reset(self):\n resetcond = \"\"\n # OnConditions in dynamics\n dyn = self.dynamics\n for ev in dyn.event_handlers:\n if(type(ev) is lems.OnCondition):\n for sa in ev.actions:\n if(type(sa) is lems.StateAssignment):\n resetcond+=sa.variable + \" = \" + sa.value + \"\\n\"\n\n rgs = self.dynamics.regimes\n for r in rgs:\n if(r.initial==True): main_regime = r\n elif(r.initial==False): refractory_regime = r\n # OnConditions in main regimes\n roc = main_regime.event_handlers\n for oc in roc:\n if(type(oc) is lems.OnCondition):\n for sa in oc.actions:\n if(type(sa) is lems.StateAssignment):\n resetcond+=sa.variable + \" = \" + sa.value + \"\\n\"\n # OnEntry in refractory regime\n roe = refractory_regime.event_handlers\n for oe in roe:\n if(type(oe) is lems.OnEntry):\n for sa in oe.actions:\n if(type(sa) is lems.StateAssignment):\n resetcond+=sa.variable + \" = \" + sa.value + \"\\n\"\n\n return resetcond"
] | [
"0.6065845",
"0.5846722",
"0.5738438",
"0.57173324",
"0.5702698",
"0.5700601",
"0.56152225",
"0.5585178",
"0.55754685",
"0.5555264",
"0.5542493",
"0.55404925",
"0.5514392",
"0.5394026",
"0.5376499",
"0.5355567",
"0.53485197",
"0.53262925",
"0.5297183",
"0.52819175",
"0.52819175",
"0.52819175",
"0.52819175",
"0.52819175",
"0.52819175",
"0.5280383",
"0.5276791",
"0.5252488",
"0.52435327",
"0.5237724"
] | 0.8290577 | 0 |
Returns the current timestep. | def current_time_step(self) -> ts.TimeStep:
return self._current_time_step | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_time_step(self):\n return self._time_step",
"def time_step(self):\n return self._time_step",
"def GetTimeStep(self):\n time_step = None\n\n time_step = self._solver_collection.GetTimeStep()\n \n if not time_step is None:\n\n self.time_step = time_step\n\n return self.time_step",
"def time_step(self) -> float:\n return self._timestep",
"def timeStep(self):\n return self.params['h']",
"def getCurrentStep():",
"def sim_step(self):\n return traci.simulation.getCurrentTime()/1000 # milliseconds to seconds",
"def get_step(self):\n return self.step",
"def get_step(self):\n return self.step",
"def cur_step(self):\n return self._cur_step",
"def timestep(self) -> Optional[float]:\n dt = None\n if len(self.time) > 1 and self.is_equidistant:\n dt = (self.time[1] - self.time[0]).total_seconds() # type: ignore\n return dt",
"def get_time_step_to_enqueue(self):\n return self.time_step_to_enqueue",
"def get_time_step(self):\n for body in self.bodies:\n # If body is a Satelite\n if body.name == \"Satelite\":\n # Assuming that acceleration for a small times step is constant\n t = 0.01 * norm(body.velocity) / norm(body.acc)\n if t < self.t:\n return t\n return self.t",
"def step(self):\n return self._step",
"def step(self):\n return self._step",
"def step(self):\n return self._step",
"def step(self):\n return self._step",
"def step(self):\n return self._step",
"def current_step(self) -> FlowNode:\n return self._current_step",
"def get_step(self) -> int:\n return self.step",
"def getTime(self):\n return self.step / (self.max_step + int(self.include))",
"def internal_timestep(self):\n try:\n return self._internal_dt\n except AttributeError:\n # the component hasn't started running yet\n _ = self.calc_grads_and_timesteps(False, False)\n return self._internal_dt",
"def step ( self ) :\n return self.__step",
"def time(self, step: int) -> float:\n return self._start_time + self._parameters.dt*(step - self._start_step)",
"def dt(self):\n if isinstance(self._time_axis, are_ax.RegularAxis):\n return self._time_axis.step\n raise RuntimeError(\"Time step is not available for orbits constructed with non-regular time axis\")",
"def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time",
"def full_step_time(self):\n\n total_step_time = self.duration()\n return total_step_time / (2 * self.half_to_full_step_time_ratio + (self.num_steps() - 2))",
"def get(self):\n if self.running:\n return self.accumulated_time + pg.time.get_ticks() - self.start_time\n else:\n return self.accumulated_time",
"def gettime(self):\n return self.t",
"def ntimestep(self):\n if self._ntimestep is None:\n self._ntimestep = self.get_data_ntimestep()\n\n return self._ntimestep"
] | [
"0.87353057",
"0.83974636",
"0.81454396",
"0.7974334",
"0.7622381",
"0.75591457",
"0.7524713",
"0.75237733",
"0.75237733",
"0.7476018",
"0.7327006",
"0.7309547",
"0.73024344",
"0.7277173",
"0.7277173",
"0.7277173",
"0.7277173",
"0.72636175",
"0.7225894",
"0.72174436",
"0.7172915",
"0.7149396",
"0.7144977",
"0.714474",
"0.70629865",
"0.69952554",
"0.6948339",
"0.6947402",
"0.6919923",
"0.6910559"
] | 0.9203299 | 0 |
Updates the environment according to the action and returns a `TimeStep`. If the environment returned a `TimeStep` with `StepType.LAST` at the previous step the implementation of `_step` in the environment should call `reset` to start a new sequence and ignore `action`. This method will start a new sequence if called after the environment has been constructed and `reset` has not been called. In this case `action` will be ignored. If `should_reset(current_time_step)` is True, then this method will `reset` by itself. In this case `action` will be ignored. | def step(self, action: types.NestedArray) -> ts.TimeStep:
if self._current_time_step is None or self.should_reset(
self._current_time_step
):
return self.reset()
self._current_time_step = self._step(action)
return self._current_time_step | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step(self, action):\n if self._reset_next_step:\n return self.reset()\n\n # Apply the game_rules\n for rule in self.game_rules:\n rule.step(self._state, self._meta_state)\n\n # Apply the action\n self.action_space.step(self._state, action)\n\n # Step the physics\n self.physics.step(self._state)\n\n # Compute reward\n self.step_count += 1\n reward, should_reset = self.task.reward(\n self._state, self._meta_state, self.step_count)\n\n # Take observation\n observation = self.observation()\n\n # Return transition\n if should_reset:\n self._reset_next_step = True\n return dm_env.termination(reward=reward, observation=observation)\n else:\n return dm_env.transition(reward=reward, observation=observation)",
"def step(self, action):\n if self._reset_next_step:\n self._reset_next_step = False\n return self.reset()\n\n self._hooks.before_step(self._physics_proxy, action, self._random_state)\n self._observation_updater.prepare_for_next_control_step()\n\n try:\n for i in range(self._n_sub_steps):\n self._hooks.before_substep(self._physics_proxy, action,\n self._random_state)\n self._physics.step()\n self._hooks.after_substep(self._physics_proxy, self._random_state)\n # The final observation update must happen after all the hooks in\n # `self._hooks.after_step` is called. Otherwise, if any of these hooks\n # modify the physics state then we might capture an observation that is\n # inconsistent with the final physics state.\n if i < self._n_sub_steps - 1:\n self._observation_updater.update()\n physics_is_divergent = False\n except control.PhysicsError as e:\n if not self._raise_exception_on_physics_error:\n logging.warning(e)\n physics_is_divergent = True\n else:\n raise\n\n self._hooks.after_step(self._physics_proxy, self._random_state)\n self._observation_updater.update()\n\n if not physics_is_divergent:\n reward = self._task.get_reward(self._physics_proxy)\n discount = self._task.get_discount(self._physics_proxy)\n terminating = (\n self._task.should_terminate_episode(self._physics_proxy)\n or self._physics.time() >= self._time_limit\n )\n else:\n reward = 0.0\n discount = 0.0\n terminating = True\n\n obs = self._observation_updater.get_observation()\n\n if not terminating:\n return dm_env.TimeStep(dm_env.StepType.MID, reward, discount, obs)\n else:\n self._reset_next_step = True\n return dm_env.TimeStep(dm_env.StepType.LAST, reward, discount, obs)",
"def step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n self.microgridPolicy.improveAction(action);\n\n self.microgrid.update();\n\n self.updateState();\n done = self.microgridPolicy.verifyStopConditions();\n reward = self.microgridPolicy.computeReward(done)\n if done: \n if self.steps_beyond_done is None:\n self.steps_beyond_done = 0\n else:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n self.clock.increaseTimeStep();\n return self.state, reward, done, {}",
"def step(self, action):\n if self.platform is None:\n raise RuntimeError(\"Call `reset()` before starting to step.\")\n\n if not self.action_space.contains(action):\n raise ValueError(\n \"Given action is not contained in the action space.\")\n\n num_steps = self.frameskip\n\n # ensure episode length is not exceeded due to frameskip\n step_count_after = self.step_count + num_steps\n if step_count_after > self.episode_length:\n excess = step_count_after - self.episode_length\n num_steps = max(1, num_steps - excess)\n\n reward = 0.0\n for _ in range(num_steps):\n self.step_count += 1\n if self.step_count > self.episode_length:\n raise RuntimeError(\"Exceeded number of steps for one episode.\")\n\n # send action to robot\n robot_action = self._gym_action_to_robot_action(action)\n t = self.platform.append_desired_action(robot_action)\n\n # Use observations of step t + 1 to follow what would be expected\n # in a typical gym environment. Note that on the real robot, this\n # will not be possible\n observation = self._create_observation(t + 1)\n\n reward += self.compute_reward(observation, self.info)\n\n is_done = self.step_count == self.episode_length\n\n return observation, reward, is_done, self.info",
"def step(self, action):\n if self._reset_next_step:\n return self.reset()\n\n self._step_count += 1\n \n reward = self._action_space.step(\n action, self._sprites, keep_in_frame=self._keep_in_frame)\n\n # Update sprite positions from their velocities\n for sprite in self._sprites:\n sprite.update_position(keep_in_frame=self._keep_in_frame)\n\n reward += self._task.reward(self._sprites)\n observation = self.observation()\n\n if self.should_terminate():\n self._reset_next_step = True\n return dm_env.termination(reward=reward, observation=observation)\n else:\n return dm_env.transition(reward=reward, observation=observation)",
"def _step(self, action):\n if self._episode_ended:\n # The last action ended the episode. Ignore the current action and start a new episode\n return self.reset()\n\n env_action = self.y_train[self.id[self.episode_step]]\n self.episode_step += 1\n\n if action == env_action: # Correct action\n if env_action: # Minority\n reward = 1 # True Positive\n else: # Majority\n reward = self.imb_rate # True Negative\n\n else: # Incorrect action\n if env_action: # Minority\n reward = -1 # False Negative\n self._episode_ended = True # Stop episode when minority class is misclassified\n else: # Majority\n reward = -self.imb_rate # False Positive\n\n if self.episode_step == self.X_len - 1: # If last step in data\n self._episode_ended = True\n\n self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint\n\n if self._episode_ended:\n return ts.termination(self._state, reward)\n else:\n return ts.transition(self._state, reward)",
"def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step",
"def step(self, action: int):\n assert self.action_space.contains(action)\n loc = action\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n # update bord\n self.board[loc] = to_code(self.mark)\n\n # check if game has ended\n status = check_game_status(self.board)\n if status >= 0:\n self.done = True\n if status in [1, 2]:\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # update mark\n self.mark = next_mark(self.mark)\n\n return self._get_obs(), reward, self.done, None",
"def step(\n self,\n action: Union[Dict[str, Any], np.ndarray],\n env_id: Optional[np.ndarray] = None,\n ) -> Union[TimeStep, Tuple]:",
"def step(self, action):\n action = self.randomization.action_randomizer.randomize(\n action, self._random_state\n )\n\n robot_exception = None\n try:\n self._act(action)\n except RobotException as re:\n logger.error(\n f\"Robot raised exception: {str(re)}. This will finish the current episode.\"\n )\n robot_exception = re\n\n if not self.constants.physical:\n # We don't need to do stepping for physical roll out.\n self.mujoco_simulation.step()\n\n self._synchronize_step_time()\n self.t += 1\n\n obs, reward, done, info = self.get_observation(robot_exception=robot_exception)\n obs, reward, done, info = self.step_finalize(obs, reward, done, info)\n return obs, reward, done, info",
"def step(self, action):\n self.t += 1\n state, reward, done, info = self.env.step(action)\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info",
"def reset(self):\n self._reset_next_step = False\n self.step_count = 0\n \n self._state = self.state_initializer()\n self._meta_state = self._meta_state_initializer()\n self.task.reset(self._state, self._meta_state)\n self.physics.reset(self._state)\n self.action_space.reset(self._state)\n for rule in self.game_rules:\n rule.reset(self._state, self._meta_state)\n rule.step(self._state, self._meta_state)\n \n return dm_env.restart(self.observation())",
"def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:\n next_state, reward, done, _ = self.env.step(action, resize=RESIZE, size = RESIZE_SIZE)\n\n if not self.is_test:\n self.transition += [reward, next_state, done]\n \n # N-step transition\n if self.use_n_step:\n one_step_transition = self.memory_n.store(*self.transition)\n # 1-step transition\n else:\n one_step_transition = self.transition\n\n # add a single step transition\n if one_step_transition:\n self.memory.store(*one_step_transition)\n \n return next_state, reward, done",
"def reset(self):\n\n self.curr_episode += 1\n self.curr_step = 0\n\n self.action_episode_memory.append([])\n self.rewards.append([])\n\n self.is_finalized = False\n init_state, init_reward = self._take_action(5 * np.random.randn(self.act_dimension))\n self.initial_conditions.append(init_state)\n return init_state",
"def step(self, action):\n\n if not self._is_action_legal(action):\n return self.current_state, self.reward_illegal_action, self._is_terminal_state(), None\n else:\n # Change action passed if environment should behave random\n if self.stochastic:\n if not np.random.choice([True, False], 1, p=[self.p, 1 - self.p]):\n action = np.random.choice(self.possible_actions)\n\n # Needed for reward calculation (must be done before updating data structures)\n number_of_shifts = self._get_number_of_shifts(action)\n is_cargo_mandatory = int(self.vehicle_data[2][action] == 1)\n\n slot = self.end_of_lanes[self.current_Lane]\n self.loading_sequence += \"{}. Load Vehicle Type \\t {} \\t in Lane: \\t {} \\t Row: \\t {} \\n\" \\\n .format(self.sequence_no, action, self.current_Lane, slot)\n\n self.end_of_lanes[self.current_Lane] += self.vehicle_data[4][action]\n\n if self.vehicle_data[1][action] == -1 or \\\n self.number_of_vehicles_loaded[action] < self.vehicle_data[1][action]:\n self.number_of_vehicles_loaded[action] += 1\n\n self.loaded_vehicles[self.current_Lane][self.vehicle_Counter[self.current_Lane]] = action\n self.vehicle_Counter[self.current_Lane] += 1\n\n # Update grids\n for i in range(self.vehicle_data[4][action]):\n self.grid.T[self.current_Lane][slot + i] = self.sequence_no\n self.grid_destination.T[self.current_Lane][slot + i] = self.vehicle_data[3][action]\n self.grid_vehicle_type.T[self.current_Lane][slot + i] = self.vehicle_data[0][action]\n\n # Update lowest destination data structure\n if self.vehicle_data[3][action] < self.lowest_destination[self.current_Lane]:\n self.lowest_destination[self.current_Lane] = self.vehicle_data[3][action]\n\n self.sequence_no += 1\n # Update according to lane selection heuristic\n self.current_Lane = self._get_minimal_lanes()[0]\n\n self.possible_actions = self.get_possible_actions_of_state()\n self.current_state = self._get_current_state()\n\n if self._is_terminal_state():\n # Calculate reward for terminal state\n free_spaces = np.sum(self._get_free_capacity()) / np.sum(self.total_capacity)\n mandatory_vehicles_left_to_load = np.sum(self.vehicle_data[1][self.mandatory_cargo_mask]\n - self.number_of_vehicles_loaded[self.mandatory_cargo_mask])\n reward_features = np.array(\n [is_cargo_mandatory, number_of_shifts, free_spaces, mandatory_vehicles_left_to_load])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, True, {}\n else:\n # Calculate reward\n reward_features = np.array([is_cargo_mandatory, number_of_shifts, 0, 0])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, False, {}",
"def step_env(self, action):\n return self.env.step(action)",
"def step(self, action):\n self.t += 1\n if self.use_run_time_assurance:\n probe_state, unsafe = self.probe_step(action)\n # switch to safe controller if unsafe\n if unsafe:\n x, x_dot, theta, theta_dot = probe_state\n # go right\n if x <= -self.x_threshold: # go right\n action = 1\n elif x>= self.x_threshold: # go left\n action = 0 \n \n state, reward, done, info = self.env.step(action)\n # Could make a custom reward here if you want\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info",
"def _step(self, action: types.NestedArray) -> ts.TimeStep:",
"def step(self, action):\n x, y = self._move(action, *self._currentPos)\n\n if chr(self._grid[x, y]) == CASE_TYPES.Wall:\n # error - previous state was already a wall\n self._done = True\n self._trajectory.append(self._currentPos)\n return self._currentPos, -1, self._done, {}\n\n reward = {\n CASE_TYPES.Water: self.waterReward,\n CASE_TYPES.Sand: self.sandReward,\n CASE_TYPES.Open: self.stepReward,\n CASE_TYPES.Termination: self.successReward,\n CASE_TYPES.Trap: (\n -(self.maxSteps - len(self._trajectory)) + self.failureReward +\n self.trapReward)\n }[chr(self._grid[x, y])]\n\n # termination state\n if chr(self._grid[x, y]) in [CASE_TYPES.Termination, CASE_TYPES.Trap]:\n self._done = True\n\n self._currentPos = (x, y)\n\n self._trajectory.append(self._currentPos)\n self._nbSteps += 1\n\n if self._nbSteps >= self.maxSteps and not self._done:\n reward += self.failureReward\n\n return self._currentPos, reward, self._done, {}",
"def step(self, action: ActionType) -> EnvResponse:\n action = self.action_space.clip_action_to_space(action)\n if self.action_space and not self.action_space.contains(action):\n raise ValueError(\"The given action does not match the action space definition. \"\n \"Action = {}, action space definition = {}\".format(action, self.action_space))\n\n # store the last agent action done and allow passing None actions to repeat the previously done action\n if action is None:\n action = self.last_action\n self.last_action = action\n if self.visualization_parameters.add_rendered_image_to_env_response:\n current_rendered_image = self.get_rendered_image()\n\n self.current_episode_steps_counter += 1\n if self.phase != RunPhase.UNDEFINED:\n self.total_steps_counter += 1\n\n # act\n self._take_action(action)\n\n # observe\n self._update_state()\n\n if self.is_rendered:\n self.render()\n\n self.total_reward_in_current_episode += self.reward\n\n if self.visualization_parameters.add_rendered_image_to_env_response:\n self.info['image'] = current_rendered_image\n\n self.last_env_response = \\\n EnvResponse(\n reward=self.reward,\n next_state=self.state,\n goal=self.goal,\n game_over=self.done,\n info=self.info\n )\n\n # store observations for video / gif dumping\n if self.should_dump_video_of_the_current_episode(episode_terminated=False) and \\\n (self.visualization_parameters.dump_mp4 or self.visualization_parameters.dump_gifs):\n self.last_episode_images.append(self.get_rendered_image())\n\n return self.last_env_response",
"def step(self, action):\n assert self.action_space.contains(action)\n\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n\n # place\n col = action\n row = get_row(self.board, col)\n\n self.board[row, col] = tocode(self.mark)\n self.turn += 1\n self.status = check_game_status(self.board, row, col)\n\n if self.status >= 0:\n self.done = True\n if self.status in [1, 2]:\n # always called by self\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # switch turn\n self.mark = next_mark(self.mark)\n return self._get_obs(), reward, self.done, None",
"def step(self, action):\n # Implement your step method here\n # return (observation, reward, done, info)\n self._state = self._state + action\n # print('Step state:', self._state)\n x, y = self._state\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=done)",
"def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]:\n next_state, reward, done, info = self.env.step(action)\n return next_state, reward, done, info",
"def step(self, action):\n # THIS WILL BE CALLED FROM ALL STEP DRIVERS\n self._world = self._action_wrapper.action_to_behavior(world=self._world,\n action=action)\n # 1. move the agent we set the action for\n controlled_agent_id = self._scenario._eval_agent_ids[self._action_wrapper._input_count-1]\n self._world.stepAgent(self._step_time, controlled_agent_id)\n\n # length of agents\n if self._action_wrapper._input_count >= len(self._scenario._eval_agent_ids):\n # CANNOT STEP WORLD IF NOT ALL ACTIONS ARE SET\n self._action_wrapper._input_count = 0\n \n # 2. move all other agent\n self._world.step(self._step_time)\n if self._render:\n self.render()\n\n # TODO needs to know the agents id\n return self.snapshot(\n world=self._world,\n controlled_agents=controlled_agent_id,\n action=action)",
"def step(self, action: list) -> None:\n self._input = np.array(\n [self._thrust_surge(action[0]), self._moment_steer(action[1])]\n )\n w, q = odesolver45(\n self._state_dot, self._state, self.config.simulation.t_step_size\n )\n\n self._state = q\n self._state[2] = geom.princip(self._state[2])\n\n self._prev_states = np.vstack([self._prev_states, self._state])\n self._prev_inputs = np.vstack([self._prev_inputs, self._input])\n\n self._step_counter += 1",
"def step(self, action):\n done = self.cur_step >= self.max_steps_per_episode\n\n if done:\n raise RuntimeError(\"Episode is done\")\n\n self.cur_step += 1\n\n # Compute new state based on previous state and action\n new_state = self._take_action(action)\n\n # Compute reward value based on previous state and action\n reward = self._get_reward(action)\n\n # Update current state to new state\n self.cur_state = new_state\n\n # Compute observation from current state\n ob = self._get_obs() # Has to come after new state update\n\n # Update action, observation and reward histories\n self.action_episode_memory[self.cur_episode].append(action)\n self.observation_episode_memory[self.cur_episode].append(ob)\n self.reward_episode_memory[self.cur_episode].append(reward)\n\n # Recompute done since action may have modified it\n done = self.cur_step >= self.max_steps_per_episode\n\n return ob, reward, done, {}",
"def step(self, action):\n # check if suggested action is valid\n valid = self._take_action(action)\n if not valid:\n _, _ = self._simulate()\n response = self.worst_response\n target = 6*60\n else:\n # simulate until a TS response is needed\n response = np.inf\n while response == np.inf:\n response, target = self._simulate()\n if np.isnan(target): # prio 2 or 3 incident: no target exists\n target = response\n\n self.last_action = action if self.action_type == \"tuple\" else self.action_num_to_tuple[action]\n # calculate reward and new state\n self.reward = self._get_reward(response, target, valid=valid)\n self.state, self.is_done = self._extract_state()\n return self.state, self.reward, self.is_done, {\"note\": \"nothing to report\"}",
"def step(\n self, action: Union[numpy.ndarray, int], state: numpy.ndarray = None, dt: int = None\n ) -> tuple:\n data = super(AtariEnvironment, self).step(action=action, state=state, dt=dt)\n if state is None:\n observ, reward, terminal, info = data\n observ = self.gym_env.unwrapped.ale.getRAM() if self.obs_ram else observ\n return observ, reward, terminal, info\n else:\n state, observ, reward, terminal, info = data\n observ = ale_to_ram(self.gym_env.unwrapped.ale) if self.obs_ram else observ\n return state, observ, reward, terminal, info",
"def step(self, action):\n self.move_step(action) # Move.\n r, d = self.check_goal() # Check the reward and done state, and create\n # new environment.\n s_new= self.render_env() # Render the new environment.\n return s_new, r, d",
"def step(self, action):\n self.action = action\n return self.env.step(action)"
] | [
"0.6702035",
"0.66238755",
"0.64235044",
"0.6258338",
"0.62385756",
"0.6237361",
"0.6150759",
"0.6134917",
"0.61343175",
"0.61312205",
"0.60947925",
"0.5976863",
"0.5962188",
"0.5953434",
"0.59465617",
"0.59439987",
"0.58978987",
"0.58788586",
"0.58774835",
"0.58655053",
"0.58542365",
"0.5834581",
"0.58265305",
"0.581372",
"0.58122355",
"0.579503",
"0.57924336",
"0.5789864",
"0.57771266",
"0.57589006"
] | 0.776358 | 0 |
Propagates a given input through the network; all data are stored in the layers' "y" and "v" fields. | def flow(input_):
global number_of_neurons_by_layer
if len(input_) != number_of_neurons_by_layer[0]:
raise IndexError(
f"\033[91mInput length is incorrect. It must be {number_of_neurons_by_layer[0]}.\033[m")
layers[0]["y"][1:] = np.array(input_).flatten().reshape(len(input_), 1)
for i_lay in range(1, len(layers)):
layers[i_lay]["v"][:] = logistic(
layers[i_lay]["weigths"] @ layers[i_lay-1]["y"]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trainNet():",
"def forward(self, x):\n # sources保存特征图,loc与conf保存所有PriorBox的位置与类别预测特征\n sources = list()\n loc = list()\n conf = list()\n\n # 对输入图像卷积到conv4_3,将特征添加到sources中\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # 继续卷积到conv7,将特征添加到sources中\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # 继续利用额外的卷积层计算,并将特征添加到sources中\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1: # 间隔一层\n sources.append(x)\n\n # 对sources中的特征图利用类别与位置网络进行卷积计算,并保存到loc与conf中\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1, self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n # 对于训练来说,output包括了loc与conf的预测值以及PriorBox的信息\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def epoch(self, v, expected):\n self.V = []\n self.O_hidden = []\n self.O_output = []\n self.D_1 = []\n\n self.error = []\n\n\n self.forward(np.transpose([v]), np.transpose([expected]))\n self.backward()",
"def forward(self, x): \n pal1_sources = list()\n pal2_sources = list()\n loc_pal1 = list()\n conf_pal1 = list()\n loc_pal2 = list()\n conf_pal2 = list()\n\n # apply vgg up to conv3_3 relu\n for k in range(16):\n x = self.vgg[k](x)\n\n of1 = x\n s = self.L2Normof1(of1)\n pal1_sources.append(s)\n \n # apply vgg up to conv4_3 relu\n for k in range(16, 23):\n x = self.vgg[k](x)\n\n of2 = x\n s = self.L2Normof2(of2)\n pal1_sources.append(s)\n\n # apply vgg up to conv5_3 relu\n for k in range(23, 30):\n x = self.vgg[k](x)\n of3 = x\n s = self.L2Normof3(of3)\n pal1_sources.append(s)\n\n # apply vgg up to fc7\n for k in range(30, len(self.vgg)):\n x = self.vgg[k](x)\n of4 = x\n pal1_sources.append(of4)\n \n # apply extra layers and cache source layer outputs\n for k in range(2):\n x = F.relu(self.extras[k](x), inplace=True)\n of5 = x\n pal1_sources.append(of5)\n for k in range(2, 4):\n x = F.relu(self.extras[k](x), inplace=True)\n of6 = x\n pal1_sources.append(of6)\n\n ## fpn module\n \"\"\"\n lfpn6 = self.fpn_topdown6(of6)\n lfpn5 = self._upsample_product(self.fpn_topdown5(of6), self.fpn_latlayer5(of5))\n lfpn4 = self._upsample_product(self.fpn_topdown4(of5), self.fpn_latlayer4(of4))\n lfpn3 = self._upsample_product(self.fpn_topdown3(of4), self.fpn_latlayer3(of3))\n lfpn2 = self._upsample_product(self.fpn_topdown2(of3), self.fpn_latlayer2(of2))\n lfpn1 = self._upsample_product(self.fpn_topdown1(of2), self.fpn_latlayer1(of1))\n\n\n ef1 = self.fpn_fem3_3(lfpn1)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem4_3(lfpn2)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem5_3(lfpn3)\n ef3 = self.L2Normef3(ef3)\n\n ef4 = self.fpn_fem7(lfpn4)\n ef5 = self.fpn_fem6_2(lfpn5)\n ef6 = self.fpn_fem7_2(lfpn6)\n \"\"\"\n\n conv7 = F.relu(self.fpn_topdown[0](of6), inplace=True)\n x = F.relu(self.fpn_topdown[1](conv7), inplace=True)\n conv6 = F.relu(self._upsample_product(x, self.fpn_latlayer[0](of5)), inplace=True)\n\n x = F.relu(self.fpn_topdown[2](conv6), inplace=True)\n convfc7_2 = F.relu(self._upsample_product(x, self.fpn_latlayer[1](of4)), inplace=True)\n\n x = F.relu(self.fpn_topdown[3](convfc7_2), inplace=True)\n conv5 = F.relu(self._upsample_product(x, self.fpn_latlayer[2](of3)), inplace=True)\n\n x = F.relu(self.fpn_topdown[4](conv5), inplace=True)\n conv4 = F.relu(self._upsample_product(x, self.fpn_latlayer[3](of2)), inplace=True)\n\n x = F.relu(self.fpn_topdown[5](conv4), inplace=True)\n conv3 = F.relu(self._upsample_product(x, self.fpn_latlayer[4](of1)), inplace=True)\n\n ef1 = self.fpn_fem[0](conv3)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem[1](conv4)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem[2](conv5)\n ef3 = self.L2Normef3(ef3)\n ef4 = self.fpn_fem[3](convfc7_2)\n ef5 = self.fpn_fem[4](conv6)\n ef6 = self.fpn_fem[5](conv7)\n\n pal2_sources = (ef1, ef2, ef3, ef4, ef5, ef6)\n\n ## first shot \n for (x, l, c) in zip(pal1_sources, self.loc_pal1, self.conf_pal1):\n loc_pal1.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal1.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n ## second shot\n for (x, l, c) in zip(pal2_sources, self.loc_pal2, self.conf_pal2):\n loc_pal2.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal2.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n # first shot\n loc_pal1 = torch.cat([o.view(o.size(0), -1) for o in loc_pal1], 1)\n conf_pal1 = torch.cat([o.view(o.size(0), -1) for o in conf_pal1], 1)\n \n # second shot\n loc_pal2 = torch.cat([o.view(o.size(0), -1) for o in loc_pal2], 1)\n conf_pal2 = torch.cat([o.view(o.size(0), -1) for o in conf_pal2], 1)\n\n 
if self.phase == 'test':\n # 测试时, 仅使用shot2 的输出\n output = self.detect(\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n self.softmax(conf_pal2.view(conf_pal2.size(0), -1,\n self.num_classes)), # conf preds\n )\n else:\n ## 训练时,使用shot1 和 shot2 的输出\n output = (\n loc_pal1.view(loc_pal1.size(0), -1, 4),\n conf_pal1.view(conf_pal1.size(0), -1, self.num_classes),\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n conf_pal2.view(conf_pal2.size(0), -1, self.num_classes))\n return output",
"def forward(self, x):\n x=T.div(x,255.0)\n \n #print(state[20:,20:,0])\n #print(state[:,0,:,:])\n conv1 = F.relu(self.conv1(x))\n conv2 = F.relu(self.conv2(conv1))\n conv3 = F.relu(self.conv3(conv2))\n ###\n conv_state = conv3.view(conv3.size()[0], -1)\n flat1 = F.relu(self.fc1(conv_state))\n flat2 = F.relu(self.fc2(flat1))\n\n V = self.V(flat2)\n A = self.A(flat2)\n\n return V, A\n return x",
"def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1, 'VALID') + b_conv1) \n # outputs a 24x24x32 image\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1) \n # outputs a 12x12x32 image\n\n # second conv. layer \n # 3x3 filter, 32 input channel, 32 output channels\n W_conv2 = nn.weight_variable([3, 3, 32, 32])\n b_conv2 = nn.bias_variable([32])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2, 'VALID') + b_conv2)\n # outputs a 10x10x32 image\n\n # third conv. layer\n # 3x3 filter, 32 input channel, 32 output channels\n W_conv3 = nn.weight_variable([3, 3, 32, 32])\n b_conv3 = nn.bias_variable([32])\n stride3 = 1\n h_conv3 = tf.nn.relu(nn.conv2d(h_conv2, W_conv3, stride3, 'VALID') + b_conv3)\n # outputs a 8x8x32 image\n\n # reshape (flatten) output\n h_conv3_flat = tf.reshape(h_conv3, [-1, 8*8*32])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([8 * 8 * 32, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv",
"def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n #print('Reached start of vgg')\n for k in self.vgg._modules.keys():\n if int(k) < 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n #print('Reached L2Norm')\n s = self.L2Norm(x)\n sources.append(s)\n\n #print('Reached after L2Norm')\n # apply vgg up to fc7\n for k in self.vgg._modules.keys():\n if int(k) >= 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n sources.append(x)\n #print('Reached end of VGG')\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n self.priors # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur",
"def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r",
"def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z",
"def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out",
"def forward(self, input_x):\n return self.net(input_x.float())",
"def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # apply vgg up to fc7\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1,\n self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def forward(self, x):\n sources = list()\n tcb_source = list()\n odm_loc = list()\n odm_conf = list()\n if self.phase == 'test':\n feat_sizes = list()\n\n # apply vgg up to conv4_3 relu and conv5_3 relu\n for k in range(self.conv5_3_layer):\n x = self.vgg[k](x)\n if self.size != 512 and self.size != 320 and self.conv3_3_layer - 1 == k:\n s = self.conv3_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if self.conv4_3_layer - 1 == k:\n s = self.conv4_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n elif self.conv5_3_layer - 1 == k:\n s = self.conv5_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply vgg up to fc7\n for k in range(self.conv5_3_layer, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply extra layers and cache source layer outputs\n for k in range(len(self.extras)):\n x = self.extras[k](x)\n if self.extra_1_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if (self.size == 640 or self.size == 5126) and self.extra_2_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # calculate TCB features\n p = None\n for k, v in enumerate(sources[::-1]):\n s = v\n for i in range(3):\n s = self.tcb0[(self.step-k)*3 + i](s)\n if k != 0:\n u = p\n u = self.tcb1[self.step-k](u)\n s += u\n for i in range(3):\n s = self.tcb2[(self.step-k)*3 + i](s)\n p = s\n tcb_source.append(s)\n tcb_source.reverse()\n\n # apply ODM to source layers\n for (x, l, c) in zip(tcb_source, self.odm_loc, self.odm_conf):\n odm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n odm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n odm_loc = torch.cat([o.view(o.size(0), -1) for o in odm_loc], 1)\n odm_conf = torch.cat([o.view(o.size(0), -1) for o in odm_conf], 1)\n\n if self.phase == \"test\":\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4), # odm loc preds\n self.softmax(odm_conf.view(odm_conf.size(0), -1,\n self.num_classes)), # odm conf preds\n feat_sizes\n )\n else:\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4),\n odm_conf.view(odm_conf.size(0), -1, self.num_classes),\n )\n return output",
"def forward(self, inputs):\r\n\r\n assert len(inputs) == self.depth, \\\r\n \"Mismatch between input and Network scales\"\r\n\r\n y = self.rgb_to_features[self.depth - 2](inputs[self.depth - 1])\r\n y = self.layers[self.depth - 2](y)\r\n for x, block, converter in \\\r\n zip(reversed(inputs[1:-1]),\r\n reversed(self.layers[:-1]),\r\n reversed(self.rgb_to_features[:-1])):\r\n input_part = converter(x) # convert the input:\r\n y = torch.cat((input_part, y), dim=1) # concatenate the inputs:\r\n y = block(y) # apply the block\r\n\r\n # calculate the final block:\r\n input_part = self.final_converter(inputs[0])\r\n y = torch.cat((input_part, y), dim=1)\r\n y = self.final_block(y)\r\n\r\n # return calculated y\r\n return y",
"def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output",
"def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1) + b_conv1)\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1)\n\n # second conv. layer \n # 5x5 filter, 32 input channel, 64 output channels\n W_conv2 = nn.weight_variable([5, 5, 32, 64])\n b_conv2 = nn.bias_variable([64])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2) + b_conv2)\n\n # second pooling layer (2x2) \n h_pool2 = nn.max_pool_2x2(h_conv2)\n\n # reshape (flatten) output\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([7 * 7 * 64, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv",
"def __call__(self, x_1d, is_training, reuse=False, nfilt=32):\n with tf.variable_scope(self.name):\n x = tf.reshape(x_1d, [-1, self.input_dim, self.input_dim, self.channels])\n\n e1 = unet_conv(x, nfilt*1, 'e1', reuse, is_training)\n e2 = unet_conv(e1, nfilt*2, 'e2', reuse, is_training)\n e3 = unet_conv(e2, nfilt*4, 'e3', reuse, is_training)\n e4 = unet_conv(e3, nfilt*8, 'e4', reuse, is_training)\n e5 = unet_conv(e4, nfilt*8, 'e5', reuse, is_training)\n e6 = unet_conv(e5, nfilt*8, 'e6', reuse, is_training, s=1)\n e7 = unet_conv(e6, nfilt*8, 'e7', reuse, is_training, s=1)\n e8 = unet_conv(e7, nfilt*8, 'e8', reuse, is_training, s=1)\n\n d1 = unet_conv_t(e8, e7, nfilt*8, 'd1', reuse, is_training, s=1)\n d2 = unet_conv_t(d1, e6, nfilt*8, 'd2', reuse, is_training, s=1)\n d3 = unet_conv_t(d2, e5, nfilt*8, 'd3', reuse, is_training, s=1)\n d4 = unet_conv_t(d3, e4, nfilt*8, 'd4', reuse, is_training)\n d5 = unet_conv_t(d4, e3, nfilt*4, 'd5', reuse, is_training)\n d6 = unet_conv_t(d5, e2, nfilt*2, 'd6', reuse, is_training)\n d7 = unet_conv_t(d6, e1, nfilt*1, 'd7', reuse, is_training)\n out = unet_conv_t(\n d7, None, self.channels, 'out', reuse, is_training,\n activation=tf.nn.tanh, use_batch_norm=False, use_dropout=False)\n\n out_1d = tf.reshape(out, (-1, self.output_dim*self.output_dim*self.channels))\n\n tensors = [\n x, e1, e2, e3, e4, e5, e6, e7, e8, d1, d2, d3, d4, d5, d6, d7, out, out_1d]\n\n for tensor in tensors:\n print(tensor)\n\n return out_1d",
"def Network_model(input_data):\n layer1_param={'weights':tf.Variable(tf.random_normal([784, no_neurons_layer1])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer1]))}\n \n layer2_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer1, no_neurons_layer2])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer2]))}\n \n layer3_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer2, no_neurons_layer3])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer3]))}\n \n layer4_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer3, no_neurons_layer4])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer4]))}\n \n output_layer_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer4, no_classes])), \n 'biases': tf.Variable(tf.random_normal([no_classes]))}\n \n #so uptill now the weights for each layer is initialized\n \n \"\"\"\n Now what will happened in each layer, I will define next. basically the weights are multiplied\n in each layer with the corresponding inputs and then it is passed through activation function \n (relu in this case) and the output is given as input to the other layer.\n sign:B-Jan\n \"\"\"\n \n l1_output= tf.add(tf.matmul(input_data,layer1_param['weights']), layer1_param['biases'])\n l1_output=tf.nn.relu(l1_output)\n \n l2_output= tf.add(tf.matmul(l1_output,layer2_param['weights']), layer2_param['biases'])\n l2_output=tf.nn.relu(l2_output)\n \n \n l3_output= tf.add(tf.matmul(l2_output,layer3_param['weights']), layer3_param['biases'])\n l3_output=tf.nn.relu(l3_output)\n \n l4_output= tf.add(tf.matmul(l3_output,layer4_param['weights']), layer4_param['biases'])\n l4_output=tf.nn.relu(l4_output)\n \n #The final output Layer\n output= tf.matmul(l4_output, output_layer_param['weights'])+output_layer_param['biases']\n \n return output # contains the output of the last output layer",
"def network_modified(input):\n\n up6 = upsample_and_concat( conv5, conv4, 256, 512 , 'up_conv1' )\n conv6=slim.conv2d(up6, 256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_1')\n conv6=slim.conv2d(conv6,256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_2')\n\n up7 = upsample_and_concat( conv6, conv3, 128, 256 , 'up_conv2' )\n conv7=slim.conv2d(up7, 128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_1')\n conv7=slim.conv2d(conv7,128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_2')\n\n up8 = upsample_and_concat( conv7, conv2, 64, 128 , 'up_conv3')\n conv8=slim.conv2d(up8, 64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_1')\n conv8=slim.conv2d(conv8,64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_2')\n\n up9 = upsample_and_concat( conv8, conv1, 32, 64 , 'up_conv4')\n conv9=slim.conv2d(up9, 32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_1')\n conv9=slim.conv2d(conv9,32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_2')\n\n conv10=slim.conv2d(conv9,12,[1,1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10,2)\n return out",
"def model(data, train=None):\n\n conv = tf.nn.conv2d(data, layer1_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer1_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,1,1,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer2_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer2_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,2,2,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer3_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer3_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,1,1,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer6_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer6_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,2,2,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer7_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer7_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,1,1,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer8_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer8_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,2,2,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n shape = pool.get_shape().as_list()\n reshape = tf.reshape(pool, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.relu(tf.matmul(reshape, layer4_weights) + layer4_biases)\n\n return (tf.matmul(hidden, layer5_weights1) + layer5_biases1), (tf.matmul(hidden, layer5_weights2) + layer5_biases2), \\\n (tf.matmul(hidden, layer5_weights3) + layer5_biases3), (tf.matmul(hidden, layer5_weights4) + layer5_biases4), \\\n (tf.matmul(hidden, layer5_weights5) + layer5_biases5)",
"def model(image_height,image_width,path):\n\n\tdef load_file(path='vgg19.mat'):\n\t\t\"\"\"\n\t\tLoads Weights File & returns Object of Numpy array\n\t\t\"\"\"\n\t\tfile=loadmat(path)\n\t\tfile=file['layers']\n\t\tprint(\"Success load_file\")\n\t\treturn file\n\n\tdef ret_layer_index(file):\n\t\t\"\"\"\n\t\tTakes file as input & returns a dictionary having name of layers with their code\n\t\t\"\"\"\n\t\tnames={}\n\t\tfor i in range(len(file[0])):\n\t\t\tprint(file[0][i][0][0][0])\n\t\t\tnames[file[0][i][0][0][0][0]]=i\n\t\tprint(\"Success layer_index\")\n\t\treturn names\n \n\tdef weight(layer_name):\n\t\t\"\"\" Asks for Layer Name & returns its weights & bias\n\t\t\"\"\"\n\t\tlayer_no=names[layer_name]\n\t\twb =file[0][layer_no][0][0][2]\n\t\tw=wb[0][0]\n\t\tb=wb[0][1]\n\t\tname=file[0][layer_no][0][0][0]\n\t\tassert name==layer_name\n\t\tprint(\"Success weight\")\n\t\treturn w,b\n\n\tdef conv_relu(prev_layer,layer_no,layer_name):\n\t\tW,b=weight(layer_name)\n\t\tW=tf.constant(W)\n\t\tb=tf.constant(np.reshape(b, (b.size)))\n\t\tl=tf.nn.conv2d(prev_layer,filter=W,strides=[1,1,1,1],padding='SAME') +b\n\t\tprint(\"Success convrelu\")\n\t\treturn tf.nn.relu(l)\n\n\tdef avg_pool(prev_layer):\n\t\treturn tf.nn.avg_pool(prev_layer,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')\n\n\tdef load_graph():\n\t\tgraph={}\n\t\tgraph['input'] = tf.Variable(np.zeros((1, image_height, image_width,3)), dtype = 'float32')\n\t\tgraph['conv1_1'] = conv_relu(graph['input'], 0, 'conv1_1')\n\t\tgraph['conv1_2'] = conv_relu(graph['conv1_1'], 2, 'conv1_2')\n\t\tgraph['avgpool1'] = avg_pool(graph['conv1_2'])\n\t\tgraph['conv2_1'] = conv_relu(graph['avgpool1'], 5, 'conv2_1')\n\t\tgraph['conv2_2'] = conv_relu(graph['conv2_1'], 7, 'conv2_2')\n\t\tgraph['avgpool2'] = avg_pool(graph['conv2_2'])\n\t\tgraph['conv3_1'] = conv_relu(graph['avgpool2'], 10, 'conv3_1')\n\t\tgraph['conv3_2'] = conv_relu(graph['conv3_1'], 12, 'conv3_2')\n\t\tgraph['conv3_3'] = conv_relu(graph['conv3_2'], 14, 'conv3_3')\n\t\tgraph['conv3_4'] = conv_relu(graph['conv3_3'], 16, 'conv3_4')\n\t\tgraph['avgpool3'] = avg_pool(graph['conv3_4'])\n\t\tgraph['conv4_1'] = conv_relu(graph['avgpool3'], 19, 'conv4_1')\n\t\tgraph['conv4_2'] = conv_relu(graph['conv4_1'], 21, 'conv4_2')\n\t\tgraph['conv4_3'] = conv_relu(graph['conv4_2'], 23, 'conv4_3')\n\t\tgraph['conv4_4'] = conv_relu(graph['conv4_3'], 25, 'conv4_4')\n\t\tgraph['avgpool4'] = avg_pool(graph['conv4_4'])\n\t\tgraph['conv5_1'] = conv_relu(graph['avgpool4'], 28, 'conv5_1')\n\t\tgraph['conv5_2'] = conv_relu(graph['conv5_1'], 30, 'conv5_2')\n\t\tgraph['conv5_3'] = conv_relu(graph['conv5_2'], 32, 'conv5_3')\n\t\tgraph['conv5_4'] = conv_relu(graph['conv5_3'], 34, 'conv5_4')\n\t\tgraph['avgpool5'] = avg_pool(graph['conv5_4'])\n\t\treturn graph\n\n\tfile=load_file(path)\n\tnames=ret_layer_index(file)\n\treturn load_graph()",
"def train(self, X, y):",
"def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x",
"def forward(self, inputs):\n #NOTE: Already merge axis 0(batches) and axis 1(channels) before extracting feature phase,\n # please refer to paddlevideo/modeling/framework/recognizers/recognizer2d.py#L27\n #y = paddle.reshape(\n # inputs, [-1, inputs.shape[2], inputs.shape[3], inputs.shape[4]])\n\n ####ResNet-C: use three 3x3 conv, replace, one 7x7 conv\n y = self.conv1_1(inputs)\n y = self.conv1_2(y)\n y = self.conv1_3(y)\n\n y = self.pool2D_max(y)\n for block in self.block_list:\n y = block(y)\n return y",
"def forward(self, inputs, end_points, mode=\"\"):\n batch_size = inputs['point_clouds'].shape[0]\n\n end_points = self.backbone_net1(inputs['point_clouds'], end_points)\n end_points = self.backbone_net2(inputs['point_clouds'], end_points, mode='net1')\n end_points = self.backbone_net3(inputs['point_clouds'], end_points, mode='net2')\n end_points = self.backbone_net4(inputs['point_clouds'], end_points, mode='net3')\n\n ### Extract feature here\n xyz = end_points['fp2_xyz']\n features1 = end_points['fp2_features']\n features2 = end_points['fp2_features'+'net1']\n features3 = end_points['fp2_features'+'net2']\n features4 = end_points['fp2_features'+'net3']\n end_points['seed_inds'] = end_points['fp2_inds']\n end_points['seed_xyz'] = xyz\n end_points['seed_features'] = features1\n \n ### Combine the feature here\n features_hd_discriptor = torch.cat((features1, features2, features3, features4), dim=1)\n features_hd_discriptor = F.relu(self.bn_agg1(self.conv_agg1(features_hd_discriptor)))\n features_hd_discriptor = F.relu(self.bn_agg2(self.conv_agg2(features_hd_discriptor)))\n\n end_points['hd_feature'] = features_hd_discriptor\n \n net_flag_z = F.relu(self.bn_flag_z1(self.conv_flag_z1(features_hd_discriptor)))\n net_flag_z = self.conv_flag_z2(net_flag_z)\n end_points[\"pred_flag_z\"] = net_flag_z\n\n net_flag_xy = F.relu(self.bn_flag_xy1(self.conv_flag_xy1(features_hd_discriptor)))\n net_flag_xy = self.conv_flag_xy2(net_flag_xy)\n end_points[\"pred_flag_xy\"] = net_flag_xy\n\n net_flag_line = F.relu(self.bn_flag_line1(self.conv_flag_line1(features_hd_discriptor)))\n net_flag_line = self.conv_flag_line2(net_flag_line)\n end_points[\"pred_flag_line\"] = net_flag_line\n\n proposal_xyz, proposal_features, center_offset, center_residual = self.vgen(xyz, features_hd_discriptor)\n proposal_features_norm = torch.norm(proposal_features, p=2, dim=1)\n proposal_features = proposal_features.div(proposal_features_norm.unsqueeze(1))\n end_points['vote_xyz'] = proposal_xyz\n end_points['vote_features'] = proposal_features\n \n voted_z, voted_z_feature, z_offset, z_residual = self.vgen_z(xyz, features_hd_discriptor)\n voted_z_feature_norm = torch.norm(voted_z_feature, p=2, dim=1)\n voted_z_feature = voted_z_feature.div(voted_z_feature_norm.unsqueeze(1))\n end_points['vote_z'] = voted_z\n end_points['vote_z_feature'] = voted_z_feature\n\n voted_xy, voted_xy_feature, xy_offset, xy_residual = self.vgen_xy(xyz, features_hd_discriptor)\n voted_xy_feature_norm = torch.norm(voted_xy_feature, p=2, dim=1)\n voted_xy_feature = voted_xy_feature.div(voted_xy_feature_norm.unsqueeze(1))\n end_points['vote_xy'] = voted_xy\n end_points['vote_xy_feature'] = voted_xy_feature\n\n voted_line, voted_line_feature, line_offset, line_residual = self.vgen_line(xyz, features_hd_discriptor)\n voted_line_feature_norm = torch.norm(voted_line_feature, p=2, dim=1)\n voted_line_feature = voted_line_feature.div(voted_line_feature_norm.unsqueeze(1))\n end_points['vote_line'] = voted_line\n end_points['vote_line_feature'] = voted_line_feature\n \n center_z, feature_z, end_points = self.pnet_z(voted_z, voted_z_feature, end_points, mode='_z')\n center_xy, feature_xy, end_points = self.pnet_xy(voted_xy, voted_xy_feature, end_points, mode='_xy')\n center_line, feature_line, end_points = self.pnet_line(voted_line, voted_line_feature, end_points, mode='_line')\n\n end_points = self.pnet_final(proposal_xyz, proposal_features, center_z, feature_z, center_xy, feature_xy, center_line, feature_line, end_points)\n return end_points",
"def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5",
"def forward(self, x):\n return self.net(x)",
"def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out",
"def forward(self, input):\n input, _ = input\n bs = input.shape[0]\n d1 = self.relu1(self.fc1(input))\n d2 = self.relu2(self.fc2(d1))\n d3 = self.fc3(d2)\n out = self.sigmoid(d3)\n\n out = out.view(bs, 17, 3)\n return out"
] | [
"0.6696856",
"0.6663844",
"0.6572504",
"0.6489043",
"0.6439873",
"0.6390039",
"0.63803667",
"0.630682",
"0.63015246",
"0.6280952",
"0.6269826",
"0.62485385",
"0.62225485",
"0.62188786",
"0.6218298",
"0.6182807",
"0.61798155",
"0.61786693",
"0.6172364",
"0.61677724",
"0.61635745",
"0.6146077",
"0.6119188",
"0.6116991",
"0.610831",
"0.60976243",
"0.6078553",
"0.60722274",
"0.6066292",
"0.6065537"
] | 0.76247156 | 0 |
it computes the error vector between desired and obtained output, stored at the last layer | def error(input_, output):
global number_of_neurons_by_layer
if len(output) != number_of_neurons_by_layer[-1]:
raise IndexError(
f"\033[91mDesired output length is incorrect. It must be {number_of_neurons_by_layer[-1]}.\033[m")
output = np.array(output).reshape(len(output), 1)
flow(input_)
layers[-1]["error"] = output - layers[-1]["v"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error2(input_, output):\n error(input_, output)\n layers[-1][\"error2\"] = layers[-1][\"error\"].T @ layers[-1][\"error\"]",
"def getError(outputVector, targetVector):\r\n return np.sum((outputVector-targetVector)**2)",
"def get_error(self, params):\n return self.endog - self.predict(params)",
"def _compute_error(self,expected_out,actual_out,error_func):\n\n error = error_func(expected_out,actual_out)\n return error",
"def get_error(self, output,target):\n return [target[i]-output[i] for i in range(len(output))]",
"def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)",
"def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)",
"def backprop_error(self,t=0, external_error=0):\n\t\taux = external_error\n\t\t# Other layers' contributions\n\t\tif t != 0:\n\t\t\tfor layer in self.next_recurrent:\n\t\t\t\taux += layer.get_error_contribution(self, t=t+1)\n\t\tfor layer in self.next:\n\t\t\taux += layer.get_error_contribution(self, t=t)\n\n\t\tif t != 0:\n\t\t\taux += np.dot(\n\t\t\t\tself.get_o_error(t=t+1) * self.__dev_sigm__(self.get_o(t=t+1)),\n\t\t\t\tnp.transpose(self.W_o_atprev)\n\t\t\t) + \\\n\t\t\tnp.dot(\n\t\t\t\tself.get_c_error(t=t+1) * self.get_i(t=t+1) * self.__dev_tanh__(self.get_c(t=t+1)),\n\t\t\t\tnp.transpose(self.W_c_atprev)\n\t\t\t) + \\\n\t\t\tnp.dot(\n\t\t\t\tself.get_i_error(t=t+1) * self.__dev_sigm__(self.get_i(t=t+1)),\n\t\t\t\tnp.transpose(self.W_i_atprev)\n\t\t\t) + \\\n\t\t\tnp.dot(\n\t\t\t\tself.get_f_error(t=t+1) * self.__dev_sigm__(self.get_f(t=t+1)),\n\t\t\t\tnp.transpose(self.W_f_atprev)\n\t\t\t)\n\t\t\n\t\tif isinstance(aux, np.ndarray):\n\t\t\tself.error_a = [aux]+self.error_a\n\n\t\t# error in o\n\t\tself.error_o = [self.get_a_error(t=t) * self.__tanh__(self.get_c(t=t))] + self.error_o\n\n\t\t# error in c\n\t\taux = self.w_o_c * self.get_o_error(t=t) * self.__dev_sigm__(self.get_o(t=t)) \\\n\t\t+ self.get_a_error(t=t) * self.get_o(t=t) * self.__dev_tanh_z__(self.get_c(t=t))\n\t\tif t!=0:\n\t\t\taux += self.get_c_error(t=t+1) * self.get_f(t=t+1) \\\n\t\t\t+ self.w_i_ctprev * self.get_i_error(t=t+1) * self.__dev_sigm__(self.get_i(t=t+1)) \\\n\t\t\t+ self.w_f_ctprev * self.get_f_error(t=t+1) * self.__dev_sigm__(self.get_f(t=t+1))\n\t\tself.error_c = [aux] + self.error_c\n\n\t\t# error in f\n\t\tself.error_f = [self.get_c(t=t-1)*self.get_c_error(t=t)] + self.error_f\n\n\t\t# error in i\n\t\tself.error_i = [self.get_c_error(t=t)*self.get_tanh_zc(t=t)] + self.error_i",
"def calcError(self, inherited_error):\r\n\t\tif inherited_error == None:\t\t# output neurons\r\n\t\t\tself.error = (self.target - self.value) * self.activate_der()\r\n\t\telse:\r\n\t\t\tself.error = inherited_error * self.activate_der()",
"def _delta(self, output, err, neuron):\n return neuron._g_prime(output) * err",
"def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum",
"def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)",
"def _wer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total",
"def test(test_data, test_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, bias_w1, bias_w2, bias_w3, l1, lmbda):\n # Set up initial variables\n samples = test_data.shape[0]\n correct_values = np.argmax(test_output, axis=1)\n predicted_values = np.zeros((samples,))\n error = np.zeros(test_output.shape)\n error_l1 = 0\n\n # Extract inputs\n x0 = test_data.T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n\n # Checks if L1 is wanted\n if l1:\n # Calculates l1 error for input layer\n error_l1 = lmbda * np.sum(np.sqrt(np.square(w1)))\n\n # Checks if hidden layer is needed\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n if l1:\n # Calculates l1 error for hidden layer\n error_l1 += lmbda * np.sum(np.sqrt(np.square(w2)))\n\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n if l1:\n # Calculates l1 error for second hidden layer\n error_l1 += lmbda * np.sum(np.sqrt(np.square(w3)))\n\n # Calculate labels\n predicted_values = np.argmax(x3, axis=0)\n # Error Signal\n error = (test_output - x3.T)\n\n else:\n # Calculate labels\n predicted_values = np.argmax(x2, axis=0)\n # Error Signal\n error = (test_output - x2.T)\n\n else:\n # Calculate labels\n predicted_values = np.argmax(x1, axis=0)\n # Error Signal\n error = (test_output - x1.T)\n\n # Calculate MSE error\n error_mse = np.sum(np.square(error)) / (2 * error.shape[0])\n\n # Add MSE error to L1 error, if L1 isn't used this will add 0\n error = error_mse + error_l1\n\n # Calculate accuracy of predictions\n accuracy = (np.sum(predicted_values == correct_values) / samples) * 100\n\n print(\"Accuracy = \", accuracy)\n print(\"Error = \", error)\n return accuracy, error",
"def error_function(prediction_dict, use_example_flags):\n\n predicted_flux_matrix_w_m02 = numpy.mean(\n prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY][\n use_example_flags, ...\n ],\n axis=-1\n )\n actual_flux_matrix_w_m02 = prediction_dict[\n prediction_io.SCALAR_TARGETS_KEY\n ][use_example_flags, :]\n\n predicted_net_flux_matrix_w_m02 = (\n predicted_flux_matrix_w_m02[:, 0] -\n predicted_flux_matrix_w_m02[:, 1]\n )\n actual_net_flux_matrix_w_m02 = (\n actual_flux_matrix_w_m02[:, 0] -\n actual_flux_matrix_w_m02[:, 1]\n )\n\n net_flux_sse_w2_m04 = numpy.sum(\n (predicted_net_flux_matrix_w_m02 - actual_net_flux_matrix_w_m02)\n ** 2\n )\n raw_flux_sse_w2_m04 = numpy.sum(\n (predicted_flux_matrix_w_m02 - actual_flux_matrix_w_m02) ** 2\n )\n\n num_examples = actual_flux_matrix_w_m02.shape[0]\n flux_mse_w_m02 = (\n (net_flux_sse_w2_m04 + raw_flux_sse_w2_m04) / (3 * num_examples)\n )\n\n predicted_hr_matrix_k_day01 = numpy.mean(\n prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY][\n use_example_flags, ...\n ],\n axis=-1\n )\n actual_hr_matrix_k_day01 = prediction_dict[\n prediction_io.VECTOR_TARGETS_KEY\n ][use_example_flags, ...]\n\n weight_matrix_k_day01 = numpy.maximum(\n numpy.absolute(predicted_hr_matrix_k_day01),\n numpy.absolute(actual_hr_matrix_k_day01)\n )\n heating_rate_dwmse_k3_day03 = numpy.mean(\n weight_matrix_k_day01 *\n (predicted_hr_matrix_k_day01 - actual_hr_matrix_k_day01) ** 2\n )\n\n return (\n scaling_factor_for_dwmse * heating_rate_dwmse_k3_day03 +\n scaling_factor_for_flux_mse * flux_mse_w_m02\n )",
"def input_error(self, out_influence, new_weights):\n in_influence = np.dot(np.transpose(new_weights), out_influence)\n return in_influence",
"def ErrorFunction(p,x,y,z):\n \n return TargetFunction(p,x,y) - z",
"def error_compute(self):\n self.tt_error = np.linalg.norm(self.rel_error)\n if self.global_rank==0:print('Overall error is::',self.tt_error)\n return {'NMF': self.rel_error, 'tt': self.tt_error}",
"def _mer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total",
"def error_values(X_train,X_test,Y_train,Y_test):\n #setting up parameters and variables for plotting \n n_train = X_train.shape[0]\n n_test = X_test.shape[0]\n d = X_train.shape[1]\n hdnode = 100\n w1 = np.random.normal(0,0.001,d*hdnode).reshape((d,hdnode))\n d1 = np.zeros((d,hdnode))\n w2 = np.random.normal(0,0.001,hdnode).reshape((hdnode,1))\n d2 = np.zeros(hdnode)\n h = np.zeros(hdnode)\n mb = 100 #minibatch size\n m = int(n_train/mb)\n batch = np.arange(m) \n lr = 0.00020\n EP = 20000 #needed for initializing \n ep = 0\n yh = np.zeros((n_train,1))\n yh2 = np.zeros((n_test,1))\n L_train= np.zeros(EP+1)\n L_test = np.zeros(EP+1)\n Y_train = Y_train.reshape(len(Y_train),1)\n #activation function for the hidden layer is tanh\n \n def g(A):\n return (np.tanh(A))\n\n def gd(A):\n return (1-np.square(np.tanh(A)))\n \n #setting up how long the epoch will run\n EP = 200\n ep = 0\n while ep < EP:\n ep += 1\n yh = g(X_train.dot(w1)).dot(w2)\n yh2 = g(X_test.dot(w1)).dot(w2)\n L_train[ep] = LA.norm(yh-Y_train.reshape(len(Y_train),1))/n_train\n L_test[ep] = LA.norm(yh2-Y_test.reshape(len(Y_test),1))/n_test\n \n np.random.shuffle(batch)\n for i in range(m):\n st = batch[i]*mb\n ed = (batch[i]+1)*mb\n h = g(X_train[st:ed].dot(w1))\n y = h.dot(w2)\n d2 = h.T.dot(Y_train[st:ed]-y)\n d1 = X_train[st:ed].T.dot(np.multiply((Y_train[st:ed]-y).dot(w2.T),gd(X_train[st:ed].dot(w1))))\n w2 += lr*d2\n w1 += lr*d1\n return yh, yh2",
"def calc_error(self):\n if self._fit_data.y is not None and self._fit_data.y_fit is not None:\n self._fit_data.error_vector = self._fit_data.y - self._fit_data.y_fit",
"def _cer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total",
"def train(X, Y=[], hidden_layer_sizes=[], error_deriv=\"default\", n_outputs=\"default\", n_loops=100,\n eta=.1, output_transformation=\"default\", error_function=\"default\"):\n \n # the default error function will be the squared error function, whose derivative is just\n # the output minus the target values\n def square_error_deriv(output, x, y):\n return output - y\n if error_deriv == \"default\":\n error_deriv = square_error_deriv\n \n # the default error function is the squared error \n def square_error(Y, Y_hat):\n return np.linalg.norm(Y_hat - Y, ord='fro') / (Y.shape[0]*Y.shape[1])\n if error_function == \"default\":\n error_function = square_error\n\n # the default output transformatoin is none\n def none_transformation(output):\n return output\n if output_transformation == \"default\":\n output_transformation = none_transformation\n\n if len(Y) > 0:\n if n_outputs == \"default\":\n n_outputs = Y.shape[0]\n else:\n # setting one y value for the rest of the training\n y = \"None\"\n layer_sizes = [X.shape[0]] + hidden_layer_sizes + [n_outputs]\n weights = construct_network(layer_sizes)\n \n errorvec = np.empty((n_loops))\n for loop in np.arange(n_loops):\n delta_W = []\n # initiazlizing delta Ws\n for weight_mat_idx in range(len(weights)):\n delta_W.append(np.zeros((weights[weight_mat_idx].shape)))\n \n for data_idx in np.arange(n_data):\n x = X[:, data_idx]\n y = Y[:, data_idx]\n layer_activations, output = forward_propagate(x, weights)\n \n errors = [error_deriv(output, x, y)]\n # go from the output layer towards the input layer and calculate error values\n for idx in np.arange(1, len(weights)):\n # prepend the newest error layer to the errors list:\n errors = [-np.multiply(deriv_sigmoid(layer_activations[-idx-1]), \\\n weights[-idx].T @ errors[-idx])] \\\n + errors\n errors[-idx-1] = errors[-idx-1][:-1,0] # removing bias\n # go from input layer towards output layer and calculate weight updates\n for idx in range(len(weights)):\n delta_W[idx] -= errors[idx] @ layer_activations[idx].T\n delta_W[idx][:,:-1] = np.mean(layer_activations[idx])\n \n # update all weight matrices\n for idx in range(len(weights)): \n weights[idx] += eta/n_data * delta_W[idx]\n \n Y_hat = classify(X, weights, output_transformation)\n errorvec[loop] = error_function(Y, Y_hat)\n return weights, errorvec",
"def output_error(self, zs, activations, y):\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n\n delta = self.cost_derivative_for_output(activations[-1], y) * sigmoid_prime(zs[-1])\n nabla_b[-1] = delta\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n return delta, nabla_b, nabla_w",
"def transformMeasurementError(self):\n var = self.model.observationError**2\n self.errShD = self.model.observationError\n self.errSinvD = 1.0/var\n self.errSinvhD = np.sqrt(self.errSinvD)",
"def error(beta_0: float, beta_1: float, x_i: float, y_i: float) -> float:\n return predict(beta_0, beta_1, x_i) - y_i",
"def compute_loss(self):",
"def reserrorcalc(test_set, model):\n # Extracting X\n X = test_set[:,:-1]\n\n # Extracting labels\n Y = test_set[:,-1]\n residual_err = sum((model.predict(X) - Y) ** 2)\n return residual_err",
"def errors(self):\n # placeholders for the target network q values and the action\n self.target_q = tf.placeholder(tf.float32, [None], name=\"target_q\")\n self.action = tf.placeholder(tf.int64, [None], name=\"action\")\n\n # convert the action to one-hot representation in order to compute the\n # error\n action_one_hot = tf.one_hot(\n self.action,\n self.action_space,\n on_value=1,\n off_value=0,\n name=\"action_one_hot\")\n\n self.q_acted = tf.reduce_sum(\n self.q_values *\n tf.cast(\n action_one_hot,\n tf.float32),\n axis=1,\n name=\"q_acted\")\n\n self.delta = self.target_q - self.q_acted\n\n \"\"\"\n [Article] We also found it helpful to clip the error term from the update r + gamma max_d Q(s', a', theta-)\n to be between -1 and 1. Because the absolute value loss function |x| has a derivative of -1\n for all negative values of x and a derivative of 1 for all positive values of x,\n clipping the squared error to be between -1 and 1 corresponds to using an absolute value\n loss function for errors outside of the (-1,1) interval. This form of error clipping further\n improved the stability of the algorithm.\n\n It is called the Huber loss and because the name is so cool, we have to implement it\n With d = 1 (we could also try with d = 2) (d <> self.delta)\n x = 0.5 * x^2 if |x| <= d\n x = 0.5 * d^2 + d * (|x| - d) if |x| > d\n \"\"\"\n self.clipped_error = tf_array_ops.where(tf.abs(self.delta) < 1.0,\n tf.square(self.delta) * 0.5,\n tf.abs(self.delta) - 0.5)\n return(self.clipped_error)",
"def mse_cost_function(predicted_output, actual_output):\n error = predicted_output - actual_output\n mse_cost = np.sum(error ** 2) /(2 * len(actual_output),)\n return mse_cost, error"
] | [
"0.73773164",
"0.7060913",
"0.69024837",
"0.68995315",
"0.6740334",
"0.66105175",
"0.6591121",
"0.6574008",
"0.65447205",
"0.653256",
"0.65291804",
"0.64597213",
"0.6338442",
"0.6333448",
"0.6331635",
"0.6329993",
"0.6325577",
"0.63097376",
"0.63089925",
"0.62953514",
"0.6232458",
"0.62002254",
"0.6170006",
"0.6145786",
"0.6142563",
"0.61360204",
"0.6125068",
"0.6119836",
"0.6109353",
"0.6107925"
] | 0.73490536 | 1 |
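A minimal NumPy sketch of the pattern this query describes (computing the error vector at the output layer), assuming the same list-of-dicts `layers` layout used by the retrieved snippet; the keys "v" and "error" are taken from that snippet, everything else is illustrative:

    import numpy as np

    def output_error(layers, desired):
        # Column vector of desired outputs, matching the shape of the last layer's output "v".
        desired = np.asarray(desired, dtype=float).reshape(-1, 1)
        actual = layers[-1]["v"]
        if desired.shape != actual.shape:
            raise IndexError("desired output length must be %d" % actual.shape[0])
        # Store the error vector (desired - obtained) at the last layer and return it.
        layers[-1]["error"] = desired - actual
        return layers[-1]["error"]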
it gets the list of weigths | def getweigths():
ls = []
for i_lay in range(1, len(layers)):
ls.append(layers[i_lay]["weigths"])
return ls | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def weights(self) -> List[float]:",
"def get_weights(self):",
"def show_rel_wt(list_obj):\r\n total = sum_list(list_obj)\r\n wt_list = []\r\n \r\n for num in list_obj:\r\n weight = int((num / total) * 100)\r\n wt_list.append(f\"{weight}%\")\r\n \r\n return wt_list",
"def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]",
"def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]",
"def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]",
"def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] if w is not None]",
"def updateweigths():\n for i_lay in range(1, len(layers)):\n layers[i_lay][\"weigths\"] += layers[i_lay][\"Delta_w\"]",
"def get_weights(self):\n return self.weights\n #print(W)",
"def hwt(data):\n sz = len(data)\n i = 0\n res1 = []\n res2 = []\n while i < sz:\n s0 = data[i]\n s1 = data[i+1]\n res1.append((s0+s1)/2.)\n res2.append((s0-s1)/2.)\n i += 2\n return (res1,res2)",
"def get_weights(self):\n return [self.w, self.b]",
"def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh",
"def get_weight_list(self) -> List[float]:\n return self._weight_list",
"def get_weights(self):\n return [self.W]",
"def get_weights(self):\n return [self.W]",
"def get_Delta_weigths():\n ls = []\n for i_lay in range(1, len(layers)):\n ls.append(layers[i_lay][\"Delta_w\"])\n return ls",
"def get_list_powers(self):\r\n s = self.query('LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a",
"def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a",
"def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a",
"def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][1]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][1]\n return list[itemId]",
"def getFWHM(antenna, freq):\n diameter = getDiameter(antenna)\n lam = 299792458.0 / (freq * 1e9)\n fwhmo = lam / math.pi * 180.0 * 60.0\n fwhm = 1.22 * fwhmo / diameter\n return fwhm",
"def get_list_powers(self):\r\n _debug('simq03b_api.get_list_powers')\r\n \r\n s = self.query('SOUR:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a",
"def get_weights(self):\n return []",
"def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh",
"def power_list():",
"def get_weight(ew1, ew2):\n dw = flu.delta_epiweeks(ew1, ew2)\n yr = 52.2\n hl1, hl2, bw = yr, 1, 4\n a = 0.05\n #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2\n b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))\n c = 2 ** -(dw / hl1)\n d = 1 - 2 ** -(dw / hl2)\n return (a + (1 - a) * b) * c * d",
"def weight(self):",
"def weights(self):\r\n\t\treturn None",
"def animal_weights(self):\n herb_weights = []\n carn_weights = []\n for cell in self.land_cells.values():\n for herb in cell.herbivores:\n herb_weights.append(herb.weight)\n for carn in cell.carnivores:\n carn_weights.append(carn.weight)\n\n if not herb_weights:\n return [carn_weights]\n elif not carn_weights:\n return [herb_weights]\n else:\n return [herb_weights, carn_weights]",
"def wavelength_ex(hdulist):\n wave = hdulist[1].data['loglam']\n wave = 10**wave\n\n return wave"
] | [
"0.6625915",
"0.62661403",
"0.6248362",
"0.6228295",
"0.6228295",
"0.6228295",
"0.6188413",
"0.61738515",
"0.6153207",
"0.6063258",
"0.5998551",
"0.5988195",
"0.59823006",
"0.59580696",
"0.59580696",
"0.59547997",
"0.59507",
"0.5947943",
"0.5947943",
"0.5919658",
"0.58991927",
"0.58605516",
"0.5852971",
"0.5817886",
"0.5795634",
"0.5790727",
"0.57895565",
"0.578825",
"0.5769492",
"0.57573575"
] | 0.78056127 | 0 |
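For comparison, a compact helper that collects every layer's weight matrix could look like this, assuming the same `layers` list-of-dicts structure (the key name "weigths" follows the retrieved snippet's spelling):

    def collect_weights(layers, key="weigths"):
        # Return one weight matrix per layer, skipping the input layer at index 0.
        return [layers[i][key] for i in range(1, len(layers))]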
it gets the list of "Delta_w" | def get_Delta_weigths():
ls = []
for i_lay in range(1, len(layers)):
ls.append(layers[i_lay]["Delta_w"])
return ls | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getdelta(self):\n\t\tmyhmag.initializehelmholtz()\n\t\tabar = 13.714285714285715\n\t\tzbar = abar/2.0\n\t\tself.data[\"delta\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\tadgradred,hydrograd,my_nu,my_alpha,self.data[\"delta\"][i],my_gamma1,my_cp,my_cph,my_c_s,failtrig = myhmag.gethelmgrads(self.data[\"T\"][i], self.data[\"rho\"][i], 0.,abar,zbar,True)",
"def updateweigths():\n for i_lay in range(1, len(layers)):\n layers[i_lay][\"weigths\"] += layers[i_lay][\"Delta_w\"]",
"def getweigths():\n ls = []\n for i_lay in range(1, len(layers)):\n ls.append(layers[i_lay][\"weigths\"])\n return ls",
"def _get_current_delta(model):\n delta = []\n _delta = []\n for i in MILPSolver.prob.nn.layers:\n (s, e) = vmodel.get_var_indices(i.depth, 'delta')\n d = model._vars[s:e]\n _d = np.asarray(model.cbGetNodeRel(d))\n delta.append(d)\n _delta.append(_d)\n\n return delta, _delta",
"def _w_diff_dcm(self, otherframe):\n dcm2diff = self.dcm(otherframe)\n diffed = dcm2diff.diff(dynamicsymbols._t)\n angvelmat = diffed * dcm2diff.T\n w1 = trigsimp(expand(angvelmat[7]), recursive=True)\n w2 = trigsimp(expand(angvelmat[2]), recursive=True)\n w3 = trigsimp(expand(angvelmat[3]), recursive=True)\n return -Vector([(Matrix([w1, w2, w3]), self)])",
"def calculate_delta_weights(self, out_tensors: List[Tensor], in_tensors: List[Tensor]) -> List[Tensor]:\n return None",
"def getOscDelta():\n tmp_channels = GetAllSelCh(True)\n delta_store = {}\n for ch in tmp_channels:\n if isTubeChannel(ch) and GetOscType(ch):\n name = GetChName(ch).lower()\n color_name = getChannelColor(name)\n osc_chase = GetOscChase(ch)\n\n if osc_chase and color_name not in delta_store:\n for x in range(ch + 1, 512):\n if isTubeChannel(x) and GetOscType(x):\n namex = GetChName(x).lower()\n color_name = getChannelColor(namex)\n osc_chasex = GetOscChase(x)\n chase_value = abs(osc_chase - osc_chasex)\n delta_store[color_name] = chase_value\n if \"first\" not in delta_store:\n delta_store[\"first\"] = chase_value\n break\n\n if len(delta_store) == 4:\n break\n return delta_store",
"def delta(self):\n return self.get_dim_attribute('delta')",
"def delta(self) -> None:",
"def delta(self):\r\n return self.nd1()",
"def deltas(self):\n return self._deltas",
"def get_w(self):\n raise NotImplementedError",
"def test_delta_layer_iterator(self):\n\n\t\tdelta_iter = self.watcher.make_delta_layer_iterator(base_model=self.model, model=self.model)\n\t\n\t\tfor ww_layer in delta_iter:\n\t\t\t\n\t\t\tprint(ww_layer.layer_id, ww_layer.name)\n\t\t\tself.assertEquals(1, len(ww_layer.Wmats))\n\t\t\tW = ww_layer.Wmats[0]\n\t\t\t\n\t\t\tlayer_norm = np.linalg.norm(W)\n\t\t\tlayer_sum = np.sum(W)\n\n\t\t\tself.assertAlmostEqual(0.0, layer_norm)\n\t\t\tself.assertAlmostEqual(0.0, layer_sum)\n\n\t\treturn",
"def get_transitions(self, w):\n return np.array([((i, j), self.dij(j, i), np.abs(e1 - e2), 0)\n for j, e1 in enumerate(self.ev)\n for i, e2 in enumerate(self.ev)\n if np.isclose(e1 - e2, w)], dtype=DTYPE_JUMP)",
"def deltai(self,i):\n\t\tif self.deltas[i]!=[]:\n\t\t\treturn self.deltas[i]\n\t\telse:\n\t\t\th=[]\n\t\t\tzgi = [self[i][j][0] for j in range(len(self[i]))]\n\t\t\tzgim1 = [self[i-1][j][0] for j in range(len(self[i-1]))]\n\n\t\t\tfor z in zgim1:\n\t\t\t\tif zgi.count(z)>0:\n\t\t\t\t\th+=[[z,self[i][zgi.index(z)][1]-self[i-1][zgim1.index(z)][1]]]\n\t\t\t\telse:\n\t\t\t\t\th+=[[z,-self[i-1][zgim1.index(z)][1]]]\n\t\t\tfor z in zgi:\n\t\t\t\tif zgim1.count(z)==0:\n\t\t\t\t\th+=[[z,self[i][zgi.index(z)][1]]]\n\t\t\tself.deltas[i]=h\n\t\t\treturn h",
"def compute_velocities(self):\n Ddemo_trajs = []\n\n for demo_traj in self._demo_trajs:\n d_traj = np.diff(demo_traj, axis=0)/self._dt\n #append last element to adjust the length\n d_traj = np.hstack([d_traj, d_traj[-1]])\n #add it to the list\n Ddemo_trajs.append(d_traj)",
"def extract_wind(source,la,lo,lats,lons,wd,ws):\r\n lat = source[la]\r\n lon = source[lo]\r\n wdir = []\r\n wspd = [] \r\n for coor in zip(lon,lat): \r\n in_lon = coor[0]\r\n in_lat = coor[1]\r\n # since lons are 0 thru 360, convert to -180 thru 180\r\n converted_lons = lons - ( lons.astype(np.int32) / 180) * 360\r\n # get cell of facility\r\n lat_idx = geo_idx(in_lat, lats)\r\n lon_idx = geo_idx(in_lon, converted_lons)\r\n #extract winddirection and wind speed from that cell\r\n d = wd[:,lat_idx,lon_idx][0]\r\n wdir.append(d)\r\n s = ws[:,lat_idx,lon_idx][0]\r\n wspd.append(s)\r\n \r\n return wdir,wspd",
"def get_delta_arrays(self):\n delta_as = []\n delta_ds = []\n for sites, deltas in zip(self.nu, self.delta):\n delta_as.extend([deltas[i] for i, nu_s in enumerate(sites) if nu_s < 0])\n delta_ds.extend([deltas[i] for i, nu_s in enumerate(sites) if nu_s > 0])\n return delta_as, delta_ds",
"def w_to_d(self, wx, wy):\r\n dx = (wx - self.wxmin) * self.xscale + self.dxmin\r\n dy = (wy - self.wymin) * self.yscale + self.dymin\r\n return dx, dy",
"def _get_deltas(event):\n delta_x = round(event.deltaX())\n delta_y = round(event.deltaY())\n delta_z = round(event.deltaZ())\n return delta_x, delta_y, delta_z",
"def get_weights(self):\n return [self.W]",
"def get_weights(self):\n return [self.W]",
"def _update_weights(self, alpha, delta):\n res = []\n for j, weight in enumerate(self._weights):\n self._weights[j] = weight + (alpha * delta * self._g_prime(self._in_j))\n #print(\"Prev weight: {} New weight: {}\".format(weight, self._weights[j]))\n res.append(self._weights[j] - weight)\n return res[0]",
"def worddist(self):\n #return (self.n_z_t + self.beta) / (self.n_z[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)\n return (self.n_z_t + self.n_w_d + self.beta) / (self.n_z[:, numpy.newaxis] + self.n_w[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)",
"def extract_delta_Q_skewness(batch,index,start_cycle,end_cycle):\n from scipy.stats import skew\n X= []\n for ind in index:\n cell_no = list(batch.keys())[ind]\n Qd_100 = batch[cell_no]['cycles'][str(end_cycle-1)]['Qdlin']\n Qd_10 = batch[cell_no]['cycles'][str(start_cycle-1)]['Qdlin']\n delta = Qd_100-Qd_10\n # delta_rv_mean = delta - np.average(delta)\n # temp = np.average(np.power(delta_rv_mean,3)) / np.power(np.sum(np.power(delta_rv_mean,2)),1.5)\n # Note: Supplementary formular is wrong\n temp = skew(delta)\n skewness = log(abs(temp),10)\n X.append(skewness)\n X = np.reshape(X,(-1,1))\n return X\n pass",
"def getDelta(self,u,w,v=None):\r\n if v==None :\r\n return self._deltaDot[u,w]\r\n elif self._sigma[u,v]==0 or self._sigma[u,w]==0 or self._sigma[w,v]==0:\r\n return 0.0\r\n elif (self._d[u,v]==self._d[u,w]+self._d[w,v]):\r\n return 1.0 * self._sigma[u,w]*self._sigma[w,v]/self._sigma[u,v]\r\n else:\r\n return 0.0",
"def calc_walked_distance(self, window_size=0):\n walked_distance = []\n walked_distance_window = []\n for i in range(len(self)):\n vel = self.get_absolute_velocity(i)\n if vel is None: vel = 0\n walked_distance.append(\n vel + (walked_distance[i-1] if i>0 else 0)\n )\n walked_distance_window.append(\n walked_distance[i] - (walked_distance[i-window_size] if i>window_size else 0)\n )\n return walked_distance, walked_distance_window",
"def get_lw_to_sw_array(self):\n if self.lw_to_sw_array is None:\n lw_to_sw_array = self.basis.get_dO_I_ddelta_alpha(self.sw_survey.geo,self.sw_survey.get_dO_I_ddelta_bar_array())\n else:\n lw_to_sw_array = self.lw_to_sw_array\n return lw_to_sw_array",
"def get_weights(self):\n return [self.w, self.b]",
"def get_resul(self):\n return {'W': self.W}"
] | [
"0.6032407",
"0.59571075",
"0.57902676",
"0.5779536",
"0.5758633",
"0.57385635",
"0.56800616",
"0.5667207",
"0.5647853",
"0.56092405",
"0.5599284",
"0.5591161",
"0.55750877",
"0.557497",
"0.557401",
"0.5565431",
"0.55429536",
"0.552723",
"0.54701483",
"0.54661304",
"0.54582804",
"0.54582804",
"0.5440319",
"0.5429842",
"0.540978",
"0.53968453",
"0.53782725",
"0.53717625",
"0.53514",
"0.5349907"
] | 0.79851145 | 0 |
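The natural companion to collecting the "Delta_w" updates is applying them; a sketch mirroring the `updateweigths` snippet that appears among the negatives above, assuming the same layer layout:

    def apply_weight_updates(layers):
        # Add each layer's accumulated Delta_w to its weights in place (skip the input layer).
        for i in range(1, len(layers)):
            layers[i]["weigths"] += layers[i]["Delta_w"]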
Sets/clears a software breakpoint address > the address of the software breakpoint instruction > the instruction to be programmed (either the software breakpoint opcode or the original instruction the software breakpoint was replacing). flags > One or more of the SWBPFlags listed below. Returns the original/old opcode at address | def set_sw_bp(address, instruction, flags):
log.info("Debug:: set/remove bp at address 0x%0x, instructions 0x%0x, flags = 0x%0x" % (
address, instruction, flags))
# Accept addressing both from FLASH_START and from 0x0
addr = address & (FLASH_START-1)
single_page_access = False
buffer_size = PAGE_SIZE * 16
# Canopus: single page read-modify-write is possible within the first 16kb of flash.
# SAMRH71: single page read-modify-write is possible in whole flash.
if addr < 16384 or "RH71" in device:
buffer_size = PAGE_SIZE
single_page_access = True
buffer_mask = long(buffer_size-1)
data_buffer = bytearray(buffer_size)
# Get the start address to the flash page(es) we need to erase
start_addr = addr & ~(buffer_mask)
absolute_start_addr = address & ~(buffer_mask)
# Get BP address within the buffer
bp_addr = addr & buffer_mask
prog_read("pgm", absolute_start_addr, buffer_size, data_buffer)
org_inst = 0
n = 0
# Replace instruction in data_buffer
while(n < 2):
org_inst += data_buffer[bp_addr+n] << (n*8)
data_buffer[bp_addr+n] = ((instruction >> (n*8)) & 0xff)
n = n+1
if single_page_access:
if "RH71" in device:
# Remove flash offset, if any, and mask away page internal address bits.
# FARG bitfield in EFC_FCR
page_number = addr & 0x3fff00 # SAMRH71 has page_size 256
# Erase and write page (two separate commands on SAMRH71)
dev.Write32(efc_fcr, efc_cmd_ep | page_number)
waitForFlashReady()
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
else:
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Then shift right once to position page_number in the FARG bitfield in EFC_FCR
page_number = (addr & 0x3ffe00)/2 # Canopus has page_size 512
# Erase and write page (one single command on Canopus)
dev.Write32(efc_fcr, efc_cmd_ewp | page_number)
waitForFlashReady()
else:
# Erase 16 pages (16pages == buffer_size). The "0x200" sets the number of pages to erase.
dev.Write32(efc_fcr, efc_cmd_epa | (start_addr >> 1) | 0x200)
waitForFlashReady()
prog_write("Pgm", absolute_start_addr, buffer_size, data_buffer)
return org_inst | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_breakpoint(self, addr: int) -> Optional[Breakpoint]:\n if not self.enabled:\n self.enable()\n\n if not self.can_support_address(addr):\n LOG.error('Breakpoint out of range 0x%X', addr)\n return None\n\n if self.available_breakpoints == 0:\n LOG.error('No more hardware breakpoints are available, dropped breakpoint at 0x%08x', addr)\n return None\n\n for bp in self.hw_breakpoints:\n if not bp.enabled:\n bp.enabled = True\n comp = 0\n if self.fpb_rev == 1:\n bp_match = (1 << 30)\n if addr & 0x2:\n bp_match = (2 << 30)\n comp = addr & 0x1ffffffc | bp_match | 1\n elif self.fpb_rev == 2:\n comp = (addr & 0xfffffffe) | 1\n self.ap.write32(bp.comp_register_addr, comp)\n LOG.debug(\"BP: wrote 0x%08x to comp @ 0x%08x\", comp, bp.comp_register_addr)\n bp.addr = addr\n self.num_hw_breakpoint_used += 1\n return bp\n return None",
"def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False):\n if flash and not ram:\n flags = enums.JLinkBreakpoint.SW_FLASH\n elif not flash and ram:\n flags = enums.JLinkBreakpoint.SW_RAM\n else:\n flags = enums.JLinkBreakpoint.SW\n\n if thumb:\n flags = flags | enums.JLinkBreakpoint.THUMB\n elif arm:\n flags = flags | enums.JLinkBreakpoint.ARM\n\n handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n if handle <= 0:\n raise errors.JLinkException('Software breakpoint could not be set.')\n\n return handle",
"def remove_breakpoint(self, bp: Breakpoint) -> None:\n for hwbp in self.hw_breakpoints:\n if hwbp.enabled and hwbp.addr == bp.addr:\n hwbp.enabled = False\n self.ap.write_memory(hwbp.comp_register_addr, 0)\n self.num_hw_breakpoint_used -= 1\n return",
"def breakpoint_find(self, addr):\n return self._dll.JLINKARM_FindBP(addr)",
"def gdb_breakpoint():\n _gdb_python_call_gen('gdb_breakpoint')()",
"def set_breakpoint(context, *args):\n\n vars = [arg for arg in locals()['args']] # noqa F841\n\n if settings.DEBUG:\n breakpoint()",
"def breakpoint_set(self, addr, thumb=False, arm=False):\n flags = enums.JLinkBreakpoint.ANY\n\n if thumb:\n flags = flags | enums.JLinkBreakpoint.THUMB\n elif arm:\n flags = flags | enums.JLinkBreakpoint.ARM\n\n handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n if handle <= 0:\n raise errors.JLinkException('Breakpoint could not be set.')\n\n return handle",
"def break_cmd(cmd, cnt, args):\n if cnt == 1:\n log(\"Break command needs an address\")\n return\n log(\"break\"+ \" {:08x}\".format(int(args[1], 16)))\n cpu.set_break(int(args[1],16))",
"def hardware_breakpoint_set(self, addr, thumb=False, arm=False):\n flags = enums.JLinkBreakpoint.HW\n\n if thumb:\n flags = flags | enums.JLinkBreakpoint.THUMB\n elif arm:\n flags = flags | enums.JLinkBreakpoint.ARM\n\n handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n if handle <= 0:\n raise errors.JLinkException('Hardware breakpoint could not be set.')\n\n return handle",
"def _SetBreakpoint(self, mobj, line=-1):\n handle = -1\n if line < 0:\n line = self.GetCurrentLine()\n if not mobj.IsSet(self, line):\n # Clear other set breakpoint marker states on same line\n ed_marker.Breakpoint().Set(self, line, delete=True)\n ed_marker.BreakpointDisabled().Set(self, line, delete=True)\n mobj.Set(self, line, delete=False)\n handle = mobj.Handle\n return handle",
"def clear_break_cmd(cmd, cnt, args):\n if cnt == 1:\n log(\"Clear break command needs an address\")\n return \n log(\"clear break\"+\" {:08x}\".format(int(args[1], 16)))\n cpu.clear_break(int(args[1],16))",
"def frame_off_savregs(*args):\n return _ida_frame.frame_off_savregs(*args)",
"def handle_breakpoints(self, bit):\n while bit != self.options.current_breakpoint:\n self.check_required()\n self.options.shift_breakpoint()\n self.arguments = self.options.get_arguments()\n self.current_argument = self.arguments.pop(0)",
"def delete_breakpoint():\n raise NotImplementedError()",
"def break_code(self, breakpoint):\n\n self.cont = False\n self.pause_reason = \"breakpoint\"\n self.scope_assign = {}\n self.scope_var_id = 0\n handler.send_breakpoint_event(breakpoint)",
"def clear_breakpoint(self, id):\r\n bps = self.bpoints.filter( ('id',),(id,) )\r\n if len(bps)==0:\r\n raise Exception('No breakpoint with id '+str(id))\r\n bpdict = bps[0]\r\n\r\n #clear the breakpoint in each engine\r\n console = self.app.toolmgr.get_tool('Console')\r\n engines = console.get_all_engines(active=True)\r\n for eng in engines:\r\n eng.debugger.clear_breakpoint(id)\r\n \r\n #remove from internal breakpoint list\r\n self.bpoints.remove(bpdict)\r\n\r\n #clear any markers from the editor pages\r\n page = self.frame.notebook.GetPageFromPath( bpdict['filename'] )\r\n if page is not None:\r\n page.DeleteBreakpointMarker( id )\r\n\r\n #publish a breakpoint cleared message\r\n self.msg_node.publish_msg( editor_messages.EDITOR_BREAKPOINT_CLEARED,\r\n (id,) )",
"def handle_next_breakpoint(self, bit):\n self.check_required()\n self.options.shift_breakpoint()\n self.arguments = self.options.get_arguments()\n self.current_argument = self.arguments.pop(0)",
"def breakpoint(g=None, l=0):\n global simulator\n\n if simulator is None:\n print \"Program is not started.\"\n\n try:\n if g is None:\n g = simulator.get_pc()[0]\n l = simulator.get_pc()[1]\n bp = simulator.add_breakpoint(g, l)\n print \"breakpoint set at (0x{:x},{}) with id={}.\".format(g, l, bp[0])\n return bp[0]\n except:\n simulation_error()\n return None",
"def avoid_instr(bv: BinaryView, addr: int):\n\n # Highlight the instruction in red\n highlight_instr(bv, addr, HighlightStandardColor.RedHighlightColor)\n\n # Add the instruction to the list associated with the current view\n bv.session_data.mui_avoid.add(addr)",
"def rm_avoid_instr(bv: BinaryView, addr: int):\n\n # Remove instruction highlight\n clear_highlight(bv, addr)\n\n # Remove the instruction to the list associated with the current view\n bv.session_data.mui_avoid.remove(addr)",
"def breakpoint(self):\n return None",
"def set_breakpoint(self, func):\n\n if func == \"\":\n return\n\n if self.bp_func is not None:\n self.debugger.set_breakpoint(self.bp_func, False)\n\n self.bp_func = func\n self.debugger.set_breakpoint(self.bp_func, True)\n\n # Console output\n self.write_console_output_sig.emit(\"[%s] Breakpoint set on \"\n \"function %s.\" % (DEBUG, func))",
"def breakpoint_clear(self, handle):\n return not self._dll.JLINKARM_ClrBPEx(handle)",
"def pdb_option(args, run):\n run.pdb = True",
"def SWP():\n\tglobal pointer, memory, registers\n\ttmp = registers[memory[pointer + 0x02]]\n\tregisters[memory[pointer + 0x02]] = registers[memory[pointer + 0x01]]\n\tregisters[memory[pointer + 0x01]] = tmp\n\tpointer += 0x03",
"def delete_breakpoints(l=None):\n global simulator\n if simulator is None:\n return\n if isinstance(l, int):\n l = [l]\n if l is None:\n l = []\n for(id, a) in simulator.get_breakpoints():\n l = l + [id]\n for bp in l:\n if not simulator.del_breakpoint(bp):\n print \"unknown breakpoint\", bp",
"def pdb(item, item2=None):\n import pdb # noqa\n pdb.set_trace() # noqa",
"def breakpoint_clear_all(self):\n return not self._dll.JLINKARM_ClrBPEx(0xFFFFFFFF)",
"def cond(id, e=None):\n global simulator\n\n if simulator is None:\n print \"Program is not started; set breakpoint to entrypoint.\"\n return None\n bp = None\n if e is None or isinstance(e, str):\n bp = simulator.set_cond(id, e)\n else:\n raise TypeError(e)\n if bp is None:\n print \"no such breakpoint \", id\n return\n elif e is None:\n print \"making breakpoint\", id, \" unconditional\"\n else:\n print \"making breakpoint\", id, \" conditional\"\n print bp[0], \" : \", bp[1]",
"def add_breakpoint():\n raise NotImplementedError()"
] | [
"0.5881102",
"0.5785291",
"0.5218082",
"0.5208455",
"0.5201885",
"0.51153344",
"0.5073578",
"0.50049704",
"0.49999252",
"0.4934474",
"0.48571247",
"0.4838905",
"0.48162797",
"0.47711107",
"0.47597492",
"0.4733747",
"0.46354747",
"0.46129856",
"0.46129563",
"0.46084633",
"0.45189667",
"0.45098227",
"0.4471261",
"0.44446117",
"0.44293693",
"0.4393159",
"0.4339281",
"0.43294948",
"0.4319227",
"0.43135446"
] | 0.65059483 | 0 |
Change the bearing (angle) of the turtle. | def setbearing(self, bearing):
    diff = self.bearing - bearing
    self.b_change = diff
    self.bearing = bearing
    self._add_point()
    self.b_change = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bearing(self, value: int):\n self._bearing = value",
"def set_angle(self, ang):\n if ang < 0:\n ang = 0\n elif ang > 180:\n ang = 180\n dutyCycle = 5 + (ang*5/180)\n self.servoPort.ChangeDutyCycle(dutyCycle)",
"def setAngle(self,angle = 2.5):\n pass",
"def change_angle(self, new_angle):\r\n self.angle = new_angle",
"def set_bearing(self, bearing):\n self._set_sub_text('bearing', text=str(bearing))\n return self",
"def change_angle(self, up_or_down):\n self.angle += up_or_down * math.pi / 180",
"def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)",
"def set_angle(self, angle=0.0):\n self.angle = angle",
"def setAngle(self,a):\n self.angle = a\n if self.drawn == True:\n self.draw()",
"def set_angle(self, value):\n if not -90 <= value <= 90:\n raise ValueError('Servo angle must be between -90 and 90 degrees')\n self.duty_cycle = ...",
"def setAngle(self, angle):\n self._angle = (angle + math.pi / 2) % math.pi - math.pi / 2\n # self._angle = angle % (2*math.pi)",
"def setAngle(self, angle):\n self.vector.angle = angle",
"def settiltangle(self, angle):\n tilt = -angle * self._degreesPerAU * self._angleOrient\n tilt = (tilt * math.pi / 180.0) % (2*math.pi)\n self.pen(resizemode=\"user\", tilt=tilt)",
"def set_angle(self, angle):\n return self.bot_client.send_command(_Command.SetAngle, angle)",
"def change_angle(self, new_angle):\n if type(new_angle) not in [int, float]:\n raise ValueError('angle must be int or float.')\n self.__angle = new_angle",
"def rotate_rad(self, angle):\n self.beam_angle += angle\n self.xy = rotate(self.xy, angle)\n self.angle += angle",
"def wheel_angle(self, angle):\n self.angle = angle",
"def rotate_turtle(angle, mv_direction):\n \n if mv_direction == 1:\n turtle.right(angle)\n else:\n turtle.left(angle)",
"def set_angel(self):\n self.angle = math.degrees(math.atan2(self.next.y - self.y, self.next.x - self.x)\n - math.atan2(self.prev.y - self.y, self.prev.x - self.x))\n\n if self.angle < 0:\n self.angle += 360",
"def move_turtle(self):\n self.forward(self.move_speed)",
"def angle(self, angle):\n self._angle = angle\n self.x_rate = self._rate * cos(self._angle)\n self.y_rate = self._rate * sin(self._angle)",
"def set_angle(self, angle):\n new_angle = angle\n\n # Declaring conversion constants\n angle_min = 0\n angle_max = 180\n angle_range = angle_max - angle_min\n dc_range = self._dc_max - self._dc_min\n\n # Enforcing angle range\n if new_angle > angle_max:\n new_angle = angle_max\n elif new_angle < angle_min:\n new_angle = angle_min\n\n # Scaling input angle to an appropriate duty cycle\n duty_cycle = ((dc_range / angle_range) * (new_angle - angle_min)) + self._dc_min\n\n self._servo_pwm.changeDutyCycle(duty_cycle)",
"def change_angle_by(self, delta_angle, direction):\n target_angle = round(self.__calc_target_angle(degree_to_radian(delta_angle), direction), 5)\n\n self.move_to_angle(target_angle)\n self.current_angle = target_angle",
"def adjAngle(self, amt): \r\n\r\n self.angle = self.angle + radians(amt)\r\n self.redraw()",
"def set_angle(self, angle_key: Union[EKT, str], v: float): # -> None:\n ...",
"def angle(self, angle):\n\n self._angle = angle",
"def adjAngle(self, amt):\n \n self.angle = self.angle+radians(amt)\n self.redraw()",
"def angle(self, value):\n if value is None:\n value = 0.0\n\n self.__angle = value",
"def turn_by(self, dangle, dt):\n # Don't turn too fast\n self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.turning_rate)\n\n # Keep angle in range [-pi, pi)\n self.angle = normalize_angle(self.angle)",
"def set_wrist(self, angle):\n return self.set_servo_angle(protocol.SERVO_HAND, angle)"
] | [
"0.6930348",
"0.6852178",
"0.6837454",
"0.67657924",
"0.6641104",
"0.66320354",
"0.66257477",
"0.6583451",
"0.65234107",
"0.64924616",
"0.64834297",
"0.64331305",
"0.63996845",
"0.6354722",
"0.6261629",
"0.6239155",
"0.62279516",
"0.62081057",
"0.62069297",
"0.61839217",
"0.614857",
"0.6126802",
"0.61265624",
"0.60912144",
"0.6088944",
"0.6047413",
"0.60423183",
"0.596344",
"0.59620225",
"0.5949161"
] | 0.6944382 | 0 |
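
A minimal usage sketch for the setbearing record above. The Turtle class and its _add_point behaviour are assumptions introduced only to exercise the method and show its effect; they are not part of the dataset.

class Turtle:
    def __init__(self):
        self.bearing = 0   # current heading in degrees
        self.b_change = 0  # pending bearing delta recorded with each point
        self.points = []   # history of (bearing, change) samples

    def _add_point(self):
        # record the current bearing together with the pending change
        self.points.append((self.bearing, self.b_change))

    def setbearing(self, bearing):
        # same shape as the document code above: store the delta, update
        # the bearing, record a point, then clear the pending change
        diff = self.bearing - bearing
        self.b_change = diff
        self.bearing = bearing
        self._add_point()
        self.b_change = 0


t = Turtle()
t.setbearing(90)
t.setbearing(45)
print(t.points)  # [(90, -90), (45, 45)]
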
This method is called by an admin user to approve the lyrics of a song. | def approve_lyrics():
    pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_lyrics_approved():",
"def approve (self, response) :\n if 'event' in response and 'moderator' in response :\n eventId = response ['event']\n userId = response ['moderator']\n else :\n raise ModerationError (response)\n\n mod_status = 'OK'\n if 'status' in response :\n mod_status = response ['status']\n \n event = Event.object.get (id = eventId)\n approval = Approval (approved = event, moderatorId = userId, status = mod_status)\n approval.save ()\n self.editValues (event.answer, response)",
"def vote(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n _logger.info(\"%s is trying to vote on %s\", request.user, context['song'])\n vote_dict = get_vote_dict(request.user)\n can_vote = context['song'].id not in vote_dict[request.user.id] and context['song'].ready\n if can_vote:\n vote = Vote()\n vote.user = request.user\n vote.song = context['song']\n vote.save()\n vote_dict[request.user.id].append(context['song'].id)\n cache.set('vote_dict', vote_dict)\n logging.info('%s voted on %s.', request.user, context['song'])\n return HttpResponse('Vote registered on %s.' % context['song'])\n else:\n logging.info('%s tried to vote more than once on %s.', request.user.username, context['song'])\n return HttpResponse(\"Du har allerede stemt på denne sangen i dag!\", content_type='text/plain', status=403)",
"def change_learned_status(self, instance):\n self.song = self.songs.get_song_by_title(instance.text)\n # Marks song as learned and shows according status text\n if self.song.required:\n self.song.mark_learned()\n status_text = \"You have learned {}\".format(self.song.title)\n # Marks song as required and shows according status text\n else:\n self.song.mark_required()\n status_text = \"You need to learn {}\".format(self.song.title)\n # Shows status text, sorts songs by current s\n self.root.ids.status_text.text = status_text\n self.sort_songs(self.root.ids.sort_options.text)",
"async def musicbot(self, ctx, the_state):\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n if is_mod:\r\n if the_state == \"1\":\r\n self.music_off = False\r\n await ctx.send(\"Music Bot features now on\")\r\n else:\r\n self.music_off = True\r\n await ctx.send(\"Music Bot features now off\")\r\n else:\r\n await ctx.send(\"**Error:** You are not allowed to use this command!\")",
"async def queue(self, ctx, *args):\r\n if ctx.message.channel.id != 701868237771505695:\r\n return await ctx.send(\"**Error:** Music Bot commands are only available in <#701868237771505695>\")\r\n if self.music_off:\r\n return await ctx.send(\"**Error:** Music Bot features are currently off\")\r\n if ctx.voice_client is None or ctx.voice_client is not self.voice:\r\n return await ctx.send(\"**Error:** You must be connected to the voice channel.\")\r\n\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n\r\n queue_string = \"```\"\r\n try:\r\n # if(args[0]==\"clear\"):\r\n #\tself.songs=[]\r\n if args[0] == \"remove\":\r\n pos = len(self.songs) - 1\r\n while pos > 0:\r\n if args[1].lower() in self.songs[pos][0].title.lower():\r\n if ctx.author.id not in self.songs[pos][4]:\r\n self.songs[pos][4].append(ctx.author.id)\r\n shortened_title = self.title_shorten(self.songs[pos][0].title)\r\n print(self.songs[pos][1])\r\n print(ctx.message.author)\r\n if (len(ctx.message.author.voice.channel.members) - 1 > len(self.songs[pos][4]) * 2\r\n and not is_mod\r\n and not ctx.message.author.id == self.songs[pos][1]):\r\n await ctx.send(\"{0} remove votes registered for `{1}`, need {2} to remove song.\".format(\r\n len(self.songs[pos][4]),\r\n shortened_title,\r\n int((len(ctx.message.author.voice.channel.members) - 1) / 2)))\r\n else:\r\n await ctx.send(\"Removing `{0}`\".format(shortened_title))\r\n self.del_song(pos)\r\n pos = pos - 1\r\n except:\r\n pass\r\n pos = 0\r\n for song in self.songs:\r\n if pos == 0:\r\n pos_indicator = \"> \"\r\n else:\r\n pos_indicator = \"{0}.\".format(str(pos))\r\n shortened_title = self.title_shorten(song[0].title)\r\n queue_string = \"{0}{1}{2}\\n\".format(queue_string, pos_indicator, shortened_title)\r\n pos = pos + 1\r\n if queue_string == \"```\":\r\n return await ctx.send(\"Queue is empty\")\r\n await ctx.send(\"{0}```\".format(queue_string))",
"async def auto(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'Still working on integration with the election results. Maybe have a command to link to an elections '\n 'database?')\n else:\n await ctx.message.channel.send('Hey! You do not have permission to do that.')",
"def update_text(self):\n likes = \"\"\n if self.comedy.get():\n likes += \"You like comedy.\"\n if self.drama.get():\n likes += \"You like drama.\"\n if self.romance.get():\n likes += \"You like romantic.\"\n self.result.delete(0.0, END) # delete from position 0 until the end\n self.result.insert(0.0, likes) # insert to textbox the text in likes in position 0",
"async def approve(self, ctx, user: discord.Member):\n server = ctx.message.server\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n self.norole[server.id][user.id] = {'Role': False}\n dataIO.save_json(self.warninglist, self.norole)\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n await self.bot.remove_roles(user,nobnl)\n msg = await self.bot.say (\"Role removed!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n else:\n msg = await self.bot.say(\"There is no role to remove!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg)\n await self.bot.delete_message(ctx.message)",
"def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'])\n # com_util.tap_on(self.driver, element['SaveBtn'])\n com_util.tap_on(self.driver, element['CancelBtn'])\n com_util.tap_on(self.driver, element['DownArrow'])",
"def approve(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n title = self._approve_title(obj)\n\n AdminAddApprovalForm = self._approve_approval_form(request)\n\n form = AdminAddApprovalForm(initial={'prescription': obj})\n if request.method == 'POST':\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n if obj.approval_status == obj.APPROVAL_DRAFT and obj.can_approve:\n # create an approval\n obj.approval_status = obj.APPROVAL_SUBMITTED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully submitted for approval.\")\n return HttpResponseRedirect(url)\n elif obj.approval_status == obj.APPROVAL_SUBMITTED:\n if request.POST.get('_cancel'):\n obj.clear_approvals()\n msg = 'Delete: Clearing Approvals/Endorsements', 'Burn ID: {}, Deleted by: {}'. format(obj.burn_id, request.user.get_full_name())\n logger.warning(msg)\n support_email('Delete: Clearing Approvals/Endorsements', msg)\n\n self.message_user(\n request, \"Approval rejected. ePFP is now draft.\")\n return HttpResponseRedirect(url)\n\n form = AdminAddApprovalForm(request.POST,\n initial={'prescription': obj})\n if form.is_valid():\n approval = form.save(commit=False)\n approval.prescription = obj\n approval.creator = request.user\n approval.modifier = request.user\n approval.save()\n obj.approval_status = obj.APPROVAL_APPROVED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully approved.\")\n return HttpResponseRedirect(url)\n elif obj.is_approved:\n if obj.is_closed:\n self.message_user(\n request, \"You can't extend an approval after the \"\n \"prescribed fire plan has been closed.\")\n return HttpResponseRedirect(url)\n if request.POST.get('_cancel'):\n self.message_user(\n request, \"Didn't extend approval.\")\n return HttpResponseRedirect(url)\n else:\n approval = obj.current_approval\n if approval and approval.extension_count < 3:\n approval.extension_count = approval.extension_count + 1\n approval.valid_to = approval.next_valid_to\n approval.save()\n self.message_user(\n request, \"Successfully extended approval.\")\n else:\n self.message_user(request, \"You can't extend an \"\n \"approval more than 3 times.\")\n return HttpResponseRedirect(url)\n\n admin_form, media = self._approve_form(request, obj, form)\n\n context = {\n 'title': title,\n 'current': obj,\n 'form': admin_form,\n 'media': media,\n 'errors': None,\n }\n return TemplateResponse(request, \"admin/prescription/prescription/\"\n \"approval.html\", context,\n current_app=self.admin_site.name)",
"async def vote(self, ctx):\n embed = discord.Embed(title = \"Here are some bot lists that you can vote for me on, voters may soon™ recieve perks\", color = discord.Color.blurple())\n embed.add_field(name = \"Bots For Discord\", value = \"[Click Here](https://botsfordiscord.com/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Discord Boats\", value = \"[Click Here](https://discord.boats/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Divine Discord Bots\", value = \"[Click Here](https://divinediscordbots.com/bot/592811241756688405/vote)\") \n embed.add_field(name = \"Botlist.space\", value = \"[Click Here](https://botlist.space/bot/592811241756688405/upvote)\") \n embed.set_thumbnail(url = self.bot.user.avatar_url)\n await ctx.send(embed = embed)",
"def jao_approve(self):\n print \"JAO approved this form. Current state:\", self.state",
"def approve(self):\n self.approved = True\n self.quest_node['approved'] = True\n graph.push(self.quest_node)\n self.payout()",
"def hod_approve(self):\n print \"HOD approved this form. Current state:\", self.state",
"def put_on_wish_list():\n book = request.form\n flash(\"The Wish list feature is under construction! Please check back soon!\")\n return render_template('book_details.html', list_of_books=book)",
"async def optin(self, ctx):\n optout.delete_one({\"_id\": ctx.author.id})\n await ctx.send(f\"You have **opted into** A Sound Mood. To leave the program, use ?optout.\")",
"def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)",
"async def _lyrics(self, ctx: commands.Context):\n if not ctx.voice_state.is_playing:\n raise commands.CommandError('Nothing being played at the moment.')\n\n # Get song name listed on youtube\n song_title = ctx.voice_state.current.source.track\n if not song_title:\n return await ctx.send(\"Couldn't find lyrics for this track!\")\n\n song_title = re.sub(\"[(\\[].*?[)\\]]\", \"\", song_title).strip() # Remove parenthesis from song title\n # Get artist name listed on youtube\n artist_name = ctx.voice_state.current.source.artist\n # Instance of GeniusSong class using the Genius API\n genius_song = GeniusSong(song_title, artist_name)\n # Try getting the lyrics using the lyricsgenius library\n lyrics = genius_song.fastlyrics()\n\n # In case of no lyrics found. Use the other (slower) method\n if not lyrics:\n res = genius_song.get_response() # Generate a response using the Genius API to get the songs\n if res:\n # Find the most similar artist comparing the artist on YouTube and Genius\n artist_name = genius_song.return_similar_artist(res)\n # Artist didn't match\n if not artist_name:\n await ctx.send(\"Couldn't find similar artists. The lyrics might not be the expected.\")\n\n # Get the lyrics using the lyricsgenius library with the new artist\n lyrics = genius_song.fastlyrics(artist_name)\n\n else:\n return await ctx.send(\n \"**Error!**\\nThere is a problem with Genius.\\nTry again in a few minutes. \"\n \"\\nYou can also try the command `fastlyrics`.\")\n\n if lyrics:\n # Split lyrics into fields\n fields = genius_song.split_lyrics(lyrics)\n # Create an embed message\n embed = embed_msg(\n title=song_title.capitalize() + \"\\n{}\".format(artist_name),\n description=\"\",\n footer=\"Lyrics provided by Genius.\",\n field_values=fields,\n inline=False\n )\n return await ctx.send(embed=embed)\n\n return await ctx.send(\"Lyrics couldn't be found.\")",
"async def admin(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'List of useable commands for the parent command: **admin**\\n\\n **eboard admin auto** - updates the '\n 'new seats given current election data.\\n\\n**eboard admin set <position> <User#0000>** - assigns a '\n 'position to target user.\\n\\n**eboard admin remove <position> <User#0000>** - remove a target user '\n 'from their position.\\n\\n**eboard admin list** - lists the positions in the SQLite table.')",
"def set_lyrics(self, lyrics: str) -> None:\n self.lyrics = lyrics",
"def approve_me(message):\n load_users(message._client.users)\n sender_id = message._get_user_id()\n target = user_list[sender_id].details['name']\n if (user_list[sender_id].is_unknown):\n message.reply(Strings['APPROVER_REQUEST'])\n names = list_to_names(user_list.admin_list)\n approval_message = Strings[\n 'APPROVER_REQUEST_DETAIL'].format(\">, <@\".join(names), target)\n message._client.send_message(config.AUTH_CHANNEL, approval_message)\n else:\n message.reply(\n \"Your status is already: \" + user_list[sender_id].level.name)",
"async def stan(self, ctx, *args):\n if args:\n if args[0] == 'update':\n amount = len(self.artists)\n self.artists = []\n urls_to_scrape = ['https://kprofiles.com/k-pop-girl-groups/',\n 'https://kprofiles.com/k-pop-boy-groups/',\n 'https://kprofiles.com/co-ed-groups-profiles/',\n 'https://kprofiles.com/kpop-duets-profiles/',\n 'https://kprofiles.com/kpop-solo-singers/']\n for url in urls_to_scrape:\n self.artists += scrape_kprofiles(url)\n\n database.set_attr(\"data\", \"artists\", self.artists)\n\n await ctx.send(f\"Artist list succesfully updated, {len(self.artists) - amount} new entries, \"\n f\"{len(self.artists)} total entries\")\n self.logger.info(misolog.format_log(ctx, f\"artist list updated; {len(self.artists) - amount} new, \"\n f\"{len(self.artists)} total\"))\n return\n\n elif args[0] == 'clear':\n self.artists = []\n database.set_attr(\"data\", \"artists\", self.artists)\n await ctx.send(\"Artist list cleared\")\n self.logger.info(misolog.format_log(ctx, f\"artist list cleared\"))\n return\n\n if self.artists:\n artist = str(rd.choice(self.artists))\n await ctx.send('stan ' + artist)\n self.logger.info(misolog.format_log(ctx, f\"artist={artist}\"))\n else:\n await ctx.send(\"Error: artist list is empty, please use >stan update\")\n self.logger.warning(misolog.format_log(ctx, f\"artist list empty\"))",
"async def spotify(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('Invalid command passed. Use the `?help spotify` command to learn more.')",
"async def ironman(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n out = (':tools: __**IRONMAN**__ :tools:\\n' \\\n 'If you want to become an ironman, please react to this post with a :thumbsup:. '\n 'This will **RESET** your account and give you the ironman role. '\n 'You will be unable to trade with other players or gamble. '\n 'In return, you will be able to proudly display your status as an ironman, '\n 'by the way.')\n msg = await ctx.send(out)\n\n if await self.confirm(ctx, msg, out):\n ctx.user_object.reset_account()\n ctx.user_object.is_ironman = True\n ctx.user_object.save()\n # ironman_role = discord.utils.get(ctx.guild.roles, name=\"Ironman\")\n # await ctx.author.add_roles(ironman_role, reason='Wanted to become an ironmeme.')\n name = get_display_name(ctx.author)\n await msg.edit(content=f':tools: __**IRONMAN**__ :tools:\\n'\n f'Congratulations, {name}, you are now '\n 'an ironman!')",
"def record_lyrics_result(self, track_id, songdata):\n self.lyrics.insert_one(\n {\n \"_id\": track_id,\n \"response_artist\": songdata.artist,\n \"response_title\": songdata.title,\n \"lyrics\": songdata.lyrics,\n }\n )",
"async def quotes(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"quotes\")",
"def song_has_lyrics():\n pass",
"def _query_commands(self):\n # TODO: make this work\n self.player.respond(\"Hi there! Ask me to play artists or songs. \"\n \"I can also find songs that are similar to other \"\n \"artists.\")",
"def add_music_from_search(request, music_id: int) -> HttpResponse:\n music_item = get_object_or_404(Music, id=music_id)\n\n if music_item in request.user.profile.playlist.all():\n return HttpResponse('Success')\n\n playpos = PlayPosition(\n position=music_item,\n plist=request.user.profile\n )\n\n playpos.add_order()\n playpos.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))"
] | [
"0.68488747",
"0.56438977",
"0.55508363",
"0.55483466",
"0.5504152",
"0.5491618",
"0.54619044",
"0.54378915",
"0.54029",
"0.5392849",
"0.53796095",
"0.5319239",
"0.52956706",
"0.5290236",
"0.5269116",
"0.52477455",
"0.5243103",
"0.5234158",
"0.52026176",
"0.5178342",
"0.5166911",
"0.5128911",
"0.5116017",
"0.5100287",
"0.50971097",
"0.50952",
"0.50817424",
"0.5062706",
"0.50561315",
"0.5046288"
] | 0.8136522 | 0 |
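
The document for the approve_lyrics record above is only a stub, so the following is a purely illustrative sketch of what an admin-only approval routine could look like. The Song and User shapes and the lyrics_approved flag are assumptions, not anything the dataset specifies.

from dataclasses import dataclass


@dataclass
class Song:
    title: str
    lyrics: str = ""
    lyrics_approved: bool = False


@dataclass
class User:
    name: str
    is_admin: bool = False


def approve_lyrics(user: User, song: Song) -> bool:
    # Only admins may approve, and only when lyrics actually exist.
    if not user.is_admin or not song.lyrics:
        return False
    song.lyrics_approved = True
    return True


admin = User("moderator", is_admin=True)
track = Song("Example Song", lyrics="la la la")
print(approve_lyrics(admin, track))  # True
print(track.lyrics_approved)         # True
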
This method is called to check if a song already has lyrics, so as to avoid duplication of lyrics. | def song_has_lyrics():
    pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_existing_lyrics(self, song_id):\n\t\tlyrics = self.db.lyrics.find_one({'song_id': song_id})['lyrics']\n\t\treturn lyrics",
"def add_lyrics(self):\n\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n c.execute(\"SELECT songs.id, artist, title, url FROM songs LEFT JOIN lyrics ON songs.id = lyrics.song_id WHERE lyrics.song_id IS NULL\")\n all_songs_to_scrape = c.fetchall()\n for song in all_songs_to_scrape:\n song_id = song[0]\n song_artist = song[1]\n song_title = song[2]\n song_url = song[3]\n print(\"Looking for lyrics for \" + song_title + \" by \" + song_artist)\n try:\n lyrics = pygenius_songs.searchURL(song_url, 'lyrics')\n for lyric in lyrics:\n for line in lyric.split('\\n'):\n c.execute('INSERT INTO lyrics(song_id, line) VALUES (?,?)', (song_id, line))\n conn.commit()\n except Exception as e:\n print(e)\n print song_url\n print(\"Exception caught! ... continuing.\")\n pass",
"def get_existing_lyrics_of_artist(self, artist_name=None, artist_id=None):\n\t\tif artist_name:\n\t\t\tsongs = self.db.artists.find_one({'name': str(artist_name).lower()})\n\t\t\tlyrics = []\n\t\t\tfor song in songs:\n\t\t\t\tlyrics.append((song, self.get_existing_lyrics(song)))\n\t\t\treturn lyrics\n\t\tif artist_id:\n\t\t\tsongs = self.db.artists.find_one({'id': artist_id})['songs']\n\t\t\tprint(len(songs))\n\t\t\tlyrics = []\n\t\t\tfor song in songs:\n\t\t\t\ttry:\n\t\t\t\t\tlyrics.append((song, self.get_existing_lyrics(song)))\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\t\treturn lyrics",
"def lyrics_note_is_same_as_original():\n pass",
"def fetch_lyrics(self) -> None:\n if self.artist is None or self.title is None:\n return\n Logger.Logger.log('Looking for song lyrics...')\n finder = LyricsFinder.LyricsFinder(self)\n finder.fetch()\n self.lyrics = finder.get_lyrics()\n self.lyrics_writer = finder.get_lyrics_writer()\n if not self.lyrics:\n Logger.Logger.log('No lyrics found for this song.')",
"def artist_song_first_pass(self):\n log.debug(\"Called artist_song_first_pass for %s.\" % self.name)\n self.success = False\n song_potentials = []\n potential_count = 0\n _min = 20\n\n def generate_potentials(count):\n results = self.sp.search(q= 'artist: ' + self.artist + ' track: ' + self.song, type='track', limit=2)\n if results['tracks']['total'] >= 1:\n for items in results['tracks']['items']:\n song_potentials.append([items['name'], items['uri']])\n for artist in items['artists']:\n song_potentials[count].append(artist['name'])\n song_potentials[count].append(artist['uri'])\n count += 1\n\n for splitter in splitters:\n if self.name_clean.count(splitter) == 1:\n self.artist, self.song = self.name_clean.split(splitter)\n generate_potentials(potential_count)\n elif self.name_clean.count(splitter) > 1:\n for x in range(0, self.name_clean.count(splitter)):\n self.artist, self.song = split(self.name_clean, splitter, x)\n generate_potentials(potential_count)\n\n cutoff = matching(self.name_clean)\n log.debug(\"%s potential matches found for %d\" % (len(song_potentials), id(self)))\n log.debug(\"Potentials: %s\" % song_potentials)\n for potential in song_potentials:\n log.debug(potential)\n log.debug(self.name_clean)\n log.debug(str(potential[2]) + \" \" + str(potential[0]))\n lev = levenshtein(self.name_clean, str.lower(str(potential[2])) + \" \" + str.lower(str(potential[0])))\n log.debug(lev)\n if lev < _min:\n _min = lev\n self.artist = potential[2]\n self.artist_uri = potential[3]\n self.song = potential[0]\n self.song_uri = potential[1]\n\n if self.artist_uri and self.song_uri is not None:\n log.debug(\"Cutoff point for %s : %d\" % (id(self), cutoff))\n log.debug(\"Current Min: {}\".format(_min))\n log.debug(\"Levenshtein distance between {} and {} : {}\"\n .format(self.name_clean, self.artist + self.song,\n levenshtein(self.name, self.artist + \" \" + self.song)))\n if int(_min) > cutoff:\n log.debug(\"Method artist_song_first_pass failed for %s.\" % self.name)\n self.success = False\n self.artist = None\n self.song = None\n else:\n log.debug(\"Method artist_song_first_pass succeeded for %s.\" % self.name)\n self.success = True\n else:\n log.debug(\"Method artist_song_first_pass failed for %s.\" % self.name)\n self.success = False\n self.artist = None\n self.song = None",
"def add_songs(self, artist_list):\n\n \"Terms that identify songs that aren't really songs\"\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n if artist_list is None:\n return \"You must provide a list of artists for whom to find songs.\"\n else:\n for artist in artist_list:\n print(\"Finding songs for \" + artist)\n all_songs_by_artist = pygenius_songs.findAllSongs(artist)\n already_scraped = list()\n for song in all_songs_by_artist:\n url = song[0]\n title = song[1]\n print(title)\n c.execute(\"SELECT count(*) FROM songs WHERE title = (?) AND artist = (?)\", (title, artist))\n check_in_db = c.fetchall()\n if check_in_db[0][0] == 0:\n if title not in already_scraped:\n if not [i for i, x in enumerate(self.bad_terms) if x in title]:\n already_scraped.append(title)\n c.execute('INSERT INTO songs(title, artist, url) values (?,?,?)', (title, artist, url))\n conn.commit()",
"def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)",
"def is_lyrics_approved():",
"def song_already_exists(song, playlist_id):\n print('Song {title} already in playlist {playlist_id}, adding has been skipped.'\n .format(title=song.title,\n playlist_id=playlist_id))\n pass",
"def __add_lyric(self, song, genius_api):\n\t\tentry = {\n\t\t\t'song_id' : int(song['id']),\n\t\t\t'song_title' : song['title'],\n\t\t\t'url' : song['url']\n\t\t\t}\n\t\ttry:\n\t\t\tentry['lyrics'] = genius_api.get_lyrics(song['id'], song['url'])\n\t\texcept:\n\t\t\tentry['lyrics'] = ''\t\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\ttry:\n\t\t\tself.db.lyrics.insert_one(entry)\n\t\texcept errors.DuplicateKeyError:\n\t\t\tpass",
"async def _lyrics(self, ctx: commands.Context):\n if not ctx.voice_state.is_playing:\n raise commands.CommandError('Nothing being played at the moment.')\n\n # Get song name listed on youtube\n song_title = ctx.voice_state.current.source.track\n if not song_title:\n return await ctx.send(\"Couldn't find lyrics for this track!\")\n\n song_title = re.sub(\"[(\\[].*?[)\\]]\", \"\", song_title).strip() # Remove parenthesis from song title\n # Get artist name listed on youtube\n artist_name = ctx.voice_state.current.source.artist\n # Instance of GeniusSong class using the Genius API\n genius_song = GeniusSong(song_title, artist_name)\n # Try getting the lyrics using the lyricsgenius library\n lyrics = genius_song.fastlyrics()\n\n # In case of no lyrics found. Use the other (slower) method\n if not lyrics:\n res = genius_song.get_response() # Generate a response using the Genius API to get the songs\n if res:\n # Find the most similar artist comparing the artist on YouTube and Genius\n artist_name = genius_song.return_similar_artist(res)\n # Artist didn't match\n if not artist_name:\n await ctx.send(\"Couldn't find similar artists. The lyrics might not be the expected.\")\n\n # Get the lyrics using the lyricsgenius library with the new artist\n lyrics = genius_song.fastlyrics(artist_name)\n\n else:\n return await ctx.send(\n \"**Error!**\\nThere is a problem with Genius.\\nTry again in a few minutes. \"\n \"\\nYou can also try the command `fastlyrics`.\")\n\n if lyrics:\n # Split lyrics into fields\n fields = genius_song.split_lyrics(lyrics)\n # Create an embed message\n embed = embed_msg(\n title=song_title.capitalize() + \"\\n{}\".format(artist_name),\n description=\"\",\n footer=\"Lyrics provided by Genius.\",\n field_values=fields,\n inline=False\n )\n return await ctx.send(embed=embed)\n\n return await ctx.send(\"Lyrics couldn't be found.\")",
"def get_lyrics(self, artist, song):\n\n # Disable lyrics display\n self.status_bar.hide()\n self.lyrics_view.hide()\n self.scroll.hide()\n\n lyrics = None\n in_database = False\n\n if self.database.status: # Testing connection to database\n lyrics = self.database.retrieve_lyrics(artist, song)\n if lyrics: # False if not found in database\n in_database = True\n\n if not lyrics: # Try next to retrieve from web\n url = self.make_url(artist, song)\n try:\n lyrics = self.fetch_lyrics(url)\n except:\n self.display_message('Internet Connection Problem') # Could not connect to internet\n return\n\n if not lyrics: # Not available in database or on web\n self.display_message('Lyrics Not Available')\n else:\n # Set the display\n lyrics_buffer = self.lyrics_view.get_buffer()\n lyrics_buffer.set_text(lyrics)\n\n if not in_database: # Save if not in database\n self.database.save(artist, song, lyrics)\n\n # Re-enable lyrics display\n self.scroll.show()\n self.lyrics_view.show()\n self.display_message('Lyrics Extracted Successfully')",
"def validate_song_is_added_to_playlist(self):\n if self.track == 'Enjoy Enjaami':\n option = element['EnjoyEnjaami']\n elif self.track == 'Inna Mylu':\n option = element['InnaMylu']\n return com_util.find_text(self.driver, option)",
"def test_single_track_artist_too_long(self):\n self.add_mp3(set_artist=True, artist='z'*(App.max_artist_album_length+10))\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('is longer than', status)\n self.assertEqual(self.get_album_count(), 0)",
"def record_lyrics_result(self, track_id, songdata):\n self.lyrics.insert_one(\n {\n \"_id\": track_id,\n \"response_artist\": songdata.artist,\n \"response_title\": songdata.title,\n \"lyrics\": songdata.lyrics,\n }\n )",
"async def async_is_playing_new_track(self):\n if self._playing_mediabrowser and self._media_source_uri is not None:\n # don't trigger new track flag for local mediabrowser files\n return False\n \n if self._icecast_name != None:\n import unicodedata\n artmed = unicodedata.normalize('NFKD', str(self._media_artist) + str(self._media_title)).lower()\n artmedd = u\"\".join([c for c in artmed if not unicodedata.combining(c)])\n if artmedd.find(self._icecast_name.lower()) != -1 or artmedd.find(self._source.lower()) != -1:\n # don't trigger new track flag for icecast streams where track name contains station name or source name; save some energy by not quering last.fm with this\n self._media_image_url = None\n return False\n\n if self._media_artist != self._media_prev_artist or self._media_title != self._media_prev_title:\n return True\n else:\n return False",
"def song_lyrics(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n return words",
"def remove_library_dups(self, client):\n\n print \"\\n\\nGetting all Google Play Music library contents for '%s'...\" % (self.user)\n all_songs = client.get_all_songs()\n print \"\\n\"\n\n new_songs = {}\n old_songs = {}\n print \"Checking for duplicates...\"\n for song in all_songs:\n song_id = song.get('id')\n timestamp = song.get('recentTimestamp')\n\n key = \"%s: %d-%02d %s\" % (song.get('album'), song.get('discNumber'), song.get('trackNumber'), song.get('title'))\n\n # Identify duplicates within this library...\n if key in new_songs:\n if new_songs[key]['timestamp'] < timestamp:\n old_songs[key] = new_songs[key]\n new_songs[key] = {'id': song_id, 'timestamp': timestamp}\n else:\n old_songs[key] = {'id': song_id, 'timestamp': timestamp}\n\n new_songs[key] = {'id': song_id, 'timestamp': timestamp}\n\n if len(old_songs):\n print \"Found duplicate songs\"\n\n old_song_ids = []\n for key in sorted(old_songs.keys()):\n old_song_ids.append(old_songs[key]['id'])\n print \" ==> %s <==\" % (key.encode('utf-8'))\n\n print \"Deleting duplicate songs...\"\n client.delete_songs(old_song_ids)\n else:\n print \"No duplicate songs\"\n\n print \"Processed all %d songs\" % (len(all_songs))",
"def test_next_song_existing(bot, monkeypatch):\n tracks = [fake_res['title'], 'war squids']\n song_next = Song(fake_res['artist'], 'war squids', fake_res['album'])\n bot.log_result('chat_id', fake_log)\n monkeypatch.setattr(bot, 'get_album_tracks', lambda x: tracks)\n monkeypatch.setattr(bot, 'get_lyrics', lambda s, c: f'Searching for {s}')\n\n assert bot._get_next_song('chat_id') == f'Searching for {song_next}'",
"def test_single_track_no_artist(self):\n self.add_mp3(set_artist=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)",
"def lyrics(self):\n return get_lyrics(self.artist, self.title,'')",
"def scrape_new(self):\n\t\tno_urls = 0\n\t\tfalse_urls = 0\n\t\tfor i, song in enumerate(self._songs):\n\t\t\theaders = { \n\t\t\t\t'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36' \n\t\t\t\t}\n\t\t\t# metro structure\n\t\t\tmetro = \"http://www.metrolyrics.com/\" + '-'.join(song.title.split()) + \"-lyrics-\" + '-'.join(song.artist.split()) + \".html\"\n\t\t\trequest = requests.head(metro) # check if site exists\n\t\t\tif request.status_code == 301: # 301 == moved permanantely (new url exists)\n\t\t\t\tr = urllib.request.Request(metro, data=None, headers=headers)\n\t\t\t\tpage = urllib.request.urlopen(r)\n\t\t\t\tsoup = BeautifulSoup(page, \"lxml\")\n\t\t\t\tlyric_body = soup.find(\"div\", {\"id\": \"lyrics-body-text\"})\n\t\t\t\tverses = lyric_body.find_all(\"p\", class_='verse')\n\t\t\t\tif verses:\n\t\t\t\t\tdir = self._path + song.song_file\n\t\t\t\t\tself.write_to_file_obj(dir, verses)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Wrong Format?\")\n\t\t\t\t\tfalse_urls += 1\n\t\t\telse:\n\t\t\t\tprint(\"BAD URL\")\n\t\t\t\tno_urls += 1\n\t\t\tif i != len(self._songs)-1: \n\t\t\t\ttime.sleep(7) # set timeout to not overburdden the server\n\t\tprint(\"URLs Failed: \", no_urls)",
"def song_check(song):\n msg = choose_song(song)\n return msg != ERROR",
"def test_single_track_blank_artist(self):\n self.add_mp3(set_artist=True, artist='')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)",
"def test_single_track_album_too_long(self):\n self.add_mp3(set_album=True, album='z'*(App.max_artist_album_length+10))\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('is longer than', status)\n self.assertEqual(self.get_album_count(), 0)",
"def add_songs(self, song, position=None):\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)",
"async def lyrics(\n self, ctx: commands.Context, query: str = None\n ) -> Optional[Tuple[str, str, str]]:\n\n query = await self.now_playing(ctx) if query is None else query\n if not query:\n return\n\n url = f\"https://some-random-api.ml/lyrics?title={query}\"\n\n async with aiohttp.ClientSession() as session:\n request = await session.get(url)\n request_json = await request.json(content_type=None)\n\n authors = request_json.get(\"author\")\n title = request_json.get(\"title\")\n lyrics = request_json.get(\"lyrics\")\n\n return (title, authors, lyrics) if lyrics else None",
"def test_get_lyrics_notfound(monkeypatch, bot):\n\n def assert_not_found(msg):\n msg = get_lyrics(song, 1)\n msg = msg.lower()\n assert song.artist in msg\n assert song.title in msg\n assert 'could not be found' in msg\n\n song = Song('nothing more', 'christ copyright')\n result = Nothing()\n result.source = 'hello'\n monkeypatch.setattr(bot, 'get_lyrics_threaded', lambda a, b: result)\n\n msg = bot.get_lyrics(song, 1)\n assert_not_found(msg)\n\n result.source = None\n song.lyrics = 'hello'\n msg = bot.get_lyrics(song, 1)\n assert_not_found(msg)",
"def test_two_tracks_mismatched_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3', set_album=True, album='Album 2')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('changed to', status)\n self.assertEqual(self.get_album_count(), 0)"
] | [
"0.67191",
"0.6461495",
"0.64484215",
"0.62452585",
"0.6116291",
"0.6073265",
"0.603493",
"0.60067284",
"0.59900224",
"0.59794277",
"0.5927802",
"0.5798049",
"0.56955206",
"0.56919104",
"0.56533647",
"0.5607341",
"0.5586889",
"0.55832136",
"0.553297",
"0.5515789",
"0.54869497",
"0.5463637",
"0.54492414",
"0.53928244",
"0.53823465",
"0.5352275",
"0.5342825",
"0.53323585",
"0.53206784",
"0.5286877"
] | 0.7237534 | 0 |
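
The song_has_lyrics record above is likewise a stub; below is a hedged sketch of a duplicate-lyrics guard. The in-memory lyrics_store mapping is an assumption used only for illustration.

lyrics_store = {
    "song-001": "first verse ...",
    "song-002": "",  # entry exists but no lyrics stored yet
}


def song_has_lyrics(song_id):
    # True only when non-empty lyrics are already stored for this song
    return bool(lyrics_store.get(song_id, "").strip())


def add_lyrics(song_id, lyrics):
    # store lyrics only if none exist yet, avoiding duplicate entries
    if song_has_lyrics(song_id):
        return False
    lyrics_store[song_id] = lyrics
    return True


print(song_has_lyrics("song-001"))              # True
print(add_lyrics("song-002", "second verse"))   # True
print(add_lyrics("song-001", "replacement"))    # False -> duplicate rejected
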
This is called to compare a lyrics note to the original to ensure they are not the same; if they are, such a lyrics note is rejected. | def lyrics_note_is_same_as_original():
    pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_duplicate_notes(self, tokens, curr_note, step) -> bool:\n same_note_cnt = 0\n idx = step - 3\n while idx > 0:\n prev_note = self._get_num(self.tgt_dict.string(tokens[0, idx : idx + 1]))\n if prev_note != curr_note:\n break\n same_note_cnt += 1\n idx -= 4\n\n if same_note_cnt > _config.PitchPara.Max_Same_Pitch.value:\n return True\n return False",
"def test_ensure_passage_is_not_removed(self):\n simple = self.TEI.getPassage(MyCapytain.common.reference.Reference(\"1.pr.1-1.2.5\"))\n orig_refs = self.TEI.getValidReff(level=3)\n self.assertIn(\"1.pr.1\", orig_refs)\n self.assertIn(\"1.1.1\", orig_refs)\n self.assertIn(\"1.2.4\", orig_refs)\n self.assertIn(\"1.2.5\", orig_refs)\n\n simple = self.TEI.getPassage(MyCapytain.common.reference.Reference(\"1.pr-1.2\"))\n orig_refs = self.TEI.getValidReff(level=3)\n self.assertIn(\"1.pr.1\", orig_refs)\n self.assertIn(\"1.1.1\", orig_refs)\n self.assertIn(\"1.2.4\", orig_refs)\n self.assertIn(\"1.2.5\", orig_refs)",
"def check_note_for_history(self):\r\n testrun_notes = [\r\n \"multiple loci suspected\",\r\n \"suspected multicopy, poor performance\",\r\n \"fixed allele 1\",\r\n \"very poor amplification\",\r\n \"very poor amplification, high off target percent\",\r\n \"poor amplification, maybe redesign\",\r\n \"mono-allele 1?\",\r\n \"redesign primer\",\r\n \"most of target\",\r\n \"poor performance\",\r\n \"poor performance, primers off target\",\r\n \"off target amp\",\r\n \"mono-allele 1\",\r\n \"mono-allele 2 and off target\",\r\n \"Nate said it is a mess\",\r\n \"off target amp\",\r\n \"mono-allele 1 and off target\"\r\n ]\r\n if self.note == \"No primers made by primer3\":\r\n self.add_history(\"2018-2-12\",\"Nate\",\"primers were not made for this sequence variation\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Removed by nate, close to other SNP\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Primers designed for this SNP were taken out, were to close to other SNP\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Predicted to form hetrodymer\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Predicted to form hetrodymer\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"no valid primer pair could be made for this position\":\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note in testrun_notes:\r\n self.add_history(\"2018-2-23\",\"Thomas\",self.note)\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n #check if any were missed.\r\n if self.active and self.note != \"sequence variant selected by GBS-SNP-selection\":\r\n pass #print(self.note)\r",
"def isEqualNote(self, notestring):\n if isinstance(notestring, Note):\n noteEncoding = notestring.getEncoding()\n else:\n note = self.new(notestring)\n simpleNote = note.simplify()\n noteEncoding = simpleNote.getEncoding()\n if noteEncoding == None:\n return False\n else:\n if self.getEncoding() == noteEncoding:\n return True\n else:\n return False",
"def handle_one_off(self, shorter, longer):\n found = False\n for n, c in enumerate(shorter):\n if shorter[n] == longer[n]:\n continue\n elif shorter[n] == longer[n+1]:\n if not found:\n found = True\n else:\n return False\n return True",
"def get_mismatches(rec):\n qseq = rec.get_forward_sequence().upper()\n if rec.is_reverse:\n qseq = reverseComplement(qseq)\n rseq = rec.get_reference_sequence().upper()\n for qpos, rpos in rec.get_aligned_pairs():\n if qpos == None or rpos == None:\n continue # no indels yet\n q = qseq[qpos]\n r = rseq[rpos - rec.reference_start]\n if q != r:\n position = (rec.reference_name, rpos)\n change = (r, q)\n yield (position, change)",
"def test_disambiguate(self):\n self.assertEqual(self.RNA(\"\").disambiguate(), \"\")\n self.assertEqual(\n self.RNA(\"AGCUGAUGUA--CAGU\").disambiguate(), \"AGCUGAUGUA--CAGU\"\n )\n self.assertEqual(\n self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\").disambiguate(\"strip\"), \"AU--CG\"\n )\n s = self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\")\n t = s.disambiguate(\"random\")\n u = s.disambiguate(\"random\")\n for i, j in zip(str(s), str(t)):\n if i in s.moltype.degenerates:\n assert j in s.moltype.degenerates[i]\n else:\n assert i == j\n self.assertNotEqual(t, u)\n self.assertEqual(len(s), len(t))",
"def test_titles_do_not_match(self):\r\n gm_title = 'Zhao Hua'\r\n sp_title = 'MMXXX (ft Moor Mother)'\r\n self.assertFalse(gmspotify.titles_match(gm_title, sp_title))",
"def monkey_trouble(a_smile, b_smile):\r\n return a_smile == b_smile",
"def is_lyrics_approved():",
"def test_single_not_match_returns_line(self):\n eq_(self.line,line_no_matches_ngreps(self.line,[\"nomatch\"]))",
"def testSynonymDuplicate(self):\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\t\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\t\n\t\t\t\tsyn2 = spinner.Synonym.objects.get_single(two, one, True)\n\n\t\t\t\tassert syn == syn2\n\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()",
"def test_check_consistency_05():\n xml_notes, divisions = R.get_notes(os.path.join(\n '..', 'test', 'data',\n 'test_cross_barline_8-8_no_rest_no_pitch.xml'))\n note_attr_list = [\n R.get_note_attrs(xml_note, divisions) for xml_note in xml_notes]\n assert not U.check_consistency(note_attr_list)",
"def monkey_trouble2(a_smile, b_smile):\n if a_smile == b_smile:\n return True\n else:\n return False",
"def _compareIgnoreVersion(self, loc1, loc2, msg=None):\r\n if loc1.version_agnostic() != loc2.version_agnostic():\r\n self.fail(self._formatMessage(msg, u\"{} != {}\".format(unicode(loc1), unicode(loc2))))",
"def test_notes_invalid_student(self):\n student_id = '1234567890'\n career_id = 34\n perdiod_id = 115\n result = self.ucuenca.notes(student_id, career_id, perdiod_id)\n self.assertFalse(result)",
"def test_cclwarning_not_equal():\n w = pyccl.CCLWarning(\"blah\")\n w2 = pyccl.CCLWarning(\"blahh\")\n assert w is not w2\n assert w != w2\n assert hash(w) != hash(w2)\n\n v = pyccl.CCLDeprecationWarning(\"blah\")\n v2 = pyccl.CCLDeprecationWarning(\"blahh\")\n assert v is not v2\n assert v != v2\n assert hash(v) != hash(v2)",
"def assertMultiLineEqual(self, first, second, msg=None):\n self.assertTrue(isinstance(first, str),\n 'First argument is not a string')\n self.assertTrue(isinstance(second, str),\n 'Second argument is not a string')\n\n if first != second:\n message = ''.join(difflib.ndiff(first.splitlines(True),\n second.splitlines(True)))\n if msg:\n message += \" : \" + msg\n self.fail(\"Multi-line strings are unequal:\\n\" + message)",
"def test_idempotent():\n dirty = u'<span>invalid & </span> < extra http://link.com<em>'\n\n clean = bl.clean(dirty)\n eq_(clean, bl.clean(clean))\n\n bleached = bl.bleach(dirty)\n eq_(bleached, bl.bleach(bleached))\n\n linked = bl.linkify(dirty)\n eq_(linked, bl.linkify(linked))",
"def __citation_correction(self, bs, ground_truth):\n bs_ref = bs.findNext('bibl')\n gt_ref = ground_truth.findNext('ref')\n while gt_ref is not None:\n if gt_ref.find('article-title') != bs_ref.title:\n pass\n gt_ref = gt_ref.findNext('ref')",
"def compareLyricalSimilarity(userTranscript, originalCaption, verbose=False, profile=False):\n error = \"\"\n if (profile): start = time.time()\n # cmp = compareToDialogue(audioFile, originalCaption, verbose=verbose)\n cmp = similar(userTranscript, originalCaption)\n if (profile):\n end = time.time()\n print(\"(profile) lyrical similarity :\", end-start)\n return cmp, error",
"def test_equality(self):\n\n # change .phones\n pw1: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n pw2: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"P\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n self.assertNotEqual(pw1, pw2)\n\n # change .stress_pattern\n pw1: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n pw2: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n ],\n )\n self.assertNotEqual(pw1, pw2)",
"def test_check_consistency_04():\n xml_notes, divisions = R.get_notes(os.path.join(\n '..', 'test', 'data', 'test_cross_barline_8-8_extra_rest.xml'))\n note_attr_list = [\n R.get_note_attrs(xml_note, divisions) for xml_note in xml_notes]\n assert not U.check_consistency(note_attr_list)",
"def test_can_mismatch(self):\n assert not self.RNA(\"\").can_mismatch(\"\")\n assert self.RNA(\"N\").can_mismatch(\"N\")\n assert self.RNA(\"R\").can_mismatch(\"R\")\n assert self.RNA(\"N\").can_mismatch(\"r\")\n assert self.RNA(\"CGUACGCAN\").can_mismatch(\"CGUACGCAN\")\n assert self.RNA(\"U\").can_mismatch(\"C\")\n assert self.RNA(\"UUU\").can_mismatch(\"UUC\")\n assert self.RNA(\"UUU\").can_mismatch(\"UUY\")\n assert not self.RNA(\"UUU\").can_mismatch(\"UUU\")\n assert not self.RNA(\"UCAG\").can_mismatch(\"UCAG\")\n assert not self.RNA(\"U--\").can_mismatch(\"U--\")",
"def forbid_sequence(*s,min_len=2):\n assert len(s) >= 1\n notes = [part for part in tools.iter_melodies(*s)]\n\n for start in range(len(notes)):\n for end in range(start,len(notes)):\n if end - start < min_len:\n continue\n\n # try a motif\n motif = []\n for i in range(start,end+1):\n motif.extend(notes[i])\n\n # try a following\n part_nb = end - start + 1\n try:\n following = []\n for i in range(end+1, part_nb + end + 1):\n following.extend(notes[i])\n except IndexError:\n break\n\n # is there a sequence?\n try:\n if tools.matchSequence(motif, following, s[0].scale):\n warn(f\"Sequence in {(s.title for s in s)}.\",motif,following)\n except ValueError:\n continue",
"def __CompareText(self, s1, s2):\n # The \"splitlines\" method works independently of the line ending\n # convention in use.\n return s1.splitlines() == s2.splitlines()",
"def negation_check(self,sentence):",
"def test_same_sentence_check(self):\n block = get_text(SAMPLE_SENTENCE)\n self.assertTrue(same_sentence_check(block, 0, 98))\n self.assertFalse(same_sentence_check(block, 166, 168))",
"def isEqualPitch(self, notestring):\n if self.isEqualNote(notestring) and self.isEqualOctave(notestring):\n return True\n else:\n return False",
"def test_unequality(self):\n self.assertFalse(Record(1, 2) != Record(1, 2))\n self.assertTrue(Record(1, 2) != Record(1, 3))\n self.assertTrue(Record(1, 2) != Record(2, 2))\n self.assertTrue(Record(1, 2) != Record(3, 4))"
] | [
"0.6234901",
"0.6063626",
"0.6018202",
"0.59463143",
"0.57870716",
"0.5715159",
"0.57044494",
"0.56992126",
"0.5657637",
"0.5643236",
"0.5580238",
"0.5577914",
"0.5575537",
"0.5558365",
"0.5553078",
"0.5549295",
"0.5545337",
"0.54852504",
"0.54813206",
"0.54713225",
"0.54681325",
"0.5460688",
"0.54572344",
"0.54562765",
"0.54514766",
"0.5439124",
"0.5434291",
"0.5431563",
"0.54080796",
"0.53780496"
] | 0.82412505 | 0 |
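
The lyrics_note_is_same_as_original record above is also a stub; here is one possible sketch of the comparison the query describes, using a simple case/whitespace normalisation. The normalisation rule is an assumption, not something the dataset defines.

import re


def _normalise(text):
    # lower-case and collapse whitespace so trivial edits do not count
    return re.sub(r"\s+", " ", text.strip().lower())


def lyrics_note_is_same_as_original(note, original):
    # True means the note duplicates the original and should be rejected
    return _normalise(note) == _normalise(original)


print(lyrics_note_is_same_as_original("Hello   world", "hello world"))    # True  -> reject
print(lyrics_note_is_same_as_original("A new lyric line", "hello world")) # False -> accept
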
Checks whether the lyrics have been approved or not | def is_lyrics_approved(): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def approve_lyrics():\n pass",
"def song_has_lyrics():\n pass",
"def is_approved(self) -> bool:\n return self.state == Order.OrderState.APPROVED.choice_value",
"def approve_tweet(worker_responses):\n approvals = [len(get_tweet_text(response)) > 0 for response in worker_responses]\n return approvals",
"def is_approved(self):\n return self.moderator_state in (Page.MODERATOR_APPROVED, Page.MODERATOR_APPROVED_WAITING_FOR_PARENTS)",
"def is_complete(self):\n return all([\n len(strip_tags(score.notes)) > 0 for score in self.scores.all()\n ])",
"def pops_agree(x):\n return len(x.all_open_closed) == 1",
"def need_attention(self):\n msg = [\"not staged\", \"behind\", \"ahead\", \"Untracked\"]\n status_msg = self.status()\n if any([each in status_msg for each in msg]):\n return True\n return False",
"def Confirm(self):\n self.PrintMetadata()\n answer = input(\"Continue [Y/n]? \").lower()\n return not answer.startswith(\"n\")",
"def approves(self):\n # verify trailing stop-loss threshold has been met\n thresholdMet = self.analysis.trailing_percentage >= constants.PERCENT_TRAILING_CLOSE_THRESHOLD\n\n # verify price has reverted back to the mean\n if self.analysis.initial_order_type == \"buy\":\n meanReverted = self.analysis.current_price >= self.analysis.current_volume_weighted_average_price\n else:\n meanReverted = self.analysis.current_price <= self.analysis.current_volume_weighted_average_price\n\n # return approval\n _approval = thresholdMet or meanReverted\n if _approval:\n self.logger.log(self.analysis.__dict__)\n self.logger.log(\"%s close approved!\" % self.ticker)\n return _approval",
"def ConfirmAllowedCopyrightHolder(holder):\n return holder in ALLOWED_COPYRIGHT_HOLDERS",
"def _check_required(self):\n if self.data['history_file'] is None:\n return\n required = self.data.get('required_changelog_text')\n if not required:\n return\n if isinstance(required, six.string_types):\n required = [required]\n history_last_release = self.data['history_last_release']\n for text in required:\n if text in history_last_release:\n # Found it, all is fine.\n return\n pretty_required = '\"{}\"'.format('\", \"'.join(required))\n if not utils.ask(\n \"WARNING: Changelog should contain at least one of \"\n \"these required strings: {}. Are you sure you \"\n \"want to release?\".format(pretty_required),\n default=False):\n sys.exit(1)",
"async def status(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if adv.is_on_adventure(ctx.author.id):\n out = adv.print_adventure(ctx.author.id)\n else:\n out = 'You are not doing anything at the moment.'\n await ctx.send(out)",
"def customer_wants_condiments(self):\n answer = raw_input(\"Would you like Lemon? (y/n)\").lower()\n if answer.startswith('y'):\n return True\n else:\n return False",
"def approved(self) -> bool:\n return all(d.approved for d in self.affected_directories)",
"def _check_for_license_acceptance(self, dep):\n if \"license\" in self.dependency_dict[dep]:\n license_name = self.dependency_dict[dep][\"license\"]\n else:\n license_name = \"restrictive\"\n if \"license_file\" in self.dependency_dict[dep]:\n license_text = Path(\n self.dependency_dict[dep][\"license_file\"]\n ).read_text()\n logger.warning(license_text)\n while \"invalid answer\":\n reply = (\n str(\n input(\n f\"Do you accept this {license_name} license? (y/n): \"\n )\n )\n .lower()\n .strip()\n )\n if len(reply) > 0:\n if reply[0] == \"y\":\n return True\n if reply[0] == \"n\":\n return False",
"def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)",
"def is_retweet(self, strict=True):\n if self.tweet.get('retweeted_status', False):\n return True\n if not strict:\n text_lower = self.tweet['text'].lower()\n if text_lower.startswith('rt '):\n return True\n if ' rt ' in text_lower:\n if not 'please rt' in text_lower \\\n and not 'pls rt' in text_lower \\\n and not 'plz rt' in text_lower:\n return True\n return False",
"def test_single_aclhook_true(self):\n self._test_hook_approval_sequence([True], True)",
"def hod_approve(self):\n print \"HOD approved this form. Current state:\", self.state",
"def jao_approve(self):\n print \"JAO approved this form. Current state:\", self.state",
"def is_valid(self):\n return (self.time is not None\n and self.author is not None\n and self.content is not None)",
"def seesSuggestions(self):\n return self.floor.owner == self.user and self.floor.permissiveness == \"permissive\"",
"def is_eligible(self, myself):\n if self.author().screen_name == myself.screen_name:\n log_.debug(\"Not replying to my own tweets\")\n return False\n if self.is_retweet():\n log_.debug(\"Not processing pure retweets\")\n return False\n return True",
"def check():\n\t\t# This forces user to set dirs before running the app for first time.\n\t\tif len(Config.lyrics_dir) == 0:\n\t\t\t# see which directory in not set and raise BadConfigError with that as value\n\t\t\tprint('lyrics_dir is not set.')\n\t\t\tprint('Please use the \"set\" command to set lyrics_dir.')\n\t\t\tprint('use \"lyrico --help\" to view commands.')\n\t\t\treturn False\n\n\t\tif len(Config.source_dir) == 0:\n\t\t\t# see which directory in not set and raise BadConfigError with that as value\n\t\t\tprint('source_dir is not set.')\n\t\t\tprint('Please use the \"set\" command to set source_dir or pass it as parameter.')\n\t\t\tprint('use \"lyrico --help\" to view commands.')\n\t\t\treturn False\n\n\t\t# if user disable both saving mode. Notify & force user to correct on next run.\n\t\tif not Config.save_to_file and not Config.save_to_tag:\n\t\t\tprint('Both \"save_to_file\" and \"save_to_tag\" modes are disabled. Please enable one.')\n\t\t\tprint('use \"lyrico --help\" to view commands.')\n\t\t\treturn False\n\n\t\t# if user disables all sources. Notify & force user to enable one.\n\t\tif (not Config.lyric_wikia\n\t\t and not Config.az_lyrics\n\t\t and not Config.musix_match\n\t\t and not Config.lyricsmode):\n\t\t\tprint('All lyrics sources are disabled. Please enable one.')\n\t\t\tprint('use \"lyrico --help\" to view commands.')\n\t\t\treturn False\n\t\treturn True",
"def ok(self):\n return self['webok'] == 'OK'",
"def need_admin_approval(self):\n return self._need_admin_approval",
"def change_availability():\n artwork_sold = get_artwork_name()\n if not controls_utils.artwork_exists(artwork_sold):\n print('No record of that piece of art. ')\n else:\n artist = controls_utils.name_of_artist(artwork_sold)\n if not controls_utils.artwork_available(artwork_sold, artist):\n print('Sorry that piece has already been sold. ')\n else:\n response = input('Mark ' + artwork_sold + ' as sold? Y or N ')\n if response.upper() == 'Y':\n mark_as_sold(artwork_sold)\n while not controls_utils.response_affirmative(response):\n response = input('Are you sure you want to mark '\n + artwork_sold + ' by ' + artist + ' as sold? Y or N or press X to escape ')\n if response.upper() == 'X':\n break\n elif response.upper() == 'N':\n break",
"def asking(self):\n return 'Sure.'",
"def check(self, description: Description) -> bool:"
] | [
"0.7582891",
"0.6132226",
"0.5762543",
"0.5721694",
"0.56735605",
"0.5672874",
"0.5540766",
"0.5498608",
"0.54710966",
"0.5407162",
"0.5385652",
"0.53676486",
"0.53605825",
"0.5347708",
"0.53281146",
"0.5326186",
"0.53110224",
"0.5303602",
"0.5287871",
"0.5286905",
"0.5272448",
"0.52441275",
"0.52437484",
"0.52387387",
"0.523041",
"0.52121",
"0.52111715",
"0.5198329",
"0.5184507",
"0.51838124"
] | 0.8734696 | 0 |
r"""Calculate the cold plasma dispersion surfaces according to equation 2.64 in Plasma Waves by Swanson (2nd ed.) | def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):
# Make vectors of the wave numbers
kc_z = np.linspace(1e-6, kc_z_max, 35)
kc_x = np.linspace(1e-6, kc_x_max, 35)
# Turn those vectors into matrices
kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)
# Find some of the numbers that appear later in the calculations
kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k
theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B
wc_i = 1 / m_i # The ion gyro frequency
wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency
wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# For every k_perp and k_par, turn the dispersion relation into a
# polynomial equation and solve it.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# The polynomial coefficients are calculated
pol_koeff_8 = -2 * kc_ ** 2
pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)
pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)
pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)
pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(
theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))
pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2
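    # The dispersion relation reduces to a 10th-order polynomial in w, so there are ten surfaces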
w_final = np.zeros((10, len(kc_z), len(kc_x)))
# For each k, solve the equation
for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):
disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,
pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],
0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]
        # The roots should theoretically all be real (A. Tjulin)
w_temp = np.real(np.roots(disp_polynomial))
# We need to sort the answers to get nice surfaces.
w_final[:, k_z, k_x] = np.sort(w_temp)
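    # Refractive index squared, phase speed (in units of c) and its ratio to the Alfven speed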
n2_ = kc_ ** 2 / w_final ** 2
v_ph_c = np.sqrt(1. / n2_)
va_c = 1 / (wp_e * np.sqrt(m_i))
v_ph_va = v_ph_c / va_c
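    # Dielectric tensor and the corresponding wave electric and magnetic field components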
diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)
e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)
e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_
b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,
w_final, e_x, e_y, e_z)
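    # Group velocity v_g = dw/dk estimated with finite differences along k_x and k_z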
dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]
dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]
dw_x[:, :, 1:] = np.diff(w_final, axis=2)
dw_z[:, 1:, :] = np.diff(w_final, axis=1)
v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]
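    # Poynting flux: parallel component and total magnitude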
s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)
# Compute ion and electron velocities
v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,
e_x, e_y, e_z)
    # Ratio of particle speeds parallel and perpendicular to B
vepar_perp = v_ez * np.conj(v_ez)
vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))
vipar_perp = v_iz * np.conj(v_iz)
vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))
# Total particle speeds
v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)
v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)
# Ion and electron energies
m_e = -1
en_e = 0.5 * m_e * v_e2
en_i = 0.5 * m_i * v_i2
# Ratio of particle and field energy densities
ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)
# Continuity equation
dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,
v_ex, v_ez, v_ix, v_iz)
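    # Normalize the density fluctuations by the total and parallel magnetic fluctuations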
dn_e_n_db_b = dn_e_n / b_tot
dn_i_n_db_b = dn_i_n / b_tot
dn_e_n_dbpar_b = dn_e_n / b_par
dn_i_n_dbpar_b = dn_i_n / b_par
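    # Electron density perturbation (scaled by wp_e^2) and |k . E| used in the diagnostics below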
dn_e = dn_e_n * wp_e ** 2
k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat
k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))
# Build output dict
extra_param = {"Degree of electromagnetism": np.log10(b_tot / e_tot),
"Degree of longitudinality": np.abs(e_par) / e_tot,
"Degree of parallelity E": e_z / e_tot,
"Degree of parallelity B": np.sqrt(
b_z * np.conj(b_z)) / b_tot,
"Ellipticity E": e_pol, "Ellipticity B": b_pol,
"E_part/E_field": np.log10(ratio_part_field),
"v_g": np.sqrt(v_x ** 2 + v_z ** 2),
"v_ph/v_a": np.log10(v_ph_va),
"E_e/E_i": np.log10(en_e / en_i),
"v_e/v_i": np.log10(np.sqrt(v_e2 / v_i2)),
"v_epara/v_eperp": np.log10(vepar_perp),
"v_ipara/v_iperp": np.log10(vipar_perp),
"dn_e/dn_i": np.log10(dne_dni),
"(dn_e/n)/ (dB/B)": np.log10(dn_e_n_db_b),
"(dn_i/n)/(dB/B)": np.log10(dn_i_n_db_b),
"(dn_i/n)/(dBpar/B)": np.log10(dn_i_n_dbpar_b),
"(dn_e/n)/(dB/B)": np.log10(dn_e / k_dot_e),
"(dn_e/n)/(dBpar /B)": np.log10(dn_e_n_dbpar_b),
" Spar/Stot": s_par / s_tot}
    for k, v in extra_param.items():
extra_param[k] = np.transpose(np.real(v), [0, 2, 1])
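    # Transpose so the returned arrays are ordered as (k_x, k_z), consistent with extra_param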
kx_ = np.transpose(kc_x_mat)
kz_ = np.transpose(kc_z_mat)
wf_ = np.transpose(w_final, [0, 2, 1])
return kx_, kz_, wf_, extra_param | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_mixing_coefficients_surf(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n # SET UP NEW MIXING COEFFICIENT ARRAYS\n self.Kv_surf = np.zeros([Ly,N+1])\n self.Kt_surf = np.zeros([Ly,N+1])\n \n self.ghat = np.zeros([Ly,N+1])\n \n\n #################################\n # \tSURFACE KPP\n ################################\n #---> j-loop\n \n self.wm2 = []\n self.ws2 = []\n self.sigma_y = []\n for j in range(Ly):\n #--> k-loop (top to kbl[j])\n # in fortran k=N-1,kbl(j),-1\n for k in range(N-1,self.kbl[j]-1,-1):\n k_w = k\n k_r = k-1\n\n Bfsfc = self.Bfsfc_bl[j]\n zscale = z_u_w[j,N] - z_u_w[j,k_w]\n \n # CALCULATE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm2.append(wm)\n self.ws2.append(ws)\n # COMPUTE VERTICAL MIXING COEFFICIENTS\n sigma = (z_u_w[j,N] - z_u_w[j,k_w]) / np.max([self.hbls[j],self.eps])\n self.sigma1 = sigma #for debugging\n if j == 25: \n self.sigma_y.append(sigma)\n a1 = sigma - 2.\n a2 = 3.-2.*sigma\n a3 = sigma - 1.\n\n if sigma < 0.07:\n cff = 0.5 * (sigma-0.07)**2/0.07\n else:\n cff = 0\n \n \n if k == N-1: \n self.wm_debug = wm\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n\n self.Kv_surf[j,k_w] = wm * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gm1[j]+a3*self.dGm1_dS[j])))\n\n if k == N-1:\n self.ws_debug = ws\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n \n self.Kt_surf[j,k_w] = ws * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gt1[j]+a3*self.dGt1_dS[j])))\n #---> end k-loop \n if self.LMD_NONLOCAL:\n if Bfsfc < 0:\n self.ghat[j,k_w] = 0\n self.ghat[j,k_w] = self.Cg * sigma * (1.-sigma)**2\n else:\n self.ghat[j,k_w] = 0.\n\n # ADD CONVECTIVE ADJUSTMENT IN SURFACE MIXED LAYER \n if self.LMD_CONVEC and self.MLCONVEC: \n for k in range(N-1,int(self.kbl[j]-1),-1):\n k_w = k\n k_r = k -1\n\n if self.bvf[j,k_w] < 0:\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.ffac*self.nu0c\n\n # ADD CONVECTIVE ADJUSTMENT BELOW SURFACE MIXED LAYER\n # IF BKPP IS SWITCHED OFF!!\n for k in range(int(self.kbl[j]-1),-1,-1):\n k_w = k\n k_r = k -1\n if self.LMD_NONLOCAL:\n self.ghat[j,k_w] = 0\n if self.LMD_CONVEC and self.LMD_BKPP == False:\n if self.bvf[j,k_w] < 0:\n self.Kv_surf[j,k_w] = self.Kv_surf[j,k_w] + self.nu0c\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.nu0c\n \n\n #---> end j-loop",
"def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. - self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc",
"def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5",
"def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)",
"def snow_depth(lon, lat, month):\n\n im = month - 1\n\n h0 = np.array( [28.01, 30.28, 33.89, 36.80, 36.93, 36.59,\n 11.02, 4.64, 15.81, 22.66, 25.57, 26.67] )\n a = np.array( [ 0.1270, 0.1056, 0.5486, 0.4046, 0.0214, 0.7021,\n 0.3008, 0.3100, 0.2119, 0.3594, 0.1496, -0.1876] )\n b = np.array( [-1.1833, -0.5908, -0.1996, -0.4005, -1.1795, -1.4819,\n -1.2591, -0.6350, -1.0292, -1.3483, -1.4643, -1.4229] )\n c = np.array( [-0.1164, -0.0263, 0.0280, 0.0256, -0.1076, -0.1195,\n -0.0811, -0.0655, -0.0868, -0.1063, -0.1409, -0.1413] )\n d = np.array( [-0.0051, -0.0049, 0.0216, 0.0024, -0.0244, -0.0009,\n -0.0043, 0.0059, -0.0177, 0.0051, -0.0079, -0.0316] )\n e = np.array( [ 0.0243, 0.0044, -0.0176, -0.0641, -0.0142, -0.0603,\n -0.0959, -0.0005, -0.0723, -0.0577, -0.0258, -0.0029] )\n\n x = (90. - lat) * np.cos( np.radians(lon) )\n y = (90. - lat) * np.sin( np.radians(lon) )\n\n h = ( h0[im] + ( a[im] * x ) + ( b[im] * y ) + ( c[im] * x * y ) +\n ( d[im] * x * x ) + ( e[im] * y * y ) )\n\n return h",
"def pwlFly(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./zenSpacing)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n Bvec_complete = []\n Sol_complete = []\n meas_complete = []\n model_complete = []\n postchis = []\n prechis = []\n aics = []\n bics = []\n #w = 1;\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n #print(\"NUMD:\",numd)\n if numd < 2:\n continue\n #\n # Neq is acting like a constrain on the model a small value 0.001\n # let the model vary by 1000 mm\n # will let it vary more. a large value -> 1 will force the model to be closer to 0\n # This gets too large for lots of observations, s best to doit on the fly..\n #\n Neq = np.eye(numZD,dtype=float)# * 0.001\n Apart = np.zeros((numd,numZD))\n\n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing))\n Apart[i,iz] = (1.-(azData[i,2]-iz*zenSpacing)/zenSpacing)\n Apart[i,iz+1] = (azData[i,2]-iz*zenSpacing)/zenSpacing\n w = np.sin(data[i,2]/180.*np.pi)\n for k in range(iz,iz+2):\n for l in range(iz,iz+2):\n Neq[k,l] = Neq[k,l] + (Apart[i,l]*Apart[i,k]) * 1./w**2\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n for val in Sol:\n Sol_complete.append(val)\n\n #Qxx = np.dot(Apart.T,Apart)\n #Qvv = np.subtract( np.eye(numd) , np.dot(np.dot(Apart,Qxx),Apart.T))\n #sd = np.squeeze(np.diag(Qvv))\n #dx = np.dot(np.linalg.pinv(Qxx),Bvec)\n #dl = np.dot(Apart,dx)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n postchis.append(np.sqrt(postchi/numd))\n prechis.append(np.sqrt(prechi/numd))\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n\n # calculate the model values for each obs\n model = np.dot(Apart,Sol) #np.zeros(numd)\n for d in range(0,numd):\n model_complete.append(model[d])\n meas_complete.append(azData[d,3])\n # zen = azData[d,2]\n # iz = int(np.floor(azData[d,2]/zenSpacing))\n # #model[d] = Sol[iz]\n\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),gls_results.rsquared,gls_results.aic,gls_results.bic)\n \n # loglikelihood(meas,model,sd)\n #sd = np.squeeze(np.diag(Qvv))\n #print(\"meas, model, sd:\",np.shape(azData),np.shape(model),np.shape(sd))\n f = loglikelihood(azData[:,3],model)\n dof = numd - np.shape(Sol)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n aics.append(aic) \n bics.append(bic) \n #print(\"=========================\")\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n #A_complete = np.squeeze(np.asarray(A_complete.todense()))\n #print(\"A shape\",np.shape(A_complete))\n\n print(\"Doing a fit to the data\")\n f = loglikelihood(np.array(meas_complete),np.array(model_complete))\n numd = np.size(meas_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #prechi = np.dot(data[:,3].T,data[:,3])\n prechi = 
np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n\n return pwl_All, pwlSig_All",
"def solid_surface_density_RC2014_given_observed_catalog(sss_per_sys, max_core_mass=10.):\n mult_obs = sss_per_sys['Mtot_obs']\n mult_obs_2p = []\n a_obs_2p = []\n core_mass_obs_2p = []\n sigma_obs_2p = []\n for i in np.arange(len(mult_obs))[mult_obs > 1]: # only consider multi-planet systems\n a_sys = gen.a_from_P(sss_per_sys['P_obs'][i], sss_per_sys['Mstar_obs'][i])\n core_mass_sys = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(sss_per_sys['radii_obs'][i][a_sys > 0])\n core_mass_sys[core_mass_sys > max_core_mass] = max_core_mass\n a_sys = a_sys[a_sys > 0]\n\n mult_obs_2p += [len(a_sys)]*len(a_sys)\n a_obs_2p += list(a_sys)\n core_mass_obs_2p += list(core_mass_sys)\n sigma_obs_2p += list(solid_surface_density_system_RC2014(core_mass_sys, a_sys))\n mult_obs_2p = np.array(mult_obs_2p)\n a_obs_2p = np.array(a_obs_2p)\n core_mass_obs_2p = np.array(core_mass_obs_2p)\n sigma_obs_2p = np.array(sigma_obs_2p)\n return sigma_obs_2p, core_mass_obs_2p, a_obs_2p, mult_obs_2p",
"def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. * np.pi)",
"def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux",
"def dynamic(self):\n # FrostIndexChangeRate=-(1-Afrost)*FrostIndex - Tavg*exp(-0.04*Kfrost*SnowCover/SnowWaterEquivalent);\n\n FrostIndexChangeRate = -(1 - self.var.Afrost) * self.var.FrostIndex - self.var.Tavg * \\\n np.exp(-0.04 * self.var.Kfrost * self.var.SnowCover / self.var.SnowWaterEquivalent)\n # FrostIndexChangeRate=self.var.AfrostIndex - self.var.Tavg* pcraster.exp(self.var.Kfrost*self.var.SnowCover*self.var.InvSnowWaterEquivalent)\n # Rate of change of frost index (expressed as rate, [degree days/day])\n # CHANGED 9 September 2004:\n # - first term should be negative\n # - second term should be subtracted, not added!!\n\n self.var.FrostIndex = np.maximum(self.var.FrostIndex + FrostIndexChangeRate * self.var.DtDay, 0)\n # frost index in soil [degree days]\n # based on Molnau and Bissel (1983, A Continuous Frozen Ground Index for Flood\n # Forecasting. In: Maidment, Handbook of Hydrology, p. 7.28, 7.55)\n # if Tavg is above zero, FrostIndex will stay 0\n # if Tavg is negative, FrostIndex will increase with 1 per degree C per day\n # Exponent of 0.04 (instead of 0.4 in HoH): conversion [cm] to [mm]!\n # Division by SnowDensity because SnowDepth is expressed as equivalent water\n # depth(always less than depth of snow pack)\n # SnowWaterEquivalent taken as 0.100 (based on density of 100 kg/m3) (Handbook of Hydrology, p. 7.5)\n # Afrost, (daily decay coefficient) is taken as 0.97 (Handbook of Hydrology,\n # p. 7.28)\n # Kfrost, (snow depth reduction coefficient) is taken as 0.57 [1/cm],\n # (HH, p. 7.28)",
"def solid_surface_density_nHill_given_observed_catalog(sss_per_sys, max_core_mass=10., n=10.):\n Mstar_obs = np.repeat(sss_per_sys['Mstar_obs'][:,None], np.shape(sss_per_sys['P_obs'])[1], axis=1)[sss_per_sys['P_obs'] > 0] # flattened array of stellar masses repeated for each planet\n a_obs_per_sys = gen.a_from_P(sss_per_sys['P_obs'], sss_per_sys['Mstar_obs'][:,None])\n a_obs = a_obs_per_sys[sss_per_sys['P_obs'] > 0]\n radii_obs = sss_per_sys['radii_obs'][sss_per_sys['P_obs'] > 0]\n core_mass_obs = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(radii_obs)\n core_mass_obs[core_mass_obs > max_core_mass] = max_core_mass\n sigma_obs = solid_surface_density_nHill(core_mass_obs, a_obs, Mstar=Mstar_obs, n=n)\n return sigma_obs, core_mass_obs, a_obs",
"def oceansim(sun_az,sun_zen,cam_head,cam_elev=0,m2=1.33,npart=1.08,mu=3.483, debug=True):\n\n #Water surface norm\n n = np.array([0,0,1])\n m1 = 1.0\n #vector from sun:\n ki = -np.asarray([np.sin(sun_az)*np.sin(sun_zen),\n np.cos(sun_az)*np.sin(sun_zen),\n np.cos(sun_zen)])\n xi = norm_cross(n,ki)\n #transmitted sunlight\n #tx, ty are the transmission amplitude coefficients in the xt, yt directions\n kt,tx,ty = Fresnel.transmission(ki,n,m1,m2)\n xt = xi\n #vector to camera\n kc = -np.asarray([np.sin(cam_head)*np.cos(cam_elev),\n np.cos(cam_head)*np.cos(cam_elev),\n np.sin(cam_elev)])*np.linalg.norm(kt)\n xc = norm_cross(n, kc) #right\n yc = norm_cross(kc, xc) #up\n #vectors for scattering\n ys = norm_cross(kt, kc) # y-axis of scattering event\n xst = norm_cross(ys, kt) # x-axis of scattering event relative to transmitted sunlight\n xsc = norm_cross(ys, kc) # x-axis of scattering event relative to camera\n #Mueller matrices\n # transmission through water surface:\n mm1 = Mueller.polarizer(tx,ty)\n # rotate to scattering plane\n mm2 = Mrotv(kt,xt,xst)\n # scatter\n th_s = vector_angle(kt,kc)\n #mm3 = Mocean(rad2deg(th_s)) #using Empirical ocean scattering\n mm3 = Mueller.rayleigh_norm(th_s) #normalized Rayleigh scattering matrix\n #b = Scattering.bsf_fournier(npart,mu)\n b = Scattering.vspf_fournier(th_s,npart,mu)\n # transform to camera's horizontal and up vectors\n mm4 = Mxform(xsc,ys, xc,yc)\n #Combined: mm4 . (b*mm3) . mm2 . mm1\n m = mm4.dot(b*mm3.dot(mm2.dot(mm1)))\n #stokes vector\n s = m.dot([1,0,0,0])\n if debug:\n return s,m,(ki,xi),(kt,xt,xst),(kc,xc,xsc),(mm1,mm2,mm3,b,mm4)\n else:\n return s,m",
"def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI",
"def gz(xp, yp, zp, prisms):\n if xp.shape != yp.shape != zp.shape:\n raise ValueError(\"Input arrays xp, yp, and zp must have same shape!\")\n dummy = 1e-10\n res = 0\n for prism in prisms:\n if prism is None or 'density' not in prism.props:\n continue\n x, y = prism.x, prism.y\n z1, z2 = prism.z1, prism.z2\n density = prism.props['density']\n nverts = prism.nverts\n # Calculate the effect of the prism\n Z1 = z1 - zp\n Z2 = z2 - zp\n Z1_sqr = Z1**2\n Z2_sqr = Z2**2\n kernel = 0\n for k in range(nverts):\n Xk1 = x[k] - xp\n Yk1 = y[k] - yp\n Xk2 = x[(k + 1) % nverts] - xp\n Yk2 = y[(k + 1) % nverts] - yp\n p = Xk1*Yk2 - Xk2*Yk1\n p_sqr = p**2\n Qk1 = (Yk2 - Yk1)*Yk1 + (Xk2 - Xk1)*Xk1\n Qk2 = (Yk2 - Yk1)*Yk2 + (Xk2 - Xk1)*Xk2\n Ak1 = Xk1**2 + Yk1**2\n Ak2 = Xk2**2 + Yk2**2\n R1k1 = np.sqrt(Ak1 + Z1_sqr)\n R1k2 = np.sqrt(Ak2 + Z1_sqr)\n R2k1 = np.sqrt(Ak1 + Z2_sqr)\n R2k2 = np.sqrt(Ak2 + Z2_sqr)\n Ak1 = np.sqrt(Ak1)\n Ak2 = np.sqrt(Ak2)\n Bk1 = np.sqrt(Qk1**2 + p_sqr)\n Bk2 = np.sqrt(Qk2**2 + p_sqr)\n E1k1 = R1k1*Bk1\n E1k2 = R1k2*Bk2\n E2k1 = R2k1*Bk1\n E2k2 = R2k2*Bk2\n # Simplifying these arctans with, e.g., (Z2 - Z1)*arctan2(Qk2*p -\n # Qk1*p, p*p + Qk2*Qk1) doesn't work because of the restrictions\n # regarding the angles for that identity. The regression tests\n # fail for some points by a large amount.\n kernel += (Z2 - Z1)*(np.arctan2(Qk2, p) - np.arctan2(Qk1, p))\n kernel += Z2*(np.arctan2(Z2*Qk1, R2k1*p) -\n np.arctan2(Z2*Qk2, R2k2*p))\n kernel += Z1*(np.arctan2(Z1*Qk2, R1k2*p) -\n np.arctan2(Z1*Qk1, R1k1*p))\n Ck1 = Qk1*Ak1\n Ck2 = Qk2*Ak2\n # dummy helps prevent zero division and log(0) errors (that's why I\n # need to add it twice)\n # Simplifying these two logs with a single one is not worth it\n # because it would introduce two pow operations.\n kernel += 0.5*p*Ak1/(Bk1 + dummy)*np.log(\n (E1k1 - Ck1)*(E2k1 + Ck1)/((E1k1 + Ck1)*(E2k1 - Ck1) + dummy) +\n dummy)\n kernel += 0.5*p*(Ak2/(Bk2 + dummy))*np.log(\n (E2k2 - Ck2)*(E1k2 + Ck2)/((E2k2 + Ck2)*(E1k2 - Ck2) + dummy) +\n dummy)\n res += kernel*density\n res *= G*SI2MGAL\n return res",
"def create_flux_vector_pms_gr(self):\n soma_prod = 0\n soma_inj = 0\n lim4 = 1e-4\n store_velocity = {}\n store_flux = {}\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n flux = {}\n velocity = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n if adj not in fine_elems_in_primal:\n #4\n pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n #3\n else:\n #4\n pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]\n #3\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)/(self.mi)\n keq2 = keq\n keq = keq*(np.dot(self.A, uni))\n pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n q = (grad_p)*keq - grad_z*keq*self.gama\n print((grad_p)*keq)\n print(- grad_z*keq*self.gama)\n print(q)\n print(self.store_flux_pf_gr[volume][tuple(unit)])\n print('\\n')\n import pdb; pdb.set_trace()\n\n if gid_adj > gid_vol:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n else:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n\n flux[tuple(unit)] = q\n velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n\n #2\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n store_flux[volume] = flux\n self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))\n # flt = sum(flux.values())\n # if volume not in self.wells_inj and volume not in self.wells_prod:\n # lim4 = 1e-7\n # if abs(flt) > lim4:\n # print(gid_vol)\n # print(flt)\n # import pdb; pdb.set_trace()\n # flt = sum(flux.values())\n store_velocity[volume] = velocity\n\n for volume in set(self.all_fine_vols) - set(self.wells):\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n values = store_flux[volume].values()\n if sum(values) > lim4:\n print('fluxo multiescala nao esta dando conservativo')\n print('gid:{0}'.format(gid))\n print(sum(values))\n import pdb; pdb.set_trace()\n\n with open('fluxo_multiescala_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]\n values = store_flux[volume].values()\n if volume in self.wells_inj:\n soma_inj += sum(values)\n else:\n 
soma_prod += sum(values)\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(soma_inj))\n arq.write('soma_prod:{0}\\n'.format(soma_prod))\n\n return store_flux",
"def dispersion(self, k):\n return 2*self.material.gamma*np.sqrt(self.material.z)",
"def Schechter_M_z_M200c(M, redshift, M200c):\n\treturn 0.4 * n.log(10.) * 10**logPhi_evol(redshift, mass_2_richness(M200c, redshift)) * 10**(0.4 * (M_s_evol(redshift, mass_2_richness(M200c, redshift)) - M) * (alpha_evol(redshift, mass_2_richness(M200c, redshift)) + 1)) * n.e**( -10** ( 0.4 * (M_s_evol(redshift,mass_2_richness(M200c, redshift)) - M)))",
"def get_hbls_hbbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n u = self.u\n v = self.v\n \n v_upts = TTTW_func.v2u(v)\n Hz = z_u_w[:,1:] - z_u_w[:,:-1]\n\n\n\n # CALCULATE swr_frac\n self.swr_frac = TTTW_func.lmd_swr_frac(self.grid_dict)\n\n\n # WHOLE THING HAPPENS IN j loop through y-indices\n \n # INITIALIZE ARRAYS\n self.kmo = np.zeros([Ly])\n self.Cr = np.zeros([Ly])\n self.kbl = np.empty([Ly],dtype='int')\n self.C_h_MO = np.zeros([Ly])\n self.Cr = np.zeros([Ly,N+1]) # sum term\n self.FC = np.zeros([Ly,N+1])\n self.swdk_r = np.zeros([Ly,N+1])\n \n self.zscale = np.zeros([Ly,N])\n self.Kern = np.zeros([Ly,N])\n\n \n # --> LOOP THROUGH Y-INDICES\n for j in range(Ly):\n if self.LIMIT_MO_DEPTH:\n self.kmo[j] = 0\n self.C_h_MO[j] = self.C_MO *self.ustar[j]**3/self.vonKar\n \n self.kbl[j] = 0\n self.Cr[j,-1] = 0 # set top Cr\n self.Cr[j,0] = 0 # set bottom Cr\n \n # SEARCH FOR MIXED LAYER DEPTH\n self.FC[j,-1] = 0.\n\n\n # ---> LOOP TOP TO BOTTOM (FORTRAN ==> k=N-1,1,-1)\n for k in range(N-1,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n \n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n self.zscale[j,k_w] = zscale\n if self.LMD_KPP:\n if self.LMD_BKPP:\n zscaleb = z_u_r[j,k_r] - z_u_w[j,0]\n Kern = zscale * zscaleb**2 / ( (zscale + self.epssfcs*self.hbls_old[j]) * (zscaleb**2+(self.epssfcb**2*self.hbbl_old[j]**2)))\n else:\n Kern = zscale / (zscale + (self.epssfcs*self.hbls_old[j]))\n else:\n Kern = 1.\n \n\n\n self.Kern[j,k_w] = Kern\n self.FC[j,k_w] = self.FC[j,k_w+1] + Kern * (\\\n ( ( u[j,k_r+1] - u[j,k_r] )**2 + ( v_upts[j,k_r+1] - v_upts[j,k_r])**2 ) \\\n / (Hz[j,k_r] + Hz[j,k_r+1]) \\\n - 0.5 * ( Hz[j,k_r] + Hz[j,k_r+1]) * (self.Ri_inv * self.bvf[j,k_w] + self.C_Ek*self.f[j]*self.f[j]))\n\n\n #\t\tLOOP THAT FINDS BL DEPTH ##\n #----> LOOP TOP TO BOTTOM (start at free surface, w-level surface) \n \n if self.LMD_KPP:\n #swdk_r only used in this function so don't need to be class attribute\n # but for testing make it an attribute to see what it is\n \n # fortran equivlanet ===> k=N,1,-1 \n for k in range(N,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n ###################################################################### \n self.swdk_r[j,k_w] = np.sqrt( self.swr_frac[j,k_w] * self.swr_frac[j,k_w-1])\n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n Bfsfc = self.Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])\n \n self.bvf_max = np.sqrt(np.max([0,self.bvf[j,k_w-1]]))\n \n # CALCULATE TURBULENT VELOCITY SCALE FOR TRACERS\n \t\t\t self.ws = self.lmd_wscale_ws_only(Bfsfc, zscale,self.hbls_old[j],self.ustar[j])\n \n self.Vtsq = self.Vtc * self.ws* self.bvf_max + self.V0\n \n\n self.Cr[j,k_w] = self.FC[j,k_w] + self.Vtsq\n \n\n #######################################################################\n \n # SEARCH FOR hbls vertical level #\n '''\n kbl is specified at vertical w-level (via Cr which is at\n vertical w-levels)\n '''\n if self.kbl[j] == 0 and self.Cr[j,k_w] < 0:\n self.kbl[j] = k_w\n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] == 0 and Bfsfc*(z_u_w[j,N] - z_u_r[j,k_r]) > self.C_h_MO[j]:\n self.kmo[j] = k_w\n\n \n #--> still in j-loop\n #######################################################\n \n # \t\tGET SURFACE BOUNDARY LAYER DEPTH # \n self.hbls[j] = z_u_w[j,N] - z_u_w[j,0] + self.eps # set hbls as depth of entire water column\n if self.kbl[j] > 0:\n k_w = self.kbl[j]\n k_r = k_w - 1 \n if k_w == N: # set hbls at the surface btwn w- and rho-levels at surface\n self.hbls[j] = z_u_w[j,N] - z_u_r[j,N-1]\n \n else:\n self.hbls[j] = z_u_w[j,N] - ( 
z_u_r[j,k_r] * self.Cr[j,k_w+1] - z_u_r[j,k_r+1] * self.Cr[j,k_w]) / \\\n (self.Cr[j,k_w+1] - self.Cr[j,k_w])\n \n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] > 0:\n k_w = self.kmo[j]\n k_r = k_w-1\n if k_w == N:\n z_up = z_u_w[j,N]\n cff_up = np.max([0,Bo[j]])\n else:\n z_up = z_r[j,k_w+1]\n cff_up = np.max([0, Bo[j] + self.Bosol[j]*(1-self.swdk_r[j,(k_w-1)+1])])\n \n cff_dn = np.max([0,Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])]) \n h_MO = z_u_w[j,N] + self.C_h_MO[j] * ( cff_up*z_up - cff_dn * z_u_r[j,k_r] ) \\\n / ( cff_up * cff_dn * (z_up - z_u_r[j,k_r]) ) \\\n + self.C_h_MO[j] * (cff_dn - cff_up)\n\n self.hbls[j] = np.min([self.hbls[j],np.max([h_MO,0])])\n\n\n\n #### GET BOTTOM BOUNDARY LAYER DEPTH #######\n if self.LMD_BKPP:\n self.kbl[j] = 0 # reset Cr at bottom and kbl for BKPP\n self.Cr[j,0] = 0.\n self.FC[j,0] = 1.5 * self.FC[j,1] - 0.5 * self.FC[j,2] # linear extrapolation\n \n #---> LOOP BOTTOM TO TOP\n # FIND kbl for BBL\n for k in range(1,N+1):\n k_r = k-1\n k_w = k \n self.Cr[j,k_w] = self.FC[j,k_w] - self.FC[j,0]\n \n # LOOK FOR FIRST ZERO CROSSING FROM BOTTOM UP\n if self.kbl[j] == 0 and self.Cr[j,k_w] > 0:\n self.kbl[j] = k_w \n \n\n self.hbbl[j] = z_u_w[j,N] - z_u_w[j,0] # total depth\n if self.kbl[j] > 0 :\n k_w = self.kbl[j] \n k_r = k_w -1\n if k_w == 1: # NO BBL CASE\n self.hbbl[j] = z_u_r[j,0] - z_u_w[j,0] #in between bottom rho and w-level\n else:\n self.hbbl[j] = ( z_u_r[j,k_r-1] * self.Cr[j,k_w] - z_u_r[j,k_r] * self.Cr[j,k_w-1]) / \\\n (self.Cr[j,k_w] - self.Cr[j,k_w-1]) - z_u_w[j,0]",
"def dispersion(self, k):\n return np.sqrt(\n 1 + (\n (k*self.material.hbar_m)**2 / (2*self.material.m_star_m) -\n self.material.z\n )**2\n )",
"def sigmai_dep(ptem, psal, pref):\n zr4 = 4.8313e-4\n zd =-2.042967e-2\n zrau0 = 1000.e0\n \n sigmai_dep_out = zeros(psal.shape)\n \n # ?? for whatever reason sqrt(abs(psal)) seems to kick up a fuss when arrays\n # exceed a certain size...??? otherwise this could be vectorised\n # TODO: if pref is a number, broadcast it into a 2d field\n \n for jj in range(psal.shape[0]): # python indexing\n for ji in range(psal.shape[1]):\n \n ztem = ptem[jj, ji]\n zsal = psal[jj, ji]\n zws = sqrt( abs(psal[jj, ji]) )\n \n # Compute the volumic mass of pure water at atmospheric pressure.\n zr1 = ( ( ( ( (6.536332e-9 * ztem - 1.120083e-6) * ztem + 1.001685e-4 )\n * ztem - 9.095290e-3 ) * ztem + 6.793952e-2 ) * ztem + 999.842594e0\n )\n\n # Compute the seawater volumic mass at atmospheric pressure.\n zr2 = ( ( ( ( 5.3875e-9 * ztem - 8.2467e-7) * ztem + 7.6438e-5)\n * ztem - 4.0899e-3) * ztem + 0.824493e0\n )\n\n zr3 = (-1.6546e-6 * ztem + 1.0227e-4) * ztem - 5.72466e-3\n\n # Compute the potential volumic mass (referenced to the surface).\n zrhop = (zr4 * zsal + zr3 * zws + zr2) * zsal + zr1\n\n # Compute the compression terms.\n ze = (-3.508914e-8 * ztem - 1.248266e-8) * ztem - 2.595994e-6\n\n zbw = (1.296821e-6 * ztem - 5.782165e-9) * ztem + 1.045941e-4\n\n zb = zbw + ze * zsal\n\n zc = (-7.267926e-5 * ztem + 2.598241e-3) * ztem + 0.1571896e0\n\n zaw = ( ( (5.939910e-6 * ztem + 2.512549e-3) * ztem - 0.1028859e0 ) \n * ztem - 4.721788e0\n )\n\n za = (zd * zws + zc) * zsal + zaw\n\n zb1 = (-0.1909078e0 * ztem + 7.390729e0) * ztem - 55.87545e0\n\n za1 = ( ( (2.326469e-3 * ztem + 1.553190e0) * ztem - 65.00517e0)\n * ztem + 1044.077e0\n )\n\n zkw = ( ( ( (-1.361629e-4 * ztem - 1.852732e-2) * ztem - 30.41638e0)\n * ztem + 2098.925e0) * ztem + 190925.60\n )\n\n zk0 = (zb1 * zws + za1) * zsal + zkw\n\n # Compute the potential density anomaly.\n sigmai_dep_out[jj, ji] = ( zrhop / (1.0e0 - pref / \n ( zk0 - pref * (za - pref * zb) ) )\n - zrau0\n )\n \n return sigmai_dep_out",
"def sat_vap_dens(nz, T, SWVD, plot=False):\r\n rho_v = np.zeros(nz)\r\n rho_v_dT = np.zeros(nz)\r\n if SWVD == \"Libbrecht\":\r\n rho_v = (\r\n np.exp(-T_ref_L / T) / (f * T) * (a0 + a1 * (T - 273) + a2 * (T - 273) ** 2)\r\n ) # [kg/m^3] Water vapor density\r\n rho_v_dT = (\r\n np.exp(-T_ref_L / T)\r\n / (f * T ** 2)\r\n * (\r\n (a0 - a1 * 273 + a2 * 273 ** 2) * (T_ref_L / T - 1)\r\n + (a1 - a2 * 2 * 273) * T_ref_L\r\n + a2 * T ** 2 * (T_ref_L / T + 1)\r\n )\r\n ) # [kg/m^3/K]\r\n elif SWVD == \"Calonne\":\r\n x = (L_Cal * mH2O) / (rho_i * kB)\r\n rho_v = rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n rho_v_dT = x / T ** 2 * rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n elif SWVD == \"Hansen\":\r\n\r\n rho_v = (\r\n (10.0 ** (c1 / T + c2 * np.log(T) / np.log(10) + c3 * T + c4 * T ** 2 + c5))\r\n * c6\r\n / R_v\r\n / T\r\n )\r\n rho_v_dT = (\r\n rho_v\r\n * np.log(10)\r\n * (-c1 / T ** 2 + c2 / (T * np.log(10)) + c3 + 2 * c4 * T)\r\n - rho_v / T\r\n )\r\n else:\r\n raise ValueError(\"Saturation water vapor density not available\")\r\n if plot:\r\n fig1 = plt.plot(T, rho_v)\r\n plt.title(\"Water vapor density with respect to temperature\")\r\n plt.show(fig1)\r\n fig2 = plt.plot(T, rho_v_dT)\r\n plt.title(\"Derivative of water vapor density with respect to temperature\")\r\n plt.show(fig2)\r\n return rho_v, rho_v_dT",
"def solid_surface_density_CL2013(M, a):\n return solid_surface_density(M, a, a)",
"def findzpd(self):\n dc=0.5*self.rms*self.ndstep\n #fixed at 0.1 of the dispersion\n dd=0.1*self.ws.coef[1]\n\n #set upt he docef values\n dcoef=self.ws.coef*0.0\n dcoef[0]=dc\n dcoef[1]=dd\n self.ws=st.findxcor(self.xarr, self.farr, self.swarr, self.sfarr, self.ws, \n dcoef=dcoef, ndstep=self.ndstep, best=False, inttype='interp')\n self.plotArt()\n self.redraw_canvas()",
"def main_gamma_ray_loop(\n num_decays,\n model,\n plasma,\n time_steps=10,\n time_end=80.0,\n grey_opacity=-1,\n spectrum_bins=500,\n time_space=\"log\",\n photoabsorption_opacity=\"tardis\",\n pair_creation_opacity=\"tardis\",\n seed=1,\n path_to_decay_data=\"~/Downloads/tardisnuclear/decay_radiation.h5\",\n positronium_fraction=0.0,\n):\n # Note: not best numpy practice, but works better in numba than the alternatives\n np.random.seed(seed)\n\n # Enforce cgs\n outer_velocities = model.v_outer.to(\"cm/s\").value\n inner_velocities = model.v_inner.to(\"cm/s\").value\n ejecta_density = model.density.to(\"g/cm^3\").value\n ejecta_volume = model.volume.to(\"cm^3\").value\n ejecta_velocity_volume = (\n 4 * np.pi / 3 * (outer_velocities**3.0 - inner_velocities**3.0)\n )\n time_explosion = model.time_explosion.to(\"s\").value\n number_of_shells = model.no_of_shells\n raw_isotope_abundance = model.raw_isotope_abundance.sort_values(\n by=[\"atomic_number\", \"mass_number\"], ascending=False\n )\n\n shell_masses = ejecta_volume * ejecta_density\n\n time_start = time_explosion\n time_end *= u.d.to(u.s)\n\n assert (\n time_start < time_end\n ), \"Error, simulation start time greater than end time!\"\n\n if time_space == \"log\":\n times = np.zeros(time_steps + 1)\n\n # log time steps\n for i in range(time_steps + 1):\n times[i] = (\n np.log(time_start)\n + (np.log(time_end) - np.log(time_start)) / time_steps * i\n )\n times[i] = np.exp(times[i])\n else:\n times = np.linspace(time_start, time_end, time_steps + 1)\n\n dt_array = np.diff(times)\n effective_time_array = np.array(\n [np.sqrt(times[i] * times[i + 1]) for i in range(time_steps)]\n )\n\n # Use isotopic number density\n for atom_number in plasma.isotope_number_density.index.get_level_values(0):\n values = plasma.isotope_number_density.loc[atom_number].values\n if values.shape[1] > 1:\n plasma.number_density.loc[atom_number] = np.sum(values, axis=0)\n else:\n plasma.number_density.loc[atom_number] = values\n\n # Calculate electron number density\n electron_number_density = (\n plasma.number_density.mul(plasma.number_density.index, axis=0)\n ).sum()\n\n electron_number_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n mass_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n electron_number = (electron_number_density * ejecta_volume).to_numpy()\n\n inv_volume_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n # Pre-calculate quantities as they change with time\n for i, t in enumerate(effective_time_array):\n inv_volume_time[:, i] = (1.0 / ejecta_velocity_volume) / (t**3.0)\n mass_density_time[:, i] = shell_masses * inv_volume_time[:, i]\n electron_number_density_time[:, i] = (\n electron_number * inv_volume_time[:, i]\n )\n\n energy_df_rows = np.zeros((number_of_shells, time_steps))\n\n # Calculate number of packets per shell based on the mass of isotopes\n number_of_isotopes = plasma.isotope_number_density * ejecta_volume\n total_number_isotopes = number_of_isotopes.sum(axis=1)\n\n inventories = raw_isotope_abundance.to_inventories()\n all_isotope_names = get_all_isotopes(raw_isotope_abundance)\n all_isotope_names.sort()\n\n gamma_ray_lines = get_nuclear_lines_database(path_to_decay_data)\n\n taus = {}\n parents = {}\n gamma_ray_line_array_list = []\n average_energies_list = []\n average_positron_energies_list = []\n\n for i, isotope in enumerate(all_isotope_names):\n nuclide = rd.Nuclide(isotope)\n taus[isotope] = 
nuclide.half_life() / np.log(2)\n child = nuclide.progeny()\n if child is not None:\n for c in child:\n if rd.Nuclide(c).half_life(\"readable\") != \"stable\":\n parents[c] = isotope\n\n energy, intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"g\",\n )\n gamma_ray_line_array_list.append(np.stack([energy, intensity]))\n average_energies_list.append(np.sum(energy * intensity))\n positron_energy, positron_intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"bp\",\n )\n average_positron_energies_list.append(\n np.sum(positron_energy * positron_intensity)\n )\n\n # Construct Numba typed dicts\n gamma_ray_line_arrays = {}\n average_energies = {}\n average_positron_energies = {}\n\n for iso, lines in zip(all_isotope_names, gamma_ray_line_array_list):\n gamma_ray_line_arrays[iso] = lines\n\n for iso, energy, positron_energy in zip(\n all_isotope_names, average_energies_list, average_positron_energies_list\n ):\n average_energies[iso] = energy\n average_positron_energies[iso] = positron_energy\n\n # urilight chooses to have 0 as the baseline for this calculation\n # but time_start may also be valid in which case decay time is time_end - time_start\n total_energy_list = []\n\n for shell, inv in enumerate(inventories):\n decayed_energy = {}\n total_decays = inv.cumulative_decays(time_end)\n for nuclide in total_decays:\n if nuclide in parents and nuclide != \"Co-56\" and nuclide != \"Co-57\":\n parent = parents[nuclide]\n if parent in parents:\n parent = parents[parent]\n decayed_energy[parent] += (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n else:\n decayed_energy[nuclide] = (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n\n total_energy_list.append(decayed_energy)\n\n total_energy = pd.DataFrame(total_energy_list)\n\n total_energy_columns = total_energy.columns.to_list()\n\n total_energy = total_energy[\n sorted(\n total_energy_columns, key=get_nuclide_atomic_number, reverse=True\n )\n ]\n\n energy_per_mass = total_energy.divide(\n (raw_isotope_abundance * shell_masses).T.to_numpy(),\n axis=0,\n )\n\n # Time averaged energy per mass for constant packet count\n average_power_per_mass = energy_per_mass / (time_end - time_start)\n\n energy_per_mass_norm = energy_per_mass.divide(\n energy_per_mass.sum(axis=1), axis=0\n ) # .cumsum(axis=1)\n\n decayed_packet_count = num_decays * number_of_isotopes.divide(\n total_number_isotopes, axis=0\n )\n\n packets_per_isotope = (\n (energy_per_mass_norm * decayed_packet_count.T.values)\n .round()\n .fillna(0)\n .astype(int)\n )\n\n print(\"Total gamma-ray energy\")\n print(total_energy.sum().sum() * u.keV.to(\"erg\"))\n\n print(\"Total positron energy\")\n print(total_energy[\"Co-56\"].sum(axis=0) * 0.0337 * u.keV.to(\"erg\"))\n\n # Taking iron group to be elements 21-30\n # Used as part of the approximations for photoabsorption and pair creation\n # Dependent on atomic data\n iron_group_fraction_per_shell = model.abundance.loc[(21):(30)].sum(axis=0)\n\n number_of_packets = packets_per_isotope.sum().sum()\n print(\"Total packets:\", number_of_packets)\n\n packet_energy = total_energy.sum().sum() / number_of_packets\n\n print(\"Energy per packet\", packet_energy)\n\n # Need to update volume for positron deposition to be time-dependent\n print(\"Initializing packets\")\n (\n packets,\n energy_df_rows,\n energy_plot_df_rows,\n energy_plot_positron_rows,\n ) = 
initialize_packets(\n packets_per_isotope,\n packet_energy,\n gamma_ray_line_arrays,\n positronium_fraction,\n inner_velocities,\n outer_velocities,\n inv_volume_time,\n times,\n energy_df_rows,\n effective_time_array,\n taus,\n parents,\n average_positron_energies,\n inventories,\n average_power_per_mass,\n )\n\n print(\"Total positron energy from packets\")\n print((energy_df_rows).sum().sum() * u.eV.to(\"erg\"))\n\n total_cmf_energy = 0\n total_rf_energy = 0\n\n for p in packets:\n total_cmf_energy += p.energy_cmf\n total_rf_energy += p.energy_rf\n\n print(\"Total CMF energy\")\n print(total_cmf_energy)\n\n # Below is the Artis compensation for their method of packet rejection\n \"\"\"\n energy_ratio = total_energy.sum().sum() / total_cmf_energy\n\n print(\"Energy ratio\")\n print(energy_ratio)\n \n for p in packets:\n p.energy_cmf *= energy_ratio\n p.energy_rf *= energy_ratio\n\n for e in energy_df_rows:\n e *= energy_ratio\n \n for row in energy_plot_df_rows:\n row[1] *= energy_ratio\n \"\"\"\n print(\"Total RF energy\")\n print(total_rf_energy)\n\n energy_bins = np.logspace(2, 3.8, spectrum_bins)\n energy_out = np.zeros((len(energy_bins - 1), time_steps))\n\n # Process packets\n (\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n deposition_estimator,\n ) = gamma_packet_loop(\n packets,\n grey_opacity,\n photoabsorption_opacity,\n pair_creation_opacity,\n electron_number_density_time,\n mass_density_time,\n inv_volume_time,\n iron_group_fraction_per_shell.to_numpy(),\n inner_velocities,\n outer_velocities,\n times,\n dt_array,\n effective_time_array,\n energy_bins,\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n )\n\n # DataFrame of energy information\n energy_plot_df = pd.DataFrame(\n data=energy_plot_df_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n \"energy_input_type\",\n \"compton_opacity\",\n \"photoabsorption_opacity\",\n \"total_opacity\",\n ],\n )\n\n # DataFrame of positron energies\n energy_plot_positrons = pd.DataFrame(\n data=energy_plot_positron_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n ],\n )\n\n # DataFrame of estimated deposition\n # Multiply dataframes by inv_volume_time array\n # if per unit volume is needed\n energy_estimated_deposition = (\n pd.DataFrame(data=deposition_estimator, columns=times[:-1])\n ) / dt_array\n\n # Energy is eV/s\n energy_df = pd.DataFrame(data=energy_df_rows, columns=times[:-1]) / dt_array\n\n final_energy = 0\n for p in packets:\n final_energy += p.energy_rf\n\n print(\"Final energy to test for conservation\")\n print(final_energy)\n\n escape_energy = pd.DataFrame(\n data=energy_out, columns=times[:-1], index=energy_bins\n )\n\n return (\n energy_df,\n energy_plot_df,\n escape_energy,\n decayed_packet_count,\n energy_plot_positrons,\n energy_estimated_deposition,\n )",
"def force_12(alpha, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax, E0_mod, nmin_sc, nmax_sc, case):\n\n dr = 1 / k * 1e-5\n dz = dr\n dtheta = 1e-5\n\n p1 = dipole_moment(1, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax,\n E0_mod, nmin_sc, nmax_sc, case)\n p1c = p1.conjugate()\n\n # Fr\n if alpha == 0:\n r1plusdr = r1 + np.array([dr, 0, 0])\n r1minusdr = r1 - np.array([dr, 0, 0])\n Eplusr = total_loc_efield(1, r1plusdr, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n Eminusr = total_loc_efield(1, r1minusdr, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n grad_r = (Eplusr - Eminusr) / (2 * dr)\n\n return(0.5 * np.dot(p1c, grad_r).real)\n # Ftheta\n elif alpha == 1:\n r1plusdtheta = r1 + np.array([0, dtheta, 0])\n r1minusdtheta = r1 - np.array([0, dtheta, 0])\n\n Eplustheta = total_loc_efield(1, r1plusdtheta, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n Eminustheta = total_loc_efield(1, r1minusdtheta, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n grad_theta = (Eplustheta - Eminustheta) / (r1[0] * 2 * dtheta)\n\n return(0.5 * np.dot(p1c, grad_theta).real)\n # Fz\n elif alpha == 2:\n r1plusdz = r1 + np.array([0, 0, dz])\n r1minusdz = r1 - np.array([0, 0, dz])\n\n Eplusz = total_loc_efield(1, r1plusdz, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n Eminusz = total_loc_efield(1, r1minusdz, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n grad_z = (Eplusz - Eminusz) / (2 * dz)\n\n return(0.5 * np.dot(p1c, grad_z).real)\n else:\n print('alpha is out of range!')\n return(0)",
"def compute_desired_velocity(self):\n mask_red = (self.image_red == 255) \\\n *(self.image_green == 0) \\\n *(self.image_blue == 0)\n ind_red = sp.where( mask_red )\n phi = sp.ones(self.image_red.shape)\n phi[ind_red] = 0\n phi = sp.ma.MaskedArray(phi, mask=self.mask)\n numpy.set_printoptions(threshold=sys.maxsize)\n self.door_distance = skfmm.distance(phi, dx=self.pixel_size)\n tmp_dist = self.door_distance.filled(9999)\n grad = sp.gradient(tmp_dist,edge_order=2)\n grad_X = -grad[1]/self.pixel_size\n grad_Y = -grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.desired_velocity_X = self.vmax * (grad_X/norm)\n self.desired_velocity_Y = self.vmax * (grad_Y/norm)\n '''plt.subplot(1,2,1)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.subplot(1,2,2)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.show()'''\n return self.door_distance, self.desired_velocity_X, self.desired_velocity_Y",
"def calculate_pressure_layers(P_surface = 100000,P_Cutoff = 0.00001):\n layers = np.ceil(-np.log(P_Cutoff/P_surface)) \n return [float(\"%.3g\"%x) for x in np.exp(-np.arange(layers))*P_surface]",
"def calculate_muscl_fluxes(densities, pressures, velocities, gamma,\n mass_ratios, specific_heats, molar_masses, dt_over_dx):\n # Get half step densities\n limiter = UltraBeeLimiter()\n half_step_densities_L = np.zeros(len(densities) - 2)\n half_step_velocities_L = np.zeros(half_step_densities_L.shape)\n half_step_pressures_L = np.zeros(half_step_densities_L.shape)\n half_step_mass_ratios_L = np.zeros((len(densities) - 2, len(specific_heats)))\n half_step_densities_R = np.zeros(half_step_densities_L.shape)\n half_step_velocities_R = np.zeros(half_step_densities_L.shape)\n half_step_pressures_R = np.zeros(half_step_densities_L.shape)\n half_step_mass_ratios_R = np.zeros(half_step_mass_ratios_L.shape)\n for i, dens in enumerate(half_step_densities_L):\n idx = i + 1\n\n # Calculate slopes\n left_slopes = dict()\n left_slopes[\"rho\"] = (densities[idx] - densities[idx - 1]) / 2\n left_slopes[\"mom\"] = (densities[idx] * velocities[idx] - densities[idx - 1] * velocities[idx - 1]) / 2\n cell_energy = 0.5 * densities[idx] * velocities[idx] * velocities[idx] + pressures[idx] / (gamma[idx] - 1)\n behind_energy = 0.5 * densities[idx - 1] * velocities[idx - 1] * velocities[idx - 1] + pressures[idx - 1] / (gamma[idx - 1] - 1)\n left_slopes[\"energy\"] = (cell_energy - behind_energy) / 2\n\n right_slopes = dict()\n right_slopes[\"rho\"] = (densities[idx + 1] - densities[idx]) / 2\n right_slopes[\"mom\"] = (densities[idx + 1] * velocities[idx + 1] - densities[idx] * velocities[idx]) / 2\n forward_energy = 0.5 * densities[idx + 1] * velocities[idx + 1] * velocities[idx + 1] + pressures[idx + 1] / (gamma[idx + 1] - 1)\n right_slopes[\"energy\"] = (forward_energy - cell_energy) / 2\n\n average_density_slope, average_momentum_slope, average_energy_slope = limiter.calculate_limited_slopes(left_slopes, right_slopes)\n\n # Interpolate left and right densities\n left_density = densities[idx] - average_density_slope\n left_momentum = densities[idx] * velocities[idx] - average_momentum_slope\n left_energy = cell_energy - average_energy_slope\n left_mass_ratios = mass_ratios[idx, :]\n assert left_density > 0, left_density\n assert left_energy > 0, left_energy\n assert np.isclose(1.0, left_mass_ratios.sum(), 1e-14)\n\n right_density = densities[idx] + average_density_slope\n right_momentum = densities[idx] * velocities[idx] + average_momentum_slope\n right_energy = cell_energy + average_energy_slope\n right_mass_ratios = mass_ratios[idx, :]\n assert right_density > 0, right_density\n assert right_energy > 0, right_energy\n assert np.isclose(1.0, right_mass_ratios.sum(), 1e-14)\n\n # Perform half step flux\n left_velocity = left_momentum / left_density\n left_density_flux = left_momentum\n left_internal_energy = left_energy - 0.5 * left_momentum * left_velocity\n left_pressure = left_internal_energy * (gamma[idx] - 1)\n left_momentum_flux = left_momentum * left_velocity + left_pressure\n left_energy_flux = (left_energy + left_pressure) * left_velocity\n\n right_velocity = right_momentum / right_density\n right_density_flux = right_momentum\n right_internal_energy = right_energy - 0.5 * right_momentum * right_velocity\n right_pressure = right_internal_energy * (gamma[idx] - 1)\n right_momentum_flux = right_momentum * right_velocity + right_pressure\n right_energy_flux = (right_energy + right_pressure) * right_velocity\n\n half_step_density_flux = (left_density_flux - right_density_flux) * dt_over_dx * 0.5\n half_step_momentum_flux = (left_momentum_flux - right_momentum_flux) * dt_over_dx * 0.5\n 
half_step_energy_flux = (left_energy_flux - right_energy_flux) * dt_over_dx * 0.5\n\n state = ThermodynamicState1D(left_pressure, left_density, left_velocity, gamma[idx], left_mass_ratios)\n state.update_states(half_step_density_flux,\n half_step_momentum_flux,\n half_step_energy_flux,\n specific_heats, molar_masses)\n half_step_densities_L[i] = state.rho\n half_step_velocities_L[i] = state.u\n half_step_pressures_L[i] = state.p\n half_step_mass_ratios_L[i, :] = state.mass_ratios\n\n state = ThermodynamicState1D(right_pressure, right_density, right_velocity, gamma[idx], right_mass_ratios)\n state.update_states(half_step_density_flux,\n half_step_momentum_flux,\n half_step_energy_flux,\n specific_heats, molar_masses)\n half_step_densities_R[i] = state.rho\n half_step_velocities_R[i] = state.u\n half_step_pressures_R[i] = state.p\n half_step_mass_ratios_R[i, :] = state.mass_ratios\n\n # Calculate final fluxes\n density_fluxes = np.zeros(len(half_step_densities_R) - 1)\n momentum_fluxes = np.zeros(len(half_step_densities_R) - 1)\n total_energy_fluxes = np.zeros(len(half_step_densities_R) - 1)\n mass_ratio_fluxes = np.zeros((len(half_step_densities_R) - 1, mass_ratios.shape[1]))\n\n for i, dens_flux in enumerate(density_fluxes):\n solver = IterativeRiemannSolver()\n\n # Generate left and right states from cell averaged values\n left_state = ThermodynamicState1D(half_step_pressures_R[i],\n half_step_densities_R[i],\n half_step_velocities_R[i],\n gamma[i],\n half_step_mass_ratios_L[i, :])\n right_state = ThermodynamicState1D(half_step_pressures_L[i + 1],\n half_step_densities_L[i + 1],\n half_step_velocities_L[i + 1],\n gamma[i + 1],\n half_step_mass_ratios_R[i + 1, :])\n\n # Solve Riemann problem for star states\n p_star, u_star = solver.get_star_states(left_state, right_state)\n\n # Calculate fluxes using solver sample function\n p_flux, u_flux, rho_flux, is_left = solver.sample(0.0, left_state, right_state, p_star, u_star)\n\n # Store fluxes in array\n mass_ratio_fluxes[i, :] = left_state.mass_ratios if is_left else right_state.mass_ratios\n flux_gamma = left_state.gamma if is_left else right_state.gamma\n density_fluxes[i] = rho_flux * u_flux\n momentum_fluxes[i] = rho_flux * u_flux * u_flux + p_flux\n e_tot = p_flux / (flux_gamma - 1) + 0.5 * rho_flux * u_flux * u_flux\n total_energy_fluxes[i] = (p_flux + e_tot) * u_flux\n\n return density_fluxes, momentum_fluxes, total_energy_fluxes, mass_ratio_fluxes",
"def compare_averages_shell_pspec_dft():\n\n select_radius = 5. #degrees\n\n Nside=256\n Npix = 12 * Nside**2\n Omega = 4*np.pi/float(Npix)\n\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420/freqs - 1.\n\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n\n dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)\n variances = []\n means = []\n pks = []\n\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n\n steps = range(10,110,10)\n vmin,vmax = min(steps),max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins-5]))\n means.append(np.mean(pk[0:Nkbins-5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n\n ax0.axhline(y=dV*sig**2, color='k', lw=2.0)\n# ax0.legend()\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable,label=r'Number of snapshots', ax=ax0)\n ax0.set_ylabel(r\"P(k) [mK$^2$ Mpc$^{3}]$\")\n ax0.set_xlabel(r\"k [Mpc$^{-1}]$\")\n ax1.plot(steps, np.array(variances), label=\"Variance\")\n ax1.set_ylabel(r\"Variance(P(k)) [mK$^4$ Mpc$^{6}]$\")\n ax1.set_xlabel(u\"Number of 5° snapshots\")\n ax3.plot(steps, means, label=\"Mean\")\n ax3.set_ylabel(r\"Mean(P(k)) [mK$^2$ Mpc$^{3}]$\")\n ax3.set_xlabel(u\"Number of 5° snapshots\")\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto')#, norm=mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV*sig**2)))\n pl.show()",
"def get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)"
] | [
"0.6823951",
"0.68156433",
"0.64645",
"0.62532675",
"0.5977594",
"0.5888927",
"0.5858084",
"0.5850966",
"0.5778458",
"0.5767043",
"0.5753279",
"0.5737354",
"0.5723255",
"0.5714657",
"0.57088953",
"0.5705945",
"0.56355387",
"0.56164163",
"0.561608",
"0.56118447",
"0.5599761",
"0.5590967",
"0.55508405",
"0.5537181",
"0.55344886",
"0.55212325",
"0.55204123",
"0.55125654",
"0.55106556",
"0.55104506"
] | 0.7354267 | 0 |
Test Restaurant.__check_conditions decorator. The test passes if functions wrapped with this decorator raise an error because Hall, Delivery or Kitchen was not set. | def test_open_no_setup(restaurant_only, hall_only, kitchen_only, delivery_only):
    # Not all variants are checked here, because restaurant_only is not an isolated
    # object. The variants were checked previously and work alongside each other,
    # but they affect the result when combined.
# no setups
with pytest.raises(CustomWarning):
restaurant_only.open()
    assert restaurant_only.is_working is False, "You need to set up Kitchen, Delivery and Hall"
# only kitchen
with pytest.raises(CustomWarning):
restaurant_only.set_kitchen(kitchen_only)
restaurant_only.open()
    assert restaurant_only.is_working is False, "You need to set up Kitchen, Delivery and Hall"
# only delivery and kitchen
with pytest.raises(CustomWarning):
restaurant_only.set_delivery(delivery_only)
restaurant_only.set_kitchen(kitchen_only)
restaurant_only.open()
    assert restaurant_only.is_working is False, "You need to set up Kitchen, Delivery and Hall" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_available(self):\n feature_guard = _make_requires(True, \"Error text\")\n results = []\n\n @feature_guard\n def inner():\n results.append(True)\n return True\n\n assert inner() is True\n assert [True] == results",
"def test_simple_restaurant(restaurant_only):\n assert restaurant_only",
"def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)",
"def check_requirements(self): # pylint: disable=no-self-use\n self.is_skipped = False",
"def _check(self):\n if self.action_on_failure not in self.ACTION_ON_FAILURE:\n raise type_utils.TestListError(\n 'action_on_failure must be one of \"NEXT\", \"PARENT\", \"STOP\"')\n\n if self.parallel:\n if not self.subtests:\n raise type_utils.TestListError(\n '`parallel` should be set on test group')\n for subtest in self.subtests:\n if not subtest.IsLeaf():\n raise type_utils.TestListError(\n 'Test %s: all subtests in a parallel test should be leaf nodes' %\n self.id)\n if subtest.enable_services or subtest.disable_services:\n raise type_utils.TestListError(\n 'Test %s cannot be parallel with enable_services or '\n 'disable_services specified.' % subtest.id)\n\n # all subtests should come before teardown tests\n it = iter(self.subtests)\n if not self.teardown:\n # find first teardown test\n it = itertools.dropwhile(lambda subtest: not subtest.teardown, it)\n for subtest in it:\n if not subtest.teardown:\n raise type_utils.TestListError(\n '%s: all subtests should come before teardown tests' % self.id)\n\n for subtest in self.subtests:\n subtest._check() # pylint: disable=protected-access",
"def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0",
"def test_full_restaurant(restaurant_full):\n assert restaurant_full",
"def test_defining_only_and_defer_fails(self):",
"def _is_valid(self):\n # Test vol_id:\n assert isinstance(self.volume_id, int), ('The volume id vol_id must be an '\n f'integer, but {self.volume_id} was '\n 'given.')\n assert self.volume_id > 0, ('The volume id vol_id must be greater zero, '\n f'but {self.volume_id} was given.')\n\n # Test if ROI function is defined properly:\n assert callable(self.roi), ('roi must be a callable function '\n 'which depends on x,y,z.')\n\n # Testing the electric field:\n if not (callable(self.electric_field) or\n isinstance(self.electric_field, (int, float))):\n raise ValueError('e_field must be either a function or '\n 'a constant!')\n\n if callable(self.electric_field):\n args = inspect.getfullargspec(self.electric_field).args\n m = np.all(np.isin(['x', 'y', 'z'], args))\n m = m & (len(args) == 3)\n assert m, ('Wrong arguments for e_field. Expected arguments: '\n f'\"x\", \"y\" and \"z\" but {args} were given.')\n # Cannot add a specific if **kwargs are valid properties. Cannot\n # inspect nestpy functions.",
"def testConditionChecking(self):\n\n state = State.from_problem(self.prob)\n \n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition))\n\n with drive.instantiate([\"agent\", \"tru1\", \"apt2\"], self.prob):\n self.assertFalse(state.is_satisfied(drive.precondition))",
"def _backtest_model_specific_sanity_checks(self, *args: Any, **kwargs: Any) -> None:\n pass",
"def test_check_args_weekend(self):\n test_date = dt.datetime(2021, 6, 20, 11, 0, 0)\n with self.assertRaises(ValueError) as context:\n self.duedate.check_args(test_date, self.test_turn_time)\n self.assertTrue(\n \"You can submit requests during weekdays only.\" in str(context.exception))",
"def _check_parameters(self, target_function, **kwargs):\n # Ensure all arguments are =< 0 where relevant\n for keyword, value in kwargs.items():\n # Two conditions\n value_is_less_than_zero = value < 0\n keyword_is_relevant = keyword in ['mean', 'constant', 'low', 'mode', 'high']\n # Test conditions\n if keyword_is_relevant and value_is_less_than_zero:\n raise FairException('\"{}\" is less than zero.'.format(keyword))\n # Check that all required keywords are provided\n required_keywords = self._required_keywords[target_function]\n for required_keyword in required_keywords:\n if required_keyword in kwargs.keys():\n pass\n else:\n raise FairException('\"{}\" is missing \"{}\".'.format(str(target_function), required_keyword))",
"def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])",
"def validate(self):\n try:\n assert self.__age_calculate() is True, Exception('Age is less than expected')\n assert self.__is_user_repeated() is True,Exception(\n 'Recently request received in last 5 days')\n assert self.__is_indian_or_american() is True, Exception(\n 'Nationality should be india or america')\n assert self.__check_state() is True, Exception('State should be valid')\n assert self.__check_salary() is True, Exception(\n 'Salary should be below 90k and above 10k')\n self.__log.write_log(\"All Validation is Successful\")\n self.__response = {'response':'success'}\n return True\n except AssertionError as error:\n self.__response = {'response':f\"{error}\"}\n self.__log.write_log(\"Validation Error...Check the Eligibility Criteria...\")\n return False",
"def precondition(self, *args, **kwargs):\n pass",
"def test_freeze_user_final_grade_error1_improperly_configured(self, raise_on_exception, mock_refr, mock_get_fg):\n # case without freeze date\n with self.assertRaises(ImproperlyConfigured):\n api.freeze_user_final_grade(self.user, self.run_fa_with_cert, raise_on_exception=raise_on_exception)\n assert mock_refr.called is False\n assert mock_get_fg.called is False\n assert FinalGrade.objects.filter(user=self.user, course_run=self.run_fa_with_cert).exists() is False",
"def test_check_is_required(fake_check):\n assert fake_check.is_required()",
"def conditions():\n pass",
"def checkFood(self, food):\n pass",
"def check_errors(self) -> None:",
"def check_validity(self):",
"def test_unavailable(self):\n feature_guard = _make_requires(False, \"Error text\")\n\n @feature_guard\n def inner(): # pragma: nocover\n pytest.fail(\"Should not be called\")\n\n with pytest.raises(NotImplementedError) as e:\n inner()\n\n assert \"Error text\" in str(e.value)",
"def testCheck(self):\n change = ChangeState(self.config, \"changestate_t\")\n\n # Run through all good state transitions and assert that they work\n for state in self.transitions:\n for dest in self.transitions[state]:\n change.check(dest, state)\n dummystates = ['dummy1', 'dummy2', 'dummy3', 'dummy4']\n\n # Then run through some bad state transistions and assertRaises(AssertionError)\n for state in self.transitions:\n for dest in dummystates:\n self.assertRaises(AssertionError, change.check, dest, state)\n return",
"def check(self) -> None:",
"def test_validation_called(self, mock_field_validator, mock_error_builder):\n\n LandCompensationLandSoldValidator.validate(VALID_LAND_SOLD, VALID_WORK_DONE)\n\n calls = [\n call(VALID_LAND_SOLD, 'land-sold-description', 'Description of the charge', mock_error_builder(),\n summary_message='Describe the land sold',\n inline_message='This is the land bought by the authority, so they can do public works on the land.'),\n call().is_required(),\n\n call(VALID_LAND_SOLD, 'land-sold-description', 'Description of the charge',\n mock_error_builder(), summary_message=\"Answer too long\",\n inline_message=\"Answer must be shorter than 400 characters (about 60 words)\"),\n call().is_length_less_than_or_equal_to(400),\n\n call(VALID_WORK_DONE, 'land-works-particulars', 'The description of the work planned',\n mock_error_builder(), summary_message='Describe the work',\n inline_message='This is the work that the authority wants to do on the land they have bought.'),\n call().is_required(),\n\n call(VALID_WORK_DONE, 'land-works-particulars', 'The description of the work planned',\n mock_error_builder(), summary_message=\"Answer too long\",\n inline_message=\"Answer must be shorter than 400 characters (about 60 words)\"),\n call().is_length_less_than_or_equal_to(400)\n ]\n mock_field_validator.assert_has_calls(calls)",
"def test_check_all_set(self):\n try:\n self.settings1.check_all_set()\n except AssertionError:\n self.fail(\"check_all_set() raised unexpected AssertionError.\")\n\n with self.assertRaises(AssertionError):\n self.settings2.check_all_set()",
"def test_case_01(self):\n if True:\n self.fail()",
"def test_properties(self):\n with pytest.raises(AssertionError):\n self.ownership_state.amount_by_currency_id\n\n with pytest.raises(AssertionError):\n self.ownership_state.quantities_by_good_id",
"def test_required(self):\n \n from pystarlab.starlab import Makeking\n self.assertRaises(ValueError, Makeking)\n \n self.assertRaises(ValueError, Makeking, n=500)\n \n self.assertRaises(ValueError, Makeking, w=1.4)\n \n # this will fail if it raises any exceptions\n king_nonfailing = Makeking(n=500, w=1.4, s=12345678)"
] | [
"0.62134707",
"0.5922159",
"0.5859138",
"0.57996106",
"0.5779631",
"0.57793766",
"0.5778328",
"0.57430685",
"0.5736257",
"0.5732914",
"0.57291234",
"0.5714089",
"0.5701453",
"0.5699153",
"0.5698433",
"0.5688214",
"0.5600252",
"0.55860335",
"0.55774295",
"0.5557103",
"0.5550488",
"0.5548981",
"0.5548535",
"0.55231905",
"0.55218583",
"0.55168873",
"0.5501451",
"0.5497198",
"0.54865056",
"0.54689044"
] | 0.66464823 | 0 |
Test of cooking the same product twice. The test passes if the second cooking of the same product raises ValueError. | def test_cook_twice(cook_not_busy, product_for_cook):
cook_not_busy.cook_dish(product_for_cook)
with pytest.raises(ValueError):
cook_not_busy.cook_dish(product_for_cook) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False",
"def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500",
"def test_create_same_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Eggs',\n 'description': '''\n Bird and reptile eggs consist of a protective eggshell,\n albumen (egg white), and vitellus (egg yolk),\n contained within various thin membranes.\n The most commonly consumed eggs are chicken eggs.\n Other poultry eggs including those of duck and quail\n also are eaten.\n '''\n }\n product_count_before = models.Product.objects.count()\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.count(), product_count_before)",
"def test_DECISION_repeat_conflict(self, commit):\n self.assertRaises(AssertionError, lambda:\n self.node.fake_message(Decision(slot=1, proposal=PROPOSAL2)))",
"def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)",
"def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())",
"def test_6(self):\n toothpaste = Store.Product(11, \"toothpaste\", \"dental\", 2, 4)\n milk = Store.Product(12, \"milk\", \"dairy\", 2, 3)\n eggs = Store.Product(14, \"eggs\", \"dairy\", 2, 2)\n apple_juice = Store.Product(13, \"apple juice\", \"drink\", 1, 1)\n\n s = Store.Store()\n s.add_product(toothpaste)\n s.add_product(milk)\n s.add_product(eggs)\n s.add_product(apple_juice)\n\n henry = Store.Customer(\"henry\", \"mrh\", False)\n s.add_member(henry)\n\n s.add_product_to_member_cart(11, \"mrh\")\n s.add_product_to_member_cart(12, \"mrh\")\n s.add_product_to_member_cart(14, \"mrh\")\n self.assertAlmostEqual(s.check_out_member(\"mrh\"), 6.42, \"not the correct checkout amount\")",
"def test_save_item_with_duplicate_value_on_unique_field_raises(\n test_store, andy, pandy, candy\n):\n\n person = next(test_store.get_by(name=\"Andy\"))\n person.name = \"Pandy\"\n\n with pytest.raises(NotUniqueException):\n test_store.save(person)\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items",
"def checker(product):\n for item in INSTOCK:\n if item == product:\n return True\n return False",
"def test_cart_creation_duplicate_name(self):\n cart_name = 'cart name'\n self.cart_item_manager.create_cart('123', cart_name, False)\n self.cart_item_manager.create_cart('124', cart_name, False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.create_cart('123', cart_name, False)",
"def test_market_1_2(self):\n\n def check_1_2(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 2]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_2(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=0, expected_prices=[9, -4.5])\n check_1_2(buyers=[9, 8, 7, 6], sellers=[-6, -5, -4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-6, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n\n # PRICE CROSSES ZERO AT FIRST PHASE\n check_1_2(buyers=list(range(20)), sellers=[-3, -2, -1],\n expected_num_of_deals=1, expected_prices=[18, -9])",
"def test_update_cart_name_duplicate(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.create_cart(user_id, 'Cart2', False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.update_cart(user_id, cart_id, {'CartName': 'Cart2'})",
"def test_duplicate_entries(self):",
"def test_check_for_existing_reaction_keeps_identical_reactions_with_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=True)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=True)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertFalse(found, 'check_for_existing_reaction failed to identify duplicate template reactions')",
"def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)",
"def test_multiple_creates_do_not_increase_products(self):\n for i in xrange(0, 10):\n modified_po = copy.deepcopy(base_purchase_order)\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.post('/api/v1/purchase-order/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 201, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['status'], 'AWAITING APPROVAL')\n\n item1 = po_data['items'][0]\n #self.assertEqual(item1['supply']['id'], 1)\n self.assertEqual(item1['status'], u'Ordered')\n\n item2 = po_data['items'][1]\n #self.assertEqual(item1['supply']['id'], 2)\n self.assertEqual(item1['status'], u'Ordered')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=resp.data['id'])\n self.assertEqual(po.status, 'AWAITING APPROVAL')\n for item in po.items.all():\n self.assertEqual(item.status, u\"Ordered\")\n \n supplier = Supplier.objects.get(pk=1)\n\n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)\n\n supply = Supply.objects.get(pk=2)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)",
"def check_duplicate(self, state):\n pass",
"def test_unique_together(self):\n form = PriceForm({'price': '6.00', 'quantity': '1'})\n self.assertTrue(form.is_valid())\n form.save()\n form = PriceForm({'price': '6.00', 'quantity': '1'})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 1)\n self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])",
"def test_add_with_not_right_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"-14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)",
"def test_not_repeat_combination(self):\n self.assertTrue(\"-Yao Ming Zydrunas Ilgauskas\", show_players_sumheigh_is_input(177))\n self.assertFalse(show_players_sumheigh_is_input(177), \"-Zydrunas Ilgauskas Yao Ming\")",
"def test_check_for_existing_reaction_eliminates_identical_reactions_without_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=False)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=False)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to eliminate reactions without duplicate tag')",
"def test_validate_duplicate_wire(self, circuit):\n with pytest.raises(ValueError, match=\"Wire ID 0 is specified more than once.\"):\n circuit._validate_wire_ids(wire_ids=[0, 0])",
"def test_add_item_with_duplicate_value_on_unique_field_raises(\n test_store, andy, pandy, candy\n):\n\n person_with_duplicate_name = Person(name=\"Andy\", age=80)\n\n with pytest.raises(NotUniqueException):\n test_store.add(person_with_duplicate_name)\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)",
"def test_cart_creation_duplicate_default_will_not_create_new_cart(self):\n test_user_id = '123'\n cart_id_1 = self.cart_item_manager.create_cart(test_user_id, 'Cart1', True)\n cart_id_2 = self.cart_item_manager.create_cart(test_user_id, 'Cart3', True)\n self.assertEqual(cart_id_1, cart_id_2)\n self.assertEqual(1, len(self.cart_item_manager.get_user_carts(test_user_id)))",
"def test_get_similar_recipes(self):\n pass",
"def test_check_for_duplicates_with_duplicates(self):\n quotes = [api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author2\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author3\", \"Publication\", [\"tag1, tag2\"])]\n\n with self.assertRaisesRegexp(Exception, \"a duplicate quote was found on line 2 of 'stdin'. \"\n \"Quote: \\\"This is an added quote.\\\".\"):\n\n api._check_for_duplicates(quotes, \"stdin\")",
"def products_made(self, product) -> bool:\n return self.product_idx(product) is not None",
"def _check_sn_uniqueness(self):\n if self.product_tracking == 'serial' and self.lot_producing_id:\n sml = self.env['stock.move.line'].search_count([\n ('lot_id', '=', self.lot_producing_id.id),\n ('location_id.usage', '=', 'production'),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ])\n if sml:\n raise UserError(_('This serial number for product %s has already been produced', self.product_id.name))\n\n for move in self.move_finished_ids:\n if move.has_tracking != 'serial' or move.product_id == self.product_id:\n continue\n for move_line in move.move_line_ids:\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for byproduct %(product_name)s has already been produced',\n number=move_line.lot_id.name,\n product_name=move_line.product_id.name)\n co_prod_move_lines = self.move_finished_ids.move_line_ids.filtered(lambda ml: ml.product_id != self.product_id)\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_dest_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)\n\n for move in self.move_raw_ids:\n if move.has_tracking != 'serial':\n continue\n for move_line in move.move_line_ids:\n if float_is_zero(move_line.qty_done, precision_rounding=move_line.product_uom_id.rounding):\n continue\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for component %(component)s has already been consumed',\n number=move_line.lot_id.name,\n component=move_line.product_id.name)\n co_prod_move_lines = self.move_raw_ids.move_line_ids\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_dest_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)"
] | [
"0.6690166",
"0.63332933",
"0.62514263",
"0.61649024",
"0.6153124",
"0.605767",
"0.6029322",
"0.60229874",
"0.6018796",
"0.6007936",
"0.5988192",
"0.5973974",
"0.5963615",
"0.5908742",
"0.58811826",
"0.58582234",
"0.585461",
"0.5827044",
"0.5807381",
"0.58039653",
"0.579343",
"0.57912743",
"0.5782794",
"0.5782669",
"0.5776542",
"0.57610244",
"0.5741526",
"0.57412314",
"0.57355934",
"0.573267"
] | 0.7491278 | 0 |
Test of cooking by a busy cook. The test passes if the busy cook raises a CustomWarning. | def test_busy_cook(cook_busy, product_for_cook):
with pytest.raises(CustomWarning):
assert cook_busy.cook_dish(product_for_cook) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cook_twice(cook_not_busy, product_for_cook):\n\n cook_not_busy.cook_dish(product_for_cook)\n with pytest.raises(ValueError):\n cook_not_busy.cook_dish(product_for_cook)",
"def test_cook_set_free(cook_busy, product_for_cook):\n cook_busy.set_free(True)\n # if product needs to be cooked\n assert product_for_cook.get_need_cook_status() is True\n cook_busy.cook_dish(product_for_cook)\n assert product_for_cook.get_need_cook_status() is False",
"def _warn_exit_early(self):\n ready_outputs = self.n_completed_tasks - self._nb_consumed\n is_completed = self._is_completed()\n msg = \"\"\n if ready_outputs:\n msg += (\n f\"{ready_outputs} tasks have been successfully executed \"\n \" but not used.\"\n )\n if not is_completed:\n msg += \" Additionally, \"\n\n if not is_completed:\n msg += (\n f\"{self.n_dispatched_tasks - self.n_completed_tasks} tasks \"\n \"which were still being processed by the workers have been \"\n \"cancelled.\"\n )\n\n if msg:\n msg += (\n \" You could benefit from adjusting the input task \"\n \"iterator to limit unnecessary computation time.\"\n )\n\n warnings.warn(msg)",
"def life_critical():\n return True",
"def test_breeding_failed_carn(self):\n nt.assert_equal(self.carn.breeding(1), None)",
"def test_pm_Completeness(self):\n pass",
"def test_cliches_write_good_basic(self):\n assert chk.check_cliches_write_good(\"\"\"No cliches here.\"\"\") == []\n # use one of the example cliches to verify basic functionality\n assert chk.check_cliches_write_good(self.l_write_good) != []\n assert \"cliches.write_good\" in chk.check_cliches_write_good(\n self.l_write_good)[0]",
"def test_check_opt_crashed(self):\n self.assertEqual(check_opt(self.jobset2.job), 'ocrashed')",
"def test_noFailure(self):\n for i in range(10):\n self.assertTrue(self.circuit_breaker.available())",
"def test_life_critical():\n assert chap2.life_critical()",
"def test_block_bad_state(self):\n pass",
"def test_is_poor_style(self):\n self.assertEqual(1, uut.is_poor_style(\"test/with_leak.cc\"))\n self.assertEqual(0, uut.is_poor_style(\"test/without_leak.cc\"))",
"def test_failure_does_not_set_work_presentation_ready(self):\n\n provider = NeverSuccessfulBibliographicCoverageProvider(\n self.pool.collection\n )\n result = provider.ensure_coverage(self.identifier)\n assert CoverageRecord.TRANSIENT_FAILURE == result.status\n assert False == self.work.presentation_ready",
"def test_not_ready_if_insufficient_working_space(self):\n self.command.package = self.input_ovf\n\n self.command.ui.default_confirm_response = False\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertFalse(ready)\n self.assertRegex(reason, \"Insufficient disk space available for\"\n \" temporary file storage\")\n\n # User can opt to continue anyway\n self.command.ui.default_confirm_response = True\n self.command._cached_disk_requirements.clear()\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertTrue(ready)",
"def delay_checks(self):\n return False",
"def testcheatFalse(self):\n import Cheat\n res = Cheat.cheatclass.cheatF(self)\n exp = Cheat.cheatclass.cheatingR(self)\n\n self.assertFalse(res, exp)",
"def test_that_test_can_fail():\n try:\n verify_atomic_weight_for_substance(\"O2\", 1.0)\n except AssertionError as e:\n return\n\n raise AssertionError(\"test_that_test_can_fail() didn't fail\")",
"def testDryRun(self):\n\n\t\tself.testTooLong(dry_run=True)",
"def test_dont_cancel_bookings_in_cancellation_period_if_warning_not_sent(self, mock_tz):\n mock_tz.now.return_value = datetime(2015, 2, 11, 12, 0, tzinfo=dt_timezone.utc)\n\n # self.ticketed_event payment due date 2015/2/11 23:59\n\n unpaid_no_warning = baker.make(\n TicketBooking,\n ticketed_event=self.ticketed_event,\n paid=False,\n date_booked=datetime(2015, 2, 10, 5, 30, tzinfo=dt_timezone.utc),\n warning_sent=False\n )\n unpaid_warning_within_2_hrs = baker.make(\n TicketBooking,\n ticketed_event=self.ticketed_event,\n paid=False,\n date_booked=datetime(2015, 2, 10, 5, 30, tzinfo=dt_timezone.utc),\n warning_sent=True,\n date_warning_sent=datetime(2015, 2, 11, 10, 30, tzinfo=dt_timezone.utc),\n )\n unpaid_warning_more_than_2_hrs_ago = baker.make(\n TicketBooking,\n ticketed_event=self.ticketed_event,\n paid=False,\n date_booked=datetime(2015, 2, 10, 5, 30, tzinfo=dt_timezone.utc),\n warning_sent=True,\n date_warning_sent=datetime(2015, 2, 11, 9, 30, tzinfo=dt_timezone.utc),\n )\n\n self.assertFalse(unpaid_no_warning.cancelled)\n self.assertFalse(unpaid_warning_within_2_hrs.cancelled)\n self.assertFalse(unpaid_warning_more_than_2_hrs_ago.cancelled)\n\n management.call_command('cancel_unpaid_ticket_bookings')\n unpaid_no_warning.refresh_from_db()\n unpaid_warning_within_2_hrs.refresh_from_db()\n unpaid_warning_more_than_2_hrs_ago.refresh_from_db()\n self.assertFalse(unpaid_no_warning.cancelled)\n self.assertFalse(unpaid_warning_within_2_hrs.cancelled)\n self.assertTrue(unpaid_warning_more_than_2_hrs_ago.cancelled)",
"def test_case_01(self):\n if True:\n self.fail()",
"def this_needs_work_test_ensure_our_presence(self):\n self.do_test_ensure_our_presence()",
"def issue_locked_warning() -> None:\n print(\"\\n[red]Warning:[/red] Your bank account has been completely \"\n \"locked out for exceeding 2 or more categories!\")",
"def test_email_warnings(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 10, 0, tzinfo=dt_timezone.utc\n )\n\n # cancellation period starts 2015/2/14 17:00\n # payment_due_date 2015/2/11 23:59\n event = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 14, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n payment_due_date=datetime(2015, 2, 11, tzinfo=dt_timezone.utc),\n cancellation_period=1)\n # cancellation period starts 2015/2/14 17:00\n # payment_due_date 2015/2/12 23:59\n event1 = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 14, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n payment_due_date=datetime(2015, 2, 12, tzinfo=dt_timezone.utc),\n cancellation_period=1)\n # no cost, no warnings sent\n event2 = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 14, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=0,\n cancellation_period=1)\n baker.make_recipe(\n 'booking.booking', event=event, paid=False,\n payment_confirmed=False,\n date_booked=datetime(2015, 2, 9, 19, 30, tzinfo=dt_timezone.utc),\n _quantity=5,\n )\n baker.make_recipe(\n 'booking.booking', event=event1, paid=False,\n payment_confirmed=False,\n date_booked=datetime(2015, 2, 9, 19, 30, tzinfo=dt_timezone.utc),\n _quantity=5,\n )\n baker.make_recipe(\n 'booking.booking', event=event2, paid=False,\n payment_confirmed=False,\n date_booked=datetime(2015, 2, 9, 21, 00, tzinfo=dt_timezone.utc),\n _quantity=5,\n )\n _add_user_email_addresses(Booking)\n management.call_command('email_warnings')\n self.assertEqual(len(mail.outbox), 10)",
"def test_wip(self):\n self.assertTrue(not return_true())",
"def test_warning(self):\n self.p.compute_termination_criteria = True\n self.set_parameter_and_step(\"max_iter\", True, 5, \"ignore\")",
"def noCheck():\n dislin.nochek()",
"def test_block_bad_consensus(self):\n pass",
"def test_require_now_silently_succeeds_for_available_tests(self, test_generator):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n feature.require_now(\"no message\")\n check.assert_called_once()",
"def test_out_of_date(self):\n self.assertTrue(update_available(0.0))",
"def confirm_harvest_pickle(before, after):\n assert after.strategy.balanceOf > before.strategy.balanceOf\n assert after.strategy.pickleBalance == 0\n assert after.strategy.stakingRewards.stakedPickle == 0\n if before.sett.pricePerFullShare:\n assert after.sett.pricePerFullShare > before.sett.pricePerFullShare"
] | [
"0.65008634",
"0.625711",
"0.620796",
"0.58594835",
"0.5785558",
"0.5728688",
"0.57037574",
"0.57026243",
"0.5623727",
"0.56204015",
"0.5618192",
"0.56137985",
"0.5599557",
"0.5592458",
"0.5572111",
"0.55385923",
"0.5527014",
"0.551451",
"0.551242",
"0.55018896",
"0.5490242",
"0.5488431",
"0.54782844",
"0.5456323",
"0.5447276",
"0.54462975",
"0.5437933",
"0.54363036",
"0.5432498",
"0.54274404"
] | 0.7838216 | 0 |
Test of changing the state of a cook. A busy cook is set free and then tries to cook the dish. Cooking should succeed (product.get_need_cook_status should be False). | def test_cook_set_free(cook_busy, product_for_cook):
cook_busy.set_free(True)
# if product needs to be cooked
assert product_for_cook.get_need_cook_status() is True
cook_busy.cook_dish(product_for_cook)
assert product_for_cook.get_need_cook_status() is False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cook_twice(cook_not_busy, product_for_cook):\n\n cook_not_busy.cook_dish(product_for_cook)\n with pytest.raises(ValueError):\n cook_not_busy.cook_dish(product_for_cook)",
"def test_update_state1(self):\n pass",
"def test_update_state(self):\n pass",
"def test_update_state2(self):\n pass",
"def testSettled(self):\n self.injectEvent(safe.Settling.SETTLED)\n self.assertCurrentState(safe.Grabbing)",
"def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)",
"def test_update_state3(self):\n pass",
"def Cook(self, env, customer, cooking_time_type = 'fixed', manual_cook_time = None):\n with self.resource.request() as req:\n yield req #resource를 점유 해야 함.\n now_time = round(env.now , 1)\n req.info = [customer.name, now_time]\n if cooking_time_type == 'fixed':\n cooking_time = self.order_ready_time\n elif cooking_time_type == 'random':\n cooking_time = random.randrange(1,self.order_ready_time)\n elif cooking_time_type == 'uncertainty':\n cooking_time = customer.cook_time\n else:\n cooking_time = 0.001\n print('T :{} 가게 {}, {} 분 후 주문 {} 조리 완료'.format(int(env.now),self.name,cooking_time,customer.name))\n if manual_cook_time == None:\n yield env.timeout(cooking_time)\n else:\n yield env.timeout(manual_cook_time)\n print('T :{} 가게 {} 주문 {} 완료'.format(int(env.now),self.name,customer.name))\n customer.food_ready = True\n customer.ready_time = env.now\n self.ready_order.append(customer)",
"def updateState(self):\n\n if ('cutting' in self.step_ops) and (self.cut_state.user_cutting):\n self.step_ops['cutting'] = True\n \n if ('cooking' in self.step_ops) and (self.cut_state.user_cooking):\n self.step_ops['cooking'] = True\n\n # TODO: add the rest of the operations\n\n advance = True\n\n # Check if ALL operations are complete\n for op in self.step_ops:\n if self.step_ops[op] == False:\n advance = False\n break\n\n if advance:\n self.nextStep()",
"def test_update_state4(self):\n pass",
"def change_availability():\n artwork_sold = get_artwork_name()\n if not controls_utils.artwork_exists(artwork_sold):\n print('No record of that piece of art. ')\n else:\n artist = controls_utils.name_of_artist(artwork_sold)\n if not controls_utils.artwork_available(artwork_sold, artist):\n print('Sorry that piece has already been sold. ')\n else:\n response = input('Mark ' + artwork_sold + ' as sold? Y or N ')\n if response.upper() == 'Y':\n mark_as_sold(artwork_sold)\n while not controls_utils.response_affirmative(response):\n response = input('Are you sure you want to mark '\n + artwork_sold + ' by ' + artist + ' as sold? Y or N or press X to escape ')\n if response.upper() == 'X':\n break\n elif response.upper() == 'N':\n break",
"def test_charge_correct_for_fiction_after_close(self):\n rental = create_test_rental(\n book=self.book2,\n customer=self.user1,\n date_borrowed=\"2019-05-22 00:00:00.400952+00:00\",\n )\n close_rental_url = reverse(\"close_rental\", kwargs={\"pk\": rental.pk})\n\n data = {\"date_returned\": \"2019-05-25 13:46:57.249145+03:00\"}\n response = self.client.put(close_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.data[\"amount_charged\"], \"9.00\")\n self.assertEqual(response.data[\"rental_status\"], \"Closed\")\n self.assertEqual(response.data[\"currency\"], CURRENCY)",
"def test_ensure_state_change_if_needed(self, setState, commit):\n advisory = errata.Advisory(errata_id=123, errata_state='QE')\n advisory.ensure_state('NEW_FILES')\n setState.assert_called()",
"async def test_state_update(hass: HomeAssistant) -> None:\n await init_integration(hass)\n\n state = hass.states.get(\"sensor.home_cloud_ceiling\")\n assert state\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"3200.0\"\n\n future = utcnow() + timedelta(minutes=60)\n\n current_condition = load_json_object_fixture(\n \"accuweather/current_conditions_data.json\"\n )\n current_condition[\"Ceiling\"][\"Metric\"][\"Value\"] = 3300\n\n with patch(\n \"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions\",\n return_value=current_condition,\n ), patch(\n \"homeassistant.components.accuweather.AccuWeather.requests_remaining\",\n new_callable=PropertyMock,\n return_value=10,\n ):\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.home_cloud_ceiling\")\n assert state\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"3300\"",
"def test_is_active(self):\n\n self.sold.health = 0\n self.assertFalse(self.sold.is_active)",
"async def test_fan_read_state(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fan_service)\n\n state = await helper.async_update(\n ServicesTypes.FAN, {CharacteristicsTypes.ON: False}\n )\n assert state.state == \"off\"\n\n state = await helper.async_update(\n ServicesTypes.FAN, {CharacteristicsTypes.ON: True}\n )\n assert state.state == \"on\"",
"def test_updating_item_status(self):\n #test original quantity\n self.assertEqual(self.supply1.quantity, 10)\n self.assertEqual(self.supply2.quantity, 10)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['status'] = 'Received'\n modified_po['items'][0]['id'] = 1\n modified_po['items'][0]['status'] = 'Receieved'\n \n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n \n self.assertEqual(resp.status_code, 200, msg=resp)\n \n po = resp.data\n \n self.assertEqual(Supply.objects.get(pk=1).quantity, 20)",
"def test_consume_status(self):\n # Arrange\n player = Character.objects.get(pk=1)\n target = Character.objects.get(pk=2)\n\n object_to_test = Combat(player=player,\n target=target,\n player_attack_type=\"disrupt\",\n target_attack_type=\"block\",\n player_enhanced=True)\n\n # Inflict a status effect\n _ = object_to_test.do_combat_round()\n\n check_status_before_apply = StatusEffects.objects.filter(character_id=target.pk)\n self.assertTrue(check_status_before_apply.exists())\n\n # Act\n # Check and apply the status effect\n _ = object_to_test.check_and_apply_status()\n\n check_status_after_apply = StatusEffects.objects.filter(character_id=target.pk)\n self.assertFalse(check_status_after_apply.exists())",
"async def test_fanv2_read_state(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fanv2_service)\n\n state = await helper.async_update(\n ServicesTypes.FAN_V2, {CharacteristicsTypes.ACTIVE: False}\n )\n assert state.state == \"off\"\n\n state = await helper.async_update(\n ServicesTypes.FAN_V2, {CharacteristicsTypes.ACTIVE: True}\n )\n assert state.state == \"on\"",
"def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)",
"def test(self, state):\n\n # manual dice should have been typed in by this point, if they don't\n # exist exit\n if state.dice == \"manual\" and (\n state.rolls is None or state.rolls == []):\n return state\n\n test_dict = {\"attr\": self._test_1dice,\n \"fight_talent\": self._test_1dice,\n \"advantage\": self._test_1dice,\n \"skill\": self._test_3dice,\n \"spell\": self._test_3dice,\n \"misc\": self._test_misc}\n\n state = test_dict[state.selection.category](state)\n\n return state",
"def test_update_station_status(self):\n\t\t# Seed the db and initialize all counts to 0\n\t\tget_info.seed_station_information()\n\n\t\t# Save number of bikes/docks before update\n\t\tE40th_5thave = db.session.query(Station).filter(Station.id == 153).one()\n\t\tbikes_before = E40th_5thave.num_bikes_available\n\t\tdocks_before = E40th_5thave.num_docks_available\n\n\t\t# Update bike/dock numbers\n\t\tget_info.update_station_status()\n\n\t\tE40th_5thave = db.session.query(Station).filter(Station.id == 153).one()\n\t\tbikes_after = E40th_5thave.num_bikes_available\n\t\tdocks_after = E40th_5thave.num_docks_available\n\n\t\tself.assertNotEqual(bikes_before + docks_before, bikes_after + docks_after, 'Bikes did not update, or station is disabled.')",
"def test_give_correct_change(self):\n item, change, _ = give_item_and_change('coke', 1)\n self.assertEqual(item, 'coke')\n self.assertEqual(change, [.20, .05, .02])",
"def test_multiple_states(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n context = model.factory()\n\n # Create a dummy event and get it back.\n event_id = boilerplate.createEvent(context)\n event = repo.LookupActivityEvent()(event_id)\n\n # Cancel when created.\n state_changer = request.state_changer\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n with transaction.manager:\n bm.Session.add(event)\n bm.Session.add(context)\n state_changer.perform(context, a.CANCEL, event)\n s1 = context.work_status.value\n self.assertEqual(s1, s.CANCELLED)\n\n # Cancel when started.\n c2 = model.factory(initial_state=s.STARTED)\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n with transaction.manager:\n bm.Session.add(event)\n state_changer.perform(c2, a.CANCEL, event)\n s2 = c2.work_status.value\n self.assertEqual(s2, s.CANCELLED)",
"def test_single_quant_confirm(self):\n pick = self.quant_1.create_picking(self.picking_type_pick, confirm=True)\n # Check it is confirmed\n self.assertEqual(pick.state, \"confirmed\")",
"def test_update_condition_true(self):\n original_alt_info = getattr(self.form, 'alt_field_info', None)\n expected_label = 'alt_test_feature'\n test_method = getattr(self.form, 'condition_' + expected_label, None)\n alt_info = getattr(self, 'alt_field_info', None)\n expected = alt_info.get(expected_label, None)\n self.form.alt_field_info = alt_info\n self.form.test_condition_response = True\n actual = self.form.get_alt_field_info()\n\n self.assertIsNotNone(alt_info)\n self.assertIsNotNone(test_method)\n self.assertTrue(test_method())\n self.assertIsNotNone(expected)\n self.assertIn(expected_label, alt_info)\n self.assertEqual(expected, actual)\n\n self.form.test_condition_response = False\n self.form.alt_field_info = original_alt_info\n if original_alt_info is None:\n del self.form.alt_field_info",
"def test_ensure_state_untouched_if_not_necessary(self, setState):\n advisory = errata.Advisory(errata_id=123, errata_state='QE')\n advisory.ensure_state('QE')\n setState.assert_not_called()",
"def test_update_cupcake(self):\n\n response = self.client.patch(\"/cupcakes/10000\",\n json={\"flavor\": \"chocolate\",\n \"size\": \"giant\",\n \"rating\": 11,\n \"id\": 10000,\n \"image\": None})\n\n self.assertEqual(response.json['response']['flavor'], \"chocolate\")\n self.assertEqual(response.json['response']['size'], 'giant')\n self.assertEqual(response.json['response']['rating'], 11)\n self.assertEqual(response.status_code, 200)\n\n all_cupcakes = self.client.get('/cupcakes')\n all_cupcakes_data = all_cupcakes.json['response']\n self.assertEqual(len(all_cupcakes_data), 1)",
"def check(self):\n os.system(\"clear\")\n self.print()\n print(\"\\n\")\n _, response = _num_select(\"Would you like to make any changes?\",\n [\"Name\", \"Ingredients\", \"Steps\", \"Description\", \"Tags\", \"No\"])\n if response == \"Name\":\n self.name = input(\"Recipe Name: \")\n print(\"New name is: \" + self.name)\n return False\n elif response == \"Ingredients\":\n self.edit_ingredients()\n return False\n elif response == \"Steps\":\n self.edit_steps()\n return False\n elif response == \"Description\":\n self.description = input(\"Description: \")\n return False\n elif response == \"Tags\":\n self.edit_tags()\n return False\n return True",
"def the_changed_brightness_should_be_reflected_in_the_state_10():\n assert web_app.get_state()\n assert web_app.check_value_in_state(\"brightness\",\"10\")"
] | [
"0.61100876",
"0.5935458",
"0.59113294",
"0.5875447",
"0.58063436",
"0.57781756",
"0.57589597",
"0.5701074",
"0.5680897",
"0.5638473",
"0.5637342",
"0.56042886",
"0.5569046",
"0.5534861",
"0.5534524",
"0.54650354",
"0.545623",
"0.54499906",
"0.542222",
"0.5383806",
"0.5364995",
"0.5354965",
"0.5350797",
"0.5349044",
"0.5345805",
"0.5343979",
"0.5336155",
"0.53216034",
"0.53191954",
"0.5310999"
] | 0.75115085 | 0 |
Formats the output of a transaction receipt to its proper values | def output_transaction_receipt_formatter(receipt):
if receipt is None:
return None
logs_formatter = compose(functools.partial(map, outputLogFormatter), list)
formatters = {
'blockNumber': to_decimal,
'transactionIndex': to_decimal,
'cumulativeGasUsed': to_decimal,
'gasUsed': to_decimal,
'logs': lambda l: logs_formatter(l) if is_array(l) else l,
}
return {
key: formatters.get(key, identity)(value)
for key, value in receipt.items()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_receipt(self) -> typing.List[str]:\n lines = []\n euro_total=0\n usd_total=0\n gbp_total=0\n\n for item in self._items.items():\n euro_price = self._get_product_price(item[0]) * item[1]\n usd_price = self.get_price_in_currency(euro_price,\"USD\")\n gbp_price = self.get_price_in_currency(euro_price,\"GBP\")\n\n euro_total += euro_price\n usd_total += usd_price\n gbp_total += gbp_price\n\n euro_price_string = \"€%.2f\" % euro_price\n usd_price_string = \"$%.2f\" % usd_price\n gbp_price_string = \"£%.2f\" % gbp_price\n \n lines.append(item[0] + \" - \" + str(item[1]) + ' - ' + euro_price_string + ' - ' + \\\n usd_price_string + ' - ' + gbp_price_string)\n \n euro_total_str=\"€%.2f\" % euro_total\n usd_total_str=\"$%.2f\" % usd_total\n gbp_total_str=\"£%.2f\" % gbp_total\n\n lines.append(\"Total = \"+euro_total_str+ ' - ' + usd_total_str + ' - ' + gbp_total_str)\n logging.info(str(datetime.now())+': Receipt =' +str(lines))\n return lines",
"def convert_trans_to_string(self, transaction):\r\n #note, repr will not work because it doesn't remove curly brackets and colons\r\n record_list = []\r\n for mode, trans in transaction.iteritems():\r\n record_list.append(str(\"mode: \" + mode + \" \"))\r\n for product,quantity in trans.iteritems():\r\n record_list.append(str(product + \":\"))\r\n record_list.append(str(quantity) + \" \")\r\n \r\n record_string = \"\".join(record_list) + \"\\n\"\r\n return record_string",
"def receipt(basket):\n\n cprint(\"\"\"\\n\\n Item Price Discount Final Price\n------------------------------------------------------------------\"\"\")\n sigma_all = sum([e[1] for e in basket])\n sigma_discount = 0\n for name, price, discount in basket:\n discounted_price = (100 - discount) / 100 * price\n cprint(\"| %16s | £%10.2f | %3d\" % (name, price, discount) + \"%\" + f\" | £%10.2f |\" % discounted_price)\n sigma_discount += discounted_price\n cprint(\"|________________________________________________________________|\")\n\n cprint(\"\\n\\nTotal Price: £%.2f\" % sigma_all)\n cprint(\"Total Discount: £%.2f\" % (sigma_all - sigma_discount))\n cprint(\"Final Price: £%.2f\" % sigma_discount)\n\n cprint(\"\\nThank you for shopping at \" + SHOP_NAME)",
"def output(self):\n \n str_title_len = 50\n str_date_len = 40\n str_purpose_len = 30\n str_price_len = 10\n str_payer_len = 20\n #str_comment_len =\n \n if len(self.title) > (str_title_len - 2):\n out_title = self.title[:str_title_len - 2] + \" |\"\n else:\n out_title = self.title + (\" \" * (str_title_len - len(self.title) - 2)) + \" |\"\n \n # if date is presented with <datetime> object, then\n # then output it in format %d.%m.%y (31.12.99)\n if type(self.date) is datetime.datetime:\n out_date = \" \" + datetime.datetime.strftime(\"%d.%m.%y\") + \" |\"\n # or output as string otherwise\n else:\n if len(self.date) > (str_date_len - 4):\n out_date = \" \" + self.date[:str_date_len - 4] + \" |\"\n else:\n out_date = \" \" + self.date + (\" \" * (str_date_len - len(self.date) - 4)) + \" |\"\n \n if len(self.purpose) > (str_purpose_len - 4):\n out_purpose = \" \" + self.purpose[:str_purpose_len - 4] + \" |\"\n else:\n out_purpose = \" \" + self.purpose + (\" \" * (str_purpose_len - len(self.purpose) - 4)) + \" |\"\n \n # enormous sums aren't supported (over 9999999 at the moment)\n if len(str(self.price)) > (str_price_len - 4):\n raise Exception\n out_price = (' ' * (str_price_len - len(str(self.price)) - 4) ) + str(self.price) + ' |'\n \n if len(self.payer) > (str_payer_len - 2):\n out_payer = \" \" + self.payer[:str_payer_len - 2]\n else:\n out_payer = \" \" + self.payer + (\" \" * (str_payer_len - len(self.payer) - 2))\n \n out_line = out_title + out_date + out_purpose + out_price + out_payer\n return out_line",
"def pp_entry(self, entry):\n self.separator()\n print('Type: {}'.format(self.TRANSACTION_CODES[entry['Transaction Code']]))\n for item in entry:\n print(item.ljust(25, ' ') + ': {}'.format(entry[item]))\n self.separator()",
"def print_invoice(request, invoice_number):\n\n data = Invoice.objects.get(number=invoice_number)\n\n sub_total = sum([a.get(\"total_cost\") for a in data.items])\n s_gst_val = float(sub_total) * (float(data.s_gst) / 100)\n c_gst_val = float(sub_total) * (float(data.c_gst) / 100)\n\n data.addressed_to = data.addressed_to.replace(\"\\n\", \"<br>\")\n\n return render(request,\n \"invoice/invoice_print.html\",\n {\n \"data\": data,\n \"sub_total\": sub_total,\n \"s_gst_value\": s_gst_val,\n \"c_gst_value\": c_gst_val\n })",
"def printPayment(self):\n print self.output()",
"def format(self, data):",
"def receipt_text(self, **kw):\n return self._text(self._receipt_template, **kw)",
"def _format_output(selected_number, raw_data):\n tmp_data = {}\n data = collections.defaultdict(lambda: 0)\n balance = raw_data.pop('balance')\n for number in raw_data.keys():\n tmp_data = dict([(k, int(v) if v is not None else \"No limit\")\n for k, v in raw_data[number].items()])\n tmp_data['number'] = number\n if selected_number is None or selected_number == number:\n data[number] = tmp_data\n\n output = (\"\"\"Account Balance\n=======\n\nBalance: {:.2f} $\n\"\"\")\n print(output.format(balance))\n for number_data in data.values():\n _print_number(number_data)",
"def format_coin_output(coin):\n coin_output1 = \"Grabbing latest data for *\" + coin['name'] + \"*\\n\"\n coin_output2 = \"```{:20s}\\t${:.2f}\\n\".format(\"Price USD\",float(coin['price_usd']))\n coin_output3 = \"{:20s}\\t{:.8f}\\n\".format(\"Price BTC\",float(coin['price_btc']))\n coin_output4 = \"{:20s}\\t${:.2f}\\n\".format(\"Market Cap\",float(coin['market_cap_usd']))\n coin_output5 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 1hr\",float(coin['percent_change_1h']))\n coin_output6 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 24hr\",float(coin['percent_change_24h']))\n coin_output7 = \"{:20s}\\t{:.2f}%\\n```\".format(\"Change 7d\",float(coin['percent_change_7d']))\n return (coin_output1+coin_output2+coin_output3+coin_output4+coin_output5+coin_output6+coin_output7)",
"def __repr__(self):\n\n output = list()\n output.append('{resonance_id:6s}'.format(**self.par))\n output.append('{h_larmor_frq:6.1f}'.format(**self.par))\n output.append('{temperature:4.1f}'.format(**self.par))\n output.append('{:10.5f}'.format(self.val))\n output.append('{:10.5f}'.format(self.err))\n\n if self.cal:\n output.append('{:10.5f}'.format(self.cal))\n\n return ' '.join(output)",
"def __str__(self):\n string = \"\"\n for i in range(len(self.book[Trade.WAY_SELL])-1, -1, -1):\n string = string + \"%.10f\\t\\t%.8f\\n\" % (self.book[Trade.WAY_SELL][i].get_price(),\n self.book[Trade.WAY_SELL][i].get_quote_amount())\n string = string + \"-----------------------------------\\n\"\n for i in range(len(self.book[Trade.WAY_BUY])):\n string = string +\"%.10f\\t\\t%.8f\\n\" % (self.book[Trade.WAY_BUY][i].get_price(),\n self.book[Trade.WAY_BUY][i].get_quote_amount())\n return string",
"def format_step(self):\n if self.terminal:\n totrwdstr = \" %6.3f\" % self.total_reward\n else:\n totrwdstr = \"\"\n \n logging.info(\" %3d %1.0f => %7.1f %4.1f %7.1f %7.1f %4.1f %4.1f = %i %6.3f%s\" % (\n self.nsteps,\n self.action['heater_on'],\n self.state['heat_cost'],\n self.state['set_temp'],\n self.state['room_temp'],\n self.state['room_temp_change'],\n self.state['outside_temp'],\n self.state['outside_temp_change'],\n self.terminal,\n self.reward,\n totrwdstr,\n ))",
"def pay_formatter(self, pay_item):\n return {\n \"payment_id\": pay_item[0],\n \"amount_paid\": pay_item[1],\n \"payment_info\": pay_item[2],\n \"approved\": pay_item[3],\n \"pay_date\": pay_item[4],\n \"loan_id\": pay_item[5],\n \"farmer_id\": pay_item[6]\n }",
"def format(self) -> str:",
"def format_result(self, order):\n return u\"%s\" % (order)",
"def format_data(self, data):",
"def format_tuition(self, data):\n d = u'$%.2f' % data\n return d.replace('.00','')",
"def _format_remittance_information_70(self, val):\n if not self.use_operations_xml:\n val = self.format_MT103_field70(val, 35, 4)\n else:\n val = val.replace('newline', '\\n')\n return str(val)",
"def _massage_raw_pg_output_vals(self):\n pass",
"def format(self):\n ...",
"def final_info_printing(self, title_string, amount_to_display):\n self.__string_to_print = f\"{title_string} \" \\\n f\"{'.' * (40 - len(title_string))} \" \\\n f\"$ {'.' * (11 - len('{:0,.2f}'.format(amount_to_display)))}\" \\\n f\"{amount_to_display:0,.2f}\"\n return self.__string_to_print",
"def format_item(self, order):\n return unicode(order)",
"def outputBlockFormatter(block):\n\n # Transform to number\n block[\"gasLimit\"] = to_decimal(block[\"gasLimit\"])\n block[\"gasUsed\"] = to_decimal(block[\"gasUsed\"])\n block[\"size\"] = to_decimal(block[\"size\"])\n block[\"timestamp\"] = to_decimal(block[\"timestamp\"])\n\n if block.get(\"number\"):\n block[\"number\"] = to_decimal(block[\"number\"])\n\n block[\"difficulty\"] = to_decimal(block[\"difficulty\"])\n block[\"totalDifficulty\"] = to_decimal(block[\"totalDifficulty\"])\n\n if is_array(block.get(\"transactions\")):\n for item in block[\"transactions\"]:\n if not is_string(item):\n item = output_transaction_formatter(item)\n\n return block",
"def formatResult(self, result):\r\n return str(result)",
"def toQif(self):\n out=list();\n if 'date' in self:\n out.append(\"D{}\".format(self['date']));\n if 'amount' in self:\n out.append(\"T{}\".format(self['amount']));\n if 'memo' in self and len(self['memo'])>3:\n out.append(\"M{}\".format(self['memo']));\n if 'payee' in self and len(self['payee'])>3:\n out.append(\"P{}\".format(self['payee']));\n out.append(\"^\");\n return \"\\n\".join(out);",
"def __format__(self, format_spec: str) -> str:\n\n return format(self.balance, format_spec)",
"def create_invoice(self):\n sales_tax = 0.06\n item_sum = 0\n inv = f'Invoice#: {self.invoice_id}\\n'\n for key, value in self.items_with_price.items():\n item_sum += value\n inv += f'{key}.....${value:.2f}\\n'\n\n tax = item_sum * sales_tax\n inv += f'Tax.....${tax:.2f}\\n'\n inv += f'Total.....${tax + item_sum:.2f}'\n # print(inv)\n # returning for unit testing purposes\n return inv",
"def to_transfac(self):\n m = \"%s\\t%s\\t%s\\n\" % (\"DE\", self.id, \"unknown\")\n for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())):\n m += \"%i\\t%s\\t%s\\n\" % (i, \"\\t\".join([str(int(x)) for x in row]), cons)\n m += \"XX\"\n return m"
] | [
"0.6561996",
"0.6189167",
"0.60160506",
"0.59889376",
"0.5762017",
"0.57275635",
"0.56979066",
"0.5656792",
"0.56265295",
"0.5626407",
"0.55991745",
"0.55775195",
"0.5537981",
"0.55367833",
"0.5487953",
"0.5431081",
"0.5430332",
"0.5380763",
"0.5369978",
"0.53524035",
"0.5349362",
"0.53448206",
"0.5334473",
"0.53053594",
"0.5294763",
"0.52895546",
"0.52810216",
"0.52768064",
"0.5259357",
"0.5255496"
] | 0.676757 | 0 |
Formats the output of a block to its proper values | def outputBlockFormatter(block):
# Transform to number
block["gasLimit"] = to_decimal(block["gasLimit"])
block["gasUsed"] = to_decimal(block["gasUsed"])
block["size"] = to_decimal(block["size"])
block["timestamp"] = to_decimal(block["timestamp"])
if block.get("number"):
block["number"] = to_decimal(block["number"])
block["difficulty"] = to_decimal(block["difficulty"])
block["totalDifficulty"] = to_decimal(block["totalDifficulty"])
if is_array(block.get("transactions")):
for item in block["transactions"]:
if not is_string(item):
item = output_transaction_formatter(item)
return block | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reformat_block(specline, values):\n data = reformat_spec_line(specline)\n desc = '\\n'.join(values)\n data.append(desc)\n return data",
"def verbose(self, block: Block):\n print('\\n\\n==============================')\n print('Hash:\\t\\t', block.hash.hexdigest())\n print('Previous Hash:\\t', block.previous_hash.hexdigest())\n print('Nounce:\\t\\t', block.nonce)\n print('Data:\\t\\t', block.data)\n print('\\n\\n==============================')",
"def format(self):\n ...",
"def format(self, data):",
"def format_blocks(self):\n\n block_text = []\n for el, text in self._block_text.items():\n self.soft_break(el, text)\n content = ''.join(text)\n if content:\n block_text.append((content, self.additional_context + self.construct_selector(el)))\n return block_text",
"def format_data(self, data):",
"def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output",
"def display_blocks(self):\n buf = \"\"\n cpt = 0\n\n for block in self.blocks:\n buf += \"Block N. %d\\n\" % cpt\n buf += \"H \\t%s\\n\" % getHashBlock(block)\n buf += \"Header \\t%s \\n\\n\" % str(block.header)\n cpt += 1\n\n buf += \"Is chain valid ? %r\" % self.chainIsValid()\n print buf",
"def __repr__(self):\r\n rep = '\\n======= PRINT BLOCK HEADER =======\\n'\r\n rep += f'VersionNumber: {self.version_num}\\n'\r\n rep += f'hashPrevBlock: {self.hash_prev_block_header}\\n'\r\n rep += f'hashMerkleRoot: {self.hash_merkle_root}\\n'\r\n rep += f'Timestamp: {self.timestamp}\\n'\r\n rep += f'Bits: {self.bits}\\n'\r\n rep += f'Nonce: {self.nonce}\\n'\r\n rep += '======= END OF BLOCK HEADER ======='\r\n return rep",
"def format_coin_output(coin):\n coin_output1 = \"Grabbing latest data for *\" + coin['name'] + \"*\\n\"\n coin_output2 = \"```{:20s}\\t${:.2f}\\n\".format(\"Price USD\",float(coin['price_usd']))\n coin_output3 = \"{:20s}\\t{:.8f}\\n\".format(\"Price BTC\",float(coin['price_btc']))\n coin_output4 = \"{:20s}\\t${:.2f}\\n\".format(\"Market Cap\",float(coin['market_cap_usd']))\n coin_output5 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 1hr\",float(coin['percent_change_1h']))\n coin_output6 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 24hr\",float(coin['percent_change_24h']))\n coin_output7 = \"{:20s}\\t{:.2f}%\\n```\".format(\"Change 7d\",float(coin['percent_change_7d']))\n return (coin_output1+coin_output2+coin_output3+coin_output4+coin_output5+coin_output6+coin_output7)",
"def __str__(self):\n if self.filename:\n filename = self.filename\n else:\n filename = 'Unknown'\n if self.endian == '<':\n endian = 'Little Endian'\n else:\n endian = 'Big Endian'\n ret_val = ('FILE: %s\\nRecord Offset: %i byte\\n' +\n 'Header Endianness: %s\\n\\n') % \\\n (filename, self.record_offset, endian)\n ret_val += 'FIXED SECTION OF DATA HEADER\\n'\n for key in self.fixed_header.keys():\n ret_val += '\\t%s: %s\\n' % (key, self.fixed_header[key])\n ret_val += '\\nBLOCKETTES\\n'\n for key in self.blockettes.keys():\n ret_val += '\\t%i:' % key\n if not len(self.blockettes[key]):\n ret_val += '\\tNOT YET IMPLEMENTED\\n'\n for _i, blkt_key in enumerate(self.blockettes[key].keys()):\n if _i == 0:\n tabs = '\\t'\n else:\n tabs = '\\t\\t'\n ret_val += '%s%s: %s\\n' % (tabs, blkt_key,\n self.blockettes[key][blkt_key])\n ret_val += '\\nCALCULATED VALUES\\n'\n ret_val += '\\tCorrected Starttime: %s\\n' % self.corrected_starttime\n return ret_val",
"def output(self):\n to_write = 'S '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x1'])+' '\n to_write += str(self.offset[1] + self.def_field['y1'])+' '\n to_write += str(self.offset[0] + self.def_field['x2'])+' '\n to_write += str(self.offset[1] + self.def_field['y2'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write",
"def _format_output(selected_number, raw_data):\n tmp_data = {}\n data = collections.defaultdict(lambda: 0)\n balance = raw_data.pop('balance')\n for number in raw_data.keys():\n tmp_data = dict([(k, int(v) if v is not None else \"No limit\")\n for k, v in raw_data[number].items()])\n tmp_data['number'] = number\n if selected_number is None or selected_number == number:\n data[number] = tmp_data\n\n output = (\"\"\"Account Balance\n=======\n\nBalance: {:.2f} $\n\"\"\")\n print(output.format(balance))\n for number_data in data.values():\n _print_number(number_data)",
"def _massage_raw_pg_output_vals(self):\n pass",
"def outputLogFormatter(log):\n if log.get(\"blockNumber\"):\n log[\"blockNumber\"] = to_decimal(log[\"blockNumber\"])\n if log.get(\"transactionIndex\"):\n log[\"transactionIndex\"] = to_decimal(log[\"transactionIndex\"])\n if log.get(\"logIndex\"):\n log[\"logIndex\"] = to_decimal(log[\"logIndex\"])\n\n return log",
"def asformat(self, format):",
"def output(self):\n to_write = 'C '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x'])+' '\n to_write += str(self.offset[1] + self.def_field['y'])+' '\n to_write += str(self.def_field['radius'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write",
"def __str__(self):\r\n return (str(self.blockNum) + \" \" + str(self.coords))",
"def pformat(self, tree):\n return str(self.to_tree_text_block(tree))",
"def reformat(ctx):\n pass",
"def nice_output(self):\n return self.des",
"def nice_output(self):\n return self.des",
"def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()",
"def prettyPrint(description, ip_comp, host, width):\n value = (len(ip_comp) + len(host))\n #When printing values wider than the second column, split and print them\n if value > (int(width/3)):\n print(\"| \" + description.ljust(int(width/3)) + \" |\" ), \n i=0\n wrapped=textwrap.wrap(value, 60) \n for loop in wrapped:\n print(\"Fail point 3 inside loop\")\n if i == 0:\n print(loop + \"|\".rjust(int(width/3-(len(loop)))))\n else: \n print(\"| \".ljust(int(width/3+3)) + \" | \" + loop + \"|\".rjust(int(width/3-(len(loop)))))\n i=i+1\n else: \n print( \"| \" + description.ljust(int(width/3)) + \" | \" + ip_comp.rjust(int(width/3-6)) + \" | \" + host.rjust(int(width/3+2)) + \"|\")",
"def formatted(self) -> str:\r\n ...",
"def format_field(self, value, spec):\n cache = Cache()\n if spec == \"co\":\n # if cache(re.match(\"(.*)co$\", spec)):\n value = co_join(value)\n spec = \"s\"\n # cache.output.group(1) + \"s\"\n elif cache(re.match(r\"^sub(\\d?)_?(.*)$\", spec)):\n depth = (1 if cache.output.group(1) == \"\" else\n int(cache.output.group(1)))\n value = \"\\n\".join([\n \"{0}{1} = {2}\".format(depth * \" \", key, val)\n for key, val in value.items()])\n if cache.output.group(2) != \"\":\n value = (\n depth * \"[\" + cache.output.group(2) + depth * \"]\" + \"\\n\" +\n value)\n spec = \"s\"\n return super(Format, self).format_field(value, spec)",
"def output(self, state):\n h, t = state\n\n return h",
"def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)",
"def print_block():\n do_twice(do_block)\n do_twice(do_block)\n print_column()",
"def print_block():\n do_twice(do_block)\n print_column()"
] | [
"0.64059097",
"0.6007487",
"0.5995631",
"0.5994798",
"0.57876045",
"0.5775755",
"0.5706006",
"0.56863886",
"0.5680554",
"0.5670126",
"0.5669903",
"0.5630315",
"0.5629678",
"0.5619763",
"0.5568151",
"0.55656964",
"0.5549295",
"0.55125326",
"0.5497574",
"0.5473819",
"0.54342616",
"0.54342616",
"0.5427073",
"0.5417499",
"0.54116344",
"0.54113334",
"0.5399961",
"0.53947765",
"0.53895164",
"0.5387718"
] | 0.7147282 | 0 |
Formats the output of a log | def outputLogFormatter(log):
if log.get("blockNumber"):
log["blockNumber"] = to_decimal(log["blockNumber"])
if log.get("transactionIndex"):
log["transactionIndex"] = to_decimal(log["transactionIndex"])
if log.get("logIndex"):
log["logIndex"] = to_decimal(log["logIndex"])
return log | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format(self, record):\n msg = logging.Formatter.format(self, record)\n label, color = self.label(record)\n if self.strip:\n return \"{:10s}{}\".format(label, sub(\"\\033\\\\[[0-9]+m\", \"\", msg, 0))\n else:\n return \"\\033[1;{}m{:10s}\\033[0m{}\".format(color, label, msg)",
"def format(self, record):\n row = [self.formatTime(record, self.datefmt), record.name, record.levelname]\n keys = filter(self.filterer, record.__dict__)\n extra = [record.__dict__[k] for k in keys]\n\n self.writer.writerow(row + extra + [record.getMessage()])\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()",
"def format(self, record: LogRecord) -> str:\n record.asctime = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n message = record.getMessage()\n if record.exc_info:\n eno = record.exc_info\n stacktrace = \"\".join(traceback.format_exception(None, eno[1], eno[2]))\n message += f\" excp: {stacktrace}\"\n if record.stack_info:\n stack = self.formatStack(record.stack_info)\n message += f\" trace: {stack}\"\n\n log_output = {\n \"tool\": type(self.checker).__name__,\n \"type\": \"infrastructure\",\n \"severity\": record.levelname,\n \"severityLevel\": max(0, record.levelno // 10 - 1),\n \"timestamp\": record.asctime,\n \"module\": record.module,\n \"function\": record.funcName,\n \"flag\": self.checker.flag,\n \"flagIndex\": self.checker.flag_idx,\n \"runId\": self.checker.run_id,\n \"roundId\": self.checker.round,\n \"relatedRoundId\": self.checker.flag_round,\n \"message\": message,\n \"teamName\": self.checker.team,\n \"teamId\": self.checker.team_id,\n \"serviceName\": self.checker.service_name,\n \"method\": self.checker.method,\n }\n\n return LOGGING_PREFIX + json.dumps(log_output)",
"def format_log(request, message):\n now = datetime.now().replace(microsecond=0)\n log = MESSAGE_LOG_FORMAT % dict(request.META, MESSAGE=message, TIME=now)\n return log + \"\\n\"",
"def format(self, record):\n log_fmt = self.FORMATS.get(record.levelno)\n return BaseFormatter(log_fmt).format(record)",
"def format_result(self):\n return ('{}\\n\\n{}'.format(\n LogParser.format_dict(LogParser.order_dict(self.urls)[:3]),\n LogParser.format_dict(LogParser.order_dict(self.status_codes))))",
"def log_message(self, format, *args):",
"def format(self, message):",
"def format(self, record):\n message = record.getMessage()\n asctime = self.formatTime(record, self.datefmt)\n name = yellow(record.name)\n\n s = \"%(timestamp)s %(levelname)s %(name)s \" % {\n \"timestamp\": green(\"%s,%03d\" % (asctime, record.msecs), bold=True),\n \"levelname\": self.LEVELS[record.levelname],\n \"name\": name,\n }\n\n if \"\\n\" in message:\n indent_length = len(re_color_codes.sub(\"\", s))\n message = message.replace(\"\\n\", \"\\n\" + \" \" * indent_length)\n\n s += message\n return s",
"def logger_format(self) -> str:\n\t\treturn ('%(asctime) -19s | %(levelname) -8s | %(threadName) -10s | '\n\t\t\t\t'%(funcName) -16s | %(message)s')",
"def formatter(record):\n\n lines = record[\"message\"].splitlines()\n prefix = (\n \"{time:YY-MM-DD HH:mm:ss.S} | {level.name:<8} | \"\n + \"{file}.{function}:{line} - \".format(**record)\n )\n indented = (\n lines[0] + \"\\n\" + \"\\n\".join(\" \" * len(prefix) + line for line in lines[1:])\n )\n record[\"message\"] = indented.strip()\n return (\n \"<g>{time:YY-MM-DD HH:mm:ss.S}</> | <lvl>{level.name:<8}</> | \"\n + \"<e>{file}.{function}:{line}</> - <lvl>{message}\\n</>{exception}\"\n )",
"def format(self, record: logging.LogRecord = None) -> str:\n # s = super().format(record)\n s = None\n e = {}\n e['id'] = uuid.uuid4().hex\n e['message'] = record.getMessage()\n # log.warning('record.message: %r', record.getMessage())\n # log.warning('record.args: %r', record.args)\n e['created'] = record.created\n e['priority'] = record.levelname\n e['args'] = record.args\n e['source_code'] = {}\n e['source_code']['pathname'] = record.pathname\n e['source_code']['funcName'] = record.funcName\n e['source_code']['lineno'] = record.lineno\n ctx = record.args.get(PIPELINE_CONTEXT_KEY, None)\n if ctx:\n e[PIPELINE_CONTEXT_KEY] = ctx.toDict()\n # use array enclosure a[] to mainain the log file\n # yaml compliant as new events are appended\n # - event1:\n # - event2:\n # - ...\n a = [e]\n s = yaml.dump(a)\n return s",
"def format(self, record):\n record.message = indent_string(record.getMessage())\n if \"%(asctime)\" in self._fmt:\n record.asctime = self.formatTime(record, self.datefmt)\n s = self._fmt % record.__dict__\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s = s + \"\\n\"\n s = \"{0} Exception:\\n {1}\".format(s, indent_string(record.exc_text))\n return s",
"def format(self, record):\n\n\n if not hasattr(record, 'filename_'):\n record.file_indicator = '-'\n else:\n record.file_indicator = os.path.relpath(record.filename_.strip(),\n self.study_dir)\n record.line_indicator = self.format_aggregated(\n record,\n 'line_number',\n ' line %d:',\n ' lines [%s]:',\n optional=True)\n record.column_indicator = self.format_aggregated(\n record,\n 'column_number',\n ' column %d:',\n ' columns [%s]:',\n optional=True)\n record.cause_indicator = self.format_aggregated(\n record,\n 'cause',\n \"; value encountered: '%s'\",\n \"; values encountered: ['%s']\",\n join_string=\"', '\",\n optional=True)\n\n # format the string based on these fields\n formatted_result = super(LogfileStyleFormatter, self).format(record)\n\n # prepend an empty line if the filename is different than before\n current_filename = getattr(record, 'filename_', '')\n if (self.previous_filename is not None and\n current_filename != self.previous_filename):\n formatted_result = '\\n' + formatted_result\n self.previous_filename = current_filename\n\n return formatted_result",
"def formatLogs(logs,format):\n formattedLogs=[]\n \n if(format.__eq__(\"json\")):\n for log in logs:\n formattedLogs.append(json.dumps(dict(log)))\n return formattedLogs\n elif(format.__eq__(\"xml\")):\n for log in logs:\n formattedLogs.append(dict2xml.dict2xml(dict(log)))\n return formattedLogs\n else:\n return logs",
"def get_formatted_task_log(self):\n try:\n log = requests.get(self.gs_base_url + \"/out.log\").content\n except:\n return [f\"####-##-## ##:##:## Task ID: {self.name}\\n\"]\n return (f\"####-##-## ##:##:## Task ID: {self.name}\\n\" + log.decode('utf-8')).splitlines()",
"def _format_msg(self, format_str, *args):\r\n return u\"{0} - - [{1}] {2}\\n\".format(\r\n self.client_address[0],\r\n self.log_date_time_string(),\r\n format_str % args\r\n )",
"def _format_msg(self, format_str, *args):\n if not args:\n format_str = six.moves.urllib.parse.unquote(format_str)\n return \"{} - - [{}] {}\\n\".format(\n self.client_address[0],\n self.log_date_time_string(),\n format_str % args\n )",
"def _stab_log_data(self, timestamp, data, logconf):\n print('[%d][%s]: %s' % (timestamp, logconf.name, data))",
"def log_format_info(event_str, data = {}):\n\tcheck_type(event_str, StringType)\n\tcheck_type(data, DictType)\n\n\tinfo = []\n\tfor k in data:\n\t\tinfo.append('{0}: {1}'.format(k, data[k]))\n\treturn '{0}. Info: {1}'.format(event_str, ', '.join(info))",
"def log_message(self, format, *args):\n if self.headers:\n xff = self.headers.getheader('X-Forwarded-For', '-')\n xgo = self.headers.getheader('X-Grafana-Org-Id', '-')\n ua = self.headers.getheader('User-Agent', '-')\n\n logging.info(\"%s - - [%s] %s [X-Forwarded-For: %s, X-Grafana-Org-Id: %s, User-Agent: %s]\" %\n (self.client_address[0], self.log_date_time_string(), format % args, xff, xgo, ua))\n else:\n logging.info(\"%s - - [%s] %s\" %\n (self.client_address[0], self.log_date_time_string(), format % args))",
"def pretty_end_log(title):\n output = '>' * 10 + ' ' + title + ' ' + '<' * 10 + '\\n\\n'\n return output",
"def _process_str(self, fmt, *args, **kwargs):\n log_str = fmt\n if len(args) > 0 or len(kwargs) > 0:\n log_str = fmt.format(*args, **kwargs)\n\n return log_str",
"def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()",
"def output_log():\r\n log_str = (\"Contents:\\n\"\r\n f\"Input file: {LOC}\\n\"\r\n f\"Length checked: {LENGTH}\\n\"\r\n f\"Rows where {COL_NAME} is non_standard: {FLAGGED_FILE}\\n\"\r\n f\"NB, if {FLAGGED_FILE} does not exist, no values with\"\r\n f\"length not of {LENGTH} were found.\\n\"\r\n f\"Rows where {COL_NAME} is of standard length: {CLEANED_FILE}\\n\"\r\n f\"NB, if {CLEANED_FILE} does not exist, no values with\"\r\n f\"length not of {LENGTH} were found.\"\r\n f\"Rows where {COL_NAME} is NULL: {NULL_FILE}\\n\"\r\n f\"NB, if {NULL_FILE} does not exist, no values with\"\r\n )\r\n with open(LOG_FILE, 'w+') as f:\r\n f.write(log_str)",
"def log(string, *format):\n print (string+\"\\n\") % format",
"def format_output(output, case_number, status):\n output.append(\"Case #%s: %s\" % (case_number, status))",
"def set_log_format(format):\n root = logging.getLogger()\n handler = root.handlers[0]\n formatter = logging.Formatter(format)\n handler.setFormatter(formatter)",
"def test_format_log_message(self, log_message):\n token = Token(\"NDY3MjIzMjMwNjUwNzc3NjQx\", \"XsySD_\", \"s45jqDV_Iisn-symw0yDRrk_jf4\")\n log_message.format.return_value = \"Howdy\"\n\n return_value = TokenRemover.format_log_message(self.msg, token)\n\n self.assertEqual(return_value, log_message.format.return_value)\n log_message.format.assert_called_once_with(\n author=self.msg.author,\n author_id=self.msg.author.id,\n channel=self.msg.channel.mention,\n user_id=token.user_id,\n timestamp=token.timestamp,\n hmac=\"x\" * len(token.hmac),\n )",
"def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()"
] | [
"0.6809159",
"0.6568247",
"0.6517458",
"0.64604336",
"0.63606316",
"0.63424927",
"0.6341781",
"0.63280874",
"0.6323291",
"0.63123155",
"0.6245183",
"0.6183246",
"0.6177255",
"0.6145785",
"0.61174196",
"0.61150354",
"0.6070282",
"0.60515445",
"0.6051452",
"0.6049557",
"0.60433686",
"0.603381",
"0.60109496",
"0.5991823",
"0.5966236",
"0.5930233",
"0.5929388",
"0.5907589",
"0.59047997",
"0.5894704"
] | 0.71652734 | 0 |