query: string (lengths 9 – 9.05k)
document: string (lengths 10 – 222k)
metadata: dict
negatives: list (length 30)
negative_scores: list (length 30)
document_score: string (lengths 4 – 10)
document_rank: string (2 classes)
Returns the mediant (n1+n2)/(d1+d2) of the two fractions, represented as a 2-tuple (n, d). frac1 and frac2 are given as 2-tuples (n, d).
def mediant(frac1, frac2):
    # print "%s m %s = %s" % (frac1, frac2, (frac1[0]+frac2[0], frac1[1]+frac2[1]))
    return (frac1[0]+frac2[0], frac1[1]+frac2[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simplify_fraction(a, b):\n c = gcd(a, b)\n return a // c, b // c", "def diff_frac(data_1, data_2):\n\n frac_1 = np.sum(data_1) / len(data_1)\n frac_2 = np.sum(data_2) / len(data_2)\n\n return frac_1 - frac_2", "def fraction_to_proper_fraction(rational):\n assert isinstance(rational, Fraction), repr(rational)\n quotient = int(rational)\n residue = rational - quotient\n return quotient, residue", "def __mul__(self, frac):\n numerator = self.numerator * frac.numerator\n denominator = self.denominator * frac.denominator\n gcd = math.gcd(numerator, denominator)\n\n return Fraction(int(numerator/gcd), int(denominator/gcd))", "def fraction_gcd(x, y):\n a = x.numerator\n b = x.denominator\n c = y.numerator\n d = y.denominator\n return Fraction(gcd(a, c), lcm(b, d))", "def denominator(num):\n require_type(isa(num,fractions.Fraction) or isa(num,int),\n 'parameter of denominator must be a fraction or integer')\n return num.denominator", "def compute_fraction(x, y): \r\n if x == 'NaN' or y == 'NaN':\r\n return 0.\r\n if x == 0 or y == 0: \r\n return 0\r\n fraction = x / y\r\n return fraction", "def to_mass_fraction(molar_ratio, massfrac_denominator, numerator_mass, denominator_mass):\n return molar_ratio * massfrac_denominator * numerator_mass / denominator_mass", "def morph_fraction(lex, num, den, digit):\n raise NotImplementedError", "def divide(num1, num2):\n return float(num1) / float(num2)", "def to_molar_ratio(massfrac_numerator, massfrac_denominator, numerator_mass, denominator_mass):\n return (massfrac_numerator / numerator_mass) / (massfrac_denominator / denominator_mass)", "def fractionify_and_reduce(n):\n nume, denom = fractionify(n)\n return reduce(nume, denom)", "def normalized_fraction(p, q=1):\r\n x, y = p, q\r\n while y:\r\n x, y = y, x % y\r\n if x != 1:\r\n p //= x\r\n q //= x\r\n if q == 1:\r\n return p\r\n return mpq((p, q))", "def denominator(self, ???):", "def div1(left: float, right: float) -> float:\n return left / right", "def photon_fraction(r, r1, r2):\n return rotate_phasor(r, r1, r2).real", "def div2(left: float, right: float) -> float:\n return left / right", "def divide(n1, n2):\n return n1 / n2", "def _calculate_mole_fraction(mol1, mol2):\n return mol1/(mol1+mol2)", "def div(a, b):\r\n if type(b) in inttypes_set:\r\n if not b:\r\n return Infinity(a)\r\n raise ZeroDivisionError('%r / %r' % (a, b))\r\n if b == 1:\r\n return a\r\n if type(a) in inttypes_set:\r\n return normalized_fraction(a, b)\r\n return a / b", "def ratio_func(a, b):\n return a / b", "def test_fraction_math_ops(self):\n fract1 = source.Fraction(5, 3)\n fract2 = source.Fraction(2, 3)\n self.assertEqual(fract1 + fract2, source.Fraction(7, 3))\n self.assertEqual(fract1 + 5, source.Fraction(20, 3))\n self.assertEqual(3 + fract1, source.Fraction(14, 3))\n self.assertEqual(fract1 * fract2, source.Fraction(10, 9))\n self.assertEqual(5 * fract2, source.Fraction(10, 3))", "def find_fractions():\n num_list = []\n den_list = []\n for n in range(10, 100):\n for d in range(10, 100):\n if d > n:\n x = n / d\n ln = list(str(n))\n ld = list(str(d))\n if (ln[0] == ld[1]) and (ln[0] != '0'):\n if ld[0] != '0':\n if (int(ln[1]) / int(ld[0])) == x:\n print \"n/d =\", n, d\n num_list.append(n)\n den_list.append(d)\n else:\n continue\n elif (ln[1] == ld[0]) and (ln[1] != '0'):\n if ld[1] != '0':\n if (int(ln[0]) / int(ld[1])) == x:\n print \"n/d =\", n, d\n num_list.append(n)\n den_list.append(d)\n else:\n continue\n else:\n continue\n return num_list, den_list", "def ratio_calc(first_strandI, second_strandI):\n if 
first_strandI + second_strandI != 0:\n Ratio = first_strandI / float(first_strandI + second_strandI)\n return Ratio\n else:\n return np.nan", "def parseFraction(f):\n p = f.find(\"/\")\n if p < 1:\n return None\n s1 = f[:p]\n s2 = f[p+1:]\n try:\n v1 = int(s1)\n v2 = int(s2)\n except ValueError:\n return None\n if v2:\n return 1.0 * v1 / v2\n else:\n return None", "def rational_frac_tests():\n print('\\n==========================================')\n print('rfrac.py @ rational_frac_tests: //////////\\n')\n print(RationalFrac(-1), RationalFrac(0))\n # __prime_factors()\n frac0 = RationalFrac(4.5)\n frac1 = RationalFrac(-0.125)\n frac2 = RationalFrac(0.99999)\n f = [frac0, frac1, frac2]\n print('float(-0.125) =', float(f[1]))\n print('float(0.99999) =', float(f[2]))\n print('9/2 + -1/8 =', f[0] + f[1])\n print('9/2 * -1/8 =', f[0] * f[1])\n print('-1/8 * 9/2 =', f[1] * f[0])\n print(f)\n print('frac(-0/1) + frac(-0/1) =',\n RationalFrac(-0) + RationalFrac(-0))\n f3 = RationalFrac('-7/29')\n print(f3)\n print(RationalFrac(25, 1000))\n print('\\nrfrac.py @ end of rational_frac_tests ////')\n print('==========================================\\n')", "def test_denominator_float(self):\n steps = save_divide(np.ones(2), 2)\n np.testing.assert_equal(steps, 0.5 * np.ones(2))", "def __mul__ (self,other):\n if (self.debug): print(f'enter fraction.__mul__ with {other}')\n f3 = fraction(self.value[0]*other.value[0],self.value[1]*other.value[1])\n if (self.debug): print(f3, self, other)\n f3.simplify()\n return f3", "def divide(num1, num2):\n quotient = num1 / float(num2)\n return quotient", "def ratio_calculator(numerator, denominator):\n ratios = []\n for i in numerator:\n for j in denominator:\n if i[0] == j[0] and j[1] != 0:\n ratios.append([i[0], round(float(i[1]) / j[1], 3)])\n break\n elif i[0] == j[0]:\n ratios.append([i[0], 0])\n return ratios" ]
[ "0.62101525", "0.5934107", "0.58908975", "0.58583033", "0.57450724", "0.57265115", "0.57136464", "0.56596386", "0.5658014", "0.56181496", "0.5612167", "0.5595354", "0.55772436", "0.5576887", "0.55666596", "0.5561884", "0.5558363", "0.5557005", "0.5555802", "0.5551015", "0.5546394", "0.55441576", "0.5532617", "0.5527722", "0.551052", "0.55094326", "0.5488323", "0.5480262", "0.5465409", "0.54604214" ]
0.80688035
0
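The mediant above is the single operation the whole Stern-Brocot construction is built from: the new fraction (n1+n2)/(d1+d2) always lands strictly between its two arguments. A minimal Python 3 sketch that restates the function from this record, with illustrative assertions that are not part of the dataset:

def mediant(frac1, frac2):
    # (n1, d1), (n2, d2) -> (n1 + n2, d1 + d2); the result lies strictly between the inputs
    return (frac1[0] + frac2[0], frac1[1] + frac2[1])

assert mediant((1, 3), (1, 2)) == (2, 5)   # 1/3 < 2/5 < 1/2
assert mediant((0, 1), (1, 1)) == (1, 2)   # the first fraction inserted between 0/1 and 1/1
print(mediant((1, 2), (2, 3)))             # (3, 5)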
Return True if frac1 is greater than frac2.
def compare_fracs(frac1, frac2):
    return frac1[0]*frac2[1] > frac2[0]*frac1[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __gt__(self, frac):\n\n if isinstance(frac,Fraction):\n if (self.numerator ==0 and self.denominator==0) or (frac.numerator ==0 and frac.denominator==0):\n return self.inf_size > frac.inf_size\n if self.inf_size!=0 or frac.inf_size!= 0:\n return self.inf_size > frac.inf_size\n else:\n return self.numerator*frac.denominator > frac.numerator*self.denominator\n elif type(frac) in(float,int):\n if self.numerator ==0 and self.denominator==0:\n return 0 > frac\n return self.numerator/self.denominator > frac\n else:\n raise ValueError('Fraction can only be compare to int float or fractions')", "def fp_gt(x: float, y: float) -> bool:\n return not fp_eq(x, y) and x > y", "def test_GreaterThan(self):\n self.assertTrue(Fraction(9,10)>Fraction(7,10))", "def test_gt(self):\n f12: Fraction = Fraction(1, 2)\n f34: Fraction = Fraction(3, 4)\n f105: Fraction = Fraction(10, 5)\n self.assertTrue(f34 > f12)\n self.assertFalse(f12 > f105)\n self.assertFalse(f12 > f12)", "def __gt__(self, other):\n self.numerator = abs(self.numerator)\n self.denominator = abs(self.denominator)\n other.numerator = abs(other.numerator)\n other.denominator = abs(other.denominator)\n\n num1 = (self.numerator/other.numerator)\n num2 = (self.denominator/other.denominator)\n\n if num1 > num2:\n return True\n else:\n return False", "def __gt__(self, other):\n if isinstance(other, float):\n return self.floatvalue > other\n else:\n return not self.negative and not self == other", "def test_GreaterThanorEqualto(self):\n self.assertTrue(Fraction(7,10)>=Fraction(7,10))", "def __gt__(self,f2):\n return self.__num * f2.den > self.__den * f2.num", "def isGE(self, a : float, b : float) -> bool:\n return (a >= b - self.tol * max(abs(a), abs(b), 1.0)) #and (a >= b - 0.1)", "def more(value1, value2):\n number1 = FloatConverter.to_nullable_float(value1)\n number2 = FloatConverter.to_nullable_float(value2)\n\n if number1 == None or number2 == None:\n return False\n\n return number1 > number2", "def is_close(num1, num2, prec=dec.Decimal('1E-9')):\r\n if not isinstance(num1, dec.Decimal):\r\n num1 = dec.Decimal(num1)\r\n if not isinstance(num2, dec.Decimal):\r\n num2 = dec.Decimal(num2)\r\n err = abs(num1 - num2)\r\n if num1 == 0:\r\n if num2 == 0:\r\n return True\r\n return err < prec\r\n if num2 == 0:\r\n return err < prec\r\n return 2 * err / (num1 + num2) < prec", "def __gt__(self, other): \n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator>other.numerator):\n return True\n else:\n return False", "def test_ge(self):\n f12: Fraction = Fraction(1, 2)\n f34: Fraction = Fraction(3, 4)\n f93: Fraction = Fraction(9, 3)\n f124: Fraction = Fraction(12, 4)\n self.assertTrue(f12 >= f34)\n self.assertTrue(f93 >= f124)\n self.assertFalse(f93 >= f12)", "def test_LessThanorEqualto(self):\n self.assertTrue(Fraction(-7,10)<=Fraction(-7,10))", "def almostgte(a, b):\n return np.all(np.logical_or(a > b, almosteq(a, b)))", "def almost_equal(x, y):\n return abs(x-y) < FP_PREC", "def __eq__(self, frac):\n return self.equal == frac.equal", "def test_gt_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert b > a", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def gte(cls, lhs, rhs):\n return lhs >= rhs", "def test_lt(self):\n f12: Fraction = Fraction(1, 2)\n f34: Fraction = Fraction(3, 4)\n f44: Fraction = Fraction(4, 4)\n self.assertTrue(f12 < f34)\n self.assertFalse(f44 < f12)", "def fp_lt(x: float, y: float) -> bool:\n return not fp_eq(x, y) and x < 
y", "def __ge__(self,f2):\n return self > f2 or self == f2", "def __gt__(self, other):\n return self.__f > other.get_f()", "def __gt__(self, other):\n return True if self._compare(other) > 0 else False", "def greater_equal(x1, x2):\n return compare_chararrays(x1, x2, '>=', True)", "def test_ge_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert b > a", "def __gt__(self, other):\n return self.x ** 2 + self.y ** 2 > other.x ** 2 + other.y ** 2", "def are_close(num1: float, num2: float, error: float) -> bool:\n\n if abs(num1-num2) < error:\n return True\n return False", "def __le__(self, other):\n try:\n lhs = (self._num * other._den)\n rhs = (other._num * self._den)\n return (lhs <= rhs)\n except AttributeError:\n return (self <= Rational.parse_number(other))" ]
[ "0.7365881", "0.7272041", "0.7179911", "0.7146161", "0.70425826", "0.7039962", "0.70348424", "0.7027964", "0.6976749", "0.6785346", "0.6748767", "0.6638374", "0.662061", "0.6575495", "0.6486706", "0.6460307", "0.64515734", "0.64330274", "0.6383279", "0.63759863", "0.63638836", "0.63607776", "0.63286376", "0.6320352", "0.6269105", "0.6238221", "0.6176536", "0.6173543", "0.61698854", "0.6135096" ]
0.75417024
0
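compare_fracs avoids floating point entirely by cross-multiplying; for 2-tuples with positive denominators this comparison is exact. A small self-contained check against fractions.Fraction (the stdlib import is only for the illustration, not used by the record's code):

from fractions import Fraction

def compare_fracs(frac1, frac2):
    # True iff frac1 > frac2; exact as long as both denominators are positive
    return frac1[0] * frac2[1] > frac2[0] * frac1[1]

for a, b in [((3, 4), (2, 3)), ((1, 2), (2, 3)), ((2, 3), (4, 6))]:
    assert compare_fracs(a, b) == (Fraction(*a) > Fraction(*b))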
Returns the existing fraction immediately to the left of this one.
def get_left_frac(self):
    if self.parent == None:
        # if this is the root node
        return (0,1)
    elif self.is_left_child:
        # if the left side, run up the tree until we find a right child
        return self.parent.get_left_frac()
    else:
        # if right child, just return the fraction above it
        return self.parent.frac
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __pos__( self ):\r\n\t\treturn fraction( self )", "def inverse( self ):\r\n\t\treturn fraction( self.denominator, self.numerator )", "def reduce(self):\n import math\n g = math.gcd(self.num, self.den)\n return Fraction(self.num//g, self.den//g)", "def numerator(self):\n return +self", "def pleft(self):\n return -self.pfill(1) + self.plen(-1, s=True)", "def getfloat(self, fraction) -> float:\n self.numerator_a = fraction.numerator_a\n self.denominator_b = fraction.denominator_b\n self.fraction = str(self.numerator_a) + '/' + str(self.denominator_b)\n return super().__float__()", "def reciprocal(self):\n return Rational(self.denominator, self.numerator)", "def fraction_space(self):\n self.custom_space(*[0,0,1,1])", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def inverse(self):\n return fraction(self.denom, self.num)", "def __reduce(self):\n if self.denominator <0:\n self.denominator *= -1\n self.numerator *= -1\n gcd = math.gcd(int(self.denominator),int(self.numerator))\n if self.denominator != 0 and self.numerator!= 0:\n if gcd > 0:\n self.denominator /= gcd\n self.numerator /= gcd\n self.numerator = int(self.numerator)\n self.denominator = int(self.denominator)", "def __add__ (self, other):\n if (self.debug): print(f'enter fraction.__add__ with {other}')\n lcm = find_lcm(self.value[1], other.value[1])\n if (self.debug): print(f'{int(lcm/self.value[1])} {self.value}')\n f1 = self.equivalent(int(lcm/self.value[1]))\n f2 = other.equivalent(int(lcm/other.value[1]))\n f3 = fraction(f1[0]+f2[0], lcm)\n if (self.debug): print(f'{f3}, {self}, {other}')\n return f3", "def __neg__(self):\n if self.numerator ==0 and self.denominator==0:\n return Fraction(-self.inf_size,0)\n else:\n return Fraction(-self.numerator,self.denominator)", "def __neg__( self ):\r\n\t\treturn fraction( -self.numerator, self.denominator )", "def get_right_frac(self):\n\t\tif self.parent == None:\n\t\t\t# if this is the root node\n\t\t\treturn (1,0)\n\t\telif self.is_left_child:\n\t\t\t# if the left side, just return the fraction above it\n\t\t\treturn self.parent.frac\n\t\telse:\n\t\t\t# if right child, run up the tree til we find a left child\n\t\t\treturn self.parent.get_right_frac()", "def get_left_right_lean_fraction(self):\n n_left_notes = len(self.get_notes(NoteGroupType.LEFT))\n n_right_notes = len(self.get_notes(NoteGroupType.RIGHT))\n n_normal_notes = n_left_notes + n_right_notes\n return (n_right_notes - n_left_notes) / n_normal_notes", "def __sub__ (self,other):\n if (self.debug): print(f'enter fraction.__sub__ with {other}')\n f2 = fraction(-1*other.value[0],other.value[1])\n f3 = self.__add__(f2)\n return f3", "def left(self):\n # type: () -> float\n return self._left", "def __rsub__(self, left):\n return left - self.value()", "def maskedFraction(self):\n\n\t\treturn self._masked_fraction", "def fraction(amount, start, stop, truncated, sequence):\n ratio = stop\n for x in range(start, amount):\n y = abs(round(ratio / (abs(x) + 1)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def simplify (self):\n if (self.debug): print(f'enter fraction.simplify')\n hcf = find_hcf(self.value[0], self.value[1])\n self.value = (self.value[0] // hcf.product(), self.value[1] // hcf.product())\n return", "def __add__(self, frac):\n add_new_numerator = (self.numerator * frac.denominator) +\\\n (frac.numerator * self.denominator)\n add_new_denominator = self.denominator * frac.denominator\n if 
add_new_denominator == 0:\n raise ValueError(\"UNDEFINED.\")\n return Fraction(add_new_numerator, add_new_denominator)", "def __sub__(self, frac):\n if isinstance(frac,Fraction):\n if self.inf_size !=0 or frac.inf_size != 0:\n if self.inf_size == frac.inf_size:\n return 0\n elif abs(self.inf_size) > abs(frac.inf_size):\n return Fraction(self.inf_size,0)\n else:\n return Fraction(-frac.inf_size,0)\n numerator = ((self.numerator*frac.denominator)-(frac.numerator*self.denominator))\n denominator = (self.denominator*frac.denominator)\n elif type(frac) in(float,int):\n if self.inf_size !=0:\n return self\n numerator = self.numerator - (frac*self.denominator)\n denominator = self.denominator\n else:\n raise ValueError('Fraction can only add with to int float or fractions')\n return Fraction(numerator,denominator)", "def __add__(self, frac):\n numerator = ((self.numerator * frac.denominator) + (self.denominator * frac.numerator))\n denominator = (self.denominator * frac.denominator)\n return Fraction(numerator, denominator)", "def fraction_full_scale(self):\n return self._fraction_full_scale", "def maskedFraction(self):\n\n\t\tif not self._masked:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn self._masked_fraction", "def __add__(self, other):\n top = self.num*other.denom + self.denom*other.num\n bott = self.denom*other.denom\n return fraction(top, bott)", "def __sub__(self, frac):\n numerator = ((self.numerator * frac.denominator) - (self.denominator * frac.numerator))\n denominator = (self.denominator * frac.denominator)\n return Fraction(numerator, denominator)", "def __add__(self, frac):\n if isinstance(frac,Fraction):\n if self.inf_size !=0 or frac.inf_size != 0:\n if self.inf_size == -frac.inf_size:\n return 0\n elif abs(self.inf_size) > abs(frac.inf_size):\n return Fraction(self.inf_size,0)\n else:\n return Fraction(frac.inf_size,0)\n numerator = ((self.numerator*frac.denominator)+(frac.numerator*self.denominator))\n denominator = (self.denominator*frac.denominator)\n elif type(frac) in(float,int):\n if self.inf_size !=0:\n return self\n numerator = (frac*self.denominator) + self.numerator\n denominator = self.denominator\n else:\n raise ValueError('Fraction can only add with to int float or fractions')\n return Fraction(numerator,denominator)" ]
[ "0.6986293", "0.6574903", "0.6369118", "0.6360639", "0.63306767", "0.63286066", "0.6244909", "0.62005043", "0.618055", "0.61376506", "0.60833395", "0.6046018", "0.60386914", "0.6024639", "0.6022832", "0.5980461", "0.5970324", "0.5944058", "0.59315735", "0.5909025", "0.5904262", "0.5870892", "0.5855104", "0.58517426", "0.582927", "0.581523", "0.5793602", "0.5771894", "0.5760188", "0.57481086" ]
0.7014593
0
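get_left_frac walks up the tree while the current node is a left child; the first ancestor reached from the right holds the node's left boundary, and the root falls back to the formal 0/1. An illustrative stand-alone sketch using a namedtuple instead of the repository's SBNode class (the field names mirror the record; everything else is assumed):

from collections import namedtuple

# Lightweight stand-in carrying only the fields the walk needs.
Node = namedtuple("Node", ["frac", "is_left_child", "parent"])

def left_boundary(node):
    # Same walk as get_left_frac: keep climbing while we are a left child.
    if node.parent is None:
        return (0, 1)
    if node.is_left_child:
        return left_boundary(node.parent)
    return node.parent.frac

root = Node((1, 1), False, None)
half = Node((1, 2), True, root)           # 1/2 is the left child of 1/1
two_thirds = Node((2, 3), False, half)    # 2/3 is the right child of 1/2
assert left_boundary(two_thirds) == (1, 2)
assert left_boundary(half) == (0, 1)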
Returns the fraction immediately to the right of this one.
def get_right_frac(self):
    if self.parent == None:
        # if this is the root node
        return (1,0)
    elif self.is_left_child:
        # if the left side, just return the fraction above it
        return self.parent.frac
    else:
        # if right child, run up the tree til we find a left child
        return self.parent.get_right_frac()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __pos__( self ):\r\n\t\treturn fraction( self )", "def denominator(self):\n return 1", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def reciprocal(self):\n return Rational(self.denominator, self.numerator)", "def inverse( self ):\r\n\t\treturn fraction( self.denominator, self.numerator )", "def reduce(self):\n import math\n g = math.gcd(self.num, self.den)\n return Fraction(self.num//g, self.den//g)", "def denominator(self, ???):", "def getfloat(self, fraction) -> float:\n self.numerator_a = fraction.numerator_a\n self.denominator_b = fraction.denominator_b\n self.fraction = str(self.numerator_a) + '/' + str(self.denominator_b)\n return super().__float__()", "def get_as_float(self):\n return float(self.numerator / self.denominator)", "def __float__(self) -> float:\n float_number = self.numerator_a / self.denominator_b\n print(f'Fraction {self.fraction} float number is {float_number}')\n return float_number", "def __float__(self):\n return self.num/self.denom", "def __float__(self):\n return self.num/self.denom", "def fraction_space(self):\n self.custom_space(*[0,0,1,1])", "def get_left_right_lean_fraction(self):\n n_left_notes = len(self.get_notes(NoteGroupType.LEFT))\n n_right_notes = len(self.get_notes(NoteGroupType.RIGHT))\n n_normal_notes = n_left_notes + n_right_notes\n return (n_right_notes - n_left_notes) / n_normal_notes", "def dfr(self):\n return self.table[1, 0] / (self.table[1, 0] + self.table[1, 1])", "def __float__(self):\n return self.num / self.denom # result of / is of type float", "def __rdiv__(self, _scalar):\n\t\treturn self / _scalar", "def inverse(self):\n return fraction(self.denom, self.num)", "def __reduce(self):\n if self.denominator <0:\n self.denominator *= -1\n self.numerator *= -1\n gcd = math.gcd(int(self.denominator),int(self.numerator))\n if self.denominator != 0 and self.numerator!= 0:\n if gcd > 0:\n self.denominator /= gcd\n self.numerator /= gcd\n self.numerator = int(self.numerator)\n self.denominator = int(self.denominator)", "def numerator(self):\n return +self", "def fraction_full_scale(self):\n return self._fraction_full_scale", "def ionic_fraction(self) -> Real:\n return self._ionic_fraction", "def div(self):\n a = self.nums()\n x = LibraryFunctions.per(a, 0.9) - LibraryFunctions.per(a, 0.1)\n return x / 2.58", "def __neg__(self):\n if self.numerator ==0 and self.denominator==0:\n return Fraction(-self.inf_size,0)\n else:\n return Fraction(-self.numerator,self.denominator)", "def value(G):\n from .printing import unicode_fraction\n\n return unicode_fraction(G._n.numerator, G._n.denominator)", "def __neg__( self ):\r\n\t\treturn fraction( -self.numerator, self.denominator )", "def num (self):\n return self.value[0]/self.value[1]", "def __rdiv__(self, number):\n return self.__div__(number)", "def get_left_frac(self):\n\t\tif self.parent == None:\n\t\t\t# if this is the root node\n\t\t\treturn (0,1)\n\t\telif self.is_left_child:\n\t\t\t# if the left side, run up the tree until we find a right child\n\t\t\treturn self.parent.get_left_frac()\n\t\telse:\n\t\t\t# if right child, just return the fraction above it\n\t\t\treturn self.parent.frac", "def get_fraction_of_paddle(self, point: Point):\n fraction = (point.y - self.y) / self.height\n fraction = max(min(fraction, 0.5), -0.5) # clamp to +/- 0.5\n return fraction" ]
[ "0.7226107", "0.69381106", "0.69352823", "0.69166285", "0.6860141", "0.68463516", "0.6796122", "0.6757292", "0.661353", "0.6584862", "0.6569921", "0.6569921", "0.6495624", "0.6387157", "0.6384618", "0.63573", "0.6352741", "0.6351452", "0.6347366", "0.63327", "0.6324134", "0.62976474", "0.62811184", "0.62770325", "0.62722063", "0.62688714", "0.6245888", "0.6227342", "0.6203781", "0.61817193" ]
0.70297647
1
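get_right_frac is the mirror image: a left child stops at its parent, a right child keeps climbing, and the root is bounded above by the formal 1/0. The same kind of stand-alone sketch, again with an assumed namedtuple in place of SBNode:

from collections import namedtuple

Node = namedtuple("Node", ["frac", "is_left_child", "parent"])

def right_boundary(node):
    # Mirror of get_right_frac: left children stop at the parent, right children climb.
    if node.parent is None:
        return (1, 0)
    if node.is_left_child:
        return node.parent.frac
    return right_boundary(node.parent)

root = Node((1, 1), False, None)
half = Node((1, 2), True, root)
two_thirds = Node((2, 3), False, half)
assert right_boundary(two_thirds) == (1, 1)
assert right_boundary(root) == (1, 0)
# Every node is the mediant of its two boundaries: 2/3 = mediant(1/2, 1/1).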
Populates self.left_child and self.right_child with the proper child nodes. If max_denom is given, the children also generate their own children until the maximum denominator is reached; in max_denom mode all created nodes are < 1, since otherwise the tree would blow up toward infinity. If max_depth is given, it generates that many layers of the tree; depth is indexed from 0 such that the 1/1 node has depth 0. Externally this should be called as node.gen_children(max_depth=x). If neither is given, no further child nodes are generated.
def gen_children(self, max_denom=None, max_depth=None, current_depth=0):
    left_child_frac = mediant(self.frac, self.get_left_frac())
    right_child_frac = mediant(self.frac, self.get_right_frac())

    # print "%s generating children %s and %s" % (self.frac, left_child_frac, right_child_frac)

    if max_denom != None:
        if left_child_frac[1] < max_denom and left_child_frac[0] < left_child_frac[1]:
            self.left_child = SBNode(frac=left_child_frac, is_left_child=True, parent=self)
            self.left_child.gen_children(max_denom=max_denom)

        if right_child_frac[1] < max_denom and right_child_frac[0] < right_child_frac[1]:
            self.right_child = SBNode(frac=right_child_frac, is_left_child=False, parent=self)
            self.right_child.gen_children(max_denom=max_denom)

    elif max_depth != None and current_depth < max_depth:
        self.left_child = SBNode(frac=left_child_frac, is_left_child=True, parent=self)
        self.right_child = SBNode(frac=right_child_frac, is_left_child=False, parent=self)
        self.left_child.gen_children(max_depth=max_depth, current_depth=current_depth+1)
        self.right_child.gen_children(max_depth=max_depth, current_depth=current_depth+1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_tree(self, max_depth = None):\n\n if max_depth is None:\n max_depth = self.tree.max_depth\n else:\n max_depth -= 1\n if max_depth == 0:\n return\n self.generate_children()\n if self.tree.remove:\n os.unlink(self.source_filename)\n for child in self.children:\n if child.count > self.tree.max_count:\n child.generate_tree(max_depth)", "def __build_tree(self, x, y, depth):\n\n node = Node(x, y, depth)\n # update max depth\n if depth > self.maximum_depth:\n self.maximum_depth = depth\n\n classes = np.unique(y)\n class_counts = np.unique(y, return_counts=True)[1]\n\n # accounting for data inconsistency (such as identical feature\n # distribution but different assigned class)\n predicted_class = classes[np.argmax(class_counts)]\n feature, split = self.__find_best_split(x, y)\n\n # only assign a predicted class to leaf nodes\n if feature is None or split is None:\n node.is_leaf = True\n node.predicted_class = predicted_class\n return node\n\n node.feature_index_split = feature\n node.integer_splitting_rule = split\n node.entropy = self.__entropy(y)\n\n row_indices_left_child = x[:, feature] < split\n left_child_features, left_child_labels = x[row_indices_left_child], y[row_indices_left_child]\n right_child_features, right_child_labels = x[~row_indices_left_child], y[~row_indices_left_child]\n\n # recursively call build tree of child nodes\n node.left_child = self.__build_tree(left_child_features,\n left_child_labels, depth + 1)\n node.right_child = self.__build_tree(right_child_features,\n right_child_labels, depth + 1)\n\n return node", "def create_children(self):\n actionCount = len(self.availableActions)\n self.children = [None] * actionCount\n\n # Split creation into multiple threads if this is the master node.\n if self.level == 0 and USE_THREADS:\n threads = [None] * actionCount\n for idx in range(actionCount):\n threads[idx] = threading.Thread(target=create_child, args=(self, idx))\n threads[idx].start()\n for t in threads:\n t.join()\n else:\n for idx in range(actionCount):\n create_child(self, idx)\n # Stop making child branches if the most recent child branch already found lethal.\n if self.children[idx].get_max_win_strength() == WIN_VALUE:\n self.children = self.children[:idx+1]\n break", "def prune(self, rng, get_nodes, max_depth=1):\n if not self.children:\n return\n for i_c, child in enumerate(self.children):\n if child.min_depth >= max_depth:\n self.children[i_c] = Node(\n rng.choice(get_nodes(arity=0)),\n self.tree_type)\n self.children[i_c].parent = self\n elif max_depth > 1:\n child.prune(rng, get_nodes, max_depth - 1)", "def _build_tree_recursive(self, tree, cur_node, X, y, depth):\r\n n_samples, n_features = X.shape\r\n leaf_reached = False\r\n\r\n # Evaluates if all instances belong to the same class\r\n if utils.all_instances_same_class(y):\r\n leaf_reached = True\r\n\r\n # Evaluates the min_samples_split stopping criteria\r\n if n_samples < self._min_samples_split:\r\n leaf_reached = True\r\n\r\n # Evaluates the depth stopping criteria\r\n if self._max_depth is not None and depth >= self._max_depth:\r\n leaf_reached = True\r\n\r\n best_split = None\r\n if not leaf_reached:\r\n best_split = self._find_split(X, y, n_features)\r\n if best_split is None or best_split.gain < self._min_gain_split:\r\n leaf_reached = True\r\n\r\n if leaf_reached:\r\n samples = utils.bin_count(y, length=self._n_classes)\r\n result = np.argmax(samples)\r\n new_leaf = DecisionLeaf(samples=samples, depth=depth, result=result)\r\n tree.nodes.append(new_leaf)\r\n\r\n else:\r\n 
is_categorical = utils.categorical_data(X[:, best_split.feature_id])\r\n samples = utils.bin_count(y, length=self._n_classes)\r\n\r\n if is_categorical:\r\n new_fork = DecisionForkCategorical(samples=samples, depth=depth,\r\n feature_id=best_split.feature_id, value=best_split.value,\r\n gain=best_split.gain)\r\n X_left, X_right, y_left, y_right = split_categorical_data(X, y, best_split.feature_id, best_split.value)\r\n\r\n else:\r\n new_fork = DecisionForkNumerical(samples=samples, depth=depth,\r\n feature_id=best_split.feature_id, value=best_split.value,\r\n gain=best_split.gain)\r\n X_left, X_right, y_left, y_right = split_numerical_data(X, y, best_split.feature_id, best_split.value)\r\n\r\n tree.nodes.append(new_fork)\r\n tree.last_node_id += 1\r\n node_to_split = tree.last_node_id\r\n new_branch = self._build_tree_recursive(tree, node_to_split, X_left, y_left, depth=depth+1)\r\n tree.nodes[cur_node].left_branch = new_branch\r\n\r\n tree.last_node_id += 1\r\n node_to_split = tree.last_node_id\r\n new_branch = self._build_tree_recursive(tree, node_to_split, X_right, y_right, depth=depth+1)\r\n tree.nodes[cur_node].right_branch = new_branch\r\n\r\n return cur_node", "def make_tree(self, X_subset, y_subset, depth):\n \n # YOUR CODE HERE\n #self.depth += 1\n if depth < self.max_depth and X_subset.shape[0] >= self.min_samples_split:\n \n best_feature, best_threshold = self.choose_best_split(X_subset, y_subset)\n print('depth = {}, size parent node = {}'.format(depth, len(X_subset)))\n print('best_feature = {}, best_threshold = {}'.format(best_feature, best_threshold))\n new_node = Node(best_feature, best_threshold)\n \n left_child, right_child = self.make_split(best_feature, best_threshold, X_subset, y_subset)\n new_node.left_child = self.make_tree(*left_child, depth+1)\n new_node.right_child = self.make_tree(*right_child, depth+1)\n \n else: # we have a leaf\n new_node = Node(-1, -1) # We flag leaf nodes by setting feature_index and threshold to -1\n new_node.value = self.predicted_values(y_subset)\n \n if self.classification:\n new_node.proba = np.mean(y_subset, axis=0)\n \n # We reduce the depth to compensate for the two calls to self.depth += 1 we make on\n # the same level for left_child and right_child.\n #self.depth -= 1\n \n return new_node", "def build_tree(self):\n stack = []\n self._handle_solo_node_case()\n while self.root_hash == None:\n if len(stack) >= 2 and stack[-1].height == stack[-2].height:\n mom = stack.pop()\n dad = stack.pop()\n child_hash = self.sha256Sum(mom.hash + dad.hash)\n child = self.Node(mom, dad, child_hash)\n self.node_table[child_hash] = child\n mom.child = child\n dad.child = child\n\n if child.height == self.max_height:\n self.root_hash = child.hash\n\n stack.append(child)\n elif len(self.leaves) > 0:\n leaf = self.leaves.pop()\n self.node_table[leaf.hash] = leaf\n stack.append(leaf)\n # Handle case where last 2 nodes do not match in height by \"graduating\"\n # last node\n else:\n stack[-1].height += 1\n self.is_built = True", "def branch_generator(self, tree, ids, init_tree):\n if len(tree[\"comments\"]) < self.config[\"max_tree_size\"]:\n yield tree\n else:\n top_level_comments = []\n for id, comment in tree[\"comments\"].items():\n if comment[\"parent_id\"] == ids[-1]:\n top_level_comments.append(id)\n\n if len(top_level_comments) == 0:\n yield None\n\n for id in top_level_comments:\n branch_tree = self.new_branch_tree(init_tree, ids)\n self.add_children(tree, branch_tree, id)\n ids.append(id)\n for sub_branch in self.branch_generator(\n branch_tree, 
ids, init_tree):\n yield sub_branch\n ids = ids[:-1]", "def build_tree_helper(x, n, d, max_d, name=defaultname):\n ret = {}\n ret['name'] = name(x)\n if d == max_d:\n return ret\n children = collatz.children(x, n)\n if x == 1:\n children = children[1:]\n if children:\n ret['children'] = [build_tree_helper(x, n, d + 1, max_d, name) for x in children]\n return ret", "def generate(cls, rng, get_nodes, tree_type, depth, force_types=[],\n parent=None):\n\n # Determine allowed types\n force_types_ = [] # For children\n if force_types: # If there's forced type for this depth,\n types = force_types[0] # use it, and don't pass it on.\n force_types_ = force_types[1:]\n elif parent is not None: # Otherwise, inherit from parent.\n # Doesn't work with mutate, because\n this_i = len(parent.children)\n types = parent.child_type[this_i]\n else:\n raise ValueError('Types must be specified for each node, either '\n 'by the \"force_types\" kwarg (required for root, '\n 'optional for depths), or inherited from the '\n '\"child_type\" attribute of the parent Node.')\n\n # Decide whether terminal or function\n if depth == 0: # Always return a terminal at depth 0\n is_terminal = True\n elif (tree_type == 'g' and # For grow trees,\n 'terminal' in types and # if it's allowed,\n rng.choice([False, True])): # flip a coin.\n is_terminal = True\n else: # Otherwise, return a function\n is_terminal = False\n types = [t for t in types if t != 'terminal']\n\n # Generate a random node\n if is_terminal:\n node_data = rng.choice(get_nodes(['terminal', 'constant']))\n node = cls(node_data, tree_type, parent=parent)\n else:\n node_data = rng.choice(get_nodes(types, depth))\n node = cls(node_data, tree_type, parent=parent)\n # Generate children\n node.children = []\n for i in range(node.arity):\n node.children.append(cls.generate(\n rng, get_nodes, tree_type, depth-1, force_types_, node))\n return node", "def create(self, X, dimensions=None):\n n_samples, n_features = X.shape\n self.X = X\n if not dimensions:\n dimensions = n_features\n\n self.root = KdNode(depth=0,\n splitting_feature=0,\n splitting_value=np.median(X[:, 0]),\n idx=np.arange(n_samples),\n parent=None)\n # grow the tree by DFS\n stack = [self.root]\n while stack:\n node = stack.pop()\n # splitting samples in the node into two children\n sample_values = X[node.idx, node.splitting_feature]\n left_idx = node.idx[sample_values < node.splitting_value]\n right_idx = node.idx[sample_values > node.splitting_value]\n node.idx = node.idx[sample_values == node.splitting_value]\n # since left and right subtrees are divided by the median of their parent,\n # the sizes of the two subtrees are expected to be equal\n assert len(left_idx) == len(right_idx),\\\n 'left and right subtrees should have the same number of samples'\n # append left and right children\n if len(left_idx):\n child_depth = node.depth + 1\n child_feature = (node.depth + 1) % dimensions\n left_value = np.median(X[left_idx, child_feature])\n node.left = KdNode(depth=child_depth, splitting_feature=child_feature,\n splitting_value=left_value, idx=left_idx, parent=node)\n right_value = np.median(X[right_idx, child_feature])\n node.right = KdNode(depth=child_depth, splitting_feature=child_feature,\n splitting_value=right_value, idx=right_idx, parent=node)\n stack.append(node.left)\n stack.append(node.right)", "def build_tree(self, df, layer=0, side=0, max_depth=100):\n # Cannot be branched anymore\n if len(set(df.y)) == 1:\n self.Value[(layer, side)] = df.y.values[0]\n # Depth reaches the limit\n elif layer >= 
max_depth:\n self.Value[(layer, side)] = 2 * (sum(df.y.values) >= 0) - 1\n else:\n best_d, best_val = self.branching(df, layer, side)\n # Left hand side\n p = (df[df[best_d] >= best_val], layer + 1, 2 * side, max_depth)\n self.build_tree(*p)\n # Right hand side\n p = (df[df[best_d] < best_val], layer + 1, 2 * side + 1, max_depth)\n self.build_tree(*p)", "def create_binary_tree(depth=3):\n # The only node is root\n max_nodes, nodes = 2 ** depth - 1, 1\n queue = Queue()\n val = IntGenerator()\n root = Node(val=next(val))\n\n queue.enqueue(root)\n while nodes + 2 <= max_nodes:\n current = queue.dequeue()\n\n # Create child nodes until total num is le max_nodes\n # for specified length in binary tree\n if nodes + 2 <= max_nodes: # tree will have +2 nodes after creating children\n current.left = Node(val=next(val))\n current.right = Node(val=next(val))\n queue.enqueue(current.left)\n queue.enqueue(current.right)\n nodes += 2\n\n return root", "def split(root, Dk, maxDepth, minRows, currDepth):\n \n left, right = root['branches']\n del(root['branches'])\n \n# if not left and not right:\n# return\n \n # Check if the node is a leaf\n if not len(left): \n root['left'] = root['right'] = getLeafClass(right)\n return\n elif not len(right):\n root['left'] = root['right'] = getLeafClass(left)\n return\n \n # Check for max depth\n if(currDepth >= maxDepth):\n root['left'], root['right'] = getLeafClass(left), getLeafClass(right)\n return\n \n # Process left branch\n if(len(left) <= minRows):\n root['left'] = getLeafClass(left)\n else:\n root['left'] = findNextSplit(left, Dk)\n split(root['left'], Dk, maxDepth, minRows, currDepth + 1)\n \n # Process right branch\n if(len(right) <= minRows):\n root['right'] = getLeafClass(right)\n else:\n root['right'] = findNextSplit(right, Dk)\n split(root['right'], Dk, maxDepth, minRows, currDepth + 1)", "def setup_children(self):\n # Only generate new children if there are none\n if len(self.children) == 0:\n # Create the encoder and decoder genes\n encoder = EncoderGene(name='encoder',\n parent=self,\n spatial_scale=self.hyperparam(\n 'spatial_scale'))\n self.children = [encoder]\n\n decoder = DecoderGene(name='decoder',\n parent=self,\n spatial_scale=self.hyperparam(\n 'spatial_scale'))\n\n self.children.append(decoder)\n\n pass", "def generateChildren(node,nrVertices,mutations,oneChild = False):\n\t#print \"genChild\"\n\t#print \"numVertices:\", nrVertices\n\tif(nrVertices == 0):\n\t\t#print \"gCEND\"\n\t\treturn node\n\telse:\n\t\tvalues = []\n\t\tif (oneChild):\n\t\t\tvalue = nrVertices-1\n\t\t\tvalues.append(value)\n\t\t\tnrVertices -= 1 \n\t\telse:\n\t\t\twhile nrVertices > 1:\n\t\t\t\tvalue = np.random.randint(0, nrVertices)\n\t\t\t\t#print \"Value: \", value\n\t\t\t\tif (value > 0):\n\t\t\t\t\tvalues.append(value)\n\t\t\t\t\tnrVertices -= value \n\n\t\tfor i in range(len(values)):\n\t\t\t#print mutations\n\t\t\ttmpName = np.random.choice(mutations)\n\t\t\tmutations.remove(tmpName)\n\t\t\tnode.add_child(name = tmpName)\n\t\t\tgenerateChildren(node.children[i],values[i],mutations,oneChild) # Each child gets a int of children", "def __init__(self, max_depth=5,\n min_samples_split=2, min_samples_leaf=1, bootstrap=False):\n self._max_depth = max_depth\n self._min_samples_split = min_samples_split\n self._min_samples_leaf = min_samples_leaf\n self._bootstrap = bootstrap\n\n # Root node\n self._root = Node()", "def random_tree(self, grow, max_depth, depth=0):\n # Full method\n if depth < MIN_DEPTH or (depth < MAX_DEPTH and not grow):\n self.body = 
random.choice(operators)\n elif depth >= MAX_DEPTH:\n self.body = random.choice(terminals)\n else: # intermediate depth, grow\n if random.random() > 0.9:\n self.body = random.choice(operators)\n else:\n self.body = random.choice(terminals)\n if self.body in operators:\n self.left = Tree()\n self.left.random_tree(grow, max_depth, depth=depth + 1)\n self.right = Tree()\n self.right.random_tree(grow, max_depth, depth=depth + 1)", "def make_trees(self):\n self.trees = build_recursively_from_cells(self.cells, container=self)\n# self.trees = []\n# for cell in self.cells:\n# if cell.bpointer is None: # test whether cell is root\n# tree = Colony(container=self)\n# tree.add_cell_recursive(cell)\n# self.trees.append(tree)\n return", "def draw_tree(self):\n\n print \"--- \" + str(self.name)\n \n def draw_child_tree(current, depth):\n \n for c in current.children:\n print depth * \" \" + \"|-- \" + str(c.name)\n if hasattr(c, 'children'):\n draw_child_tree(c, depth + 1)\n \n draw_child_tree(self, 1)\n \n return", "def draw_tree(self, ax: plt.Axes = None, tree_depth: int = None, exclude_empty: bool = False,\n line_width: int = 1, edge_color='red', plot_nodes: bool = False, plot_points: bool = False):\n\n manager = self.root.manager\n manager._finalize_data()\n\n root_quad = self.root\n norm = matplotlib.colors.Normalize(vmin=root_quad.settings['min_depth'], vmax=root_quad.settings['max_depth'])\n cmap = matplotlib.cm.rainbow\n\n if ax is None:\n ax = plt.subplots(figsize=[11, 7], dpi=150)[1]\n\n if tree_depth is None or tree_depth == 0:\n if exclude_empty and not self.index:\n pass\n else:\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n if self.quad_index != -1:\n try:\n idx = self.quad_index[0], self.quad_index[1]\n except:\n idx = self.quad_index\n quad_z = manager.node_data['z'][idx].compute()\n rect = matplotlib.patches.Rectangle(self.mins, *sizes, zorder=2, alpha=0.5, lw=line_width, ec=edge_color, fc=cmap(norm(quad_z)))\n if plot_nodes:\n quad_x = manager.node_data['x'][idx].compute()\n quad_y = manager.node_data['y'][idx].compute()\n ax.scatter(quad_x, quad_y, s=5)\n if plot_points:\n ax.scatter(manager.data['x'][self.index].compute(),\n manager.data['y'][self.index].compute(), s=2)\n else: # no depth for the quad\n rect = matplotlib.patches.Rectangle(self.mins, *sizes, zorder=2, alpha=1, lw=line_width, ec=edge_color, fc='None')\n ax.add_patch(rect)\n\n if tree_depth is None:\n for child in self.children:\n child.draw_tree(ax, tree_depth=None, exclude_empty=exclude_empty, line_width=line_width, edge_color=edge_color, plot_points=plot_points, plot_nodes=plot_nodes)\n elif tree_depth > 0:\n for child in self.children:\n child.draw_tree(ax, tree_depth=tree_depth - 1, exclude_empty=exclude_empty, line_width=line_width, edge_color=edge_color, plot_points=plot_points, plot_nodes=plot_nodes)\n\n if (self.tree_depth == 0) or (tree_depth is None and self.tree_depth == 0):\n xsize = self.maxs[0] - self.mins[0]\n ysize = self.maxs[1] - self.mins[1]\n ax.set_ylim(self.mins[1] - ysize / 10, self.maxs[1] + ysize / 10)\n ax.set_xlim(self.mins[0] - xsize / 10, self.maxs[0] + xsize / 10)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n plt.gcf().colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Depth (+down, meters)')\n\n return ax", "def build(self):\n # copy the leaves\n height, layer = 0, self.leaves[::]\n # reduce the leaves to exactly one node\n while len(layer) != 1:\n layer = 
self._build(layer, height)\n height += 1\n self.root = layer[0]", "def _build_octree_branch(self, bodies: List[int], coords_min: np.ndarray, coords_max: np.ndarray) -> Tuple[int, int]:\n # in case of empty octant\n if len(bodies) == 0:\n return OCTANT_EMPTY, -1\n\n # in case of single body\n if len(bodies) == 1:\n return OCTANT_BODY, bodies[0]\n\n # create new node\n node_id = len(self._nodes_positions)\n self._nodes_positions.append(np.average(self._positions[bodies], axis=0, weights=self._masses[bodies]))\n self._nodes_mass.append(np.sum(self._masses[bodies]))\n self._nodes_sizes.append(coords_max[0] - coords_min[0])\n self._nodes_children_types.append(np.empty((8,), dtype=np.int))\n self._nodes_children_ids.append(np.empty((8,), dtype=np.int))\n\n # calculate octant for each body\n coords_mid = (coords_min + coords_max) / 2\n bodies_octant = np.sum((self._positions[bodies] > coords_mid) * [1, 2, 4], axis=1)\n\n # create octants\n for i in range(8):\n child_type, child_id = self._build_octree_branch(\n bodies=[body_id for body_id, octant in zip(bodies, bodies_octant) if octant == i],\n coords_min=octant_coords(coords_min, coords_max, i)[0],\n coords_max=octant_coords(coords_min, coords_max, i)[1]\n )\n self._nodes_children_types[node_id][i] = child_type\n self._nodes_children_ids[node_id][i] = child_id\n\n return OCTANT_NODE, node_id", "def iter_dfs(self, depth=0):\n yield self, depth\n yield from self.left.iter_dfs(depth=depth + 1)\n yield from self.right.iter_dfs(depth=depth + 1)", "def create_leaves(self, parent_node, leaf_values):\n from pyDD.diagram.node import Leaf\n import numpy\n parent_node.child_nodes[0] = Leaf(1.0, 1, diagram_type=MEVxDD)\n try:\n base_factor = leaf_values[numpy.nonzero(leaf_values)[0][0]]\n except IndexError:\n base_factor = 1.0\n for i in range(self.base):\n parent_node.child_nodes[i] = parent_node.child_nodes[0]\n parent_node.offsets[i] = leaf_values[i] / base_factor\n return parent_node, base_factor", "def generate_tiles(self):\n if self.children:\n for child in self.children:\n child.generate_tiles()\n print \"Generating tile for %s using child tiles\" % self.bbox\n self.generate_tile_from_child_tiles()\n else:\n print \"Generating tile for %s using source data\" % self.bbox\n self.generate_tile_from_source()", "def reset_max_depth(self) -> None:\n # The max depth is now calculated on the fly, so this is a no-op.\n pass", "def allocate(self):\n index = 0\n if self.bool_array[index] == True:\n raise CannotAllocateException(\"No ids available\")\n while index < max_val:\n left_child_index = 2 * index + 1\n right_child_index = 2 * index + 2\n if self.bool_array[left_child_index] == False: #There's an unallocated id in the subtree\n index = left_child_index\n elif self.bool_array[right_child_index] == False: #... 
in the right subtree\n index = right_child_index\n else: #Both subtrees are allocated, this actually means you broke your tree\n raise CannotAllocateException(\"No ids available\")\n id = self.get_index_from_id(index)\n self.update_tree(id)", "def at_depth(self, depth):\n\n for child in list(self.children):\n if depth == 0:\n yield child\n else:\n for grandchild in child.at_depth(depth - 1):\n yield grandchild", "def expand(self):\n # distribution = self.net.predict_distribution(self.pos)\n self.children = []\n for c in self.pos.moves():\n pos2 = self.pos.move(c)\n # 如果存在斩杀,children应为空值(即表面以结束游戏?)\n if pos2 is int:\n self.children = [Treenode(self.net, pos2, c)]\n node = Treenode(self.net, pos2, move=c)\n node.v += 1\n tree_update([self, node], node.pos.simulate(self.net))\n self.children.append(node)" ]
[ "0.665115", "0.5866646", "0.5856637", "0.57829106", "0.56562775", "0.5619012", "0.54160964", "0.5317201", "0.5247463", "0.518805", "0.5149137", "0.51216483", "0.51197636", "0.49725723", "0.49714312", "0.4908645", "0.4853916", "0.48523086", "0.48383847", "0.48062086", "0.4805974", "0.47789893", "0.47666842", "0.47601584", "0.47522333", "0.47397628", "0.473287", "0.47303757", "0.47213733", "0.47194016" ]
0.79430693
0
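gen_children ties the pieces together: each child is the mediant of the node's own fraction with one of its boundary fractions, which is exactly what makes an in-order walk come out sorted. A compact, self-contained sketch of the depth-limited mode only (the SBNode constructor here is assumed; the real class and its max_denom pruning are as shown in the record):

def mediant(a, b):
    return (a[0] + b[0], a[1] + b[1])

class SBNode:
    # Assumed minimal constructor; field names mirror how the record's code uses them.
    def __init__(self, frac, is_left_child=False, parent=None):
        self.frac = frac
        self.is_left_child = is_left_child
        self.parent = parent
        self.left_child = None
        self.right_child = None

    def get_left_frac(self):
        if self.parent is None:
            return (0, 1)
        return self.parent.get_left_frac() if self.is_left_child else self.parent.frac

    def get_right_frac(self):
        if self.parent is None:
            return (1, 0)
        return self.parent.frac if self.is_left_child else self.parent.get_right_frac()

    def gen_children(self, max_depth, current_depth=0):
        # Depth-limited mode only; the record's max_denom mode prunes by denominator instead.
        if current_depth >= max_depth:
            return
        self.left_child = SBNode(mediant(self.frac, self.get_left_frac()), True, self)
        self.right_child = SBNode(mediant(self.frac, self.get_right_frac()), False, self)
        self.left_child.gen_children(max_depth, current_depth + 1)
        self.right_child.gen_children(max_depth, current_depth + 1)

root = SBNode((1, 1))
root.gen_children(max_depth=2)
assert root.left_child.frac == (1, 2) and root.right_child.frac == (2, 1)
assert root.left_child.left_child.frac == (1, 3)
assert root.left_child.right_child.frac == (2, 3)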
Can be called recursively. Returns a list, sorted least to greatest, of the 2-tuple fractions at and below this node in the tree. If max_depth and current_depth are given, returns the row of the tree at that depth; depth is indexed from 0, i.e. the 1/1 node has depth 0. Otherwise, returns the entire subtree with no divisions. External calls should look like node.get_tree_below(max_depth)
def get_tree_below(self, max_depth=None, current_depth=0):
    tree_list = []
    if max_depth == None:
        # if we are not returning a row.
        if self.left_child != None:
            # if this is not the base of the tree
            tree_list = self.left_child.get_tree_below()
            tree_list.append(self.frac)
            tree_list = tree_list + self.right_child.get_tree_below()
        else:
            # if this is the base of the tree
            tree_list = [self.frac]
    else:
        if current_depth == max_depth:
            # if this is the deepest level we want to go to
            # (wrapped in a list so rows concatenate as lists of 2-tuples)
            tree_list = [self.frac]
        else:
            # WE NEED TO GO DEEPER
            left_side = self.left_child.get_tree_below(max_depth, current_depth+1)
            right_side = self.right_child.get_tree_below(max_depth, current_depth+1)
            tree_list = left_side + right_side
    return tree_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _new_depth(self, node, curr_depth):\n right = curr_depth\n left = curr_depth\n if node._rkid:\n right = self._new_depth(node._rkid, curr_depth + 1)\n if node._lkid:\n left = self._new_depth(node._lkid, curr_depth + 1)\n if right > left:\n return right\n return left", "def zigzagLevelOrder(self, root):\n if not root:\n return []\n\n # create direction variable\n going_right = False\n\n # initialize a level stack\n level_stack = [root]\n\n # initialize a result array\n result = []\n\n # loop while level stack is not empry\n while level_stack:\n\n # create tmp array\n tmp = []\n\n res_tmp = []\n\n # do the BFS by looping through each element\n while level_stack:\n\n # pop an element from the stack\n node = level_stack.pop()\n\n res_tmp.append(node.val)\n\n # if direction is 1 or going right, take left node first\n if going_right == 1:\n if node.left:\n tmp.append(node.left)\n\n if node.right:\n tmp.append(node.right)\n\n # otherwise take right node first, append to the temp list\n else:\n if node.right:\n tmp.append(node.right)\n\n if node.left:\n tmp.append(node.left)\n\n # append temp list to the result list\n result.append(res_tmp[::-1])\n\n # update level stack\n level_stack = tmp\n\n # change the direction\n going_right = not going_right\n\n return result", "def build_tree(self, df, layer=0, side=0, max_depth=100):\n # Cannot be branched anymore\n if len(set(df.y)) == 1:\n self.Value[(layer, side)] = df.y.values[0]\n # Depth reaches the limit\n elif layer >= max_depth:\n self.Value[(layer, side)] = 2 * (sum(df.y.values) >= 0) - 1\n else:\n best_d, best_val = self.branching(df, layer, side)\n # Left hand side\n p = (df[df[best_d] >= best_val], layer + 1, 2 * side, max_depth)\n self.build_tree(*p)\n # Right hand side\n p = (df[df[best_d] < best_val], layer + 1, 2 * side + 1, max_depth)\n self.build_tree(*p)", "def bfs_w_depth(tree):\n visited = []\n frontier = [(0, tree)]\n while frontier:\n depth, tree = frontier.pop(0)\n if tree is not None:\n visited.append((depth, tree[0]))\n frontier.append((depth + 1, tree[1]))\n frontier.append((depth + 1, tree[2]))\n return visited", "def gen_children(self, max_denom=None, max_depth=None, current_depth=0):\n\t\tleft_child_frac = mediant(self.frac, self.get_left_frac() )\n\t\tright_child_frac = mediant(self.frac, self.get_right_frac())\n\n\t\t# print \"%s generating children %s and %s\" % (self.frac, left_child_frac, right_child_frac)\n\n\t\tif max_denom != None:\n\t\t\tif left_child_frac[1] < max_denom and left_child_frac[0] < left_child_frac[1]:\n\t\t\t\tself.left_child = SBNode(frac=left_child_frac, is_left_child=True, parent=self)\n\t\t\t\tself.left_child.gen_children(max_denom=max_denom)\n\n\t\t\tif right_child_frac[1] < max_denom and right_child_frac[0] < right_child_frac[1]:\n\t\t\t\tself.right_child = SBNode(frac=right_child_frac, is_left_child=False, parent=self)\n\t\t\t\tself.right_child.gen_children(max_denom=max_denom)\n\n\t\telif max_depth != None and current_depth < max_depth:\n\t\t\tself.left_child = SBNode(frac=left_child_frac, is_left_child=True, parent=self)\n\t\t\tself.right_child = SBNode(frac=right_child_frac, is_left_child=False, parent=self)\n\t\t\tself.left_child.gen_children( max_depth=max_depth, current_depth=current_depth+1)\n\t\t\tself.right_child.gen_children(max_depth=max_depth, current_depth=current_depth+1)", "def level_order_list(root):\n return_list = []\n queue = []\n queue.append(root)\n\n while 0 < len(queue):\n next_node = queue.pop(0)\n if next_node.get_left():\n queue.append(next_node.get_left())\n if 
next_node.get_right():\n queue.append(next_node.get_right())\n return_list.append(next_node)\n\n return return_list", "def get_depth(self, current, n):\n if current is not None:\n return max(self.get_depth(current.left, n + 1), self.get_depth(current.right, n + 1))\n else:\n return n", "def a_star_alg(self, p1: int, p2: int, max_level: int = 1000):\r\n \r\n # Create start and end node\r\n start_node = Node(None, p1, self.node_dict[p1])\r\n start_node.g = start_node.h = start_node.f = 0\r\n end_node = Node(None, p2, self.node_dict[p2])\r\n end_node.g = end_node.h = end_node.f = 0\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n closed_list = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n\r\n # Loop until you find the end\r\n level = 0\r\n while len(open_list) > 0 and level < max_level:\r\n level += 1\r\n\r\n # Get the current node (the node in open_list with the lowest cost)\r\n current_node = open_list[0]\r\n current_index = 0\r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n closed_list.append(current_node)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n distance = current_node.g\r\n current = current_node\r\n while current is not None:\r\n path.append(current.number)\r\n current = current.parent\r\n\r\n return path[::-1], distance # Return reversed path\r\n\r\n # Generate children\r\n children = []\r\n for new_number in self.road_tree[current_node.number]: # Adjacent nodes\r\n new_node = Node(current_node, new_number, self.node_dict[new_number])\r\n children.append(new_node)\r\n\r\n # Loop through children\r\n for child in children:\r\n append_to_open_list = False\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + self.road_dict[(current_node.number, child.number)]\r\n child.h = sqrt((child.x - end_node.x) ** 2 + (child.y - end_node.y) ** 2) / 200\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the closed list\r\n closed_list, append_to_open_list = self.check_in_list(child, closed_list, append_to_open_list)\r\n\r\n # Child is already in the open list\r\n open_list, append_to_open_list = self.check_in_list(child, open_list, append_to_open_list)\r\n\r\n # Add the child to the open list\r\n if append_to_open_list:\r\n open_list.append(child)\r\n\r\n return [], 1e10", "def levelOrder(self, root: TreeNode) -> List[List[int]]:\n\n result = []\n if(root is None):\n return result\n\n q = deque([root])\n while(q):\n n = len(q)\n level = []\n for i in range(0,n):\n f = q.popleft()\n level.append(f.val)\n\n if (f.left is not None):\n q.append(f.left)\n if (f.right is not None):\n q.append(f.right)\n\n if(len(level) > 0):\n result.append(level[:])\n level.clear()\n return result", "def bfs(root):\n lvl = 0\n vals = [[root.data, lvl]]\n roots = [root]\n while roots:\n roots = ([r.right for r in roots if r.right] +\n [r.left for r in roots if r.left])\n lvl += 1\n for r in roots:\n vals.append([r.data, lvl])\n return vals", "def zigzak_using_bfs(root):\n current_level = [root]\n next_level = []\n while current_level:\n node = current_level.pop()\n print(node.data, end=\" \")\n if node.right:\n next_level.append(node.right)\n if node.left:\n next_level.append(node.left)\n if not current_level:\n current_level, next_level = next_level, current_level\n print()", "def search_tree(self, tgt_frac):\n\t\tif self.frac == 
tgt_frac:\n\t\t\treturn self\n\t\telif compare_fracs(self.frac, tgt_frac):\n\t\t\t# tgt is less than self and to left\n\t\t\treturn self.left_child.search_tree(tgt_frac)\n\t\telse:\n\t\t\t# tgt is greater than self and to right\n\t\t\treturn self.right_child.search_tree(tgt_frac)", "def bfs_ex(self, depth: Optional[int] = None, reverse: bool = False, mirror: bool = False) \\\n -> Iterable[Tuple['Tree', int, Tuple[int, ...]]]:\n if reverse:\n if mirror:\n return non_recursive_tree_bfs_reverse_mirror_ex(self, depth)\n else:\n return non_recursive_tree_bfs_reverse_original_ex(self, depth)\n else:\n if mirror:\n return non_recursive_tree_bfs_forward_mirror_ex(self, depth)\n else:\n return non_recursive_tree_bfs_forward_original_ex(self, depth)", "def _print_tree(self, tree, current_depth=0):\n if 'surv' in tree:\n self._print_with_depth(tree['times'], current_depth)\n return\n self._print_with_depth(\n \"{0} > {1}\".format(self.column_names[tree['feature']],\n tree['threshold']),\n current_depth)\n self._print_tree(tree['left'], current_depth + 1)\n self._print_tree(tree['right'], current_depth + 1)", "def breadth_first_traversal(self):\n breadth_first = []\n h = self.root.get_height() \n for i in range(h+2): \n self.level = []\n self.print_level(self.root, i + 1) \n breadth_first.append(self.level)\n return breadth_first", "def _active_depth(self):\n for n_left, n_right in self.graph.dfs():\n if self.node(n_right)['pad'] == 0:\n return self.node(n_right)['level']\n return 0", "def depth_fs(start: Vector2D, goal: Vector2D, grid: Scene, *args) -> (list, list):\n frontier = Stack()\n prev_node = dict()\n explored = []\n\n frontier.put(start)\n prev_node[start] = None\n\n while not frontier.empty():\n current = frontier.get()\n \n if current == goal:\n return (reconstruct_path(goal, prev_node), explored[1:]) # [1:] to remove start from list\n\n grid.set_cell(current, Cell(val = CellType.searched))\n explored.append(current)\n\n for neighbor in grid.get_unexplored_neighbors(current):\n prev_node[neighbor] = current\n frontier.put(neighbor)\n\n # grid.set_cell(neighbor, Cell(val = CellType.searched))\n \n # If frontier empty but goal was never reached, no solution was found\n return ([], explored[1:]) # [1:] to remove start from list", "def breadth_traverse(self):\n\n output = []\n q = Queue()\n q.enqueue(self.root)\n while q.peek():\n curr = q.dequeue()\n output.append(curr.value)\n if curr.l_child:\n q.enqueue(curr.l_child)\n if curr.r_child:\n q.enqueue(curr.r_child)\n return output", "def get_levels(tree, level=0):\n if type(tree) == list:\n return [level]+get_levels(tree[0], level+1)+get_levels(tree[1], level+1)\n elif type(tree) == tuple:\n return [level, level]+get_levels(tree[1], level+1)\n else:\n return [level]", "def by_level_traversal(self) -> Queue:\n # initialize Queue objects\n new_q = Queue()\n last_q = Queue()\n\n #binary search tree == empty\n if self.root is None:\n return last_q\n\n #root in enque.q\n new_q.enqueue(self.root)\n\n # iterate for processing\n while not new_q.is_empty():\n working_node = new_q.dequeue()\n if working_node is not None:\n last_q.enqueue(working_node)\n new_q.enqueue(working_node.left)\n new_q.enqueue(working_node.right)\n\n return last_q", "def height(root:Node) -> int:\n current = root.left\n depth = 0\n maxdepth = [0]\n #track the value and whether it has a branchpoint or not (bool)\n seen = dict()\n\n #do the left side first, then the right\n\n while current is not None:\n if current.val not in seen:\n if (current.left is not None) and 
(current.right is not None):\n seen.update({current.val:True})\n else:\n seen.update({current.val:False})\n depth +=1\n maxdepth.append(depth)\n if current.left is not None:\n current = current.left\n elif current.right is not None:\n current = current.right\n else:\n current = None\n\n print(' maxdepth left so far is {}'.format(maxdepth))\n\n current = root.right\n depth = 0\n\n while current is not None:\n if current.val not in seen:\n if (current.left is not None) and (current.right is not None):\n seen.update({current.val: True})\n else:\n seen.update({current.val: False})\n depth +=1\n maxdepth.append(depth)\n if current.right is not None:\n current = current.right\n elif current.left is not None:\n current = current.left\n else:\n current = None\n print(' maxdepth right so far is {}'.format(maxdepth))\n\n return max(maxdepth)", "def _pf(self, depth):\n self_line = self.pf_one_node(depth)\n prev_out = self.prev._pf(depth + 1) if self.prev else []\n next_out = self.next._pf(depth + 1) if self.next else []\n out = []\n if Node.flg_tree_view:\n out.extend(prev_out)\n out.append(self_line)\n out.extend(next_out)\n else:\n out.extend(prev_out)\n out.extend(next_out)\n out.append(self_line)\n return out", "def get_tree(self):\n current = self\n nodes = []\n while current is not None:\n nodes.append(current)\n current = current.parent\n return nodes # reversed tree path", "def bfs(start_node, goal_node, max_depth) -> \"solution path\":\n\td = deque([start_node,[]])\n\texplored = {}\n\tlevel = 0\n\n\t# Return empty path if start is equal to goal\n\tif start_node == goal_node:\n\t\treturn []\n\n\t# Keep exploring while the deque has nodes\n\twhile len(d) > 0:\n\t\tpath = d.popleft()\n\n\t\tif level == 0:\n\t\t\tnode = path\n\t\telse:\n\t\t\t# To keep track of levels an empty node gets popped between levels which will cause an exception\n\t\t\ttry:\n\t\t\t\tnode = path[-1]\n\t\t\texcept Exception:\n\t\t\t\tnode = []\n\t\t\t\tpass\n\n\t\tif len(node) == 0:\n\t\t\tlevel += 1\n\t\t\t# Return empty list if max depth was reached\n\t\t\tif max_depth == level:\n\t\t\t\treturn []\n\t\t\td.append(node)\n\n\t\telse:\n\t\t\tval = getNodeVal(node)\n\t\t\tif val not in explored:\n\n\t\t\t\t# Mark node as explored\n\t\t\t\texplored[val] = True\n\n\t\t\t\tfor row in range(len(node)):\n\t\t\t\t\tfor col in range(len(node)):\n\t\t\t\t\t\tchild = toggle(node, row, col)\n\t\t\t\t\t\tnew_path = list(path)\n\t\t\t\t\t\tif level == 0:\n\t\t\t\t\t\t\tnew_path = [new_path]\n\t\t\t\t\t\tnew_path.append(child)\n\t\t\t\t\t\td.append(new_path)\n\t\t\t\t\t\tif child == goal_node:\n\t\t\t\t\t\t\tlevel+=1\n\t\t\t\t\t\t\treturn new_path\n\t# No solution found\n\treturn []", "def levelOrder(self, root: 'Node') -> List[List[int]]:\n if not root: return []\n level = []\n waiting = []\n result = []\n level.append(root)\n while level:\n current = []\n while level:\n tmp = level.pop(0)\n if not tmp:\n continue\n current.append(tmp.val)\n waiting.append(tmp)\n if len(current) > 0:\n result.append(current)\n while waiting:\n tmp = waiting.pop(0)\n for ch in tmp.children:\n level.append(ch)\n return result", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def branches(tree):\n return tree[1:]", "def bestAtDepthL(self,depth=0,scoreFunc=None) :\n scoreFunc = scoreFunc if scoreFunc != None else lambda g : g.leafScore()\n max_score = ACG.minf\n max_L = None\n for g in self.L() :\n if depth > 0 :\n g = g.bestAtDepthR(depth=depth-1,scoreFunc=scoreFunc)\n 
score = scoreFunc(g)\n if score > max_score :\n max_score = score\n max_L = g\n return max_L if max_L != None else self", "def score_max_depths(graph, max_depths):\n ###TODO\n pass", "def bfs(self):\n level = 0\n q = Queue()\n visit_order = list()\n node = self.get_root()\n q.enq( (node.get_value()[1],level) )\n while(len(q) > 0):\n node, level = q.deq()\n if node == None:\n visit_order.append( (\"<empty>\", level))\n continue\n visit_order.append( (node, level) )\n if type(node) == str:\n q.enq( (None, level +1) )\n q.enq( (None, level +1) )\n continue\n if node.has_left_child():\n q.enq( (node.get_left_child()[1], level +1 ))\n else:\n q.enq( (None, level +1) )\n\n if node.has_right_child():\n q.enq( (node.get_right_child()[1], level +1 ))\n else:\n q.enq( (None, level +1) )\n\n return visit_order" ]
[ "0.5385036", "0.53593683", "0.52864254", "0.52792823", "0.5208625", "0.51871115", "0.5185997", "0.5131725", "0.5105242", "0.50936437", "0.5092283", "0.50806874", "0.50686944", "0.5068055", "0.5050487", "0.5041063", "0.5021816", "0.5019684", "0.5018557", "0.5002453", "0.4985244", "0.49710143", "0.4958573", "0.49361277", "0.4935967", "0.49359044", "0.49321815", "0.49275547", "0.49232396", "0.49222" ]
0.7730652
0
Returns the node furthest down the tree to the left, or this node if it doesn't have a left child.
def get_leftmost_child(self): if self.left_child == None: return self else: return self.left_child.get_leftmost_child()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_left_node(self):\n\t\tif self.left_child == None:\n\t\t\t# if we are at the end of a branch\n\t\t\tlowest_right_parent = self.get_lowest_right_parent()\n\t\t\tif lowest_right_parent.parent == None:\n\t\t\t\t# if this was called from left edge of the tree\n\t\t\t\t# the lowest right parent is the 1/1 node\n\t\t\t\t# return the 0/1 node on the left edge of the tree\n\t\t\t\treturn SBNode(frac=(0,1))\n\t\t\telse:\n\t\t\t\t# if we had a lower right parent\n\t\t\t\treturn lowest_right_parent.parent\n\t\telse:\n\t\t\treturn self.left_child.get_rightmost_child()", "def left_most_child(n):\n if n == None:\n return None\n while n.left:\n n = n.left\n return n", "def get_left(self):\n return BinaryNode.or_none(self.left)", "def left(self, node):\r\n if self._col(node.count) > 0:\r\n return self.nodes[node.count - 1]\r\n else:\r\n return None", "def get_leftchild(self):\n return self._leftchild", "def getLeftmost(self, root):\n current = root\n while current.left is not None:\n current = current.left\n return current", "def get_left(self):\n return self.left", "def get_left(self):\n return self.__left", "def remove_left(self):\n temp = self._leftchild\n self._leftchild.set_parent(None)\n self.set_leftchild(None)\n return temp", "def getLeft(self):\n return self.left", "def get_lowest_left_parent(self):\n\t\tif self.parent == None:\n\t\t\t# if we reached the top of the tree\n\t\t\t# just return this node bc the 1/1 node is technically a child of both the 1/0 and 0/1 nodes\n\t\t\treturn self\n\t\telif not self.parent.is_left_child:\n\t\t\t# the parent is a right child\n\t\t\treturn self.parent.get_lowest_left_parent()\n\t\telse:\n\t\t\t# the parent is a left child\n\t\t\treturn self.parent", "def get_node_left(self, n: MazeCell) -> MazeCell:\n if n.x == 0:\n return None\n else:\n return self.get_node(n.x - 1, n.y)", "def left(self) -> Optional[\"ExpressionNode\"]:\n return self.__left", "def findMin(self):\n curr = self\n while curr.hasLeftChild():\n curr = curr.leftChild\n return curr", "def left(self):\n\t\treturn self._left", "def left(self):\n return self._left", "def left(self):\n return self._left", "def left_root(self):\n return self.left_child(self.virtual_root)", "def left_child(self, u):\n return self._ll_tree.get_left_child(u)", "def left_child(self, position):\n child = 2 * position + 1\n if child > len(self.table) - 1:\n return None\n return child", "def _left(node):\n return 2 * node + 1", "def getLeftChild(self):\n return _libsbml.ASTNode_getLeftChild(self)", "def left(self, index):\n try:\n if index == self.root_index():\n index = self.adjacency_list[index][0]\n else:\n index = self.adjacency_list[index][1]\n return index\n except IndexError:\n return -1", "def get_left_child(self, index):\n return self.heap[self.get_left_child_index(index)]", "def left(self, n):\n return n._left", "def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current", "def get_lowest_right_parent(self):\n\t\tif self.parent == None:\n\t\t\t# if we reached the top of the tree\n\t\t\t# just return this node bc the 1/1 node is technically a child of both the 1/0 and 0/1 nodes\n\t\t\treturn self\n\t\telif not self.parent.is_left_child:\n\t\t\t# the parent is a right child\n\t\t\treturn self.parent\n\t\telse:\n\t\t\t# the parent is a left child\n\t\t\treturn self.parent.get_lowest_right_parent()", "def left(self):\n # type: () -> float\n return self._left", "def min(self):\n node = self\n while node.left:\n node = node.left\n return node", "def 
get_left_frac(self):\n\t\tif self.parent == None:\n\t\t\t# if this is the root node\n\t\t\treturn (0,1)\n\t\telif self.is_left_child:\n\t\t\t# if the left side, run up the tree until we find a right child\n\t\t\treturn self.parent.get_left_frac()\n\t\telse:\n\t\t\t# if right child, just return the fraction above it\n\t\t\treturn self.parent.frac" ]
[ "0.78348905", "0.7655214", "0.76067376", "0.7596791", "0.7550032", "0.7471591", "0.73937553", "0.7313952", "0.72924364", "0.72379893", "0.7209601", "0.7154187", "0.71160156", "0.7078668", "0.70374316", "0.70040953", "0.70040953", "0.7002512", "0.69932324", "0.69902736", "0.6989705", "0.6919863", "0.6893479", "0.6876793", "0.68764627", "0.6859515", "0.68551224", "0.68109506", "0.68069273", "0.68030703" ]
0.80510676
0
Returns the node furthest down the tree to the right, or this node if it doesn't have a right child.
def get_rightmost_child(self): if self.right_child == None: return self else: return self.right_child.get_rightmost_child()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_right_node(self):\n\t\tif self.right_child == None:\n\t\t\t# if we are at the end of a branch\n\t\t\tlowest_left_parent = self.get_lowest_left_parent()\n\t\t\tif lowest_left_parent.parent == None:\n\t\t\t\t# if this was called from right edge of the tree\n\t\t\t\t# the lowest left parent is the 1/1 node\n\t\t\t\t# return the 1/0 (infinity) node on the right edge of the tree\n\t\t\t\treturn SBNode(frac=(1,0))\n\t\t\telse:\n\t\t\t\t# if we had a lower left parent\n\t\t\t\treturn lowest_left_parent.parent\n\t\telse:\n\t\t\treturn self.right_child.get_leftmost_child()", "def get_right(self):\n return BinaryNode.or_none(self.right)", "def get_rightchild(self):\n return self._rightchild", "def right(self, node):\r\n if self._col(node.count) < self.width - 1:\r\n return self.nodes[node.count + 1]\r\n else:\r\n return None", "def right(self) -> Optional[\"ExpressionNode\"]:\n return self.__right", "def get_node_right(self, n: MazeCell) -> MazeCell:\n if n.x == self._ncols - 1:\n return None\n else:\n return self.get_node(n.x + 1, n.y)", "def get_right(self):\n return self.right", "def right_left_most_has_right_child():\n from bbst import Bst\n return Bst([1, 5, 3, 10, 8, 6, 20, 7])", "def getRightChild(self):\n return _libsbml.ASTNode_getRightChild(self)", "def get_right(self):\n return self.__right", "def right_child(self, position):\n child = 2 * position + 2\n if child > len(self.table) - 1:\n return None\n return child", "def get_right_child(self, index):\n return self.heap[self.get_right_child_index(index)]", "def left_most_child(n):\n if n == None:\n return None\n while n.left:\n n = n.left\n return n", "def right(self):\n\t\treturn self._right", "def right(self):\n return self._right", "def right(self):\n return self._right", "def getRight(self):\n return self.right", "def _successor(self):\n if self.right is None:\n # get first rightward ancestor\n m = self\n n = m.parent\n while n is not None and m is n.right:\n m = n\n n = n.parent\n else:\n # get leftmost of right child\n n = self.right\n while n.left is not None:\n n = n.left\n return n", "def right_child(self, u):\n return self._ll_tree.get_right_child(u)", "def remove_right(self):\n temp = self._rightchild\n self._rightchild.set_parent(None)\n self.set_rightchild(None)\n return temp", "def right(self):\n return self.left + self.width", "def get_leftmost_child(self):\n\t\tif self.left_child == None:\n\t\t\treturn self\n\t\telse:\n\t\t\treturn self.left_child.get_leftmost_child()", "def find_successor(node):\n # If right child exists, find right child's left-most descendant.\n if node.right is not None:\n node = node.right\n while node.left is not None:\n node = node.left\n return node\n # If no right child, continue up the tree until we are coming\n # from a node's left child, then visit that node.\n last_node = node.right\n while last_node is node.right:\n if node.parent is None:\n return None\n last_node, node = node, node.parent\n # if we get here, we've come up from a left child\n return node", "def get_lowest_right_parent(self):\n\t\tif self.parent == None:\n\t\t\t# if we reached the top of the tree\n\t\t\t# just return this node bc the 1/1 node is technically a child of both the 1/0 and 0/1 nodes\n\t\t\treturn self\n\t\telif not self.parent.is_left_child:\n\t\t\t# the parent is a right child\n\t\t\treturn self.parent\n\t\telse:\n\t\t\t# the parent is a left child\n\t\t\treturn self.parent.get_lowest_right_parent()", "def _get_successor(self):\n tmp = self.right\n while tmp.left:\n tmp = tmp.left\n return tmp", "def 
_right(node):\n return 2 * node + 2", "def rightChild(self, pos):\n return (2 * pos) + 1", "def rightChild(self, pos):\n return (2 * pos) + 1", "def right(self) -> Optional[FloatObject]:\n return self.get(\"/Right\", None)", "def right(self):\n\n return self._right" ]
[ "0.796525", "0.75402415", "0.7468751", "0.74566835", "0.73042727", "0.7213994", "0.71809685", "0.7162541", "0.7078963", "0.70587814", "0.7023619", "0.69773936", "0.692357", "0.6920898", "0.6904356", "0.6904356", "0.68679136", "0.6867284", "0.6864408", "0.6818495", "0.68105125", "0.68086976", "0.67820007", "0.6722077", "0.67170644", "0.67062265", "0.67018664", "0.67018664", "0.6671961", "0.6626315" ]
0.81017864
0
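The two rows above pair mirror-image docstrings with mirror-image traversals. A minimal, self-contained sketch of the node shape implied by those two documents — the class name, field names, and recursive bodies come from the snippets, while the constructor and the example subtree are assumptions, not part of the dataset rows:

```python
class SBNode:
    """Minimal sketch: only the fields the two methods above touch."""

    def __init__(self, frac, left_child=None, right_child=None):
        self.frac = frac                  # (numerator, denominator) tuple
        self.left_child = left_child
        self.right_child = right_child

    def get_leftmost_child(self):
        # Walk left until there is no left child, then return that node.
        if self.left_child is None:
            return self
        return self.left_child.get_leftmost_child()

    def get_rightmost_child(self):
        # Mirror image: walk right until there is no right child.
        if self.right_child is None:
            return self
        return self.right_child.get_rightmost_child()


# Example subtree: 1/2 <- 1/1 -> 2/1 (fractions as (n, d) tuples).
root = SBNode((1, 1), SBNode((1, 2)), SBNode((2, 1)))
print(root.get_leftmost_child().frac)   # (1, 2)
print(root.get_rightmost_child().frac)  # (2, 1)
```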
Returns the lowest parent up the tree from here that is a right child.
def get_lowest_right_parent(self): if self.parent == None: # if we reached the top of the tree # just return this node bc the 1/1 node is technically a child of both the 1/0 and 0/1 nodes return self elif not self.parent.is_left_child: # the parent is a right child return self.parent else: # the parent is a left child return self.parent.get_lowest_right_parent()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lowest_left_parent(self):\n\t\tif self.parent == None:\n\t\t\t# if we reached the top of the tree\n\t\t\t# just return this node bc the 1/1 node is technically a child of both the 1/0 and 0/1 nodes\n\t\t\treturn self\n\t\telif not self.parent.is_left_child:\n\t\t\t# the parent is a right child\n\t\t\treturn self.parent.get_lowest_left_parent()\n\t\telse:\n\t\t\t# the parent is a left child\n\t\t\treturn self.parent", "def next_larger(self):\n if self.right is not None:\n return self.right.find_min()\n current = self\n while current.parent is not None and current is current.parent.right:\n current = current.parent\n return current.parent", "def get_leftmost_child(self):\n\t\tif self.left_child == None:\n\t\t\treturn self\n\t\telse:\n\t\t\treturn self.left_child.get_leftmost_child()", "def findMin(self):\n curr = self\n while curr.hasLeftChild():\n curr = curr.leftChild\n return curr", "def get_parent(self):\n return BinaryNode.or_none(self.parent)", "def _successor(self):\n if self.right is None:\n # get first rightward ancestor\n m = self\n n = m.parent\n while n is not None and m is n.right:\n m = n\n n = n.parent\n else:\n # get leftmost of right child\n n = self.right\n while n.left is not None:\n n = n.left\n return n", "def _parent(node):\n if node == _root():\n return _root()\n return (node + 1) // 2 - 1", "def get_top_parent(node):\n\n\ttop_node = cmds.listRelatives(node, p=True)\n\twhile top_node:\n\t\tnode = top_node[0]\n\t\ttop_node = cmds.listRelatives(node, p=True)\n\treturn node", "def get_left_node(self):\n\t\tif self.left_child == None:\n\t\t\t# if we are at the end of a branch\n\t\t\tlowest_right_parent = self.get_lowest_right_parent()\n\t\t\tif lowest_right_parent.parent == None:\n\t\t\t\t# if this was called from left edge of the tree\n\t\t\t\t# the lowest right parent is the 1/1 node\n\t\t\t\t# return the 0/1 node on the left edge of the tree\n\t\t\t\treturn SBNode(frac=(0,1))\n\t\t\telse:\n\t\t\t\t# if we had a lower right parent\n\t\t\t\treturn lowest_right_parent.parent\n\t\telse:\n\t\t\treturn self.left_child.get_rightmost_child()", "def parent(self):\n other = self\n while True:\n for rev in other._hgmo['parents']:\n parent = Push(rev)\n if parent.id != self.id:\n return parent\n other = parent", "def predecessor(self) -> Union[\"Node\", None]:\n if self.left is not None: # case 1: the node has a left child\n return self.left.max()\n\n else: # case 2: the node does not have a left child\n current = self\n while current.parent is not None: # traverse up\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n\n return None # the root is reached, so no predecessor exists", "def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None", "def _highest_parent_(self):\n if self._parent_ is None:\n return self\n return self._parent_._highest_parent_", "def get_right_node(self):\n\t\tif self.right_child == None:\n\t\t\t# if we are at the end of a branch\n\t\t\tlowest_left_parent = self.get_lowest_left_parent()\n\t\t\tif lowest_left_parent.parent == None:\n\t\t\t\t# if this was called from right edge of the tree\n\t\t\t\t# the lowest left parent is the 1/1 node\n\t\t\t\t# return the 1/0 (infinity) node on the right edge of the tree\n\t\t\t\treturn SBNode(frac=(1,0))\n\t\t\telse:\n\t\t\t\t# if we had a lower left parent\n\t\t\t\treturn lowest_left_parent.parent\n\t\telse:\n\t\t\treturn self.right_child.get_leftmost_child()", "def get_rightmost_child(self):\n\t\tif self.right_child == 
None:\n\t\t\treturn self\n\t\telse:\n\t\t\treturn self.right_child.get_rightmost_child()", "def get_sibling(self):\r\n if (not self) or (self.parent is None):\r\n return None\r\n else:\r\n if self is self.parent.left:\r\n return self.parent.right\r\n else:\r\n return self.parent.left", "def left_most_child(n):\n if n == None:\n return None\n while n.left:\n n = n.left\n return n", "def return_parent(self):\n # Return parent if completed\n if self.completed:\n return self.father\n return -1", "def lowestCommonAncestor_1(self, root, p, q):\n while root is not None:\n if p.val < root.val and q.val < root.val:\n root = root.left\n elif p.val > root.val and q.val > root.val:\n root = root.right\n else:\n break\n\n return root", "def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current", "def get_left_child_index(self, parent):\n return 2*parent+1", "def successor(self) -> Union[\"Node\", None]:\n if self.right is not None: # case 1: the node has a right child\n return self.right.min()\n\n else: # case 2: the node does not have a right child\n current = self\n while current.parent is not None: # traverse up\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n\n return None # the root is reached, so no successor exists", "def parent(self):\n return self if self.is_root else self.__parent", "def side_of_parent(self):\n if self._node_empty(self._parent):\n return -1\n\n if self._parent.left is self:\n return 0\n elif self._parent.right is self:\n return 1\n else:\n raise ValueError('Node invalid.')", "def _findMin(root, parent):\n\n # we use an ugly trick: the parent node is passed in as an argument\n # so that eventually when the leftmost child is reached, the \n # call can return both the parent to the successor and the successor\n\n if root.left:\n return _findMin(root.left, root)\n else:\n return [parent, root]", "def parent(self, node):\n self._validate_node(node)\n idx = node._index\n if idx == 0:\n return None # Root node has no parent\n if idx % 2 == 0:\n return self._array[(idx-2)//2] # Right child (even number)\n return self._array[(idx-1)//2] # left child (odd number)", "def get_leftchild(self):\n return self._leftchild", "def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n high = max([p.val, q.val])\n low = min([p.val, q.val])\n node = root\n while True:\n if node.val < low:\n node = node.right\n elif node.val > high:\n node = node.left\n else:\n return node", "def before(self, p):\n self._validate(p)\n # if there is a left subtree, then the first positiion of in subtree \n # rooted at the left(p) will be the immediate position before p\n if self.left(p) is not None:\n return self._subtree_first_position(self.left(p))\n # if there is no left substree, \n # the immediate smaller position will be the parent of the \"left turn\" position\n # when going upward. \n else: \n walk = p # if p is the root of the tree None will be returned\n above = self.parent(walk)\n # not None is the boundary for root node\n # walk == self.left(above) is to look for \"left turn\":\n # if walk != self.left(above), that means there is left turn\n while above is not None and walk==self.left(above):\n walk = above\n above = self.parent(walk)\n return above", "def _update_min(self):\n tmp = self\n while tmp.left is not None:\n tmp = tmp.left\n return tmp.parent.key" ]
[ "0.8165508", "0.7484334", "0.7232219", "0.72201324", "0.7219222", "0.7097782", "0.706845", "0.70569", "0.7031903", "0.6971588", "0.6931417", "0.6929054", "0.6923967", "0.69074976", "0.6900387", "0.6890993", "0.6888747", "0.6853589", "0.68239844", "0.67936134", "0.6713629", "0.6685374", "0.66716856", "0.66498345", "0.6642043", "0.66288793", "0.66113895", "0.66008264", "0.65727514", "0.65665364" ]
0.879376
0
Returns the lowest parent up the tree from here that is a left child.
def get_lowest_left_parent(self): if self.parent == None: # if we reached the top of the tree # just return this node bc the 1/1 node is technically a child of both the 1/0 and 0/1 nodes return self elif not self.parent.is_left_child: # the parent is a right child return self.parent.get_lowest_left_parent() else: # the parent is a left child return self.parent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lowest_right_parent(self):\n\t\tif self.parent == None:\n\t\t\t# if we reached the top of the tree\n\t\t\t# just return this node bc the 1/1 node is technically a child of both the 1/0 and 0/1 nodes\n\t\t\treturn self\n\t\telif not self.parent.is_left_child:\n\t\t\t# the parent is a right child\n\t\t\treturn self.parent\n\t\telse:\n\t\t\t# the parent is a left child\n\t\t\treturn self.parent.get_lowest_right_parent()", "def next_larger(self):\n if self.right is not None:\n return self.right.find_min()\n current = self\n while current.parent is not None and current is current.parent.right:\n current = current.parent\n return current.parent", "def get_leftmost_child(self):\n\t\tif self.left_child == None:\n\t\t\treturn self\n\t\telse:\n\t\t\treturn self.left_child.get_leftmost_child()", "def findMin(self):\n curr = self\n while curr.hasLeftChild():\n curr = curr.leftChild\n return curr", "def get_parent(self):\n return BinaryNode.or_none(self.parent)", "def _successor(self):\n if self.right is None:\n # get first rightward ancestor\n m = self\n n = m.parent\n while n is not None and m is n.right:\n m = n\n n = n.parent\n else:\n # get leftmost of right child\n n = self.right\n while n.left is not None:\n n = n.left\n return n", "def _parent(node):\n if node == _root():\n return _root()\n return (node + 1) // 2 - 1", "def get_top_parent(node):\n\n\ttop_node = cmds.listRelatives(node, p=True)\n\twhile top_node:\n\t\tnode = top_node[0]\n\t\ttop_node = cmds.listRelatives(node, p=True)\n\treturn node", "def get_left_node(self):\n\t\tif self.left_child == None:\n\t\t\t# if we are at the end of a branch\n\t\t\tlowest_right_parent = self.get_lowest_right_parent()\n\t\t\tif lowest_right_parent.parent == None:\n\t\t\t\t# if this was called from left edge of the tree\n\t\t\t\t# the lowest right parent is the 1/1 node\n\t\t\t\t# return the 0/1 node on the left edge of the tree\n\t\t\t\treturn SBNode(frac=(0,1))\n\t\t\telse:\n\t\t\t\t# if we had a lower right parent\n\t\t\t\treturn lowest_right_parent.parent\n\t\telse:\n\t\t\treturn self.left_child.get_rightmost_child()", "def parent(self):\n other = self\n while True:\n for rev in other._hgmo['parents']:\n parent = Push(rev)\n if parent.id != self.id:\n return parent\n other = parent", "def predecessor(self) -> Union[\"Node\", None]:\n if self.left is not None: # case 1: the node has a left child\n return self.left.max()\n\n else: # case 2: the node does not have a left child\n current = self\n while current.parent is not None: # traverse up\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n\n return None # the root is reached, so no predecessor exists", "def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None", "def _highest_parent_(self):\n if self._parent_ is None:\n return self\n return self._parent_._highest_parent_", "def get_right_node(self):\n\t\tif self.right_child == None:\n\t\t\t# if we are at the end of a branch\n\t\t\tlowest_left_parent = self.get_lowest_left_parent()\n\t\t\tif lowest_left_parent.parent == None:\n\t\t\t\t# if this was called from right edge of the tree\n\t\t\t\t# the lowest left parent is the 1/1 node\n\t\t\t\t# return the 1/0 (infinity) node on the right edge of the tree\n\t\t\t\treturn SBNode(frac=(1,0))\n\t\t\telse:\n\t\t\t\t# if we had a lower left parent\n\t\t\t\treturn lowest_left_parent.parent\n\t\telse:\n\t\t\treturn self.right_child.get_leftmost_child()", "def get_rightmost_child(self):\n\t\tif self.right_child == 
None:\n\t\t\treturn self\n\t\telse:\n\t\t\treturn self.right_child.get_rightmost_child()", "def get_sibling(self):\r\n if (not self) or (self.parent is None):\r\n return None\r\n else:\r\n if self is self.parent.left:\r\n return self.parent.right\r\n else:\r\n return self.parent.left", "def left_most_child(n):\n if n == None:\n return None\n while n.left:\n n = n.left\n return n", "def return_parent(self):\n # Return parent if completed\n if self.completed:\n return self.father\n return -1", "def lowestCommonAncestor_1(self, root, p, q):\n while root is not None:\n if p.val < root.val and q.val < root.val:\n root = root.left\n elif p.val > root.val and q.val > root.val:\n root = root.right\n else:\n break\n\n return root", "def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current", "def get_left_child_index(self, parent):\n return 2*parent+1", "def successor(self) -> Union[\"Node\", None]:\n if self.right is not None: # case 1: the node has a right child\n return self.right.min()\n\n else: # case 2: the node does not have a right child\n current = self\n while current.parent is not None: # traverse up\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n\n return None # the root is reached, so no successor exists", "def parent(self):\n return self if self.is_root else self.__parent", "def side_of_parent(self):\n if self._node_empty(self._parent):\n return -1\n\n if self._parent.left is self:\n return 0\n elif self._parent.right is self:\n return 1\n else:\n raise ValueError('Node invalid.')", "def _findMin(root, parent):\n\n # we use an ugly trick: the parent node is passed in as an argument\n # so that eventually when the leftmost child is reached, the \n # call can return both the parent to the successor and the successor\n\n if root.left:\n return _findMin(root.left, root)\n else:\n return [parent, root]", "def parent(self, node):\n self._validate_node(node)\n idx = node._index\n if idx == 0:\n return None # Root node has no parent\n if idx % 2 == 0:\n return self._array[(idx-2)//2] # Right child (even number)\n return self._array[(idx-1)//2] # left child (odd number)", "def get_leftchild(self):\n return self._leftchild", "def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n high = max([p.val, q.val])\n low = min([p.val, q.val])\n node = root\n while True:\n if node.val < low:\n node = node.right\n elif node.val > high:\n node = node.left\n else:\n return node", "def before(self, p):\n self._validate(p)\n # if there is a left subtree, then the first positiion of in subtree \n # rooted at the left(p) will be the immediate position before p\n if self.left(p) is not None:\n return self._subtree_first_position(self.left(p))\n # if there is no left substree, \n # the immediate smaller position will be the parent of the \"left turn\" position\n # when going upward. \n else: \n walk = p # if p is the root of the tree None will be returned\n above = self.parent(walk)\n # not None is the boundary for root node\n # walk == self.left(above) is to look for \"left turn\":\n # if walk != self.left(above), that means there is left turn\n while above is not None and walk==self.left(above):\n walk = above\n above = self.parent(walk)\n return above", "def _update_min(self):\n tmp = self\n while tmp.left is not None:\n tmp = tmp.left\n return tmp.parent.key" ]
[ "0.879376", "0.7484334", "0.7232219", "0.72201324", "0.7219222", "0.7097782", "0.706845", "0.70569", "0.7031903", "0.6971588", "0.6931417", "0.6929054", "0.6923967", "0.69074976", "0.6900387", "0.6890993", "0.6888747", "0.6853589", "0.68239844", "0.67936134", "0.6713629", "0.6685374", "0.66716856", "0.66498345", "0.6642043", "0.66288793", "0.66113895", "0.66008264", "0.65727514", "0.65665364" ]
0.8165508
1
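The two parent-climbing documents above are the upward half of an in-order neighbour lookup (the get_left_node / get_right_node negatives use them exactly that way). A hedged sketch, with the parent pointers and is_left_child flags wired by hand since no tree builder appears in these rows:

```python
class SBNode:
    """Sketch only: parent links and is_left_child flags are assumed fields."""

    def __init__(self, frac):
        self.frac = frac
        self.parent = None
        self.is_left_child = False

    def get_lowest_right_parent(self):
        # Climb until reaching an ancestor that is itself a right child
        # (or the root, which the docstrings treat as belonging to both sides).
        if self.parent is None:
            return self
        if not self.parent.is_left_child:
            return self.parent
        return self.parent.get_lowest_right_parent()

    def get_lowest_left_parent(self):
        # Mirror image: climb until reaching an ancestor that is a left child.
        if self.parent is None:
            return self
        if not self.parent.is_left_child:
            return self.parent.get_lowest_left_parent()
        return self.parent


# Hand-wired chain: root (1,1) -> left child (1,2) -> its right child (2,3).
root, a, b = SBNode((1, 1)), SBNode((1, 2)), SBNode((2, 3))
a.parent, a.is_left_child = root, True
b.parent, b.is_left_child = a, False
print(b.get_lowest_left_parent().frac)   # (1, 2): nearest ancestor that is a left child
print(b.get_lowest_right_parent().frac)  # (1, 1): climbs past (1, 2) up to the root
```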
Search through the tree and return the SBNode with the target fraction.
def search_tree(self, tgt_frac): if self.frac == tgt_frac: return self elif compare_fracs(self.frac, tgt_frac): # tgt is less than self and to left return self.left_child.search_tree(tgt_frac) else: # tgt is greater than self and to right return self.right_child.search_tree(tgt_frac)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select(self):\n best_qsa_star_add = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n qsa_star_add = qsa_star + 0.2 * self.c * sqrt(log(self.visits) / c.visits)\n if qsa_star_add > best_qsa_star_add:\n best_qsa_star_add = qsa_star_add\n best_node = c\n return best_node", "def breadth_first_search(self, target: Dict) -> Optional[Node]:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node: Node = assist_queue.popleft()\n flag = True\n for k, v in target.items():\n flag = flag and getattr(current_node, k) == v\n if not flag:\n break\n if flag:\n return current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)\n return None", "def search(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n current_node = self._root\n while current_node:\n if val == current_node._data:\n return current_node\n if val > current_node._data:\n current_node = current_node._rkid\n else:\n current_node = current_node._lkid\n return", "def search(self, val):\n currentNode = self.rootNode\n while True:\n if currentNode is None:\n print(\"Number not found.\")\n return None\n elif currentNode.val == val:\n print(\"Number found.\")\n return currentNode\n elif currentNode.val < val:\n currentNode = currentNode.right\n else:\n currentNode = currentNode.left", "def findNode(self, target: hash.hash.Hash):\n for bucket in self.buckets:\n if bucket.inRange(nodeID):\n for node in bucket:\n if node.hash == target:\n return node\n \n return None\n return None", "def tree_search(problem, frontier):\n compteur = 0\n stop = False\n frontier.append(Node(problem.initial))\n while frontier and not stop:\n compteur+=1\n node = frontier.pop()\n if problem.goal_test(node.state):\n return node\n if(compteur <= limit):\n frontier.extend(node.expand(problem))\n else:\n stop = True\n \n return None", "def search(self):\r\n #get the initial state\r\n initialState = State()\r\n \r\n #create root node\r\n rootNode = Node(initialState)\r\n \r\n #show the search tree explored so far\r\n treeplot = TreePlot()\r\n treeplot.generateDiagram(rootNode, rootNode)\r\n \r\n #perform search from root node\r\n self.performBacktrackSearch(rootNode, rootNode)\r\n \r\n rootNode.printTree()", "def test_search_finds_node(bst_balanced):\n assert bst_balanced.search(1).val == 1", "def find_BFS(self, value):\n to_visit = [self]\n while to_visit:\n curr = to_visit.pop(0) # BFS -> .pop(0) -> queue \n if curr.value == value:\n return curr\n to_visit.extend(curr.children)", "def search(self, value):\n return BST.__search(self._root, value)", "def find_node(self, value):\n for (fun, node) in self.__root.__fast_find:\n if fun(value):\n return node\n return None", "def select(self, index, source=None):\r\n if index > self.root.size_tree or index <= 0:\r\n raise IndexError(\"The index is out of range!\")\r\n\r\n check_node = self.root if not source else source\r\n\r\n while True:\r\n size_left_tree = check_node.left_child.size_tree\r\n if size_left_tree == index - 1:\r\n break\r\n elif size_left_tree >= index:\r\n check_node = check_node.left_child\r\n else:\r\n check_node = self.select(index - size_left_tree - 1, source=check_node.right_child)\r\n break\r\n\r\n return check_node", "def searchTreeF(node, 
d):\n if isinstance(node, DecisionTree):\n if node.i == 999: return node.mostCommon()\n if d[node.i] < node.v:\n return searchTreeF(node.lt, d)\n else:\n return searchTreeF(node.gt, d)\n else:\n return node", "def select(self, index, source=None):\n if index > self.root.size_tree or index <= 0:\n raise IndexError(\"The index is out of range!\")\n\n check_node = self.root if not source else source\n\n while True:\n size_left_tree = check_node.left_child.size_tree\n if size_left_tree == index - 1:\n break\n elif size_left_tree >= index:\n check_node = check_node.left_child\n else:\n check_node = self.select(index - size_left_tree - 1, source=check_node.right_child)\n break\n\n return check_node", "def find_significant_children(tree, node):\n if node not in tree.children:\n return None\n smax = 1\n c1, c2 = tree.children[node]\n sch = c1, c2\n while tree.population[c1] > 1 or tree.population[c2] > 1:\n if tree.population[c1] >= tree.population[c2]:\n small, big = c2, c1\n else:\n small, big = c1, c2\n if tree.population[small] >= smax:\n smax = tree.population[small]\n sch = small, big\n c1, c2 = tree.children[big]\n return sch", "def search(self, target):\n if DEBUG: print('search({})'.format(target))\n\n result = False\n\n cur = self.head\n \n output = \"\\tPath: \"\n \n while cur:\n output += \"{}\".format(cur.val)\n if not cur.next and not cur.below:\n output += \" END\"\n break\n elif cur.next == None or\\\n target < cur.next.val:\n cur = cur.below\n output += \" v \"\n elif cur.next.val == target:\n result = True\n output += \" -> {}! FOUND\".format(target)\n break\n elif target > cur.next.val:\n output += \" -> \"\n cur = cur.next\n else:\n print(\"\\thow did i get here\")\n\n if DEBUG: print(output)\n if DEBUG: print('\\t{}'.format(result))\n return result", "def select_leaf(self):\n current = self\n best_child = None\n selected_nodes_R = 0\n while current.isExpanded:\n maxUCT = - float('inf')\n for child in current.children.values():\n UCT = child.compute_uct()\n if UCT > maxUCT:\n maxUCT = UCT\n best_child = child\n\n current = best_child\n selected_nodes_R += current.score\n return current, selected_nodes_R", "def mcts_search(self, state):\n assert state.current_player() == self.player\n root = SearchNode(None, 1)\n for _ in range(self.max_simulations):\n visit_path, working_state = self._apply_tree_policy(root, state)\n if working_state.is_terminal():\n node_value = working_state.player_return(self.player)\n else:\n node_value = self.evaluator.evaluate(\n working_state, self.player, self._random_state)\n\n for node in visit_path:\n node.total_reward += node_value * node.player_sign\n node.explore_count += 1\n\n most_visited = root.most_visited_child()\n\n if self.verbose:\n print(\"Root:\", root.to_str())\n print(\"Children:\")\n print(root.children_str(working_state))\n print(\"Children of chosen:\")\n chosen_state = state.clone()\n chosen_state.apply_action(most_visited.action)\n print(most_visited.children_str(chosen_state))\n\n return most_visited.action", "def uct_select_child(self):\n s = sorted(self.child_nodes, key=lambda c: c.Q + sqrt(\n 2 * log(self.visits) / c.visits))[-1]\n return s", "def tree_query(self, pta_root):\n self.sul.pre()\n curr_node = pta_root\n\n inputs = []\n outputs = []\n\n while True:\n\n if curr_node.children:\n frequency_sum = sum(curr_node.input_frequencies.values())\n if frequency_sum == 0:\n # uniform sampling in case we have no information\n inp = choice(list(curr_node.children.keys()))\n else:\n # use float random rather than integers to be 
able to work with non-integer frequency information\n selection_value = random() * frequency_sum\n inp = None\n for i in curr_node.input_frequencies.keys():\n inp = i\n selection_value -= curr_node.input_frequencies[i]\n if selection_value <= 0:\n break\n # curr_node.input_frequencies[inp] -= 1\n\n inputs.append(inp)\n out = self.sul.step(inp)\n new_node = curr_node.get_child(inp, out)\n\n if new_node:\n outputs.append(out)\n curr_node = new_node\n else:\n self.sul.post()\n return\n else:\n curr_node = pta_root\n for i, o in zip(inputs, outputs):\n self.curr_node.input_frequencies[i] -= 1\n curr_node = curr_node.get_child(i, o)\n self.sul.post()\n return", "def getExactBranch(root, tag):\n tags = tag.split(':')\n if tags[0] == 'stats':\n tags = tags[1:]\n n = root\n for t in tags:\n newChildren = []\n for child in n.children:\n if child.nodeName == t or t == '*':\n newChildren.append(child)\n n.children = newChildren\n if n.children:\n n = n.children[0]\n if tags[-1] != '*':\n n.children = [] # prune off non-specified children tags", "def select_final(self):\n best_qsa_star = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n if qsa_star > best_qsa_star:\n best_qsa_star = qsa_star\n best_node = c\n return best_node.action", "def lookup(self,entry):\n if self.type == 'v':\n return self\n v = entry[self.feature]\n assert v != None\n if self.type == 's':\n c = None\n try:\n c = self.children[v]\n except KeyError:\n #print \"Unseen value for feature\",self.feature,\": \",v\n best = None\n bestDist = float('inf')\n for (val,c) in self.children.iteritems():\n if abs(val - v) < bestDist:\n bestDist = abs(val - v)\n best = c\n c = best\n return c.lookup(entry)\n elif self.type == 'i':\n if v <= self.value:\n return self.children[0].lookup(entry)\n else:\n return self.children[1].lookup(entry)\n raise RuntimeError(\"Invalid DecisionTreeNode type?\")", "def find(self, value):\n # initialize node as root\n node = self.root\n\n # find value\n while node != None:\n\n # value found: return node\n if node.value == value:\n return node\n\n # value is smaller than node: search in left sub tree\n elif node.value > value:\n node = node.left\n\n # value is bigger than node: search in right sub tree\n else:\n node = node.right\n\n # value not found: return None\n return None", "def _node_search(self, mapping: WizardDataMappingBaseEnum, root_node: str, target_dict: dict) -> tuple:\n keys = mapping.get_registration_field_reference(root_node).split(\".\")\n max_depth: int = len(keys) - 1\n\n return self._recursive_search(target_dict, keys, max_depth)", "def __arb__(self):\n if self.tree.total < 1:\n return None\n if self.tree.total % 2 == 0:\n return self.first()\n else:\n return self.last()", "def best_first_graph_search(self, problem, f):\n f = memoize(f, 'f')\n # Set starting node\n node = SearchNode(problem.initial)\n # If the goal is reached, return the resulting node\n if problem.goal_test(node.state):\n return node\n\n # Set priority queue to organize nodes\n # in order of lowest f\n frontier = PriorityQueue(min, f)\n # Append the first node\n frontier.append(node)\n # Initialize empty set\n explored = set()\n # While the frontier is not empty\n while frontier:\n # Get the first node with lowest f\n node = frontier.pop()\n # Check if node is goal\n if problem.goal_test(node.state):\n 
return node\n # Add the state to the explored set\n explored.add(tuple(node.state))\n # For every child in the expanded node\n for child in node.expand(problem):\n # If the child is not a repeat child append it\n if child.state not in explored and child not in frontier:\n frontier.append(child)\n # If the child is in the frontier\n # This statement basically just filters out children that\n # have the same state but lower path costs\n elif child in frontier:\n # Select that child\n incumbent = frontier[child]\n # If one child is has a lower path cost\n if f(child) < f(incumbent):\n # Remove the child that is farther\n del frontier[incumbent]\n frontier.append(child)\n return None", "def get_first(self) -> object:\n #binary search tree == empty\n if self.root is None:\n return None\n\n # return\n return self.root.value", "def gQ(n):\n return b.toNode('.'.join(n.fullName().split('.')[:-1])) or b.root()", "def query(self, point):\n if self.root is None:\n raise Exception('tree must be built first')\n return self.root.query(point)" ]
[ "0.5656099", "0.55765146", "0.55034596", "0.5352702", "0.5343666", "0.53300196", "0.52742344", "0.5213806", "0.5170091", "0.51638526", "0.51331604", "0.5125105", "0.51183134", "0.5118209", "0.5068344", "0.50424486", "0.50326693", "0.50320405", "0.50314623", "0.5024199", "0.50223327", "0.5014868", "0.5009242", "0.4997078", "0.4996951", "0.49805963", "0.49545395", "0.4934216", "0.49328235", "0.4930002" ]
0.7159926
0
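The search_tree document above descends by comparing the current fraction with the target. compare_fracs is only named, never defined, in these rows; the cross-multiplication below is an assumed implementation chosen to match the snippet's comments (True means the target is smaller and lies to the left):

```python
def compare_fracs(cur_frac, tgt_frac):
    # Assumed semantics, matching the snippet's comments:
    #   True  -> tgt_frac <  cur_frac (descend left)
    #   False -> tgt_frac >= cur_frac (descend right)
    (cn, cd), (tn, td) = cur_frac, tgt_frac
    return tn * cd < cn * td   # cross-multiply; also handles (1, 0) as infinity


class SBNode:
    def __init__(self, frac, left_child=None, right_child=None):
        self.frac = frac
        self.left_child = left_child
        self.right_child = right_child

    def search_tree(self, tgt_frac):
        if self.frac == tgt_frac:
            return self
        elif compare_fracs(self.frac, tgt_frac):
            return self.left_child.search_tree(tgt_frac)    # target lies to the left
        else:
            return self.right_child.search_tree(tgt_frac)   # target lies to the right


# 1/2 and 2/1 sit left and right of 1/1 in a Stern-Brocot tree.
root = SBNode((1, 1), SBNode((1, 2)), SBNode((2, 1)))
print(root.search_tree((1, 2)).frac)  # (1, 2)
print(root.search_tree((2, 1)).frac)  # (2, 1)
```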
Print results of PCA analysis to the command line.
def printPCAresults(pc_ana, param_list, print_components=False): print(f'explained variance ratio ' f'({pc_ana.components_.shape[0]} components): ' f'{sum(pc_ana.explained_variance_ratio_):2.2f} ' f'({pc_ana.explained_variance_ratio_.round(2)})') if print_components: for j, principal_component in enumerate(pc_ana.components_): print(f'Principal component {j+1}') for idx, lbl in enumerate(param_list): print(f'{principal_component[idx]: 2.4f} * {lbl}') print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runPCA(data, reducedDimensions, showScree):\n print(\"-->Running PCA.\")\n latent = gp.pca(data['features'], reducedDimensions, showScree, savePlots)\n plot(latent, data['colours'], reducedDimensions, \"Iris Dataset\", \"PCA\")", "def pca():\n pca = PCA()\n\n data = pca.fit_transform([[22,23,24],[23,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54]])\n\n print(data)", "def plot_PCA():\n X, languages = prepare_data_matrix()\n #print(X)\n eigenvectors, eigenvalues=power_iteration_two_components(X)\n explain = explained_variance_ratio(X, eigenvectors, eigenvalues)\n X=project_to_eigenvectors(X,eigenvectors)\n\n #print(X)\n plt.title('Explained variance: %.3f' % explain)\n plt.scatter(X[:,0], X[:,1])\n for i in range(len(X)):\n plt.text(X[i,0], X[i,1], languages[i][:3])\n plt.show()", "def pca_helper(_args):\n # unpack args\n _trimmed_frame, _win, _sou_name, _sou_dir, _out_path, \\\n _library, _library_names_short, _fwhm, _plsc, _sigma, _nrefs, _klip = _args\n\n # run pca\n try:\n output = pca(_trimmed_frame=_trimmed_frame, _win=_win, _sou_name=_sou_name,\n _sou_dir=_sou_dir, _out_path=_out_path,\n _library=_library, _library_names_short=_library_names_short,\n _fwhm=_fwhm, _plsc=_plsc, _sigma=_sigma, _nrefs=_nrefs, _klip=_klip)\n return output\n except Exception as _e:\n print(_e)\n return None\n # finally:\n # return None", "def doPCA(df, grouping_variable, features_to_analyse, plot_save_dir=None, PCs_to_keep=10):\n \n data = df[features_to_analyse]\n \n # Normalise the data before PCA\n zscores = data.apply(zscore, axis=0)\n \n # Drop features with NaN values after normalising\n colnames_before = list(zscores.columns)\n zscores.dropna(axis=1, inplace=True)\n colnames_after = list(zscores.columns)\n nan_cols = [col for col in colnames_before if col not in colnames_after]\n if len(nan_cols) > 0:\n print(\"Dropped %d features with NaN values after normalization:\\n%s\" %\\\n (len(nan_cols), nan_cols))\n\n print(\"\\nPerforming Principal Components Analysis (PCA)...\")\n \n # Fit the PCA model with the normalised data\n pca = PCA()\n pca.fit(zscores)\n \n # Project data (zscores) onto PCs\n projected = pca.transform(zscores) # A matrix is produced\n # NB: Could also have used pca.fit_transform()\n\n # Plot summary data from PCA: explained variance (most important features)\n important_feats, fig = pcainfo(pca, zscores, PC=1, n_feats2print=10) \n \n if plot_save_dir:\n # Save plot of PCA explained variance\n PCAplotroot = Path(plot_save_dir) / 'PCA'\n PCAplotroot.mkdir(exist_ok=True, parents=True)\n PCAplotpath = PCAplotroot / ('control_variation_in_' + \n grouping_variable + \n '_PCA_explained.eps')\n savefig(PCAplotpath, tight_layout=True, tellme=True, saveFormat='eps')\n plt.pause(2); plt.close()\n else:\n PCAplotpath=None\n plt.show(); plt.pause(2); plt.close()\n \n # Store the results for first few PCs in dataframe\n projected_df = pd.DataFrame(projected[:,:PCs_to_keep],\n columns=['PC' + str(n+1) for n in range(PCs_to_keep)]) \n \n # Add concatenate projected PC results to metadata\n projected_df.set_index(df.index, inplace=True) # Do not lose video snippet index position\n \n df = pd.concat([df, projected_df], axis=1)\n\n # Plot PCA - Variation in control data with respect to a given variable (eg. 
date_recording_yyyymmdd)\n \n # 2-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_explained', \n '_PCA_2_components'))\n title = \"2-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=2)\n plt.pause(2); plt.close()\n \n # 3-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_2_components', \n '_PCA_3_components'))\n title = \"3-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=3, rotate=False)\n plt.pause(2)\n \n return df", "def doPCA(self):\n data = [l.points for l in self.preprocessedLandmarks]\n data.append(data[0])\n\n S = np.cov(np.transpose(data))\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n sorted_values = np.flip(eigenvalues.argsort(), 0)[:self.pcaComponents]\n\n self.eigenvalues = eigenvalues[sorted_values]\n self.eigenvectors = eigenvectors[:, sorted_values]\n # print(self.eigenvalues)\n return self", "def pca(self, name=\"\", display=True, saveFile = False, saveFig = False, fileLocation=\"\", fullscreen=False):\n if fileLocation == '':\n fileLocation = self.fileLocation\n try:\n return modules.pca(self.experimentFullIntersection.copy(), self.cellLines, self.timePoints, self.secondTimePoints, name, display, saveFile, saveFig, fileLocation, fullscreen, self.colors)\n except AttributeError:\n print(\"ERROR: Combine replicates first.\")", "def pca(filename, class_col, sample):\n\n\tX = ml.read_file( filename )\n\n\t# Remove the class label from the dataset so that it doesn't prevent us from training a classifier in the future\n\tif class_col != None:\n\t\ttry:\n\t\t\tclassifier = ml.pd.DataFrame(X.iloc[:, class_col])\n\t\texcept:\n\t\t\tml.sys.exit('Class column out of range.')\n\t\tm = X.shape[1]\n\t\tkeepers = list(range(m))\n\t\tkeepers.pop( class_col )\n\n\t# Determine whether sample is present\n\tX_input = X.iloc[:, keepers]\n\n\t# # Visualize raw data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = X, x = X_input['Petal Length (cm)'], y = X_input['Petal Width (cm)'], color = 'k', alpha = 0.5).set(title = filename + ' raw')\n\n\t# Normalize features by Z-score (so that features' units don't dominate PCs), and apply PCA\n\tX_norm, X_mean, X_std = ml.z_norm(X_input)\n\tY, P, e_scaled = ml.pca_cov( X_norm )\n\n\t# Visualize 2D PC data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = Y, x = Y.iloc[:, 0], y = Y.iloc[:, 1], alpha=0.5, color = 'k').set(title = 'PC 2D Projection')\n\n\t# Visualize PCs with heatmap and cree plot\n\tinfo_retention = ml.scree_plot( e_scaled )\n\tml.pc_heatmap( P, info_retention )\n\n\t# Reconstruct data\n\treconstruct(X_input, X_mean, X_std, Y, P, e_scaled, 2, 3)\n\n\tml.plt.show()", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , 
task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"", "def pca(self, name=\"\", display=True, saveFile = False, saveFig = False, fileLocation=\"\", fullscreen=False):\n if fileLocation == '':\n fileLocation = self.fileLocation\n modules.pca(self.cellFullIntersection.copy(), self.cellLines, self.timePoints, self.secondTimePoints, name, display, saveFile, saveFig, fileLocation, fullscreen, self.colors)", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def plot_PCA(fig_name):\n dir = \"log/peps mini\"\n pattern = r'(internal|access|lock)\\\\\\d{1,2}.csv$'\n pattern_valid = r'(3|6|9|12).csv$'\n utils.construct_set(dir, pattern, pattern_valid)\n X, y = utils.load_all()\n utils.plot_PCA(X, y)\n plt.title(fig_name)\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '.png')", "def main():\n test_cases = ast.literal_eval(sys.argv[1])\n results = str(my_info()) + '\\t\\t'\n for test_case in test_cases:\n mode = test_case[0]\n id_1 = int(test_case[1])\n id_2 = int(test_case[2])\n if mode == 'jc':\n results += str(Jaccard_Coefficient(id_1, id_2)) + '\\t\\t'\n elif mode == 'cc':\n results += str(Correlation_Coefficient(id_1, id_2)) + '\\t\\t'\n else:\n exit('bad command')\n print results + '\\n'", "def do_pca(x_data, n_class):\n\n run_pca = decomposition.PCA(n_components = n_class)\n pca_fit = run_pca.fit(x_data)\n #pca_fit\n x_pca = run_pca.transform(x_data);\n #pca_cov = run_pca.get_covariance(x_pca)\n #pca_score = run_pca.score(x_data)\n pca_noise = pca_fit.noise_variance_\n pca_var_explained = pca_fit.explained_variance_ratio_\n\n return x_pca, pca_noise, pca_var_explained", "def main():\n\tparser = setup_argument_parser()\n\targuments = parser.parse_args()\n\tto_print = arguments.to_print\n\techo(to_print)", "def main(args):\n # Results: print to console and also write to output file\n pass", "def analyse_pca(cluster, three_dim=True):\n # create data array and name array:\n A = cluster.data_matrix\n names = cluster.row_header\n\n # assign colours to samples:\n colorconvert = {'F':'go', 'S':'co', 1:'ro', 2:'go', 3:'ko', 4:'bo', 5:'co', 6:'mo', 7:'yo', 8:'r<', 9:'g<', 10:'k<', 11:'b<', 12:'c<', 13:'m<', 14:'y<', 15:'rs', 16:'gs', 17:'ks', 18:'bs', 19:'cs', 20:'ms', 21:'ys' }\n colourlist = []\n for name in names:\n phase = re.search(\"(F|S)\", name)\n if phase is not None:\n #print phase.groups()[0]\n colourlist.append(colorconvert[phase.groups()[0]])\n else:\n colourlist.append('ko')\n #print names, \"\\n\", colourlist\n\n ############# PCA using numpy SVD decomposition ##################################\n print \"#\" * 30\n print \"SVA analysis\"\n U, s, Vt = numpy.linalg.svd(A, full_matrices=True)\n V = Vt.T\n\n # sort the PCs by descending order of the singular values (i.e. 
by the\n # proportion of total variance they explain)\n ind = numpy.argsort(s)[::-1]\n U = U[:, ind]\n s = s[ind]\n V = V[:, ind]\n S = numpy.diag(s)\n\n sumval = sum([ i ** 2 for i in s ])\n\n # if we use all of the PCs we can reconstruct the noisy signal perfectly\n\n # Mhat = numpy.dot(U, numpy.dot(S, V.T))\n # if we use only the first 2 PCs the reconstruction is less accurate\n # Mhat2 = numpy.dot(U[:, :2], numpy.dot(S[:2, :2], V[:,:2].T))\n\n # To remove the variance of the 1st PC, which is primarily associated with experimenter:\n matrix_reduced = numpy.dot(U[:,1:], numpy.dot(S[1:,1:], V[:,1:].T))\n #for checking decomposition is occurring properly:\n #print numpy.shape(U)\n #print numpy.shape(S)\n #print numpy.shape(Vt)\n #print numpy.shape(matrix_reduced)\n\n #print \"#\" * 30\n #print \"SVD eigenvectors/loadings:\"\n #print header[:var_num] , \"\\n\"\n #print U # need to work out appropriate way to calculate loadings!\n #print \"#\" * 30\n #print \"checking distance of loadings (eigen vectors)\"\n #for col in loadings[:,:]:\n # print col\n # print numpy.sqrt(sum([ a ** 2 for a in col ]))\n\n print \"PCA explained variance:\"\n print [ (z ** 2 / sumval) for z in s ]\n\n # * if M is considered to be an (observations, features) matrix, the PCs\n # themselves would correspond to the rows of S^(1/2)*V.T. if M is\n # (features, observations) then the PCs would be the columns of\n # U*S^(1/2).\n\n #q_scores = numpy.dot(numpy.sqrt(S), V.T)\n q_scores = numpy.dot(U, numpy.sqrt(S))\n\n pp = PdfPages(cluster.exportPath[0:-4] + '.PCA.pdf')\n if three_dim: # plot a three dimensional graph:\n fig = plt.figure(1)\n ax = fig.add_subplot(111, projection='3d')\n for idx in range(len(colourlist)):\n xs = q_scores[idx,0]\n ys = q_scores[idx,1]\n zs = q_scores[idx,2]\n name = re.search('[FS][LP][0-9]+',names[idx]).group(0)\n ax.scatter(xs, ys, zs, c=colourlist[idx][0], marker='o')\n ax.text(xs, ys, zs, name)\n\n ax.set_xlabel(\"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval))\n ax.set_ylabel(\"PC2 (%.2f%%)\" % (100.0 * (s[1]**2)/sumval))\n ax.set_zlabel(\"PC3 (%.2f%%)\" % (100.0 * (s[2]**2)/sumval))\n\n plt.savefig(pp, format='pdf')\n plt.show()\n else: # plot two 2D graphs instead:\n for idx in range(len(colourlist)):\n fig = plt.figure(1)\n\n sub1 = fig.add_subplot(2,1,1)\n sub1.plot(q_scores[idx,0], q_scores[idx,1], colourlist[idx])\n plt.xlabel( \"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval) )\n plt.ylabel( \"PC2 (%.2f%%)\" % (100.0 * (s[1]**2)/sumval) )\n sub1.annotate( names[idx], xy=(q_scores[idx,0], q_scores[idx,1]),xytext=(-15,10), xycoords='data', textcoords='offset points' )\n\n sub2 = fig.add_subplot(2,1,2)\n sub2.plot(q_scores[idx,0], q_scores[idx,2], colourlist[idx])\n plt.xlabel( \"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval) )\n plt.ylabel( \"PC3 (%.2f%%)\" % (100.0 * (s[2]**2)/sumval) )\n sub2.annotate( names[idx], xy=(q_scores[idx,0],q_scores[idx,2]),xytext=(-15,10), xycoords='data', textcoords='offset points' )\n\n plt.savefig(pp, format='pdf')\n plt.show()\n\n plt.close()\n return matrix_reduced", "def main():\n arguments = docopt(__doc__, version='cluster_parameter_extractor 1.0 BETA')\n\n input_file = arguments['--input']\n output_file = arguments[\"--output\"]\n process_synthetic = arguments[\"--synthetic_peptides\"]\n\n # make sure the input file exists\n if not os.path.isfile(input_file):\n print(\"Error: Cannot find input file '\" + input_file + \"'\")\n sys.exit(1)\n\n # make sure the output file does not exist\n if os.path.isfile(output_file):\n print(\"Error: Output file exists 
'\" + output_file + \"'\")\n sys.exit(1)\n\n with open(output_file, \"w\") as OUT:\n # write the header\n OUT.write(\"id\\tprecursor_mz\\tav_charge\\tsize\\tidentified_spec_count\\tunidentified_spec_count\\t\"\n \"max_ratio\\tmax_il_ratio\\tprecursor_mz_range\\tsequences\\t\"\n \"max_sequence\\tmax_sequence_count\\tmax_sequence_mods\\t\"\n \"second_max_sequence\\tsecond_max_sequence_count\\tsecond_max_sequence_mods\\tn_input_files\\t\"\n \"max_consensus_peak_rel_tic\\tmax_consensus_peak_mz\")\n\n if process_synthetic:\n OUT.write(\"\\tsynth_count\\tsynth_ratio\\tsynth_max_sequence\")\n\n OUT.write(\"\\n\")\n\n # process the file\n parser = clustering_parser.ClusteringParser(input_file)\n\n for cluster in parser:\n cluster_line = process_cluster(cluster)\n OUT.write(cluster_line)\n\n # process synthetic peptides\n if process_synthetic:\n synth_line = process_synthetic_peptides(cluster)\n OUT.write(\"\\t\" + synth_line)\n\n OUT.write(\"\\n\")\n\n print(\"Results written to \" + output_file)", "def run_pca_test(args):\n test_file, outdir = args\n start = time.time()\n n_components = config.pca.n_components\n outfile_path1, outfile_path2 = FileName.get_pca_rst_name()\n run_pca(test_file, rs, n_components, outfile_path1, outfile_path2)\n dt = time.time() - start\n print(\"run_pca_test Done. Elapsed time is %.2f seconds.\" % dt)", "def show(self, options=None):\n\n # # IMPLEMENTATION NOTE: Stub for implementing options:\n # if options and self.InspectOptions.ALL_OUTPUT_LABELS in options:\n # pass\n\n print (\"\\n---------------------------------------------------------\")\n print (\"\\n{0}\".format(self.name))\n\n\n print (\"\\n\\tControl enabled: {0}\".format(self.enable_controller))\n print (\"\\n\\tProcesses:\")\n\n for process in self.processes:\n print (\"\\t\\t{} [learning enabled: {}]\".format(process.name, process._learning_enabled))\n\n\n # Print execution_sets (output of toposort)\n print (\"\\n\\tExecution sets: \".format(self.name))\n # Sort for consistency of output\n execution_sets_sorted = sorted(self.execution_sets)\n for i in range(len(execution_sets_sorted)):\n # for i in range(len(self.execution_sets)):\n print (\"\\t\\tSet {0}:\\n\\t\\t\\t\".format(i),end='')\n print(\"{ \",end='')\n sorted_mechs_names_in_set = sorted(list(mech_tuple.mechanism.name\n for mech_tuple in self.execution_sets[i]))\n for name in sorted_mechs_names_in_set:\n print(\"{0} \".format(name), end='')\n print(\"}\")\n\n # Print executionList sorted by phase and including EVC mechanism\n\n # Sort executionList by phase\n sorted_execution_list = self.executionList.copy()\n\n\n # Sort by phaseSpec and, within each phase, by mechanism name\n sorted_execution_list.sort(key=lambda mech_tuple: mech_tuple.phase)\n\n\n # Add controller to execution list for printing if enabled\n if self.enable_controller:\n sorted_execution_list.append(MechanismTuple(self.controller, None, self.controller.phaseSpec))\n\n\n mech_names_from_exec_list = list(mech_tuple.mechanism.name for mech_tuple in self.executionList)\n mech_names_from_sorted_exec_list = list(mech_tuple.mechanism.name for mech_tuple in sorted_execution_list)\n\n print (\"\\n\\tExecution list: \".format(self.name))\n phase = 0\n print(\"\\t\\tPhase {}:\".format(phase))\n for mech_tuple in sorted_execution_list:\n if mech_tuple.phase != phase:\n phase = mech_tuple.phase\n print(\"\\t\\tPhase {}:\".format(phase))\n print (\"\\t\\t\\t{}\".format(mech_tuple.mechanism.name))\n\n print (\"\\n\\tOrigin mechanisms: \".format(self.name))\n for mech_tuple in 
self.originMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{0} (phase: {1})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n print (\"\\n\\tTerminal mechanisms: \".format(self.name))\n for mech_tuple in self.terminalMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{0} (phase: {1})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n for output_state_name in mech_tuple.mechanism.outputStates:\n print(\"\\t\\t\\t{0}\".format(output_state_name))\n\n # if any(process.learning for process in self.processes):\n if self.learning:\n print (\"\\n\\tTarget mechanisms: \".format(self.name))\n for mech_tuple in self.targetMechanisms.mech_tuples:\n print(\"\\t\\t{0} (phase: {1})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n print (\"\\n---------------------------------------------------------\")", "def pca(adata: AnnData, *args, **kwargs):\n\n scatters(adata, \"pca\", *args, **kwargs)", "def __main__():\r\n\tparser = optparse.OptionParser()\r\n\tparser.add_option(\"-i\", \"--input\", default=None, dest=\"input\",\r\n\t\t\t\t\t help=\"The input bam file\")\r\n\tparser.add_option(\"-c\", \"--chromfile\", default=None, dest=\"chromfile\",\r\n\t\t\t\t\t help=\"The input list of chromosomes \")\r\n\tparser.add_option(\"-d\", \"--directory\", default=None, dest=\"directory\",\r\n\t\t\t\t\t\thelp=\"directory\")\r\n\tparser.add_option(\"-l\", \"--lane\", default=None, dest=\"lane\",\r\n\t\t\t\t\t\thelp=\"Lane\")\t\t\t\t \r\n\tparser.add_option(\"-o\", \"--output\", default=None, dest=\"output\",\r\n\t\t\t\t\t help=\"The output file\")\r\n\t(options, args) = parser.parse_args()\r\n\t\r\n\tif not options.input:\r\n\t\tparser.error(\"Need to specify the input file\")\r\n\tif not options.chromfile:\r\n\t\tparser.error(\"Need to specify the list of chromosomes file\")\r\n\tif not options.output:\r\n\t\tparser.error(\"Need to specify the output file\")\r\n\r\n\twith open(options.output, \"w\") as outfile:\r\n\t\twith open(options.chromfile, \"r\") as handle:\r\n\t\t\tfor line in handle:\r\n\t\t\t\tline = line.rstrip()\r\n\t\t\t\t#p = subprocess.run([\"echo %s\" % line], shell=True, check=True, stdout=subprocess.PIPE)\r\n\t\t\t\tp = subprocess.run([\"samtools depth -r %s %s/%s | sort -n -k 3 | tail -n 1\" % (line, options.directory, options.input)], shell=True, check=True, stdout=subprocess.PIPE)\r\n\t\t\t\toutfile.write(p.stdout.decode('utf-8').rstrip() + \"\\t%s\" % options.input + \"\\t%s\\n\" % options.lane)", "def run_pca(data_file, rs, n_components, outfile1, outfile2):\n print('running PCA with n_components={}'.format(n_components))\n day_batcher = DayBatcher(data_file, skiprow=1, delimiter=' ')\n mat = day_batcher.next_batch()\n rst = []\n while mat is not None:\n if mat.shape[1] == 13:\n # use compact10d\n datadict = {'features': mat[:, 3:],\n 'red': mat[:, 2],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n else:\n # use all_fixed\n datadict = {'features': mat[:, 14:],\n 'red': mat[:, 13],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n batch = scale(datadict['features'])\n pca = PCA(n_components=n_components, random_state=rs)\n pca.fit(batch)\n data_reduced = np.dot(batch, pca.components_.T) # pca transform\n data_original = np.dot(data_reduced, pca.components_) # inverse_transform\n pointloss = np.mean(np.square(batch - data_original), axis=1)\n loss = np.mean(pointloss)\n for d, u, t, l, in zip(datadict['day'].tolist(),\n datadict['user'].tolist(),\n datadict['red'].tolist(),\n pointloss.flatten().tolist()):\n rst.append((u, d, l, t))\n mat = day_batcher.next_batch()\n train_rst, test_rst = 
split_train_test(rst)\n save_rst(train_rst, outfile1)\n save_rst(test_rst, outfile2)\n eval_cr(test_rst, 'pca')", "def get_pca():\n from sklearn.decomposition import PCA\n return PCA()", "def pca(X = Math.array([]), no_dims = 50):\n\n print \"Preprocessing the data using PCA...\"\n (n, d) = X.shape;\n X = X - Math.tile(Math.mean(X, 0), (n, 1));\n (l, M) = Math.linalg.eig(Math.dot(X.T, X));\n Y = Math.dot(X, M[:,0:no_dims]);\n return Y;", "def align_meshes_pca(self, display_opt):\n # convert vtk points to numpy first\n vtk_pts = self.points\n numpy_pts = numpy_support.vtk_to_numpy(vtk_pts.GetData())\n\n # perform pca\n pca = PCA(n_components=3)\n trans_coords = pca.fit_transform(numpy_pts)\n eigenvectors = pca.components_\n eigenvalues = pca.explained_variance_ratio_\n\n # save pca vectors as global variables\n self.pca1 = eigenvectors[0]\n self.pca2 = eigenvectors[1]\n self.pca3 = eigenvectors[2]\n\n if display_opt:\n axes = get_axes_actor([80,80,80], [0,0,0])\n\n trans_act = include_points(trans_coords, trans_coords.shape[0], 4, (0,1,0))\n self.meshActor.GetProperty().SetOpacity(0.6)\n\n ren = vtk.vtkRenderer()\n ren.AddActor(self.meshActor)\n ren.AddActor(trans_act)\n ren.AddActor(axes)\n vtk_show(ren)\n\n # reset the self.attributes with transformed coordinates\n trans_vtk_pts = MakevtkPoints(trans_coords, deep=True)\n self.points = trans_vtk_pts\n self.mesh_poly.SetPoints(trans_vtk_pts)\n\n meshMapper = vtk.vtkPolyDataMapper()\n meshMapper.SetInputData(self.mesh_poly)\n\n self.meshActor = vtk.vtkActor()\n self.meshActor.SetMapper(meshMapper)\n self.meshActor.GetProperty().SetColor(1.0, 0.0, 0.0)", "def print_results(self):\n pass", "def pca(X_train, X_test, n):\n\n print \"Extracting %d principle components from %d features\" % \\\n (n, X_train.shape[1])\n t0 = time()\n pca = RandomizedPCA(n_components=n, whiten=True, random_state=47).fit(X_train)\n print \"done in %0.3fs\" % (time() - t0)\n \n print \"Transforming the input data\"\n t0 = time()\n X_train_pca = pca.transform(X_train)\n X_test_pca = pca.transform(X_test)\n print \"done in %0.3fs\" % (time() - t0)\n\n return X_train_pca, X_test_pca", "def main():\n\n # parse arguments\n args = parseArguments()\n\n # read prisma dataset\n prisma = Prisma()\n prisma.loadData( args.pathname )\n\n # get channel indices closest to central wavelengths of sentinel-2 optical channels\n s2_rgb_wavelengths = [ 492.4, 559.8, 664.6 ] \n indexes = prisma.getVnirChannelIndexes( s2_rgb_wavelengths )\n\n # create 24-bit rgb image \n image = rgb.getImage( [ prisma._vnir[ 'channels' ][ :,:, idx ] for idx in indexes ] )\n rgb.saveImage( image, prisma.getGcps(), 'c:\\\\Users\\\\Chris.Williams\\\\Desktop\\\\test.tif' )\n\n # pc analysis\n vnir_pc = getPca( prisma._vnir[ 'channels' ] )\n swir_pc = getPca( prisma._swir[ 'channels' ] )\n\n\n return", "def PCA_vis(select_PCA_features, player_attributes):\n x = player_attributes.loc[:, select_PCA_features].values\n\n # Standardizing the features\n x = StandardScaler().fit_transform(x)\n\n # perform 3 component PCA\n pca = PCA(n_components=3)\n principalComponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(\n data=principalComponents,\n columns=[\n \"principal component 1\",\n \"principal component 2\",\n \"principal component 3\",\n ],\n )\n\n # plot players dataset projection on three principal components\n # %matplotlib notebook\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1, projection=\"3d\")\n ax.set_title(\"3 component PCA\", fontsize=30)\n\n # plot first k players' info along 
principal components\n k = 4000\n ax.scatter(\n principalDf.loc[:k, \"principal component 1\"],\n principalDf.loc[:k, \"principal component 2\"],\n principalDf.loc[:k, \"principal component 3\"],\n s=1,\n )\n\n ax.set_xlabel(\"Principal Component 1\", fontsize=15)\n ax.set_ylabel(\"Principal Component 2\", fontsize=15)\n ax.set_zlabel(\"Principal Component 3\", fontsize=15)\n plt.show()\n\n return principalDf" ]
[ "0.6608524", "0.63565147", "0.6017152", "0.6001357", "0.58680606", "0.5785082", "0.5773136", "0.5744654", "0.5728827", "0.5686987", "0.5664689", "0.56456596", "0.5635779", "0.5624087", "0.56012213", "0.5596487", "0.5595612", "0.55919385", "0.5590758", "0.55323523", "0.5470266", "0.54688555", "0.5467379", "0.5454734", "0.5454596", "0.53909147", "0.53880286", "0.53755754", "0.53749794", "0.53584045" ]
0.67139107
0
Update annotation and image.
def update_annot(ind): # update text annotation pos = sc.get_offsets()[ind["ind"][0]] annot.xy = pos idxlist = [] for element in PC: idxlist.append(np.allclose(element, pos)) idx = idxlist.index(True) annotation_string = f'{idx + 1}\n' if display_parameter_values: for i, label in enumerate(parameterList): annotation_string += (f'{parameters[i, idx]: 10.2f} ' f'+/- {errors[i, idx]:8.2f} ' f'({label})\n') annot.set_text(annotation_string[:-1]) annot.get_bbox_patch().set_alpha(0.4) # update immage annotation label = mapp.listOfFiles[idx].split(os.sep)[-1].split('.')[0] image = get_image(mapp.pltdir, label) ab.xy = pos ab.offsetbox = OffsetImage(image) ax.add_artist(ab) if show_both_images: additional_image = get_image(additional_fitplot_folder, label) ac.xy = pos + shift_second_image ac.offsetbox = OffsetImage(additional_image) ax.add_artist(ac)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_image(self, image):\n raise NotImplementedError()", "def update_image(self):\n self.image = Image.fromarray(self.img)", "def write_annotation(self, ann_file, img_path, new_img_name):\n if self.type == \"imagenet\":\n label = self.in_annotations[img_path]\n logger.debug(f\"Img {img_path}, imagenet label {label}\")\n ann_file.write(str(label) + \"\\n\")\n elif self.type == \"coco\":\n ann_file.write(\"detection_results {\\n\")\n for obj in self.in_annotations[img_path].keys():\n ann_file.write(\" objects {\\n\")\n ann_file.write(f\" class_id: {self.in_annotations[img_path][obj]['label']}\\n\")\n ann_file.write(\" bounding_box {\\n\")\n ann_file.write(f\" normalized_top: {self.in_annotations[img_path][obj]['normalized_bbox'][0]}\\n\")\n ann_file.write(f\" normalized_bottom: {self.in_annotations[img_path][obj]['normalized_bbox'][1]}\\n\")\n ann_file.write(f\" normalized_left: {self.in_annotations[img_path][obj]['normalized_bbox'][2]}\\n\")\n ann_file.write(f\" normalized_right: {self.in_annotations[img_path][obj]['normalized_bbox'][3]}\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(f' image_name: \"{new_img_name}\"\\n')\n ann_file.write(f' image_id: {int(new_img_name.split(\".\")[0])}\\n')\n ann_file.write(\"}\\n\")", "def update_img(self):\n self.img = np.array(self.image)", "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)", "def update_annot(cls, ind):\n gen = ind + FigureControl.minPossibleGenNumber\n for cplot in gs.cloud_plots:\n fitness = cplot.update_annot(gen)\n\n text = \"{}\".format(gen)\n gs.fitness_plot.floating_annot.xy = (gen, fitness)\n gs.fitness_plot.floating_annot.set_text(text)", "def _render_static_image_annotation(self):\n cv2.rectangle(self._image,\n (0,0), (640, 40),\n (0, 0, 0),\n -1)\n \n cv2.putText(self._image,self._current_mode, (40, 25),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, 255, 2)\n\n cv2.putText(self._image, time.asctime(), (400, 460),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, 255, 2)", "def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))", "def _update_(self,update_background=True):\n # -- Make sure the fundamental update (if any) are made\n super(Image,self)._update_()\n # - Data\n self._update_data_(update_background=update_background)", "def update_image(self, path=None):\n if path:\n self.image_path.current = path\n\n if 
self.image_path.current == self.image_canvas.image_path:\n self.image_canvas.fit_in_view()\n else:\n self.image_canvas.draw_image(self.image_path.current)", "def update_annotation(som: str, save: str, key: tuple):\n som = pu.SOM(som)\n\n chan, height, width, depth, n_height, n_width = som.file_head\n\n with open(f\"{save}\", \"rb\") as of:\n annotations = pickle.load(of)\n\n x, y, z = key\n\n # Note that the depth is ignored here\n neurons = [som.get_neuron(y=y, x=x, channel=c) for c in range(chan)]\n\n callback, results = annotate_neuron(neurons, key)\n\n if callback.next_move == 'next':\n annotations[key] = results\n \n save_annotations_table(annotations, f\"{save}\".replace('pkl','csv'))\n save_annotations_pickle(annotations, f\"{save}\")", "def annotate(self, **annotations):\n _check_annotations(annotations)\n self.annotations.update(annotations)", "def updateAnnot( xdata, ydata, pixels, annot, rawdata, **kwargs):\n\ty, x = pol2cart( ydata/180, xdata, pixels )\n\tannot.xy = ( xdata, ydata )\n\t# Inconsistent wrapping; plot the right variable.\n\tif xdata < 0:\n\t\txdata += 2 * np.pi\n\ttext = 'Az=' + str( round( xdata * 180 / np.pi, 1 ) )+ ', El=' + str( round( np.arccos( ydata/180 ) * 180/np.pi, 1) ) + u'\\xb0' + '\\nInt.=' + '{:.3E}'.format((rawdata[int(y),int(x)]))\n\tannot.set_text( text )\n\tannot.get_bbox_patch().set_alpha( 0.66 )\n\tannot.set_color('black')", "def updateAnnotations(self, annotator=None):\n plt = self.plt\n updated = False\n if annotator is None:\n for annotator in self.annotators.values():\n if annotator.update():\n updated = True\n elif annotator.update(): updated = True\n if updated:\n # This raises a warning with newer matplotlib\n #plt.pause(0.0001)\n plt.draw()", "def annotate(self, annotation):\n self._data = self._data.annotate(**annotation)", "def test_annotate_image(self):\n im_path = test_im_dir / \"square-im-1.png\"\n image = li.Image(im_path)\n\n label = li.Label(\"Test Label\", (0.50, 0.50))\n image.annotate(label)\n annotated_im = image.data\n\n # Ensure the shape is retained\n shape_expected = image.data_original.shape\n shape_test = annotated_im.shape\n self.assertEqual(\n shape_test, shape_expected, msg=\"shape not retained after annotation\"\n )\n\n if PLOT:\n imsave(\"/tmp/annotate_image.png\", annotated_im, check_contrast=False)", "def updateImage(self, frame, image):\n frame.imgtk = image\n frame.configure(image=image)", "def AnnotationSave(self):\n self.active_mode = 'default'\n self.reset_buttons()\n try:\n self.model.AnnotationSave()\n except Exception as ex:\n self.log_error(str(ex))", "def _update(self):\n print(\"Saving prediction json files...\")\n self._dump_json()\n print(\"Saving prediction json files done...\")\n print(\"Saving prediction images...\")\n self._dump_image()\n print(\"Saving prediction images done...\")", "def save_annotated_image(self, file: Path) -> None:\n pass", "def augment(self, image):\n pass", "def update_image(self, cv_img):\n\t\tqt_img = self.ImageEdits(cv_img)\n\t\tself.camera.setPixmap(qt_img)", "def update(self):\n if self.value:\n self.image = self.rect2 \n else:\n self.image = self.rect1", "def adjust(self, image):\n ...", "def annotations(self, annotations):\n self._annotations = annotations", "def modify_image(self, example, target_label):\n raise NotImplementedError()", "def update_image(self):\n modified_since = (\n dict(modified__gt=self.image_updated) if self.image_updated else {}\n )\n if self.image_path and self.image_path != \"__uploaded__\":\n # Does the file need updating?\n images = 
self.files.filter(\n current=True, path=self.image_path, **modified_since\n ).order_by(\"-modified\")\n if len(images) > 0:\n self.set_image_from_file(images[0])\n else:\n # Try to find an image for the project and use the most\n # recently modified since the image was last updated\n images = self.files.filter(\n current=True, mimetype__startswith=\"image/\", **modified_since,\n ).order_by(\"-modified\")\n if len(images) > 0:\n self.set_image_from_file(images[0])", "def update_annot(self, ind, plt_ref, ref_label):\r\n\r\n # Get selected data coordinates\r\n pos = plt_ref._xy[ind[\"ind\"][0]]\r\n\r\n # Shift annotation box left or right depending on which half of the axis the pos x is located and the\r\n # direction of x increasing.\r\n if plt_ref.axes.viewLim.intervalx[0] < plt_ref.axes.viewLim.intervalx[1]:\r\n if pos[0] < (plt_ref.axes.viewLim.intervalx[0] + plt_ref.axes.viewLim.intervalx[1]) / 2:\r\n self.annot._x = -20\r\n else:\r\n self.annot._x = -80\r\n else:\r\n if pos[0] < (plt_ref.axes.viewLim.intervalx[0] + plt_ref.axes.viewLim.intervalx[1]) / 2:\r\n self.annot._x = -80\r\n else:\r\n self.annot._x = -20\r\n\r\n # Shift annotation box up or down depending on which half of the axis the pos y is located and the\r\n # direction of y increasing.\r\n if plt_ref.axes.viewLim.intervaly[0] < plt_ref.axes.viewLim.intervaly[1]:\r\n if pos[1] > (plt_ref.axes.viewLim.intervaly[0] + plt_ref.axes.viewLim.intervaly[1]) / 2:\r\n self.annot._y = -40\r\n else:\r\n self.annot._y = 20\r\n else:\r\n if pos[1] > (plt_ref.axes.viewLim.intervaly[0] + plt_ref.axes.viewLim.intervaly[1]) / 2:\r\n self.annot._y = 20\r\n else:\r\n self.annot._y = -40\r\n\r\n self.annot.xy = pos\r\n\r\n # Format and display text\r\n text = 'x: {:.2f}, {}: {:.2f}'.format(pos[0], ref_label, pos[1])\r\n self.annot.set_text(text)", "def on_image_change(self, value):\n self.current_image.setImage( self._model.image )", "def update_graphics(self, new_image_ref):\n self.image_ref = new_image_ref\n self.init_graphics()" ]
[ "0.7223096", "0.67385095", "0.65887845", "0.6427451", "0.64269096", "0.63743126", "0.63287795", "0.6328405", "0.63069886", "0.63019645", "0.62771714", "0.6265501", "0.6256589", "0.6237346", "0.6206293", "0.61357236", "0.6047715", "0.6038806", "0.60358", "0.6002621", "0.598172", "0.5941385", "0.5914707", "0.59100103", "0.58858114", "0.5880133", "0.5870402", "0.58596057", "0.58562845", "0.5851309" ]
0.7386941
0
Call this method to check if runner is in shutdown mode.
def is_in_shutdown(self): return self._in_shutdown
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutting_down(self):\n return self._shutdown.is_set()", "def check_main_stop(notifier):\n pass", "def initiate_shutdown(self) -> None:", "def shutdown(self):\r\n self._update('shutdown')\r\n self.supervisord.options.mood = SupervisorStates.SHUTDOWN\r\n return True", "def shutdown(self):\n self.shutdown_requested = True", "def _is_running(self, _):\n if self._shutdown_event.is_set():\n raise RequestProcessingError(\n \"Unable to process message - currently shutting down\"\n )", "def shutdown() -> None: # TODO Better place for this code\n # TODO Safe landing\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "def clean_shutdown(self):\n\t\tprint(\"\\nExiting example...\")\n\t\tif self._done:\n\t\t self.set_neutral()\n\t\tif not self._init_state and self._rs.state().enabled:\n\t\t print(\"Disabling robot...\")\n\t\t self._rs.disable()", "def shutdown(self):\n ...", "def is_instance_up(self):\n logging.debug(\"checking if starter instance booted: \" + str(self.basedir))\n if not self.instance.is_running():\n message = \"Starter Instance {0.name} is gone!\".format(self)\n logging.error(message)\n raise Exception(message)\n\n # if the logfile contains up and running we are fine\n lfs = self.get_log_file()\n regx = re.compile(r\"(\\w*) up and running \")\n for line in lfs.splitlines():\n match = regx.search(line)\n if match:\n groups = match.groups()\n if len(groups) == 1 and groups[0] == \"agent\":\n continue\n return True\n\n return False", "def shutdown(self):", "def shutdown(self):\n\n pass", "def is_fully_stopped(self) -> bool:\n\t\treturn self.is_disconnected() and self.recorder.is_stopped() and not self.__flag_auto_restart", "def is_running(self) -> bool:\n return False", "def is_restarting(self) -> bool:\r\n return False", "def check_that_instance_is_alive(self):\n if not self.instance.is_running():\n raise Exception(f\"Starter instance is not running. Base directory: {str(self.basedir)}\")\n if self.instance.status() == psutil.STATUS_ZOMBIE:\n raise Exception(f\"Starter instance is a zombie. Base directory: {str(self.basedir)}\")", "def _shutdown(self):", "def checkWakeup(self):\n # TODO include check for external wakeup sources\n if self.dbus2vdr.checkVDRstatus():\n\n return self.dbus2vdr.Shutdown.ManualStart()\n else:\n return True", "def shutdown(self):\n raise NotImplementedError", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def shutdown(self):\n\n if self.sessionState in (JT808SessionStates.OPEN,):\n self.log.warning(\"Shutdown requested...disconnecting\")\n self.disconnect()\n else:\n self.log.debug(\"Shutdown already in progress\")", "def do_run(self):\n return not self._do_exit.isSet()", "def __check_stop(self):\n if not self.__parent_thread.is_alive():\n global _iom_shutdown\n self.__logger.info(\"Parent thread ended. Stopping IOManager.\")\n _iom_shutdown = True\n self.__running = False\n\n if not self.__wrappers and not self.__disconnected_wrappers and time.time() > self.__empty_time:\n self.__logger.info(\"No IOWrappers registered. Stopping IOManager\")\n self.__running = False\n elif self.__wrappers or self.__disconnected_wrappers:\n self.__empty_time = time.time() + 30", "def on_shutdown(self) -> None:\n pass", "def check_finish(self):\r\n return not self.proc.is_alive()" ]
[ "0.7437572", "0.6536957", "0.6488601", "0.64296126", "0.6410239", "0.6397543", "0.63587606", "0.63571006", "0.63571006", "0.63571006", "0.6332087", "0.6332087", "0.6316774", "0.626796", "0.6259934", "0.62512004", "0.62483555", "0.622814", "0.62151444", "0.62003744", "0.6196279", "0.61941314", "0.61867005", "0.618395", "0.61839056", "0.617731", "0.61687076", "0.6143319", "0.6130395", "0.6121853" ]
0.71672535
1
Submit connection observer to background execution. Returns Future that could be used to await for connection_observer done.
def submit(self, connection_observer): assert connection_observer.life_status.start_time > 0.0 # connection-observer lifetime should already been self._add_connection_observer(connection_observer=connection_observer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sync_connect(self):\n loop = asyncio.get_event_loop()\n task = loop.create_task(self.connect())\n loop.run_until_complete(task)", "def connect(self):\n self.conn.add_listener(self.handle_connection_change)\n self.conn.start_async()", "async def _async_on_connect():\n await self._watchdog.trigger()\n await target()", "def connection_callback(self, connected):\n self._connection_queue.put_nowait(connected)", "def __call__(self, timeout=None, *args, **kwargs):\n if timeout:\n self.timeout = timeout\n started_observer = self.start(timeout, *args, **kwargs)\n if started_observer:\n return started_observer.await_done(*args, **kwargs)\n # TODO: raise ConnectionObserverFailedToStart", "def _add_connection_observer(self, connection_observer):\n with self._connection_observer_lock:\n if connection_observer not in self._connections_observers:\n moler_connection = connection_observer.connection\n moler_connection.subscribe_connection_observer(connection_observer=connection_observer)\n self._connections_observers.append(connection_observer)\n _, msg = RunnerSingleThread._its_remaining_time(\n prefix=\"remaining\", timeout=connection_observer.timeout,\n from_start_time=connection_observer.life_status.start_time\n )\n connection_observer._log(logging.INFO,\n \"{} started, {}\".format(connection_observer.get_long_desc(), msg))\n self._start_command(connection_observer=connection_observer)\n connection_observer.life_status.last_feed_time = time.time()", "async def connect(self):\n self._conn = await self._loop.run_in_executor(\n None, connector.Connector, self._creds\n )", "def await_done(self, timeout=None):\n if self.done():\n return self.result()\n if self._future is None:\n raise ConnectionObserverNotStarted(self)\n self.runner.wait_for(connection_observer=self, connection_observer_future=self._future,\n timeout=timeout)\n\n return self.result()", "async def connect(self):\n connect = asyncio.gather(*[conn.connect_to_server() for name, conn in self._exchange_connections.items()])\n wait_for = asyncio.gather(*[self.on_connection(name) for name, conn in self._exchange_connections.items()])\n await asyncio.gather(connect, wait_for)", "async def async_connect(self) -> None:\n # pylint: disable-next=import-outside-toplevel\n import paho.mqtt.client as mqtt\n\n result: int | None = None\n try:\n result = await self.hass.async_add_executor_job(\n self._mqttc.connect,\n self.conf[CONF_BROKER],\n self.conf.get(CONF_PORT, DEFAULT_PORT),\n self.conf.get(CONF_KEEPALIVE, DEFAULT_KEEPALIVE),\n )\n except OSError as err:\n _LOGGER.error(\"Failed to connect to MQTT server due to exception: %s\", err)\n\n if result is not None and result != 0:\n _LOGGER.error(\n \"Failed to connect to MQTT server: %s\", mqtt.error_string(result)\n )\n\n self._mqttc.loop_start()", "async def notify_connected(self):\n payload = {\n \"uuid\": self.uuid,\n \"addr\": self.addr,\n \"port\": self.port,\n \"rows\": self.rows,\n }\n msg = {\n \"event\": \"connection/connected\",\n \"secret\": WS_SECRET,\n \"payload\": payload,\n }\n\n asyncio.create_task(messages_to_game.put(Message(\"IO\", message=json.dumps(msg, sort_keys=True, indent=4))))", "async def on_connected(self):\n self._connected = True", "async def connect(self):\n await asyncio.gather(self._exchange_connection.connect_to_server(), self.on_connection())", "async def _handler(self):\n reconnect_delay = DEFAULT_RECONNECT_SLEEP\n while True:\n try:\n async with websockets.connect(\n self.websocket_url\n ) as self.websocket, aiohttp.ClientSession() as self.session:\n 
self.connected = True\n self.server.set_session(self.session)\n for att in dir(self):\n att = getattr(self, att)\n if (\n hasattr(att, \"_is_interval_task\")\n and att._is_interval_task\n ):\n self._tasks.append(asyncio.ensure_future(att()))\n done, pending = await asyncio.wait(\n self._tasks, return_when=asyncio.FIRST_COMPLETED\n )\n for task in pending:\n task.cancel()\n for task in done:\n if task.exception():\n raise task.exception()\n except:\n import traceback\n\n traceback.print_exc()\n await self._on_disconnect(self.autoreconnect)\n if not self.autoreconnect:\n logger.info(\n \"An exception has occurred. The bot will \"\n \"go offline. To reconnect automatically start the \"\n \"bot with Client.start(autoreconnect=True).\"\n )\n return\n\n logger.info(\n \"An exception has occurred. The bot will \"\n \"reconnect. To forgo autoreconnect start the bot with \"\n \"Client.start(autoreconnect=False).\"\n )\n\n logger.info(\n \"Sleeping for {}s before reconnecting\".format(reconnect_delay)\n )\n await asyncio.sleep(reconnect_delay)\n reconnect_delay = min(\n MAX_RECONNECT_SLEEP, reconnect_delay * 2\n ) # Bounded exponential backoff", "def _establish_connection(self):\n self.conn = self.listener.accept()", "async def connect(self):\n\n self.socket = await self._session.ws_connect(str(self._url))\n self._create_task(self.__handle_connection())", "def async_on_connect(self, target: Callable[..., Awaitable]) -> None:\n\n async def _async_on_connect():\n \"\"\"Act when connection occurs.\"\"\"\n await self._watchdog.trigger()\n await target()\n\n self._sio.on(\"connect\", _async_on_connect)", "def _connect_async(self):\n self._pgconn = libpq.PQconnectStart(ascii_to_bytes(self.dsn))\n if not self._pgconn:\n raise exceptions.OperationalError('PQconnectStart() failed')\n elif libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:\n raise self._create_exception()\n\n libpq.PQsetNoticeProcessor(\n self._pgconn, self._notice_callback, ffi.NULL)", "async def _start_listening(self) -> None:\n result = await self.async_send_command({\"command\": \"start_listening\"})\n loop = asyncio.get_running_loop()\n\n if self.driver is None:\n self.driver = cast(\n Driver, await loop.run_in_executor(None, Driver, self, result[\"state\"])\n )\n self._logger.info(\n \"Z-Wave JS initialized. 
%s nodes\", len(self.driver.controller.nodes)\n )\n asyncio.create_task(\n gather_callbacks(self._logger, \"on_initialized\", self._on_initialized)\n )\n else:\n self._logger.warning(\n \"Re-connected and don't know how to handle new state yet\"\n )", "def connect_to_ibkr(self):\n\n self.update_console(\"Reporting connection to the server...\")\n print(\"Reporting connection to the server...\")\n result = report_login_to_server(self.settings)\n self.update_console(result)\n connector = Worker(self.ibkrworker.prepare_and_connect)\n connector.signals.result.connect(self.connection_done)\n connector.signals.status.connect(self.update_status)\n connector.signals.notification.connect(self.update_console)\n # Execute\n self.threadpool.start(connector)", "def start(self):\n self._check_closed()\n fut = Future()\n self._watch_q.put_nowait({\"op\": \"start\", \"future\": fut})\n return fut", "async def run(self):\n main_loop = asyncio.get_event_loop()\n # so many threads, name this so it's identifiable\n pfx = 'ThreadPoolExecutor-GPSEventConsumer'\n # NOTE: there should only be one thread pool executor worker\n # from here since this method is only called once from\n # gordon core, so there _should_ be no need to limit\n # workers\n executor = concurrent.futures.ThreadPoolExecutor(thread_name_prefix=pfx)\n coro = main_loop.run_in_executor(executor, self._manage_subs)\n await coro", "async def connect(self):\n pass", "async def wait_connected(self):\n if self.closed:\n raise exceptions.ConnectionClosedError()\n\n await self._connected_event.wait()", "async def connect(self):\n raise NotImplementedError", "async def connect(self) -> None:\n if self.state != STATE_DISCONNECTED:\n raise RuntimeError(\"Connect called while not disconnected\")\n\n self.close_requested = False\n self.state = STATE_CONNECTING\n self.tries = 0\n self._disconnect_event = asyncio.Event()\n\n while not self.close_requested:\n try:\n self._logger.debug(\"Trying to connect\")\n await self._handle_connection()\n except Exception: # pylint: disable=broad-except\n # Safety net. 
This should never hit.\n # Still adding it here to make sure we can always reconnect\n self._logger.exception(\"Unexpected error\")\n\n if self.state == STATE_CONNECTED:\n # change state to connecting/disconnected\n self.state = (\n STATE_DISCONNECTED if self.close_requested else STATE_CONNECTING\n )\n # notify callbacks about disconnection\n if self._on_disconnect:\n asyncio.create_task(\n gather_callbacks(\n self._logger, \"on_disconnect\", self._on_disconnect\n )\n )\n\n if self.close_requested:\n break\n\n self.tries += 1\n\n try:\n await self._wait_retry()\n except asyncio.CancelledError:\n # Happens if disconnect called\n break\n\n self.state = STATE_DISCONNECTED\n self._disconnect_event.set()\n self._disconnect_event = None", "async def _connect(self):\n if not self._reader:\n self._reader = asyncio.create_task(self._read())", "async def _connect(self) -> bool:\n\n try:\n logger.debug(f\"Creating ws connection to {self._url!r}\")\n ws = await asyncio.wait_for(\n websockets.connect(self._url,\n extra_headers=self._cookie_jar.get_cookies_as_headers()),\n self.CONNECT_TIMEOUT\n )\n logger.debug(f\"Established ws connection to {self._url!r}\")\n\n self._ws = ws\n self._awaiting_replies = {}\n logger.debug(\"Starting ping check\")\n self._ping_check = asyncio.create_task(\n self._disconnect_in(self.PING_TIMEOUT))\n\n # Put received cookies into cookie jar\n for set_cookie in ws.response_headers.get_all(\"Set-Cookie\"):\n self._cookie_jar.add_cookie(set_cookie)\n self._cookie_jar.save()\n\n return True\n\n except (websockets.InvalidHandshake, websockets.InvalidStatusCode,\n OSError, asyncio.TimeoutError):\n logger.debug(\"Connection failed\")\n return False", "async def _connect(self):\n pass", "async def async_reconnect(self) -> None:\n await self.async_disconnect()\n await asyncio.sleep(1)\n await self.async_connect()" ]
[ "0.5857671", "0.56647265", "0.56131005", "0.5611528", "0.55837786", "0.5582157", "0.54861045", "0.54837835", "0.5407324", "0.54024726", "0.5363167", "0.5361956", "0.5330528", "0.52729976", "0.52215517", "0.5154078", "0.5144362", "0.5119566", "0.5107343", "0.5095302", "0.5091608", "0.5082747", "0.500566", "0.49733043", "0.4970606", "0.49495685", "0.4938614", "0.49347168", "0.49205", "0.49155855" ]
0.63165396
0
Feeds connection_observer with data to let it become done. This is a place where runner is a glue between words of connection and connectionobserver. Should be called from backgroundprocessing of connection observer. Left only for backward compatibility.
def feed(self, connection_observer): pass # pylint: disable=unnecessary-pass # For backward compatibility only
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _runner_loop(self):\n while not self._stop_loop_runner.is_set():\n with self._connection_observer_lock:\n if self._copy_of_connections_observers != self._connections_observers:\n self._copy_of_connections_observers = copy_list(self._connections_observers, deep_copy=False)\n # ConnectionObserver is feed by registering data_received in moler connection\n self._check_last_feed_connection_observers()\n self._check_timeout_connection_observers()\n self._remove_unnecessary_connection_observers()\n time.sleep(self._tick)", "def run(self):\r\n self.collect_data()", "def run(self):\n\t\tself.data_source.connect()\n\t\twhile self.running:\n\t\t\tself.data_source.read()", "def _handle_connection(self, conn):\n conn.serve_all()", "def _run(self):\n try:\n # Send a connect message\n self.socket.send_json({\"worker_id\": self.socket_id, \"message\": \"connect\"})\n while not self.stop_event.is_set():\n job = self.socket.recv_json()\n if self.debug:\n print(\"Received task %s\" % job)\n value = self._do_work(job)\n self.socket.send_json(\n {\n \"worker_id\": self.socket_id,\n \"message\": \"job_done\",\n \"job\": Job.get_result(job, value),\n }\n )\n except KeyboardInterrupt:\n pass\n except Exception as e:\n print(e)\n finally:\n self._disconnect()", "def submit(self, connection_observer):\n assert connection_observer.life_status.start_time > 0.0 # connection-observer lifetime should already been\n self._add_connection_observer(connection_observer=connection_observer)", "def _add_connection_observer(self, connection_observer):\n with self._connection_observer_lock:\n if connection_observer not in self._connections_observers:\n moler_connection = connection_observer.connection\n moler_connection.subscribe_connection_observer(connection_observer=connection_observer)\n self._connections_observers.append(connection_observer)\n _, msg = RunnerSingleThread._its_remaining_time(\n prefix=\"remaining\", timeout=connection_observer.timeout,\n from_start_time=connection_observer.life_status.start_time\n )\n connection_observer._log(logging.INFO,\n \"{} started, {}\".format(connection_observer.get_long_desc(), msg))\n self._start_command(connection_observer=connection_observer)\n connection_observer.life_status.last_feed_time = time.time()", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def run(self):\n print(\"[CONNEXION_MANAGER] start connecting\")\n while True:\n self.connexion_init()", "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "def connection_callback(self, connected):\n self._connection_queue.put_nowait(connected)", "def connection_handler(self):\n\t\tyield", "def execute(self):\n self.advertise_and_connect()\n self.display_data()\n self.disconnect()", "def doTask(self):\n try:\n # see if we need to connect first before doing any collection\n self.openProxy()\n log.debug(\"Opened proxy to %s [%s]\", self._devId, self._manageIp)\n yield self._collectCallback()\n finally:\n self._finished()", "def connectionMade(self):\n self.factory._r_on_connection_established(self)", "def run(self):\n self.connect()", "def run(self):\n self._connection = self.open_connection()\n self._connection.ioloop.start()", "def _on_connection(self, *_):\n\n if self._user_apc_token is not None:\n # run asynchronous auto configure.\n 
self._controller._thread_loop.run_async(self._autoconfigure_run)", "def run(self):\n patterns = self.conn.dispatch_patterns()\n\n while not self.disconnect.is_set():\n try:\n data = self.conn.get_data() # returns empty string if times out\n if data:\n self.conn.dispatch_data(data, patterns)\n\n command = self.command_queue.get_nowait()\n self.process_command(command)\n except DisconnectedException:\n self.logger.info('Disconnected from server. Reconnecting.')\n self.conn.close()\n self.connect_and_join_channels(self.channels)\n continue\n except Queue.Empty:\n continue", "def connectionMade(self):\n self.output = DelayedStartupLineLogger()\n self.output.makeConnection(self.transport)\n self.output.tag = self.name", "def notify(self, configuration_data):\n for observer in self.observers:\n observer(configuration_data)", "def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)", "def run(self):\n self.update_link_statistics()\n self.send_packet()", "def run_server(self):\n self.establish_connection()\n while True:\n self.receive_data(self.conn)", "def __call__(self, *args, **kwargs):\n for conn in self._connections:\n conn(args, kwargs)", "def collector_process_data(self, data):\n for c in clients:\n c.on_message(json.dumps(data))", "def run(self):\n self.connect()\n self.run_forever()", "def _on_connection_success(self):\n if self.connect_handler:\n self.connect_handler()", "def after_connect(self):\n pass" ]
[ "0.68458927", "0.62021434", "0.61665285", "0.60803646", "0.5961914", "0.5954607", "0.59324074", "0.58269244", "0.5813925", "0.58135957", "0.58135957", "0.580633", "0.5800871", "0.57929605", "0.57856214", "0.5737841", "0.57358813", "0.5700238", "0.5689247", "0.56702626", "0.56653523", "0.5628625", "0.5622197", "0.55779016", "0.5574767", "0.555636", "0.5541933", "0.55394757", "0.5496585", "0.5486171" ]
0.77465856
0
Add connection observer to the runner.
def _add_connection_observer(self, connection_observer): with self._connection_observer_lock: if connection_observer not in self._connections_observers: moler_connection = connection_observer.connection moler_connection.subscribe_connection_observer(connection_observer=connection_observer) self._connections_observers.append(connection_observer) _, msg = RunnerSingleThread._its_remaining_time( prefix="remaining", timeout=connection_observer.timeout, from_start_time=connection_observer.life_status.start_time ) connection_observer._log(logging.INFO, "{} started, {}".format(connection_observer.get_long_desc(), msg)) self._start_command(connection_observer=connection_observer) connection_observer.life_status.last_feed_time = time.time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self):\n self.conn.add_listener(self.handle_connection_change)\n self.conn.start_async()", "def feed(self, connection_observer):\n pass # pylint: disable=unnecessary-pass\n # For backward compatibility only", "def connected(self):\n manager = self.manager()\n self.log().debug(\"Register [%s] callbacks\", self.name())\n\n manager.subscribeServerCallbacks(self, self.cfg().chatimg.servers or manager.SERVERS_ALL)", "def _establish_connection(self):\n self.conn = self.listener.accept()", "def add_observer(self, observer):\n self.observers.append(observer)", "async def on_connect(self):\n pass", "def addObserver(self, observer):\n self.observers.append(observer)", "def attach(observer):\n Bots._observers.add(observer)", "def add_connection(self, connection):\n self.connections.append(connection)", "def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)", "def run(self):\n self.connect()", "def register_observer(self, observer: AbstractObserver):\n self.observers.append(observer)", "def attach(self, observer: Observer) -> None:\n self._observers.add(observer)", "def subscribe(observer):", "def subscribe(observer):", "def listen_connections(self):\n self.MAIN_CONNECTION.listen(server.MAX_CONNECTIONS)", "def addObserver(self, observer):\n self.__observers.append(observer)", "async def on_connect(self) -> None:", "def attach(self, observer: Observer) -> None:\n pass", "def attach(self, observer: Observer) -> None:\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "async def on_connected(self):\n self._connected = True", "def __register_observer(self, observer, compoennt) -> None:" ]
[ "0.6652319", "0.6634749", "0.6357493", "0.6296808", "0.62870336", "0.6249382", "0.62253284", "0.619049", "0.6146856", "0.61381954", "0.61206764", "0.6047478", "0.6041911", "0.60389286", "0.60389286", "0.60245043", "0.60162157", "0.5997827", "0.59874004", "0.59874004", "0.5938499", "0.5938499", "0.5938499", "0.5938499", "0.5938499", "0.5938499", "0.5938499", "0.5938499", "0.5914234", "0.5905996" ]
0.7385649
0
Wait for not started connection observer (command or event) is done.
def _wait_for_not_started_connection_observer_is_done(self, connection_observer): # Have to wait till connection_observer is done with terminaing timeout. eol_remain_time = connection_observer.life_status.terminating_timeout start_time = time.time() while not connection_observer.done() and eol_remain_time > 0.0: time.sleep(self._tick) eol_remain_time = start_time + connection_observer.life_status.terminating_timeout - time.time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _wait_ready(self):\n command = self._recv_from_client()\n while command != \"READY\":\n command = self._client.recv_from_client()", "def wait_for_connection(no_wait):\n\n while not no_wait and not handler.is_client_attached():\n time.sleep(0.1) # spinlock", "async def wait_for_disconnect(self) -> None:\n self.add_to_output('Waiting for disconnect...')\n while await self.ping_http():\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n await asyncio.sleep(1)", "async def wait_connected(self):\n if self.closed:\n raise exceptions.ConnectionClosedError()\n\n await self._connected_event.wait()", "async def wait_until_done(self) -> None:\n ...", "def wait_until_ready(self):\n while not self.is_ready():\n time.sleep(0.01)", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait_for_disconnect_event(self):\n logging.info(\"Cert: Waiting for DISCONNECT_COMPLETE\")\n assertThat(self._hci_event_stream).emits(HciMatchers.DisconnectionComplete())", "def _wait_till_done(self, connection_observer, timeout, check_timeout):\n timeout_add = 0.1\n term_timeout = 0 if connection_observer.life_status.terminating_timeout is None else \\\n connection_observer.life_status.terminating_timeout\n remain_time = timeout - (time.time() - connection_observer.life_status.start_time) + term_timeout + timeout_add\n while remain_time >= 0:\n if connection_observer.done():\n return True\n time.sleep(self._tick)\n if check_timeout:\n timeout = connection_observer.timeout\n term_timeout = 0 if connection_observer.life_status.terminating_timeout is None else \\\n connection_observer.life_status.terminating_timeout\n remain_time = timeout - (time.time() - connection_observer.life_status.start_time) + term_timeout + \\\n timeout_add\n return False", "def wait(self):\n while not self.done:\n self.device._handle_events(1000)", "def _waitConnected(self):\n if not self.isConnected.wait(5.0): # timeout after 5 seconds\n raise Exception(\"Timed out waiting for connection completion\")", "def wait_for_async_data(self) -> None:\n if self.__is_active:\n self.async_read()", "def _runner_loop(self):\n while not self._stop_loop_runner.is_set():\n with self._connection_observer_lock:\n if self._copy_of_connections_observers != self._connections_observers:\n self._copy_of_connections_observers = copy_list(self._connections_observers, deep_copy=False)\n # ConnectionObserver is feed by registering data_received in moler connection\n self._check_last_feed_connection_observers()\n self._check_timeout_connection_observers()\n self._remove_unnecessary_connection_observers()\n time.sleep(self._tick)", "def wait_until_finished(self) -> None:\n if not self._parent_signal_conn:\n raise ValueError(\"Process not started.\")\n if self._async_mode:\n raise RuntimeError(\"wait_until_finished should only be called in sync_mode\")\n while self._parent_signal_conn.poll(timeout=None):\n try:\n result = self._parent_signal_conn.recv()\n except EOFError:\n return\n self._process_message(result)\n if isinstance(result, DagParsingStat):\n # In sync mode (which is the only time we call this function) we don't send this message from\n # the Manager until all the running processors have finished\n return", "def poll(self):\r\n if self.channel.is_available():\r\n self.serve()\r\n return True\r\n else:\r\n return False", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = 
self.isFinished()", "def wait(self):\n pass", "def wait(self):\n pass", "def nanny(self): \n while not self.started and not self.failed:\n eventlet.sleep(.1)\n return not self.failed", "def _check_timeout_connection_observers(self):\n for connection_observer in self._copy_of_connections_observers:\n start_time = connection_observer.life_status.start_time\n current_time = time.time()\n run_duration = current_time - start_time\n timeout = connection_observer.timeout\n if connection_observer.life_status.in_terminating:\n timeout = connection_observer.life_status.terminating_timeout\n if (timeout is not None) and (run_duration >= timeout):\n if connection_observer.life_status.in_terminating:\n msg = \"{} underlying real command failed to finish during {} seconds. It will be forcefully\" \\\n \" terminated\".format(connection_observer, timeout)\n self.logger.info(msg)\n connection_observer.set_end_of_life()\n else:\n self._timeout_observer(connection_observer=connection_observer,\n timeout=connection_observer.timeout, passed_time=run_duration,\n runner_logger=self.logger)\n if connection_observer.life_status.terminating_timeout > 0.0:\n connection_observer.life_status.start_time = time.time()\n connection_observer.life_status.in_terminating = True\n else:\n connection_observer.set_end_of_life()", "def connection_ready(self) -> bool:\n return self._connection_ready", "async def wait_until_ready(self):\n await self._ready.wait()", "async def on_disconnected(self):\n self._connected = False\n self._connectedToBroker = False", "def wait_for_termination(self):\n self.server.wait_for_termination()", "def _start_command(self, connection_observer):\n if connection_observer.is_command():\n connection_observer.send_command()", "def wait_for_termination(self):\n self.server.wait_for_termination()", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")" ]
[ "0.6908525", "0.6747412", "0.6716177", "0.6633412", "0.65618926", "0.6443611", "0.6427281", "0.6427281", "0.6427281", "0.6427281", "0.64223105", "0.6414303", "0.63679636", "0.63404787", "0.62514836", "0.6183887", "0.61603725", "0.61309344", "0.61298865", "0.6106968", "0.6106968", "0.61058694", "0.60893846", "0.606267", "0.60560465", "0.6038718", "0.60336393", "0.60323286", "0.6008437", "0.5995523" ]
0.8095583
0
Call on_inactivity on connection_observer if needed.
def _check_last_feed_connection_observers(self): current_time = time.time() for connection_observer in self._copy_of_connections_observers: life_status = connection_observer.life_status if (life_status.inactivity_timeout > 0.0) and (life_status.last_feed_time is not None): expected_feed_timeout = life_status.last_feed_time + life_status.inactivity_timeout if current_time > expected_feed_timeout: try: connection_observer.on_inactivity() except Exception as ex: self.logger.exception(msg=r'Exception "{}" ("{}") inside: {} when on_inactivity.'.format( ex, repr(ex), connection_observer)) connection_observer.set_exception(exception=ex) finally: connection_observer.life_status.last_feed_time = current_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_enter(self):\n\n Clock.schedule_once(partial(check_connection.is_alive,\n self.ids[\"ico_connection\"]\n )\n )\n self.check_connection = Clock.schedule_interval(partial(check_connection.is_alive,\n self.ids[\"ico_connection\"]\n ),\n 5.0\n )", "def _add_connection_observer(self, connection_observer):\n with self._connection_observer_lock:\n if connection_observer not in self._connections_observers:\n moler_connection = connection_observer.connection\n moler_connection.subscribe_connection_observer(connection_observer=connection_observer)\n self._connections_observers.append(connection_observer)\n _, msg = RunnerSingleThread._its_remaining_time(\n prefix=\"remaining\", timeout=connection_observer.timeout,\n from_start_time=connection_observer.life_status.start_time\n )\n connection_observer._log(logging.INFO,\n \"{} started, {}\".format(connection_observer.get_long_desc(), msg))\n self._start_command(connection_observer=connection_observer)\n connection_observer.life_status.last_feed_time = time.time()", "def feed(self, connection_observer):\n pass # pylint: disable=unnecessary-pass\n # For backward compatibility only", "def on_connection_closed(self):", "async def _inactivity_check_loop(self, after: float):\n while True:\n saved = self._last_activity_time\n await asyncio.sleep(after)\n inactive = self._last_activity_time == saved\n\n if inactive:\n # Do nothing if already disconnected.\n if not self:\n return\n\n self.disconnect()\n self._timeout_callback(self._caddr)\n return", "async def ensure_activity(self, ctx: commands.Context) -> None:\n\n if self.inactivity_timeout is None:\n return\n\n await asyncio.sleep(self.inactivity_timeout)\n\n if (\n ctx.voice_client\n and ctx.voice_client.is_connected()\n and not ctx.voice_client.is_playing()\n ):\n await self.cleanup(ctx.voice_client, ctx.guild)\n\n await self.call_event(\"on_inactivity_disconnect\", ctx)", "def _wait_for_not_started_connection_observer_is_done(self, connection_observer):\n # Have to wait till connection_observer is done with terminaing timeout.\n eol_remain_time = connection_observer.life_status.terminating_timeout\n start_time = time.time()\n while not connection_observer.done() and eol_remain_time > 0.0:\n time.sleep(self._tick)\n eol_remain_time = start_time + connection_observer.life_status.terminating_timeout - time.time()", "async def _inactivity_check_loop(self, after: float):\n while True:\n saved = self._last_activity_time\n await asyncio.sleep(after)\n inactive = self._last_activity_time == saved\n\n if inactive:\n # Do nothing if already disconnected.\n if not self:\n return\n\n self.close()\n return", "async def on_broker_connection_status_changed(self, connected: bool):\n def disconnect():\n asyncio.run(self.on_disconnected())\n self._connectedToBroker = connected\n if hasattr(self, '_status_timer'):\n self._status_timer.cancel()\n self._status_timer = Timer(60, disconnect)\n self._status_timer.start()", "def keepAliveReceived(self):", "def on_connection_open(self, unused_conncetion):\n self.logger.info('connection opened, adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)\n self.open_channel()", "def _check_timeout_connection_observers(self):\n for connection_observer in self._copy_of_connections_observers:\n start_time = connection_observer.life_status.start_time\n current_time = time.time()\n run_duration = current_time - start_time\n timeout = connection_observer.timeout\n if connection_observer.life_status.in_terminating:\n timeout = 
connection_observer.life_status.terminating_timeout\n if (timeout is not None) and (run_duration >= timeout):\n if connection_observer.life_status.in_terminating:\n msg = \"{} underlying real command failed to finish during {} seconds. It will be forcefully\" \\\n \" terminated\".format(connection_observer, timeout)\n self.logger.info(msg)\n connection_observer.set_end_of_life()\n else:\n self._timeout_observer(connection_observer=connection_observer,\n timeout=connection_observer.timeout, passed_time=run_duration,\n runner_logger=self.logger)\n if connection_observer.life_status.terminating_timeout > 0.0:\n connection_observer.life_status.start_time = time.time()\n connection_observer.life_status.in_terminating = True\n else:\n connection_observer.set_end_of_life()", "def on_leave(self):\n\n self.check_connection.cancel()", "def _session_observer(self, event):\n if self.observers:\n if event.state_name == \"CONNECTED_STATE\":\n event = ServiceHashringEvent(ServiceHashringEvent.CONNECTED_EVENT)\n elif event.state_name == \"CONNECTING_STATE\":\n event = ServiceHashringEvent(ServiceHashringEvent.DISCONNECTED_EVENT)\n elif event.state_name == \"EXPIRED_SESSION_STATE\":\n event = ServiceHashringEvent(ServiceHashringEvent.DISCONNECTED_EVENT)\n else:\n self.log.error(\"unhandled zookeeper event: state=%s\" % event.state_name)\n return\n \n for observer in self.observers:\n try:\n observer(self, event)\n except Exception as error:\n self.log.exception(error)", "def or_conn_status_event(self, event):\r\n pass", "def _onConnectionEvent(args):\n ctx = current_context()\n pvname = name(args.chid)\n global _cache\n\n if ctx is None and len(_cache.keys()) > 0:\n ctx = list(_cache.keys())[0]\n if ctx not in _cache:\n _cache[ctx] = {}\n\n # search for PV in any context...\n pv_found = False\n for context in _cache:\n if pvname in _cache[context]:\n pv_found = True\n break\n\n if not pv_found:\n _cache[ctx][pvname] = {'conn':False, 'chid': args.chid,\n 'ts':0, 'failures':0, 'value': None,\n 'callbacks': []}\n\n # set connection time, run connection callbacks\n # in all contexts\n for context, cvals in _cache.items():\n if pvname in cvals:\n entry = cvals[pvname]\n ichid = entry['chid']\n if isinstance(entry['chid'], dbr.chid_t):\n ichid = entry['chid'].value\n\n if int(ichid) == int(args.chid):\n conn = (args.op == dbr.OP_CONN_UP)\n chid = args.chid\n entry.update({'chid': chid, 'conn': conn,\n 'ts': time.time(), 'failures': 0})\n for callback in entry.get('callbacks', []):\n poll()\n if hasattr(callback, '__call__'):\n callback(pvname=pvname, chid=chid, conn=conn)\n return", "def _prepare_for_time_out(self, connection_observer, timeout):\n passed = time.time() - connection_observer.life_status.start_time\n self._timeout_observer(connection_observer=connection_observer,\n timeout=timeout, passed_time=passed,\n runner_logger=self.logger, kind=\"await_done\")", "def _on_connection(self, *_):\n\n if self._user_apc_token is not None:\n # run asynchronous auto configure.\n self._controller._thread_loop.run_async(self._autoconfigure_run)", "def on_connection_open(self, unused_connection):\n logger.info('Connection opened')\n self.add_on_connection_close_callback()\n self.open_channel()", "def connection_callback(self, connected):\n self._connection_queue.put_nowait(connected)", "def on_upstream_connection(self) -> None:\n pass # pragma: no cover", "def submit(self, connection_observer):\n assert connection_observer.life_status.start_time > 0.0 # connection-observer lifetime should already been\n 
self._add_connection_observer(connection_observer=connection_observer)", "def enable_connection_heartbeat():\n conn = self._vc_connection\n status = self.vip.health.get_status()\n conn.vip.health.set_status(\n status['status'], status['context']\n )\n conn.vip.heartbeat.start()", "def reload_when_connected(self, inst, poll_ms=500):\n if inst.isConnected():\n self.reload()\n else:\n self.after(poll_ms, utils.named_partial(self.reload_when_connected, inst, poll_ms))", "def check_for_inactivity(self):\n if self.last_contacted + self.NO_CONTACT_TIMEOUT < time.time():\n import sys\n sys.exit(\"StarNode terminated due to inactivity with other nodes\")", "def client_connected(self, telnet_connection):", "def on_timeout(self):\n pass", "def handle_connect(self):\r\n print \"http_evented::handle_connect\"\r\n self._connection_state = STATE_CONNECTED\r\n super(http_evented, self).handle_connect()\r\n call_if_not_none_and_callable(self._onConnected)", "def onSMPPOperation(self):\n if self.isBound():\n self.activateEnquireLinkTimer()\n\n self.activateInactivityTimer()", "def track_connection(self, connection):\n self._connections_openned.append(connection)" ]
[ "0.64713466", "0.6371828", "0.5915407", "0.5905554", "0.58560777", "0.58488846", "0.58475876", "0.5762941", "0.5762161", "0.5735699", "0.5579951", "0.55780596", "0.55497235", "0.55436623", "0.5540052", "0.55236906", "0.5475399", "0.5463221", "0.54446304", "0.54095566", "0.5394125", "0.53612584", "0.5350636", "0.5337464", "0.5324821", "0.5322257", "0.53095466", "0.5291703", "0.52751607", "0.5256012" ]
0.66244066
0
Check list of ConnectionObservers if any timeout.
def _check_timeout_connection_observers(self):
    for connection_observer in self._copy_of_connections_observers:
        start_time = connection_observer.life_status.start_time
        current_time = time.time()
        run_duration = current_time - start_time
        timeout = connection_observer.timeout
        if connection_observer.life_status.in_terminating:
            timeout = connection_observer.life_status.terminating_timeout
        if (timeout is not None) and (run_duration >= timeout):
            if connection_observer.life_status.in_terminating:
                msg = "{} underlying real command failed to finish during {} seconds. It will be forcefully" \
                      " terminated".format(connection_observer, timeout)
                self.logger.info(msg)
                connection_observer.set_end_of_life()
            else:
                self._timeout_observer(connection_observer=connection_observer,
                                       timeout=connection_observer.timeout, passed_time=run_duration,
                                       runner_logger=self.logger)
                if connection_observer.life_status.terminating_timeout > 0.0:
                    connection_observer.life_status.start_time = time.time()
                    connection_observer.life_status.in_terminating = True
                else:
                    connection_observer.set_end_of_life()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_last_feed_connection_observers(self):\n current_time = time.time()\n for connection_observer in self._copy_of_connections_observers:\n life_status = connection_observer.life_status\n if (life_status.inactivity_timeout > 0.0) and (life_status.last_feed_time is not None):\n expected_feed_timeout = life_status.last_feed_time + life_status.inactivity_timeout\n if current_time > expected_feed_timeout:\n try:\n connection_observer.on_inactivity()\n except Exception as ex:\n self.logger.exception(msg=r'Exception \"{}\" (\"{}\") inside: {} when on_inactivity.'.format(\n ex, repr(ex), connection_observer))\n connection_observer.set_exception(exception=ex)\n finally:\n connection_observer.life_status.last_feed_time = current_time", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def _wait_for_not_started_connection_observer_is_done(self, connection_observer):\n # Have to wait till connection_observer is done with terminaing timeout.\n eol_remain_time = connection_observer.life_status.terminating_timeout\n start_time = time.time()\n while not connection_observer.done() and eol_remain_time > 0.0:\n time.sleep(self._tick)\n eol_remain_time = start_time + connection_observer.life_status.terminating_timeout - time.time()", "def has_connect_timed_out(self):\n if self.debug_mode:\n return False\n\n self.connect_count -= 1\n return self.connect_count == 0", "async def check_timeout(self) -> None:\n async with self.lock:\n # Looping through the keys because we are mutating the dict\n for message_id in copy.copy(list(self.ongoing_events.keys())):\n ongoing_event = self.ongoing_events[message_id]\n if ongoing_event.timed_out():\n await self.end_ongoing_event(message_id)", "def _wait_till_done(self, connection_observer, timeout, check_timeout):\n timeout_add = 0.1\n term_timeout = 0 if connection_observer.life_status.terminating_timeout is None else \\\n connection_observer.life_status.terminating_timeout\n remain_time = timeout - (time.time() - connection_observer.life_status.start_time) + term_timeout + timeout_add\n while remain_time >= 0:\n if connection_observer.done():\n return True\n time.sleep(self._tick)\n if check_timeout:\n timeout = connection_observer.timeout\n term_timeout = 0 if connection_observer.life_status.terminating_timeout is None else \\\n connection_observer.life_status.terminating_timeout\n remain_time = timeout - (time.time() - connection_observer.life_status.start_time) + term_timeout + \\\n timeout_add\n return False", "def _outgoing_check_cb(self, timer):\n\n for peer_id, remote in self._conf.items():\n if peer_id in self._outgoing:\n continue\n\n # A None value means that we're currently attempting a\n # connection.\n self._outgoing[peer_id] = None\n\n remote_host, remote_port = remote\n self._logger.info(\n \"attempting connection to %s:%d\",\n remote_host,\n remote_port,\n )\n\n remote_ip = socket.gethostbyname(remote_host)\n client = pyuv.TCP(self._loop)\n client.connect(\n (remote_ip, remote_port),\n partial(self._outgoing_connect_cb, peer_id),\n )\n\n for peer_id, outgoing in self._outgoing.items():\n if not outgoing:\n continue\n\n if outgoing.remote_addr == self._conf.get(peer_id):\n continue\n\n # if we get here then we have an outgoing connection that\n # doesn't belong\n self._logger.info(\n \"closing unncessary connection to %s\",\n outgoing.remote_addr,\n )\n self._outgoing_read_cb(\n peer_id,\n outgoing.handle,\n None,\n \"force close\",\n )", "def run(self):\n for req, resp in self.servings:\n 
resp.check_timeout()", "async def check_connection_status(self):\n while True:\n if not self.connected:\n self.log.error(\"Lost connection to spa, attempting reconnect.\")\n await self.connect()\n await asyncio.sleep(10)\n continue\n if (self.lastupd + 5 * self.sleep_time) < time.time():\n self.log.error(\"Spa stopped responding, requesting panel config.\")\n await self.send_panel_req(0, 1)\n await asyncio.sleep(self.sleep_time)", "def check_time(self):\n while True:\n for name in self.neighbors:\n if not self.neighbors[name].is_killed:\n if not self.neighbors[name].update_ready and time.time() - self.neighbors[name].send_timer > self.timeout:\n self.neighbors[name].update_ready = True\n if time.time() - self.neighbors[name].kill_timer > 3 * self.timeout:\n self.neighbors[name].is_killed = True", "def _remove_unnecessary_connection_observers(self):\n for connection_observer in self._copy_of_connections_observers:\n if connection_observer.done():\n self._to_remove_connection_observers.append(connection_observer)\n _, msg = RunnerSingleThread._its_remaining_time(\n \"remaining\", timeout=connection_observer.timeout,\n from_start_time=connection_observer.life_status.start_time\n )\n connection_observer._log(logging.INFO,\n \"{} finished, {}\".format(connection_observer.get_short_desc(), msg))\n if self._to_remove_connection_observers:\n with self._connection_observer_lock:\n for connection_observer in self._to_remove_connection_observers:\n try:\n self._connections_observers.remove(connection_observer)\n except ValueError:\n pass\n moler_connection = connection_observer.connection\n moler_connection.unsubscribe_connection_observer(connection_observer=connection_observer)\n self._to_remove_connection_observers = list() # clear() is not available under old Pythons.", "def onCheckTimeOut(self):\r\n\r\n self.pros += 1\r\n self.pb_load.setValue(self.pros * 5)\r\n \r\n # timeout error\r\n if(self.pros == 20):\r\n self.check_timer.stop()\r\n self.onCheckConnectionError()\r\n # connected to server\r\n if(self.pros > 5 and self.check_result == 0): \r\n self.check_timer.stop()\r\n self.checkSession()", "def check_connection(self):\n pass", "def wait_for_servers(self, timeout):\n for user, host, port in self.server_addresses:\n if not self.wait_for_server(user, host, port, timeout):\n logging.warn(\"could not start server %s:%s:%s\", user, host, port)\n return False\n return True", "def check_timeout(self) -> None:\n\n # find all timed out metric instances\n to_delete = [\n labelstr\n for labelstr, instance in self._data.items()\n if instance.is_timed_out\n ]\n\n # remove the metric instances\n for labelstr in to_delete:\n del self._data[labelstr]", "def checkTimeout(self):\n if TIMEOUT <= (datetime.now() - self.clockCheckStop).total_seconds():\n print('Didn\\'t received messages for 1 minute - Program ends')\n exit(0)", "def wait_for_bluetooth_status_connection_all(\n self, timeout=DEFAULT_TIMEOUT, interval=DEFAULT_CMD_INTERVAL):\n ret = False\n self.measurement_timer.start_timer(DEFAULT_BT_STATUS, force=True)\n # All profile not connected by default.\n connected_status = {key: False for key in DEFAULT_BT_STATUS}\n start_time = time.time()\n while time.time() < start_time + timeout:\n try:\n time.sleep(interval)\n status = self.dut.get_bt_status()\n for key in DEFAULT_BT_STATUS:\n if (not connected_status[key] and key in status\n and 'TRUE' == status[key]):\n self.measurement_timer.stop_timer(key)\n connected_status[key] = True\n self.logger.info(\n 'BT status %s connected at %fs.' 
%\n (key, self.measurement_timer.elapsed(key)))\n if False not in connected_status.values():\n ret = True\n break\n except DeviceError as ex:\n self.logger.warning(\n 'Device exception when waiting for reconnection: %s' % ex)\n self.measurement_timer.stop_timer(DEFAULT_BT_STATUS)\n return ret", "def _check_connectionlist(self):\n self.symbol = self.scanner.get_symbol()\n # Repeatedly call _check_connectionline() until END CONNECTIONS\n while (\n not self._is_end(\n self.symbol)) and (\n not self._is_eof(\n self.symbol)):\n self._check_connectionline()\n if self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n return None", "def test_connection_is_established(self):\n for conn in self.connections:\n assert conn.is_connected is True", "def poll(self, timeout: int = 0) -> list:\n return []", "def _check_timeouts(self):\n\n expired_tokens = []\n for token in self._capability_timeouts:\n interval = datetime.utcnow() - self._capability_timeouts[token]\n if interval.total_seconds() >= 10:\n expired_tokens.append(token)\n\n for token in expired_tokens:\n cap_withdraw = mplane.model.Withdrawal(capability=self._capabilities[token])\n self.handle_message(cap_withdraw, self.identity_for(token))", "def wait_disconnected(self, timeout=-1):\n\n with self.connect_cv:\n util.timed_wait(self.connect_cv,\n lambda: True if not self.switch_socket else None,\n timeout=timeout)\n return self.switch_socket is None", "def should_poll(self):\n return self.notifier.socket is not None", "def _wait_for_bluetooth_profile_connection(self, profiles_to_check,\n timeout, interval, timer):\n timer.start_timer(profiles_to_check, force=True)\n start_time = time.time()\n while time.time() - start_time < timeout:\n profiles = self._bluetooth_check_profile_connection()\n for profile in profiles:\n if profiles[profile]:\n timer.stop_timer(profile)\n # now check if the specified profile connected.\n all_connected = True\n for profile in profiles_to_check:\n if not profiles[profile]:\n all_connected = False\n break\n if all_connected:\n return True\n time.sleep(interval)\n # make sure the profile timer are stopped.\n timer.stop_timer(profiles_to_check)\n return False", "def __on_connection_close(self) -> None:\r\n logger.info(\"Checking closed connections.\")\r\n for browser in set(self.browsers.keys()):\r\n if browser._connection.connection is None or not browser._connection.connection.open:\r\n logger.warning(f\"Found closed connection: {browser}\")\r\n asyncio.create_task(\r\n self.replace_browser(browser))", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True" ]
[ "0.720017", "0.60273504", "0.5892644", "0.58705306", "0.5864277", "0.5846206", "0.58225244", "0.58115554", "0.5797517", "0.5743379", "0.57153195", "0.56690484", "0.5665623", "0.5610668", "0.56082106", "0.55857295", "0.5578086", "0.55780697", "0.553308", "0.54411095", "0.54365206", "0.54132736", "0.54059523", "0.54007095", "0.5394116", "0.5384007", "0.5384007", "0.5384007", "0.5384007", "0.5384007" ]
0.78741795
0
Prepare ConnectionObserver (command or event) for timeout.
def _prepare_for_time_out(self, connection_observer, timeout):
    passed = time.time() - connection_observer.life_status.start_time
    self._timeout_observer(connection_observer=connection_observer,
                           timeout=timeout, passed_time=passed,
                           runner_logger=self.logger, kind="await_done")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_timeout_connection_observers(self):\n for connection_observer in self._copy_of_connections_observers:\n start_time = connection_observer.life_status.start_time\n current_time = time.time()\n run_duration = current_time - start_time\n timeout = connection_observer.timeout\n if connection_observer.life_status.in_terminating:\n timeout = connection_observer.life_status.terminating_timeout\n if (timeout is not None) and (run_duration >= timeout):\n if connection_observer.life_status.in_terminating:\n msg = \"{} underlying real command failed to finish during {} seconds. It will be forcefully\" \\\n \" terminated\".format(connection_observer, timeout)\n self.logger.info(msg)\n connection_observer.set_end_of_life()\n else:\n self._timeout_observer(connection_observer=connection_observer,\n timeout=connection_observer.timeout, passed_time=run_duration,\n runner_logger=self.logger)\n if connection_observer.life_status.terminating_timeout > 0.0:\n connection_observer.life_status.start_time = time.time()\n connection_observer.life_status.in_terminating = True\n else:\n connection_observer.set_end_of_life()", "def __call__(self, timeout=None, *args, **kwargs):\n if timeout:\n self.timeout = timeout\n started_observer = self.start(timeout, *args, **kwargs)\n if started_observer:\n return started_observer.await_done(*args, **kwargs)\n # TODO: raise ConnectionObserverFailedToStart", "def _timeout_observer(self, connection_observer, timeout, passed_time, runner_logger, kind=\"background_run\"):\n if not connection_observer.life_status.was_on_timeout_called:\n connection_observer.life_status.was_on_timeout_called = True\n if not connection_observer.done():\n if connection_observer.is_command():\n exception = CommandTimeout(connection_observer=connection_observer,\n timeout=timeout, kind=kind, passed_time=passed_time)\n else:\n exception = ConnectionObserverTimeout(connection_observer=connection_observer,\n timeout=timeout, kind=kind, passed_time=passed_time)\n connection_observer.set_exception(exception)\n connection_observer.on_timeout()\n\n observer_info = \"{}.{}\".format(connection_observer.__class__.__module__, connection_observer)\n timeout_msg = \"has timed out after {:.2f} seconds.\".format(passed_time)\n msg = \"{} {}\".format(observer_info, timeout_msg)\n\n # levels_to_go_up: extract caller info to log where .time_out_observer has been called from\n connection_observer._log(logging.INFO, msg, levels_to_go_up=2)\n log_into_logger(runner_logger, level=logging.DEBUG,\n msg=\"{} {}\".format(connection_observer, timeout_msg),\n levels_to_go_up=1)", "def _add_connection_observer(self, connection_observer):\n with self._connection_observer_lock:\n if connection_observer not in self._connections_observers:\n moler_connection = connection_observer.connection\n moler_connection.subscribe_connection_observer(connection_observer=connection_observer)\n self._connections_observers.append(connection_observer)\n _, msg = RunnerSingleThread._its_remaining_time(\n prefix=\"remaining\", timeout=connection_observer.timeout,\n from_start_time=connection_observer.life_status.start_time\n )\n connection_observer._log(logging.INFO,\n \"{} started, {}\".format(connection_observer.get_long_desc(), msg))\n self._start_command(connection_observer=connection_observer)\n connection_observer.life_status.last_feed_time = time.time()", "def on_timeout(self):\n pass", "def _wait_for_not_started_connection_observer_is_done(self, connection_observer):\n # Have to wait till connection_observer is done with terminaing 
timeout.\n eol_remain_time = connection_observer.life_status.terminating_timeout\n start_time = time.time()\n while not connection_observer.done() and eol_remain_time > 0.0:\n time.sleep(self._tick)\n eol_remain_time = start_time + connection_observer.life_status.terminating_timeout - time.time()", "def __init__(self, timeout=None):\n self._waiting_messages = {}\n self._waiting_for_connection = []\n self._current_protocol = None\n self._event_callbacks = {}\n if timeout:\n self._timeout = timeout\n else:\n self._timeout = default_timeout", "def Prepare(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def _start_command(self, connection_observer):\n if connection_observer.is_command():\n connection_observer.send_command()", "def __init__(self, command: TelnetCommand, connection: TelnetConnection):\n self._attempts = 0\n self._command = command\n self._command_timeout = const.DEFAULT_TELNET_TIMEOUT\n self._connection = connection\n self._event = asyncio.Event()\n self._expire_task = None # type: asyncio.Task\n self._qos_task = None # type: asyncio.Task\n self._response = None\n self._time_sent = None", "def __init__(self, timeout=5):\n self.sock = None\n self.timeout = timeout", "def connect(self, timeout=-1):\n pass", "def prepare_timeout(self):\n shared_utils.print_and_log(\n logging.INFO,\n '{} timed out before sending.'.format(self.id)\n )\n self.timed_out = True\n return self._get_episode_done_msg(TIMEOUT_MESSAGE)", "def wait_for_connection(timeout):\n global connected \n\n total_time = 0\n while not connected and total_time < timeout:\n time.sleep(1)\n total_time += 1\n\n if not connected:\n raise RuntimeError('Could not connect to MQTT bridge.')", "def __init__(self, timeout_time):\n self.timeout_time = timeout_time", "def __init__(self, timeout=120):\n self.m_timeout = timeout", "def set_timeout(self, timeout):\n if self._timeout != timeout:\n self._timeout = timeout\n if self._zerorpc:\n self.close()\n self.connect()", "def _connect_ping_listener(connection, branch):\n if branch:\n return\n\n save_should_close_with_result = connection.should_close_with_result\n connection.should_close_with_result = False\n try:\n connection.scalar(select([1]))\n except Exception as ex:\n connection.scalar(select([1]))\n finally:\n connection.should_close_with_result = save_should_close_with_result", "def __init__(self, timeout=129600):\n self.timeout = timeout", "def _wait_conn(cls, gen: PQGenConn[RV], timeout: Optional[int]) -> RV:\n return waiting.wait_conn(gen, timeout=timeout)", "def __init__(self, timeout=0.1):\n self.poller = select.epoll()\n self.timeout = timeout", "def __init__(self, timeout=0.1):\n self.poller = select.epoll()\n self.timeout = timeout", "def test_polling_plugin_timeout(self):\n pass", "def set_timeout(self, timeout):\n self.timeout = timeout", "def timeoutConnection(self):\n self.transport.stopProducing()", "def setup_pipe_timer(self):\n self.data_timer = QtCore.QTimer()\n self.data_timer.timeout.connect(self.update_visuals)\n self.data_timer.start(0)", "def assert_timeout(self) -> None:", "def __init__(self, connection=None, runner=None):\n super(Command, self).__init__(connection=connection, runner=runner)\n self.command_string = None\n self.cmd_name = Command.observer_name", "def _on_timeout(self, info: str = None) -> None:\n self._timeout = None\n error_message = \"Timeout {0}\".format(info) if info else \"Timeout\"\n if self.final_callback is not None:\n print('raise')\n\n # self._handle_exception(\n # 
HTTPTimeoutError, HTTPTimeoutError(error_message), None\n # )", "def __init__( self, timeout = 60.0 ):\n\n self.timeout = timeout\n self.alive = None" ]
[ "0.62128496", "0.611689", "0.60697615", "0.5706707", "0.54417396", "0.54395616", "0.53547174", "0.533949", "0.5256496", "0.5228185", "0.52027476", "0.51956594", "0.51771903", "0.51439977", "0.5131876", "0.51315176", "0.51151115", "0.5111697", "0.5094155", "0.50818205", "0.50817597", "0.50817597", "0.503451", "0.5018024", "0.49945575", "0.49888662", "0.49719512", "0.4958228", "0.49544522", "0.49537554" ]
0.70860237
0
Remove unnecessary ConnectionObservers from list to proceed.
def _remove_unnecessary_connection_observers(self):
    for connection_observer in self._copy_of_connections_observers:
        if connection_observer.done():
            self._to_remove_connection_observers.append(connection_observer)
            _, msg = RunnerSingleThread._its_remaining_time(
                "remaining", timeout=connection_observer.timeout,
                from_start_time=connection_observer.life_status.start_time
            )
            connection_observer._log(logging.INFO,
                                     "{} finished, {}".format(connection_observer.get_short_desc(), msg))
    if self._to_remove_connection_observers:
        with self._connection_observer_lock:
            for connection_observer in self._to_remove_connection_observers:
                try:
                    self._connections_observers.remove(connection_observer)
                except ValueError:
                    pass
                moler_connection = connection_observer.connection
                moler_connection.unsubscribe_connection_observer(connection_observer=connection_observer)
            self._to_remove_connection_observers = list()  # clear() is not available under old Pythons.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanup(self):\n self.removeObservers()", "def cleanup(self):\n self.removeObservers()", "def cleanup(self):\n\t\tself.removeObservers()\n\t\tself.active = False", "def cleanup(self):\r\n #self.removeObservers()\r\n pass", "def _cleanup(self, server: Any) -> None: # noqa: F821\n for subs in server.subscribers.values():\n subs.discard(self)\n for subs in server.psubscribers.values():\n subs.discard(self)\n self._clear_watches()", "def removeObservers(self):\r\n # productive #framework\r\n profprint()\r\n for observee, tag in self.styleObserverTags:\r\n observee.RemoveObserver(tag)\r\n self.styleObserverTags = []\r\n self.sliceWidgetsPerStyle = {}", "def removeObservers(self):\n #productive #framework\n profprint()\n for observee,tag in self.styleObserverTags:\n observee.RemoveObserver(tag)\n self.styleObserverTags = []\n self.sliceWidgetsPerStyle = {}", "def cleanIdleConnections(self):\n\n for cid in self.calls.keys():\n if self.calls[cid]['state'] == 'IDLE':\n del self.calls[cid]\n elif self.calls[cid]['state'] == 'IDLE-CALL-STOPPED':\n core.dissector_dict.dissd.unloadDissectorsInstances(['rtp'])\n del self.calls[cid]", "def disconnect_all(self):\n all_conns = chain([_x[0] for _x in self._available_connections], self._in_use_connections)\n for connection in all_conns:\n try:\n connection.disconnect()\n except Exception as err:\n self.class_logger.warning(\"Error occurred while disconnecting connection: %s\" % (err, ))\n self._available_connections = []\n self._in_use_connections = set()", "def _cleanUp(self):\r\n limit = datetime.now() - timedelta(seconds=self._timeout)\r\n\r\n toClean = [msg for msg in self._incompleteMsgs if msg.older(limit)]\r\n\r\n if toClean:\r\n for msg in toClean:\r\n self._incompleteMsgs.remove(msg)\r\n\r\n log.msg('{0} incomplete messages have been dropped '\r\n 'from assembler.'.format(len(toClean)))\r\n\r\n toClean = [uri for uri, (_, timestamp) in self._binaries.iteritems()\r\n if timestamp < limit]\r\n\r\n if toClean:\r\n for uri in toClean:\r\n del self._binaries[uri]\r\n\r\n log.msg('{0} unused binaries have been dropped '\r\n 'from assembler.'.format(len(toClean)))", "def killconnections(self):\n for conn in self._connections:\n try:conn.close()\n except:pass\n self._connections=[]", "def remove_connections(self):\n while self.qsize() > 0:\n try:\n cnx = self.get(block=True, timeout=self.queue_timeout)\n except queue.Empty:\n pass\n else:\n try:\n cnx.close_connection()\n except (RuntimeError, OSError, InterfaceError):\n pass\n finally:\n self.remove_connection(cnx)", "def _clean(self):\n\t\tfor hid in self.handlers_id:\n\t\t\tself.obj.handler_disconnect(hid)", "def delConns(self):\r\n for line in self.connLines:\r\n self.canvasCirkt.delete(line)\r\n self.canvasCirkt.update()", "def clean_up(self, observable):\n pass", "def __remove_01_connected_juncs(self) -> int:\n total_removed = 0\n while True:\n removed = 0\n for junc in self.get_all_juncs():\n if junc.connections_count() <= 1:\n self.remove_junction(junc)\n removed += 1\n if removed == 0:\n break\n total_removed += removed\n return total_removed", "def _disconnect_clients(self):\n # This construction is needed to avoid mutating the dictionary\n # while iterating through it.\n while len(self._clients):\n caddr = next(iter(self._clients))\n self.disconnect_client(caddr)", "def unsubscribe(observer):", "def unsubscribe(observer):", "def disconnect(self):\n for attr in self._filter():\n sourceAttr = attr.listConnections(source=True, plugs=True)\n sourceAttr = (sourceAttr or [None])[0]\n\n if 
sourceAttr and sourceAttr.isSource():\n sourceAttr // attr\n else:\n message = '%s has no incoming connections.' % attr.name()\n pm.warning(message)", "def disconnect(self):\n for connection in six.itervalues(self):\n connection.disconnect()", "def clear_hooks(self):\n self._conn_hooks = []", "def _unbind_observers(self):\n workbench = self.workbench\n point = workbench.get_extension_point(CLOSING_POINT)\n point.unobserve('extensions', self._on_closing_checks_updated)", "def collect_garbage(self) -> None:\n pass", "def _clear_hasevents_refs(self, ob):\n for connection in self._connections:\n for i in reversed(range(len(connection.objects))):\n if connection.objects[i][0] is ob:\n connection.objects.pop(i)\n \n # Do not clear pending events. This handler is assumed to continue\n # working, and should thus handle its pending events at some point,\n # at which point it cannot hold any references to ob anymore.", "def disconnect(self):\n self.costs = set()\n self.disconnectedHouses = []\n self.nthChoiceHouses = []\n self.allConnected = True\n for house in self.houses:\n house.connection = set()\n house.distance = 1000\n house.possible_connections = []\n for battery in self.batteries:\n battery.connectedHouses = []\n battery.capacity = battery.maxCapacity\n battery.totalDistance = set()", "def clearConnections( self, cls ):\n count = 0\n for connection in self.connections(cls):\n connection.remove()\n count += 1\n return count", "def delete_all_exchanges(self) -> None:\n self.connected_exchanges.clear()", "def clean(self: AutoScaler) -> None:\n marked = []\n for i, client in enumerate(self.clients):\n log.trace(f'Autoscale clean ({i+1}: {client.pid})')\n status = client.poll()\n if status is not None:\n marked.append(i)\n if status != 0:\n log.warning(f'Autoscale client ({i+1}) exited with status {status}')\n else:\n log.debug(f'Autoscale client disconnected ({client.pid})')\n self.clients = [client for i, client in enumerate(self.clients) if i not in marked]", "def _clearCallbacks(self):\n self._stateCallbackList = []" ]
[ "0.73731667", "0.7266392", "0.7062582", "0.7014867", "0.6624878", "0.6520376", "0.6510573", "0.6391279", "0.6221481", "0.61856866", "0.616328", "0.6141174", "0.61400443", "0.6113766", "0.61032987", "0.6094096", "0.60722625", "0.6015473", "0.6015473", "0.60107964", "0.60098463", "0.5992452", "0.5965933", "0.5946435", "0.5940792", "0.5924947", "0.5903286", "0.58666015", "0.5833743", "0.5823011" ]
0.84189975
0
Start command if connection_observer is an instance of a command. If an instance of event then do nothing.
def _start_command(self, connection_observer):
    if connection_observer.is_command():
        connection_observer.send_command()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_command(self, command):\r\n if self.visprotocol is not None:\r\n _LOGGER.info(\"client process_command called {0} type is {1}\".format(command, type(self.visprotocol))) \r\n self.visprotocol.process_command(command)\r\n else:\r\n _LOGGER.error(\"[VisonicClient] The pyvisonic command is None\")", "async def event(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n await self.bot.say('No, {0.subcommand_passed} is not cool'.format(ctx))", "def process_command(self, command):\r\n if self.visprotocol is not None:\r\n _LOGGER.debug(\"client process_command called %s type is %s\", command, type(self.visprotocol))\r\n self.visprotocol.process_command(command)\r\n else:\r\n _LOGGER.warning(\"[VisonicClient] The pyvisonic command is None\")", "def received_command(self, cmd: Data, source: tuple, destination: tuple) -> bool:\n raise NotImplemented", "def __init__(self, connection=None, runner=None):\n super(Command, self).__init__(connection=connection, runner=runner)\n self.command_string = None\n self.cmd_name = Command.observer_name", "def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and self.command is not None", "def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False", "def _transform_command(self) -> None:\n self.command = None if self.command == [] else self.command", "def accept_command():\n # TODO", "def send_command_without_response(self, command):\r\n if not self.is_dummy:\r\n self.socket.sendto(command.encode('utf-8'), self.tello_address)", "def test_command():\n\n dispatcher = ntelebot.dispatch.Dispatcher()\n dispatcher.add_command('command', lambda ctx: 'DISPATCHED')\n ctx = MockContext()\n ctx.type = 'message'\n ctx.command = None\n assert dispatcher(ctx) is False\n ctx.command = 'command'\n assert dispatcher(ctx) == 'DISPATCHED'\n ctx.type = 'callback_query'\n assert dispatcher(ctx) == 'DISPATCHED'\n ctx.type = 'inline_query'\n assert dispatcher(ctx) is False", "def run_command(self, command, joy_state):\n cmd = self.command_list[command]\n if cmd['type'] == 'topic':\n self.run_topic(command, joy_state)\n elif cmd['type'] == 'action':\n if cmd['action_name'] in self.offline_actions:\n self.get_logger().error('command {} was not played because the action '\n 'server was unavailable. Trying to reconnect...'\n .format(cmd['action_name']))\n self.register_action(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_action(command, joy_state)\n elif cmd['type'] == 'service':\n if cmd['service_name'] in self.offline_services:\n self.get_logger().error('command {} was not played because the service '\n 'server was unavailable. 
Trying to reconnect...'\n .format(cmd['service_name']))\n self.register_service(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_service(command, joy_state)\n else:\n raise JoyTeleopException(\n 'command {} is neither a topic publisher nor an action or service client'\n .format(command))", "def _msg_is_command(self, msg):\n return isinstance(msg, dict)", "def disable_cmd_ended_cb(self, event):\n this_server = TangoServerHelper.get_instance()\n if event.err:\n log_msg = (\n f\"{const.ERR_INVOKING_CMD}{event.cmd_name}\\n{event.errors}\"\n )\n self.logger.error(log_msg)\n this_server.write_attr(\"activityMessage\", log_msg, False)\n else:\n log_msg = f\"{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}\"\n self.logger.info(log_msg)\n this_server.write_attr(\"activityMessage\", log_msg, False)", "def _cmd_don(self, command):\r\n self.l_debug(\"_cmd_don\",\"\")\r\n # TODO: If no PowerOn command, do PowerToggle\r\n return self._send_command('PowerOn')", "def _process_command(self, command, opts):\n command_type = postproc.get_structure_type(command)\n if not opts.Ignore_motion and command_type == MOTION_COMMAND:\n return _process_motion_command(command, opts)\n elif not opts.Ignore_IOs and command_type == IO_COMMAND:\n return _process_io_command(command, opts)", "def __no_command(*args):\n pass", "def commandInIgnoredPlugin(self, command, ignoredPlugins):\n if (isinstance(command, (BukkitCommand, )) or isinstance(command, (VanillaCommand, ))) and ignoredPlugins.contains(\"Bukkit\"):\n return True\n if isinstance(command, (PluginIdentifiableCommand, )) and ignoredPlugins.contains((command).getPlugin().__name__):\n return True\n return False", "def command_callback(self, command):\n while not self.socket_available: # wait for socket to be available\n pass\n self.socket_available = False # block socket from being used in other processes\n if self.robot.is_in_error():\n self.robot.ResetError()\n self.robot.ResumeMotion()\n reply = self.robot.exchange_msg(command.data, decode=False)\n self.socket_available = True # Release socket so other processes can use it\n if reply is not None:\n self.reply_publisher.publish(reply)", "def _command(self, *cmd, handler=None):", "def on_command(self, game) -> None:\n pass", "async def cool(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('No, {0.subcommand_passed} is not cool'.format(ctx))", "def command(self):\n if self.model is self.model_action:\n return self.command_action\n else:\n return self.command_candidate", "def test_clear_dispatched_events_no_connector(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper(connector_name='fooconn')\n msg = msg_helper.make_ack()\n self._add_to_dispatched(\n worker_helper.broker, 'fooconn.event', msg)\n self.assertNotEqual(\n worker_helper.broker.dispatched['vumi']['fooconn.event'], [])\n worker_helper.clear_dispatched_events()\n self.assertEqual(\n worker_helper.broker.dispatched['vumi']['fooconn.event'], [])", "def on_command(server, user, command, args):", "def is_command(schema_obj):\n\n return isinstance(schema_obj, schema.Command)", "def comandar_drone(self, event):\n # O comando definido anteriormente é enviado periodicamente se o drone estiver voando\n if self.status == StatusDrone.Voando or self.status == StatusDrone.ModoFoco or self.status == StatusDrone.Girando:\n self.pub_comandar.publish(self.comando)", "def untrackCmd(self):\n if self._cmdToTrack:\n self._cmdToTrack.addCallback(self._cmdCallback)\n self._cmdToTrack = 
None", "def teleop_cmd_callback(self, msg):\n\n # If boolean command is true\n if msg.data:\n # Get next state if action is executed\n for i in range(len(self.curr_ts_state.state_dimension_names)):\n # Find state in the TS state\n if self.curr_ts_state.state_dimension_names[i] == self.state_dimension_name:\n # Check if trap using potential state if action would to be executed\n if not self.check_for_trap(self.action_to_state[self.curr_ts_state.states[i]][self.monitored_action]):\n # If not a trap publish command\n self.mix_cmd_pub.publish(msg)\n\n # If TS state doesn't contain proper dimension\n return None", "def _handle_commands(self, event, session):\n message = event['body']\n\n for regex, func, help in self._COMMANDS:\n match = regex.match(message)\n if match is not None:\n func(self, event, session=session, **match.groupdict())\n return True\n\n return False" ]
[ "0.5808302", "0.5749298", "0.5739811", "0.572681", "0.54549485", "0.53268474", "0.5285533", "0.5209355", "0.51952237", "0.5170219", "0.5147668", "0.5114667", "0.51124805", "0.50656945", "0.50455505", "0.5018172", "0.5008057", "0.49998525", "0.49858227", "0.49823084", "0.4965173", "0.49518302", "0.49384463", "0.49051955", "0.49027556", "0.49015895", "0.48967117", "0.48908734", "0.4848382", "0.48362648" ]
0.683487
0
method to set queryset for retrieving objects for user's company only.
def get_queryset(self):
    qs = super().get_queryset()
    qs.filter(company=self.request.user.company)
    return qs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n queryset = Company.objects.all().order_by('name')\n\n if self.request.GET.get('supplier', None):\n queryset = queryset.filter(is_supplier=True)\n\n if self.request.GET.get('customer', None):\n queryset = queryset.filter(is_customer=True)\n\n return queryset", "def filter_queryset(self, request, queryset, view):\n if view.action == \"retrieve\" and request.method == \"GET\":\n return queryset.model.objects.all()\n\n filtered_queryset = super().filter_queryset(request, queryset, view)\n org_users = set(\n [group.team.organization for group in request.user.groups.all()] +\n [o.user for o in filtered_queryset]\n )\n\n return queryset.model.objects.filter(user__in=org_users, user__is_active=True)", "def filter_queryset(self, queryset):\n user = self.request.user\n if user.is_superuser:\n return super().filter_queryset(queryset)\n return queryset.filter(collaborators=user)", "def get_queryset(self, *args, **kwargs):\n qs = super().get_queryset(*args, **kwargs).filter(user=self.request.user)\n return qs", "def get_queryset(self):\n queryset = super(BaseViewSet, self).get_queryset()\n user = self.request.user\n return queryset.filter(user=user)", "def get_queryset(self):\n return super().get_queryset().filter(user=self.request.user)", "def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.available_for_user(self.request.user)", "def get_queryset(self):\n user = self.request.user\n return Contact.objects.filter(owner=user)", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.filter(user=request.user)", "def get_queryset(self):\n\n qs = super().get_queryset() # get company specific queryset\n\n filters = dict(self.request.GET.lists()) # dictionary of lists\n\n # pull out order_by and order\n order_by = filters.pop(\"order_by\", None)\n order = filters.pop(\"order\", None)\n\n # Ordering by JSON field taken from\n # https://stackoverflow.com/questions/36641759/django-1-9-jsonfield-order-by\n # Jan 2, 2018\n\n if order_by:\n if order:\n pass\n # TODO: Figure out what can be done for ordering...\n\n else:\n qs = qs.order_by(\"-id\") # default to descending id order\n\n for exp_filter in filters:\n try:\n qs = self.FILTERS[exp_filter](qs, filters[exp_filter])\n except KeyError:\n pass\n # do nothing if not a filter\n\n return qs", "def get_queryset(self):\n return get_user_model().objects.none()", "def get_queryset(self):\n\n user = self.request.user\n\n if user.role == 'LA':\n return PropertyEnquiry.objects.all()\n\n # check if the user is a client admin\n # and return all enquiries made on his/her property\n if user.role == 'CA':\n return PropertyEnquiry.active_objects.for_client(\n client=user.employer.first())\n\n # else if the user is a buyer return only\n # the records that are associated with him/her\n return PropertyEnquiry.active_objects.for_user(user=user)", "def get_list_filter(self, request):\n if request.user.is_superuser:\n return self.list_filter\n return self.list_filter_companies", "def get_queryset(self):\n\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n # check if the user is a landville admin and return all records\n # even soft deleted ones\n 
return PropertyEnquiry.objects.all()\n\n if user.is_authenticated and user.role == 'CA':\n # if the user is a client admin, return only his records\n employer = user.employer.first()\n return PropertyEnquiry.active_objects.for_client(client=employer)\n\n # if the user is a buyer, return also only his enquiries\n return PropertyEnquiry.active_objects.for_user(user=user)", "def get_queryset(self):\n target_author = get_object_or_404(CustomUser, username=self.kwargs.get('username', None))\n if self.request.user == target_author:\n return Taxonomy.objects.filter(author=target_author)\n else:\n return Taxonomy.objects.filter(author=target_author).filter(public=True)", "def get_queryset(self):\n qs = Job.objects.filter(user=self.request.user)\n return qs", "def get_queryset(self):\n # queryset = Article.objects.all()\n user_id = self.kwargs['user_id']\n if user_id is not None:\n queryset = User.objects.filter(user_id=int(user_id))\n return queryset", "def queryset(self, request: HttpRequest, queryset: QuerySet) -> QuerySet:\n return {\n 'superuser': queryset.filter(is_superuser=True),\n 'staff': queryset.filter(is_staff=True),\n 'scanlator': queryset.filter(groups__name='Scanlator'),\n 'regular': queryset.exclude(is_staff=True)\n }.get(self.value() or '', queryset)", "def filter_queryset(self, request, queryset, view):\n if str(request.query_params.get(\"orgs\")).lower() == \"false\":\n organization_user_ids = OrganizationProfile.objects.values_list(\n \"user__id\", flat=True\n )\n queryset = queryset.exclude(id__in=organization_user_ids)\n\n return queryset", "def get_queryset(self, request):\n user = request.user\n if is_superuser_or_manager(user):\n return super(ClientAdminConfig, self).get_queryset(request)\n\n return Client.objects.filter(\n Q(main_sales_contact=user)\n | Q(contracts__sales_contact=user)\n | Q(contracts__event__support_contact=user)\n ).distinct()", "def get_queryset(self):\n if self.request.user.is_staff:\n return Job.objects.select_related()\n else:\n raise Http404()", "def get_queryset(self):\n queryset = Project.objects.filter(contributor__user=self.request.user.pk)\n return queryset", "def get_queryset(self):\n return AutomaticEmail.objects.filter(staff_user=self.request.user)", "def queryset(self):\n User = get_user_model()\n return User.objects", "def filter_queryset(self, request, queryset, view):\n owner = request.query_params.get(\"owner\")\n\n if owner:\n kwargs = {self.owner_prefix + \"__username__iexact\": owner}\n\n return queryset.filter(**kwargs)\n\n return queryset", "def get_queryset(self):\n if self.queryset is not None:\n queryset = self.queryset\n if hasattr(queryset, '_clone'):\n queryset = queryset._clone()\n elif self.model is not None:\n queryset = self.model._default_manager.all()\n else:\n raise ImproperlyConfigured(\"'%s' must define 'queryset' or 'model'\"\n % self.__class__.__name__)\n return queryset" ]
[ "0.74833214", "0.74833214", "0.74833214", "0.74833214", "0.7096127", "0.6695653", "0.6640594", "0.6633222", "0.6544434", "0.6500737", "0.6420627", "0.6395806", "0.6321692", "0.62992424", "0.62633705", "0.61954886", "0.6175595", "0.6160776", "0.6148539", "0.6130602", "0.6111685", "0.60935384", "0.6078098", "0.60733074", "0.6068962", "0.60685885", "0.6062923", "0.6058406", "0.6054895", "0.60505766" ]
0.83381206
0
Gets a queryset with specified filters from request.GET overrides django.views.generic.list.MultipleObjectMixin.get_queryset
def get_queryset(self):

    qs = super().get_queryset()  # get company specific queryset

    filters = dict(self.request.GET.lists())  # dictionary of lists

    # pull out order_by and order
    order_by = filters.pop("order_by", None)
    order = filters.pop("order", None)

    # Ordering by JSON field taken from
    # https://stackoverflow.com/questions/36641759/django-1-9-jsonfield-order-by
    # Jan 2, 2018

    if order_by:
        if order:
            pass
            # TODO: Figure out what can be done for ordering...
    else:
        qs = qs.order_by("-id")  # default to descending id order

    for exp_filter in filters:
        try:
            qs = self.FILTERS[exp_filter](qs, filters[exp_filter])
        except KeyError:
            pass
            # do nothing if not a filter

    return qs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obj_get_list(self, request=None, **kwargs):\n filters = None\n\n if hasattr(request, 'GET'):\n filters = request.GET\n\n applicable_filters = self.build_filters(filters=filters)\n applicable_filters.update(kwargs)\n\n try:\n return self.get_object_list(request).filter(**applicable_filters)\n except ValueError, e:\n raise NotFound(\"Invalid resource lookup data provided (mismatched type).\")", "def get_queryset(self):\n tags = self.request.query_params.get('tags')\n account = self.request.query_params.get('account')\n year = self.request.query_params.get('year')\n month = self.request.query_params.get('month')\n day = self.request.query_params.get('day')\n\n queryset = self.queryset\n if tags:\n tag_ids = self._params_to_ints(tags)\n queryset = queryset.filter(tags__id__in=tag_ids)\n if account:\n account_id = self._params_to_ints(account)\n queryset = queryset.filter(account__id__in=account_id)\n if year:\n queryset = queryset.filter(date__year=year)\n if month:\n queryset = queryset.filter(date__month=month)\n if day:\n queryset = queryset.filter(date__day=day)\n return queryset.filter(user=self.request.user)", "def get_queryset(self):\n # Get tags from the request if it was specified\n tags = self.request.query_params.get('tags')\n # Get authors from the request if it was specified\n authors = self.request.query_params.get('authors')\n # Make copy of queryset as to not modify the original queryset\n queryset = self.queryset\n if tags:\n # Get list of ids specified\n tag_ids = self._params_to_ints(tags)\n # Filter on the foreign key object with tags__id__in\n queryset = queryset.filter(tags__id__in=tag_ids)\n if authors:\n # Get list of ids specified\n author_ids = self._params_to_ints(authors)\n # Filter by the author\n queryset = queryset.filter(authors__id__in=author_ids)\n\n return queryset.filter(user=self.request.user)", "def get_queryset(self, request):\n querys = self.model.all_objects.get_queryset()\n ordering = self.get_ordering(request)\n if ordering:\n querys = querys.order_by(*ordering)\n return querys", "def get_queryset(self):\n tags = self.request.query_params.get('tags')\n categories = self.request.query_params.get('categories')\n user = self.request.query_params.get('user')\n queryset = self.queryset\n\n if tags:\n tags_title = self._params(tags)\n queryset = queryset.filter(tags__title__in=tags_title)\n\n if categories:\n categories_title = self._params(categories)\n queryset = queryset.filter(categories__title__in=categories_title)\n\n if user:\n user_id = self._params_to_ints(user)\n queryset = queryset.filter(user__id__in=user_id)\n return queryset", "def get_queryset(self, request, queryset):\n\n return queryset", "def obj_get_list(self, request=None, **kwargs):\n filters = {}\n if hasattr(request, 'GET'):\n # Grab a mutable copy.\n filters = request.GET.copy()\n\n # Update with the provided kwargs.\n filters.update(kwargs)\n if \"community\" in filters:\n try:\n community = Community.objects.get(\n uuid=uuid_from_uri(filters['community']))\n im = community.image_set.filter(is_active=True)\n wb = community.wordbox_set.filter(is_active=True)\n base_object_list = sorted(chain(im, wb),\n key=attrgetter('created_time'))[::-1]\n return self.apply_authorization_limits(request,\n base_object_list)\n except ValueError:\n raise BadRequest(\"Invalid resource lookup data provided \"\n \"(mismatched type).\")\n else:\n raise BadRequest(\"Invalid filtering parameter\")", "def get_queryset(self):\n # tags = self.request.query_params.get('tags')\n # ingredients = 
self.request.query_params.get('ingredients')\n queryset = self.queryset\n # if tags:\n # tag_ids = self._params_to_ints(tags)\n # queryset = queryset.filter(tags__id__in=tag_ids)\n # if ingredients:\n # ingredient_ids = self._params_to_ints(ingredients)\n # queryset = queryset.filter(ingredients__id__in=ingredient_ids)\n\n return queryset.filter(user=self.request.user).order_by('-id')", "def get_queryset(self):\n queryset = Company.objects.all().order_by('name')\n\n if self.request.GET.get('supplier', None):\n queryset = queryset.filter(is_supplier=True)\n\n if self.request.GET.get('customer', None):\n queryset = queryset.filter(is_customer=True)\n\n return queryset", "def get_queryset(self):\n tags = self.request.query_params.get('tags')\n ingredient = self.request.query_params.get('ingredient')\n queryset = self.queryset\n if tags:\n tags_id = self._params_to_int(tags)\n queryset = queryset.filter(tags__id__in =tags_id)\n if ingredient:\n ingredient_id = self._params_to_int(ingredient)\n queryset = queryset.filter(ingredient__id__in = ingredient_id)\n\n return queryset.filter(user = self.request.user)", "def get_queryset(self):\n queryset = Food.objects.all()\n name = self.request.query_params.get('name', None)\n ndb_no = self.request.query_params.get('ndb_no', None)\n if name is not None:\n queryset = queryset.filter(name=name)\n elif ndb_no is not None:\n queryset = queryset.filter(ndb_no=ndb_no)\n return queryset", "def get_queryset(self):\n qs = super().get_queryset()\n search_value = self.request.GET.get('search_box')\n\n if search_value is not None:\n qs = qs.search_by(search_value)\n\n return qs", "def get_queryset(self):\n if 'email' in self.request.GET:\n if (Person.objects.filter(user__email=self.request.GET['email'])\n .exists()):\n queryset = (Person.objects\n .filter(user__email=self.request.GET['email']))\n else:\n queryset = []\n else:\n queryset = Person.objects.all()\n return queryset", "def get_queryset(self):\n if 'email' in self.request.GET:\n if (Person.objects.filter(user__email=self.request.GET['email'])\n .exists()):\n queryset = (Person.objects\n .filter(user__email=self.request.GET['email']))\n else:\n queryset = []\n else:\n queryset = Person.objects.all()\n return queryset", "def get_queryset(self):\n\n\t\t# Initially set the returned objects to be all clubs\n\t\tqueryset = Club.objects.all()\n\n\t\t# Access the request params\n\t\tclubname = self.request.query_params.get('clubname', None)\n\n\t\t# If a club name is specified ---> Set the filter\n\t\tif clubname is not None:\n\t\t\tqueryset = queryset.filter(club=clubname)\n\n\t\treturn queryset", "def get_queryset(self):\n\n\t\t# Initially set the returned objects to be all clubs\n\t\tqueryset = Club.objects.all()\n\n\t\t# Access the request params\n\t\tclubname = self.request.query_params.get('clubname', None)\n\n\t\t# If a club name is specified ---> Set the filter\n\t\tif clubname is not None:\n\t\t\tqueryset = queryset.filter(club=clubname)\n\n\t\treturn queryset", "def get_queryset(self, request):\n queryset = self.model._default_manager.all()\n queryset = queryset.filter(user=request.user)\n ordering = self.get_ordering()\n if ordering:\n if isinstance(ordering, str):\n ordering = (ordering,)\n queryset = queryset.order_by(*ordering)\n return queryset", "def get_queryset(self, *args, **kwargs):\n qs = super().get_queryset(*args, **kwargs).filter(user=self.request.user)\n return qs", "def get_queryset(self):\n if 'person_id' in self.request.GET:\n queryset = Question.objects.all()\n if 'my_questions' in 
self.request.GET:\n queryset = (queryset.filter(creator__id=self.request.GET['person_id']))\n if 'search_autor' in self.request.GET:\n text_search = self.request.GET['search_autor']\n queryset = (queryset.filter(Q(creator__user__first_name__icontains=text_search) |\n Q(creator__user__last_name__icontains=text_search)))\n if 'last_questions' in self.request.GET:\n queryset = (queryset.order_by('-creation_date')[:5])\n else:\n if 'by_tag' in self.request.GET:\n queryset = (queryset.order_by('-questiontag__tag__name'))\n if 'order_by' in self.request.GET:\n if self.request.GET['order_by'] == 'newest':\n queryset = (queryset.order_by('-creation_date'))\n elif self.request.GET['order_by'] == 'oldest':\n queryset = (queryset.order_by('creation_date'))\n else:\n queryset = []\n return queryset", "def get_queryset(self):\n queryset = Cliente.objects.all()\n id = self.request.query_params.get('id')\n cedula = self.request.query_params.get('cedula')\n \n if id is not None:\n queryset = queryset.filter(id=id)\n return queryset\n if cedula is not None:\n queryset = queryset.filter(cedula=cedula)\n return queryset\n return queryset", "def get_queryset(self):\n\n\t\t# Initially set the returned objects to be all players\n\t\tqueryset = Player.objects.all()\n\n\t\t# Access the request params\n\t\tplayername = self.request.query_params.get('playername', None)\n\n\t\t# If a player name is specified ---> Set the filter\n\t\tif playername is not None:\n\t\t\tqueryset = queryset.filter(player=playername)\n\n\t\t# Return the appropriate queryset\n\t\treturn queryset", "def get_queryset(self):\n queryset = self.queryset\n\n # Get query params and filter the queryset\n author = self.request.query_params.get('author')\n category = self.request.query_params.get('category')\n tags = self.request.query_params.get('tags')\n\n if author:\n queryset = queryset.filter(author__id__exact=int(author))\n\n if category:\n queryset = queryset.filter(category__slug__exact=category)\n\n if tags:\n tags_list = tags.split(',')\n queryset = queryset.filter(tags__slug__in=tags_list).distinct()\n\n return queryset.order_by('-creation_date')", "def get_queryset(self):\n queryset = Venta.objects.all()\n n_venta = self.request.query_params.get('n_venta')\n if n_venta is not None:\n queryset = queryset.filter(n_venta=n_venta)\n return queryset\n return queryset", "def get_queryset(self):\n if 'person_id' in self.request.GET:\n queryset = (Answer.objects\n .filter(creator__id=self.request.GET['person_id']))\n elif 'question_id' in self.request.GET:\n queryset = (Answer.objects.filter(question__id=self.request\n .GET['question_id']))\n else:\n queryset = Answer.objects.all()\n return queryset", "def queryset(self, request):\n qs = self.model.all_objects.get_query_set()\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs", "def get_queryset(self, **kwargs):\n # if getattr(self.view, 'deleted_obj_lookup', False) and self.view.queryset is None and self.view.model:\n if getattr(self.view, 'deleted_obj_lookup', False) or self.request.GET.get('deleted_obj_lookup', None):\n return self.view.model._default_manager.all_with_deleted().filter(**kwargs)\n return self.super.get_queryset(**kwargs)", "def get_queryset(self):\n if self.queryset is None:\n return self.model.objects.all()\n return self.queryset", "def get_queryset(self):\n queryset = self.queryset\n project_id = self.request.query_params.get('project', None)\n if project_id:\n queryset = self.queryset.filter(document__project_id=project_id)\n id_value 
= self.request.query_params.get('id', None)\n if id_value:\n id_list = id_value.split(',')\n queryset = self.queryset.filter(id__in=id_list)\n return queryset", "def get_queryset(self) -> Union[QuerySet, List[TodoListModel]]:\n queryset = super().get_queryset()\n\n if 'name_search' in self.kwargs:\n queryset = queryset.filter(name__icontains=self.kwargs['name_search'])\n\n return queryset", "def get_queryset(self):\n queryset = super(BaseViewSet, self).get_queryset()\n user = self.request.user\n return queryset.filter(user=user)" ]
[ "0.76005995", "0.74862903", "0.74671745", "0.73489726", "0.7338303", "0.7274823", "0.72584987", "0.7209284", "0.7193955", "0.7184572", "0.7105138", "0.70795804", "0.7065119", "0.7065119", "0.7050204", "0.7050204", "0.70427555", "0.70153874", "0.69784844", "0.69669074", "0.69559526", "0.6954292", "0.6933565", "0.6887303", "0.68588966", "0.6848154", "0.6841547", "0.6835706", "0.6830036", "0.6827105" ]
0.76752055
0
Will fill inplace the export data structure with nested headers and their respective text
def get_data(export, headers, section):
    for unit in section:
        # We use a temporary variable so as to not update it for all
        # iteration
        temp_header = headers + [unit.title]
        # Get the text
        text = unit.text
        # NOTE: this is probably dirty, could use classes instead?
        export['headers'].append(temp_header)
        export['content'].append(unit.text)
        get_data(export, temp_header, unit.sections)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_dict(data: list, output_path: str, title: str) -> None:\n output = []\n for row in data:\n heading = get_heading(row)\n output.append(f'# {heading}')\n output.append('')\n for _, header in utils.srg_export.data.COLUMN_MAPPINGS.items():\n output.append(f'## {header}')\n content = get_content(header, row)\n output.append(content)\n output.append('')\n output.append('')\n output.append('\\\\newpage')\n output.append('')\n\n write_md_file(output, output_path)", "def fill_export_section():\n section = _SectionData(\"Export\")\n section.props.append((\"ExportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_scale)))\n section.props.append((\"ApplyModifiers\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_apply_modifiers))))\n section.props.append((\"ExcludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_exclude_edgesplit))))\n section.props.append((\"IncludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_include_edgesplit))))\n section.props.append((\"ActiveUVOnly\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_active_uv_only))))\n section.props.append((\"ExportVertexGroups\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_groups))))\n section.props.append((\"ExportVertexColor\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color))))\n section.props.append((\"ExportVertexColorType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type)))\n section.props.append((\"ExportVertexColorType7\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type_7)))\n # section.props.append((\"ExportAnimFile\", info.get_default_prop_value(bpy.types.GlobalSCSProps.export_anim_file)))\n section.props.append((\"ExportPimFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pim_file))))\n section.props.append((\"OutputType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_output_type)))\n section.props.append((\"ExportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pit_file))))\n section.props.append((\"ExportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pic_file))))\n section.props.append((\"ExportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pip_file))))\n section.props.append((\"SignExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_write_signature))))\n return section", "def export_data(data, header):\n return [[row[k] for k in header] for row in data]", "def format_report_header(self):", "def _export_field_content(xblock_item, item_dir):\r\n module_data = xblock_item.get_explicitly_set_fields_by_scope(Scope.content)\r\n if isinstance(module_data, dict):\r\n for field_name in module_data:\r\n if field_name not in DEFAULT_CONTENT_FIELDS:\r\n # filename format: {dirname}.{field_name}.json\r\n with item_dir.open('{0}.{1}.{2}'.format(xblock_item.location.name, field_name, 'json'),\r\n 'w') as field_content_file:\r\n field_content_file.write(dumps(module_data.get(field_name, {}), cls=EdxJSONEncoder))", "def test_export_data_dict_1(self):\n\n list_of_data = [self.tkt_dict1, self.tkt_dict2]\n headers = [\"type\", \"phage_id\", \"host_genus\", \"cluster\"]\n basic.export_data_dict(list_of_data, self.export_file,\n headers, include_headers=True)\n\n exp_success_tkts = []\n with open(self.export_file,'r') as file:\n file_reader = csv.DictReader(file)\n for dict in 
file_reader:\n exp_success_tkts.append(dict)\n\n with self.subTest():\n self.assertEqual(len(exp_success_tkts), 2)\n with self.subTest():\n self.assertEqual(set(exp_success_tkts[0].keys()), set(headers))", "def handleExistingData(iIndent):\r\n\tglobal sEType, sEVar, sEData\r\n\r\n\t# If none, quit.\r\n\tif not sEType:\r\n\t\treturn\r\n\r\n\t# Skip if we have no data.\r\n\tif not sEData:\r\n\t\treturn\r\n\r\n\t# Insert tab level and comments into a header.\r\n\tsHead = (\" \" * iIndent) + \"/// \"\r\n\r\n\t# Sanitise data.\r\n\tsEData.rstrip()\r\n\r\n\t# Swap breaks for heads.\r\n\tsEData = sEData.replace(BREAK, \"\\n\" + sHead)\r\n\r\n\t# Write out the respective blocks.\r\n\tif sEType == BRIEF:\r\n\t\t#sEData = sEData.replace(\"<summary>\", \"\")\r\n\t\t#sEData = sEData.replace(\"</summary>\", \"\")\r\n\t\tpOutFile.write(sHead + \"<summary>\\n\")\r\n\t\tpOutFile.write(sHead + sEData + \"\\n\")\r\n\t\tpOutFile.write(sHead + \"</summary>\\n\")\r\n\r\n\telif sEType == PARAM:\r\n\t\tpOutFile.write(sHead + \"<param name=\\\"\" + str(sEVar) + \"\\\">\" + str(sEData) + \"</param>\\n\")\r\n\r\n\telif sEType == RETURN:\r\n\t\tpOutFile.write(sHead + \"<returns>\" + str(sEData) + \"</returns>\\n\")\r\n\r\n\telif sEType == AUTHOR:\r\n\t\tpOutFile.write(sHead + \"<author>\" + str(sEData) + \"</author>\\n\")\r\n\t\t\r\n\telif sEType == DATE:\r\n\t\tpOutFile.write(sHead + \"<date>\" + str(sEData) + \"</date>\\n\")\r\n\t\t\r\n\telif sEType == RETURN:\r\n\t\tpOutFile.write(sHead + \"<returns>\" + str(sEData) + \"</returns>\\n\")\r\n\r\n\telif sEType == REMARK:\r\n\t\tpOutFile.write(sHead + str(sEData) + \"\\n\")\r\n\r\n\t# Zap any leftover data.\r\n\tsEType = None\r\n\tsEVar = None\r\n\tsEData = \"\"", "def dumpData(self,out):\n out.packSub0('NAME',self.id)\n if getattr(self,'isDeleted',False):\n out.packSub('DELE','i',0)\n return\n out.packSub0('MODL',self.model)\n if self.title: out.packSub0('FNAM',self.title)\n out.packSub('BKDT','f4i',\n self.weight, self.value, self.isScroll, self.teaches, self.enchantPoints)\n if self.script: out.packSub0('SCRI',self.script)\n if self.icon: out.packSub0('ITEX',self.icon)\n if self.text: out.packSub0('TEXT',self.text)\n if self.enchant: out.packSub0('TEXT',self.enchant)", "def export_data(self):\r\n if len(app.entry6.get()) != 0:\r\n\r\n if app.var.get() == 'xls':\r\n\r\n wb = Workbook()\r\n sheet = wb.add_sheet('Sheet1')\r\n self.columns = ['id', 'Name', 'Section', 'Dept.', 'Gpa', 'MP1', 'MP2', 'MP3', 'MT', 'FINAL']\r\n style = xlwt.easyxf('font: bold 1')\r\n for col in range(10):\r\n sheet.write(0, col, self.columns[col], style)\r\n index=0\r\n for row in range(1,162):\r\n sheet.write(row, 1, open_data.sort_list[index])\r\n index += 1\r\n index1 = -1\r\n for row in range(1,162):\r\n index1 += 1\r\n index2=0\r\n for col in range(10):\r\n if col == 1 or index2 == 1:\r\n index2 += 1\r\n continue\r\n if index2 == 0:\r\n sheet.write(row, col, int(open_data.student[open_data.sort_list[index1]][index2]))\r\n index2 += 1\r\n continue\r\n sheet.write(row, col, open_data.student[open_data.sort_list[index1]][index2])\r\n index2 += 1\r\n file_name=app.entry6.get()\r\n if '.xls' not in file_name:\r\n wb.save(file_name+'.xls')\r\n else:\r\n wb.save(file_name)\r\n\r\n elif app.var.get() == 'txt':\r\n\r\n file_name = app.entry6.get()\r\n if '.txt' not in file_name:\r\n file_name = file_name + '.txt'\r\n file = open(file_name, 'w')\r\n index2 = 0\r\n for key in open_data.student:\r\n for index in range(10):\r\n if index == 0:\r\n 
file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n continue\r\n if index == 1:\r\n try:\r\n self.split_names = open_data.sort_list[index2].split(' ')\r\n file.write(self.split_names[0])\r\n file.write(', ')\r\n file.write(self.split_names[1])\r\n file.write(', ')\r\n index2 += 1\r\n except UnicodeEncodeError:\r\n index2 += 1\r\n pass\r\n continue\r\n if index >= 5 and index <= 9:\r\n if open_data.student[key][index] != '':\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n else:\r\n file.write('\\n')\r\n break\r\n if index == 9:\r\n file.write('\\n')\r\n continue\r\n try:\r\n file.write(str(open_data.student[key][index]))\r\n file.write(', ')\r\n except UnicodeEncodeError:\r\n pass\r\n file.close()\r\n\r\n\r\n\r\n elif app.var.get() == 'csv':\r\n app.info.configure(text=\"INFO: Type not Supported\")\r\n # The program does not support saving in 'csv' type. If the user selects 'csv' file type, 'Info' Label\r\n # shows the message: 'INFO: Type not Supported'.\r\n\r\n else:\r\n app.info.configure(text='INFO: Type not chosen!')\r\n # Also, If the user presses on 'Export Data' button, with a file name provided, but without choosing a\r\n # file type, 'Info' Label shows the message: 'INFO: Type not chosen'.\r\n\r\n else:\r\n app.info.configure(text=\"INFO: Please provide the name of the file.\")\r\n # Also, if the user presses 'Export Data' button without giving a file name, 'Info' Label shows the message:\r\n # 'INFO: Please provide the name of the file.'\r", "def writeHeader( self ):\n for k in self.secondaryTargets.keys():\n fileName = self.treyGene[k] + \"-GenesinCommon.txt\" \n with open( fileName, 'w' ) as out:\n out.write(\"%s\\t%s\\t%s\\n\" %(\"Gene_trey\", \"Gene\", \"Gene_inCommon\" ))\n out.close()", "def dumpData(self,out):\n #--Header\n out.packSub0('NAME',self.id)\n if getattr(self,'isDeleted',False):\n out.packSub('DELE','i',0)\n return\n if self.name == 'LEVC':\n flags = 1*self.calcFromAllLevels\n etype = 'CNAM'\n else:\n flags = 1*self.calcForEachItem + 2*self.calcFromAllLevels\n etype = 'INAM'\n out.packSub('DATA','i',flags)\n out.packSub('NNAM','B',self.chanceNone)\n out.packSub('INDX','i',len(self.entries))\n #--Entries\n for pcLevel, objectId in self.entries:\n out.packSub0(etype,objectId)\n out.packSub('INTV','h',pcLevel)", "def dict_to_file(data):\n def get_trailer(trailer_totals):\n trailer_format = (\n u'7' +\n u'999-999' +\n u' ' * 12 +\n u'{net_total:010}' +\n u'{credit_total:010}' +\n u'{debit_total:010}' +\n u' ' * 24 +\n u'{count_trans:06}' +\n u' ' * 40\n )\n return trailer_format.format(\n net_total=abs(trailer_totals[TOTAL_CREDITS]-trailer_totals[TOTAL_DEBITS]),\n credit_total=trailer_totals[TOTAL_CREDITS],\n debit_total=trailer_totals[TOTAL_DEBITS],\n count_trans=trailer_totals[TOTAL_ITEMS]\n )\n\n record_format = (\n u'0' +\n u' ' * 17 +\n u'{reel_seq_num:2.2}' +\n u'{name_fin_inst:3}' +\n u' ' * 7 +\n u'{user_name:26.26}' +\n u'{user_num:6.6}' +\n u'{file_desc:12.12}' +\n u'{date_for_process:6.6}' +\n u' ' * 40 +\n u'{record_type:1.1}' +\n u'{bsb_number:7.7}' +\n u'{account_number:9.9}' +\n u'{indicator:1.1}' +\n u'{tran_code:2.2}' +\n u'{amount:10.10}' +\n u'{account_title:32.32}' +\n u'{lodgement_ref:18.18}' +\n u'{trace_bsb_number:7.7}' +\n u'{trace_account_number:9.9}' +\n u'{name_of_remitter:16.16}' +\n u'{withholding_tax_amount:8.8}'\n )\n\n LOGGER.debug('record_format={}'.format(record_format))\n flat_trans = sorted([(record_format.format(**tran), tran) for tran in data])\n\n # remove duplicate 
headers and accumulate for trailer\n last_header = ''\n output_list = []\n totals = [0, 0, 0]\n\n for tran, data in flat_trans:\n if last_header != tran[:120]:\n if len(output_list) != 0:\n output_list.append(get_trailer(totals))\n totals = [0, 0, 0]\n\n output_list.append(tran[:120])\n last_header = tran[:120]\n\n if data['tran_code'] == u'13':\n totals[TOTAL_CREDITS] += int(data['amount'])\n else:\n totals[TOTAL_DEBITS] += int(data['amount'])\n totals[TOTAL_ITEMS] += 1\n output_list.append(tran[120:])\n\n output_list.append(get_trailer(totals))\n\n # add to stream\n output_stream = StringIO()\n # add line endings\n output_stream.writelines('\\n'.join(output_list))\n output_stream.seek(0)\n\n return output_stream", "def data_file(self, *, expand=False, contract=False):\n # Rely on SuperDataObj to proof expand/contract args\n # Extra empty string at the end puts a newline at the end\n # of the generated string, consistent with files\n # generated by Sphinx.\n return \"\\n\".join(\n (\n self.header_preamble,\n self.header_project.format(project=self.project),\n self.header_version.format(version=self.version),\n self.header_zlib,\n *(\n obj.data_line(expand=expand, contract=contract)\n for obj in self.objects\n ),\n \"\",\n )\n ).encode(\"utf-8\")", "def restore_structure(self, internal_usage=False):\n self.headers = []\n self.headersindex = {}\n i = 0\n database = managers.database_manager.get_database(self.owner_id, self.database_id)\n cur = database.get_connection().cursor()\n cur.execute(\"select * from `%s`\" % self.name)\n for fieldDesc in cur.description:\n # if len(fieldDesc[0])>20:\n #\ttext = fieldDesc[0].ljust(20)\n # else:\n text = fieldDesc[0]\n try:\n self.headers.append(text.decode(\"UTF-8\"))\n self.headersindex[text.decode(\"UTF-8\")] = i\n except:\n self.headers.append(text)\n self.headersindex[text] = i\n i += 1\n if not internal_usage:\n managers.request_manager.get_request().session().value(\"headers\", self.headers)", "def _write_all_headers(unit, fobj):\n\n now = datetime.datetime.now()\n if unit.jump_speed:\n jump_speed = '%.2f' % unit.jump_speed\n else:\n jump_speed = None\n\n # Case and order is significant.\n header_map = (\n ('Name', unit.name),\n ('Reference', unit.reference),\n ('Type', unit.unit_type),\n ('Unit_Era', unit.unit_era),\n ('Unit_TRO', unit.unit_tro),\n ('Move_Type', unit.unit_move_type),\n ('Tons', unit.weight),\n ('Comment', \"Saved by: btmux_maplib_io(Python) at %s\" % now.ctime()),\n ('Computer', 4),\n ('Radio', 5),\n ('Heat_Sinks', unit.heatsink_total),\n ('Mech_BV', unit.battle_value),\n ('Cargo_Space', unit.cargo_space),\n ('Max_Suits', unit.battlesuit_total),\n ('Max_Speed', '%.2f' % unit.max_speed),\n ('Jump_Speed', jump_speed),\n ('Specials', ' '.join(list(unit.specials))),\n )\n\n for header_name, header_value in header_map:\n if not header_value:\n continue\n if isinstance(header_value, list):\n header_value = ' '.join(header_value)\n header_str = \"{header_name:<16} {{ {header_value} }}\\n\".format(\n header_name=header_name, header_value=header_value)\n fobj.write(header_str)", "def build_export_row(self, data:dict) -> dict:\n data = {k : v for k, v in data.items() if v is not np.nan}\n for key, value in data.items():\n if key in _column_types:\n if _column_types[key] == list:\n if isinstance(value, (list, set, tuple)):\n data[key] = LIST_DELIMITER.join(value)\n else:\n data[key] = str(value)\n elif _column_types[key] == bool:\n try:\n data[key] = bool(value)\n except:\n data[key] = False\n else:\n data[key] = str(value)\n return 
data", "def standalone_writer( extracted_data_dict ):\n import JsonExport ## Make sure JsonExport.py is available in this directory.\n JsonExport.write( extracted_data_dict, \"standalone_parse\", \n \"standalone_parse\" )", "def _write_header(self, header):\n # write out telescope and source information\n header[\"latitude\"] = self.telescope_location_lat_lon_alt_degrees[0]\n header[\"longitude\"] = self.telescope_location_lat_lon_alt_degrees[1]\n header[\"altitude\"] = self.telescope_location_lat_lon_alt_degrees[2]\n header[\"telescope_name\"] = np.string_(self.telescope_name)\n header[\"instrument\"] = np.string_(self.instrument)\n header[\"object_name\"] = np.string_(self.object_name)\n\n # write out required UVParameters\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nants_telescope\"] = self.Nants_telescope\n header[\"Nbls\"] = self.Nbls\n header[\"Nblts\"] = self.Nblts\n header[\"Nfreqs\"] = self.Nfreqs\n header[\"Npols\"] = self.Npols\n header[\"Nspws\"] = self.Nspws\n header[\"Ntimes\"] = self.Ntimes\n header[\"antenna_numbers\"] = self.antenna_numbers\n header[\"uvw_array\"] = self.uvw_array\n header[\"vis_units\"] = np.string_(self.vis_units)\n header[\"channel_width\"] = self.channel_width\n header[\"time_array\"] = self.time_array\n header[\"freq_array\"] = self.freq_array\n header[\"integration_time\"] = self.integration_time\n header[\"lst_array\"] = self.lst_array\n header[\"polarization_array\"] = self.polarization_array\n header[\"spw_array\"] = self.spw_array\n header[\"ant_1_array\"] = self.ant_1_array\n header[\"ant_2_array\"] = self.ant_2_array\n header[\"antenna_positions\"] = self.antenna_positions\n\n # handle antenna_names; works for lists or arrays\n header[\"antenna_names\"] = np.asarray(self.antenna_names, dtype=\"bytes\")\n\n # write out phasing information\n header[\"phase_type\"] = np.string_(self.phase_type)\n if self.phase_center_ra is not None:\n header[\"phase_center_ra\"] = self.phase_center_ra\n if self.phase_center_dec is not None:\n header[\"phase_center_dec\"] = self.phase_center_dec\n if self.phase_center_epoch is not None:\n header[\"phase_center_epoch\"] = self.phase_center_epoch\n if self.phase_center_frame is not None:\n header[\"phase_center_frame\"] = np.string_(self.phase_center_frame)\n\n # write out optional parameters\n if self.dut1 is not None:\n header[\"dut1\"] = self.dut1\n if self.earth_omega is not None:\n header[\"earth_omega\"] = self.earth_omega\n if self.gst0 is not None:\n header[\"gst0\"] = self.gst0\n if self.rdate is not None:\n header[\"rdate\"] = np.string_(self.rdate)\n if self.timesys is not None:\n header[\"timesys\"] = np.string_(self.timesys)\n if self.x_orientation is not None:\n header[\"x_orientation\"] = np.string_(self.x_orientation)\n if self.blt_order is not None:\n header[\"blt_order\"] = np.string_(\", \".join(self.blt_order))\n if self.antenna_diameters is not None:\n header[\"antenna_diameters\"] = self.antenna_diameters\n if self.uvplane_reference_time is not None:\n header[\"uvplane_reference_time\"] = self.uvplane_reference_time\n if self.eq_coeffs is not None:\n header[\"eq_coeffs\"] = self.eq_coeffs\n if self.eq_coeffs_convention is not None:\n header[\"eq_coeffs_convention\"] = np.string_(self.eq_coeffs_convention)\n\n # write out extra keywords if it exists and has elements\n if self.extra_keywords:\n extra_keywords = header.create_group(\"extra_keywords\")\n for k in self.extra_keywords.keys():\n if isinstance(self.extra_keywords[k], str):\n extra_keywords[k] = 
np.string_(self.extra_keywords[k])\n else:\n extra_keywords[k] = self.extra_keywords[k]\n\n # write out history\n header[\"history\"] = np.string_(self.history)\n\n return", "def populate_contents(self):\n\n data_table = self.data_table\n world = self.world\n\n self.add_text_row('World Name', data_table.world_name_label.text())\n self.add_text_row('Coordinates', data_table.world_coords_label.text())\n self.add_text_row('World Type', data_table.world_type_label.text())\n if data_table.world_extra_label.text() != '':\n self.add_text_row('Extra Info', data_table.world_extra_label.text())\n self.add_text_row('Filename', world.base_filename)\n self.add_text_row('Size', '{}x{}'.format(*world.info.size))\n\n if len(world.info.dungeons) > 0:\n dungeons = self.add_text_row('Dungeons', '<br/>'.join(sorted(world.info.dungeons)))\n else:\n self.add_text_row('Dungeons', '-')\n\n if len(world.info.biomes) > 0:\n biomes = self.add_text_row('Biomes', '<br/>'.join(sorted(world.info.biomes)))\n else:\n self.add_text_row('Biomes', '-')", "def export_data(self):\r\n stocks = {}\r\n headings = ['Security', 'Price', 'Change', 'Change %', '52 Week', 'Market Cap']\r\n\r\n for data in range(6):\r\n for items in self.root.main.treeview.get_children():\r\n values = self.root.main.treeview.item(items, 'values')\r\n if headings[data] not in stocks:\r\n stocks[headings[data]] = []\r\n stocks.get(headings[data]).append(values[data])\r\n\r\n df = pd.DataFrame(stocks, columns=headings)\r\n path = tk.filedialog.asksaveasfilename(title='Save File As...',\r\n filetypes=((\"CComma-separated values (.csv)\", \"*.csv\"), (\"Text Document(.txt)\", \"*.txt\")))\r\n\r\n if not path:\r\n return\r\n else:\r\n df.to_excel(path, index=False, header=True)", "def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return", "def _text_write_preprocess(self):\n self.check()\n\n max_name_len = np.max([len(name) for name in self.name])\n fieldtypes = [\"U\" + str(max_name_len), \"f8\", \"f8\"]\n comp_names = self._get_lon_lat_component_names()\n frame_obj = self._get_frame_obj()\n frame_desc_str = _get_frame_desc_str(frame_obj)\n\n component_fieldnames = []\n for comp_name in comp_names:\n # This will add e.g. 
ra_J2000 and dec_J2000 for FK5\n component_fieldnames.append(comp_name + \"_\" + frame_desc_str)\n fieldnames = [\"source_id\"] + component_fieldnames\n stokes_names = [\"I\", \"Q\", \"U\", \"V\"]\n fieldshapes = [()] * 3\n\n if self.stokes_error is not None:\n stokes_error_names = [(f\"{k}_error\") for k in [\"I\", \"Q\", \"U\", \"V\"]]\n\n n_stokes = 0\n stokes_keep = []\n for si, total in enumerate(np.nansum(self.stokes.to(\"Jy\"), axis=(1, 2))):\n if total > 0:\n fieldnames.append(stokes_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n if self.stokes_error is not None:\n fieldnames.append(stokes_error_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n n_stokes += 1\n stokes_keep.append(total > 0)\n\n assert n_stokes >= 1, \"No components with nonzero flux.\"\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n fieldnames.append(\"subband_frequency\")\n else:\n fieldnames.append(\"frequency\")\n fieldtypes.append(\"f8\")\n fieldshapes.extend([(self.Nfreqs,)])\n elif self.reference_frequency is not None:\n fieldnames.extend([(\"reference_frequency\")])\n fieldtypes.extend([\"f8\"])\n fieldshapes.extend([()] * n_stokes + [()])\n if self.spectral_index is not None:\n fieldnames.append(\"spectral_index\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_rise_lst\"):\n fieldnames.append(\"rise_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_set_lst\"):\n fieldnames.append(\"set_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n dt = np.dtype(list(zip(fieldnames, fieldtypes, fieldshapes)))\n\n arr = np.empty(self.Ncomponents, dtype=dt)\n arr[\"source_id\"] = self.name\n\n for comp_ind, comp in enumerate(comp_names):\n arr[component_fieldnames[comp_ind]] = getattr(self.skycoord, comp).deg\n\n for ii in range(4):\n if stokes_keep[ii]:\n arr[stokes_names[ii]] = self.stokes[ii].T.to(\"Jy\").value\n if self.stokes_error is not None:\n arr[stokes_error_names[ii]] = self.stokes_error[ii].T.to(\"Jy\").value\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n arr[\"subband_frequency\"] = self.freq_array.to(\"Hz\").value\n else:\n arr[\"frequency\"] = self.freq_array.to(\"Hz\").value\n elif self.reference_frequency is not None:\n arr[\"reference_frequency\"] = self.reference_frequency.to(\"Hz\").value\n if self.spectral_index is not None:\n arr[\"spectral_index\"] = self.spectral_index\n\n if hasattr(self, \"_rise_lst\"):\n arr[\"rise_lst\"] = self._rise_lst\n if hasattr(self, \"_set_lst\"):\n arr[\"set_lst\"] = self._set_lst\n\n return arr", "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')", "def writeheader(writer):\n writer.writerow(dict((fn, fn) for fn in writer.fieldnames))", "def export_table (self,_w):\n try:\n _data = \"\"\n maxRow = _w.rowCount()\n maxColumn = _w.columnCount()\n for hc in range(0,maxColumn):\n try: _hci = str(_w.horizontalHeaderItem(hc).text())\n except:_hci=\"None\";pass\n if hc == (maxColumn-1) :_data += _hci\n elif hc < maxColumn:_data += \"%s,\" % _hci\n _data += \"\\n\"\n for r in range(0, maxRow):\n for c in range(0, maxColumn):\n _d = str(_w.item(r, c).text())\n if c == (maxColumn-1):_data += _d\n elif c < maxColumn:_data 
+= \"%s,\" % _d\n _data += \"\\n\"\n options = QFileDialog.Options()\n saved_file, _ = QFileDialog.getSaveFileName(self, \"Save Table to file \", \"data\", \"Plain Text (*.txt);;CSV (*.csv);;All Files (*)\", options=options)\n _file = open(saved_file, 'w')\n _file.write(_data)\n _file.close()\n except FileNotFoundError:pass", "def start_meta(text_key='main'):\n meta = {\n 'info': {\n 'text': ''\n },\n 'lib': {\n 'default text': text_key,\n 'values': {}\n },\n 'columns': {},\n 'masks': {},\n 'sets': {\n 'data file': {\n 'text': {text_key: 'Variable order in source file'},\n 'items': []\n }\n },\n 'type': 'pandas.DataFrame'\n }\n return meta", "def variants_export_header(case_obj):\n header = []\n header = header + EXPORT_HEADER\n # Add fields specific for case samples\n for individual in case_obj['individuals']:\n display_name = str(individual['display_name'])\n header.append('AD_reference_'+display_name) # Add AD reference field for a sample\n header.append('AD_alternate_'+display_name) # Add AD alternate field for a sample\n header.append('GT_quality_'+display_name) # Add Genotype quality field for a sample\n return header", "def add_to_pr_export(self, exp_template):", "def _create_empty_segy_file_object(self):\n self.textual_file_header = b''\n self.binary_file_header = None\n self.traces = []", "def write_header(_metadata, rename_padding=False):\n template = \"\"\"\\\n VERSION {version}\n FIELDS {fields}\n SIZE {size}\n TYPE {type}\n COUNT {count}\n WIDTH {width}\n HEIGHT {height}\n VIEWPOINT {viewpoint}\n POINTS {points}\n DATA {data}\n \"\"\"\n str_metadata = _metadata.copy()\n\n if not rename_padding:\n str_metadata['fields'] = ' '.join(_metadata['fields'])\n else:\n new_fields = []\n for f in _metadata['fields']:\n if f == '_':\n new_fields.append('padding')\n else:\n new_fields.append(f)\n str_metadata['fields'] = ' '.join(new_fields)\n str_metadata['size'] = ' '.join(map(str, _metadata['size']))\n str_metadata['type'] = ' '.join(_metadata['type'])\n str_metadata['count'] = ' '.join(map(str, _metadata['count']))\n str_metadata['width'] = str(_metadata['width'])\n str_metadata['height'] = str(_metadata['height'])\n str_metadata['viewpoint'] = ' '.join(map(str, _metadata['viewpoint']))\n str_metadata['points'] = str(_metadata['points'])\n tmpl = template.format(**str_metadata)\n return tmpl" ]
[ "0.6079765", "0.5885699", "0.58411974", "0.57001233", "0.5619722", "0.5597703", "0.5558174", "0.55084187", "0.54957384", "0.5491653", "0.5460945", "0.5460104", "0.5441996", "0.54364884", "0.5418627", "0.5417288", "0.5392408", "0.5381344", "0.53796035", "0.53658533", "0.53654695", "0.5345196", "0.53445286", "0.5330335", "0.5326384", "0.5320222", "0.52917874", "0.5262209", "0.5255363", "0.5251941" ]
0.62805873
0
Returns a list of variables to train.
def _get_variables_to_train():
    if FLAGS.trainable_scopes is None:
        return tf.trainable_variables()
    else:
        scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]

        variables_to_train = []
        for scope in scopes:
            variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
            variables_to_train.extend(variables)
        return variables_to_train
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_variables_to_train(self):\n if self.trainable_scopes is None:\n return tf.trainable_variables()\n else:\n scopes = [scope.strip() for scope in self.trainable_scopes.split(',')]\n \n variables_to_train = []\n for scope in scopes:\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n variables_to_train.extend(variables)\n return variables_to_train", "def _get_variables_to_train():\n if FLAGS.trainable_scopes is None:\n return tf.trainable_variables()\n else:\n scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]\n\n variables_to_train = []\n for scope in scopes:\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n variables_to_train.extend(variables)\n return variables_to_train", "def _get_variables_to_train():\n if FLAGS.trainable_scopes is None:\n return tf.trainable_variables()\n else:\n scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]\n\n variables_to_train = []\n for scope in scopes:\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n variables_to_train.extend(variables)\n return variables_to_train", "def get_trainable_vars(self):\n return self.arch.variables", "def list_trainable_variables(self):\n self.variables_to_train = None\n if not self.train_encoder:\n self.variables_to_train = list(set(tf.trainable_variables()) - set(self.autoencoder_variables))", "def trainable_variables(self):\n return [v for p in self.parameters for v in p.trainable_variables]\n # TODO: look for variables NOT in parameters too\n # so users can mix-n-match tf.Variables and pf.Parameters in modules", "def get_variables_to_train(self):\n if self.checkpoint_exclude_scopes is None:\n return tf.trainable_variables()\n else:\n scopes = [scope.strip() for scope in self.checkpoint_exclude_scopes.split(',')]\n\n variables_to_train = []\n for scope in scopes:\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n variables_to_train.extend(variables)\n return variables_to_train", "def get_variables_to_train(flags):\n if flags.trainable_scopes is None:\n # print(tf.trainable_variables())\n return tf.trainable_variables()\n else:\n scopes = [scope.strip() for scope in flags.trainable_scopes.split(',')]\n\n variables_to_train = []\n for scope in scopes:\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n variables_to_train.extend(variables)\n return variables_to_train", "def get_trainable_vars(self):\n params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)\n return params", "def get_trainable_vars(name=None):\n return tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=name)", "def collect_trainable_variables(modules):\n if not isinstance(modules, (list, tuple)):\n modules = [modules]\n\n var_list = []\n for mod in modules:\n add_variable(mod.trainable_variables, var_list)\n\n return var_list", "def get_variables(self) -> typing.List:\n parts = (self.neural_net.encoder, self.neural_net.predictor, self.neural_net.dynamics)\n return [v for v_list in map(lambda n: n.weights, parts) for v in v_list]", "def get_vars(scope=''):\n return [x for x in tf.trainable_variables() if scope in x.name]", "def get_tf_t_vars(sess):\n return sess.graph.get_collection(\"trainable_variables\")", "def get_trainable_variables(scope=None, suffix=None):\n return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)", "def variables(self):\n return [i.name for i in self.inputs + self.outputs]", "def variables(self):\n return 
[term.variable for term in self.terms]", "def variables(self):\n return self.dataset.data_vars", "def embedding_trainable_variables(self) -> Sequence[tf.Variable]:\n return self._embedding_layer.trainable_variables", "def dense_trainable_variables(self) -> Sequence[tf.Variable]:\n dense_vars = []\n for layer in self.layers:\n if layer != self._embedding_layer:\n dense_vars.extend(layer.trainable_variables)\n return dense_vars", "def trainable_variables(self, name=None):\n\n _m = [self.modules.get(name)] if name else self.modules.values()\n _var = []\n for i in _m:\n _var += filter(lambda p: p.requires_grad, i.parameters())\n return _var", "def get_variables(self):\n return [self.variables[key] for key in sorted(self.variables)]", "def get_model_variables():\n g = tf.get_default_graph()\n return set(g.get_collection(tf.GraphKeys.MODEL_VARIABLES))", "def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)", "def variables(self, *args, **kwargs):\n return self._optimizer.variables(*args, **kwargs)", "def get_vars(self):\n return [self.mu, self.var]", "def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]", "def get_variable_names(self):\n return [var[1] for var in self.variables]", "def get_variable_names(self):\n varNames = []\n for var in self.variables:\n # EstimationVariable\n varNames.append(var.name)\n return varNames", "def get_load_vars(self):\n all_vars = tf.compat.v1.global_variables()\n if self.params.cp_load_var is None:\n load_v = [v for v in all_vars if v not in self.full_model_load_ignore]\n else:\n load_v = []\n error_string = \"\\n\"\n for weight in self.params.cp_load_var:\n found=False\n for var in all_vars:\n error_string += \"\\t\" + var.name + \"\\n\"\n if var.name == weight:\n load_v.append(var)\n found=True\n break\n if not found:\n assert False, (\n \"Weight specified in cp_load_var \"+str(weight)+\" not found. All variables:\"+error_string)\n return load_v" ]
[ "0.78420085", "0.77309436", "0.77309436", "0.7696798", "0.76793075", "0.7623573", "0.75650865", "0.7555172", "0.75006074", "0.7315166", "0.7200911", "0.7186752", "0.7165649", "0.7107194", "0.70173025", "0.6995667", "0.69873655", "0.6917468", "0.6909653", "0.69072306", "0.6886075", "0.68454826", "0.67933214", "0.6777314", "0.66951007", "0.66498196", "0.66274095", "0.6607419", "0.6598396", "0.65920275" ]
0.77719444
1
Instantiate a CalcInterface object.
def __init__(self, proj=None, model=None, run=None, ens_mem=None, var=None,
             date_range=None, region=None, intvl_in=None, intvl_out=None,
             dtype_in_time=None, dtype_in_vert=None, dtype_out_time=None,
             dtype_out_vert=None, level=None, time_offset=None):
    if run not in model.runs:
        raise AttributeError("Model '{0}' has no run '{1}'. Calc object "
                             "will not be generated.".format(model, run))
    self.proj = proj
    self.model = model
    self.run = run
    self.default_start_date = self.run.default_start_date
    self.default_end_date = self.run.default_end_date
    self.data_loader = self.run.data_loader
    self.var = var
    self.name = self.var.name
    self.domain = self.var.domain
    self.def_time = self.var.def_time
    self.def_vert = self.var.def_vert
    try:
        self.function = self.var.func
    except AttributeError:
        self.function = lambda x: x
    if getattr(self.var, 'variables', False):
        self.variables = self.var.variables
    else:
        self.variables = (self.var,)
    self.ens_mem = ens_mem
    self.level = level
    self.intvl_in = intvl_in
    self.intvl_out = intvl_out
    self.dtype_in_time = dtype_in_time
    self.dtype_in_vert = dtype_in_vert
    self.ps = ps
    if isinstance(dtype_out_time, (list, tuple)):
        self.dtype_out_time = tuple(dtype_out_time)
    else:
        self.dtype_out_time = tuple([dtype_out_time])
    self.dtype_out_vert = dtype_out_vert
    self.region = region
    self.months = utils.times.month_indices(intvl_out)
    if date_range == 'default':
        self.start_date = utils.times.ensure_datetime(
            self.run.default_start_date)
        self.end_date = utils.times.ensure_datetime(
            self.run.default_end_date)
    else:
        self.start_date = utils.times.ensure_datetime(date_range[0])
        self.end_date = utils.times.ensure_datetime(date_range[-1])
    self.time_offset = time_offset
    self.data_loader_attrs = dict(
        domain=self.domain, intvl_in=self.intvl_in,
        dtype_in_vert=self.dtype_in_vert, dtype_in_time=self.dtype_in_time,
        intvl_out=self.intvl_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .indicadores_geosaude import IndicadoresGeosaude\n return IndicadoresGeosaude(iface)", "def createStandardInputObjFromCalcObjs(self, calcObjs):\n\t\treturn self._stdInpFromCalcObjs(self, calcObjs)", "def __init__(self, calcGrad, calcCost, input):\n\tself.calcGrad = calcGrad\n\tself.calcCost = calcCost\n\tself.input = np.asarray(input, dtype=np.float32)\n\tself.inp_shape = input.shape", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .quick_digitize import QuickDigitize\n return QuickDigitize(iface)", "def _get_cal_interface(self):\n plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')\n engine, info, extension = plugin()\n cal = plugin.get_interface(\"Cloud\")\n rwloggerctx = rwlogger.RwLog.Ctx.new(\"Cal-Log\")\n rc = cal.init(rwloggerctx)\n assert rc == RwTypes.RwStatus.SUCCESS\n\n return cal", "def _wrap_Interface(self, expr):\n functions = [self.scope.functions[self._wrapper_names_dict[f.name]] for f in expr.functions]\n functions = [f for f in functions if not isinstance(f, EmptyNode)]\n return Interface(expr.name, functions, expr.is_argument)", "def __init__(self, name, ic):\n\n self.ic = ic\n \n self.R_o = self.ic['RM']\n integrand = lambda x: x*x*self.gas_profile(x)\n self.M_o = 4.0*np.pi* integrate.quad(integrand, 0.0, self.R_o)[0]", "def __init__(self, logger, interface, numIntervals):\n\n self._log = logger\n self.interface = interface \n\n # counters\n self.countersData = CountersOperData() \n self._rxPacketsCounter = self.PeriodicCounter(numIntervals)\n self._txPacketsCounter = self.PeriodicCounter(numIntervals)\n self._rxBytesCounter = self.PeriodicCounter(numIntervals)\n self._txBytesCounter = self.PeriodicCounter(numIntervals)\n\n # rates\n self.rxPacketsPerSec = 0\n self.txPacketsPerSec = 0\n self.rxBitsPerSec = 0\n self.txBitsPerSec = 0", "def classFactory(iface):\n from .os_translator_ii import OsTranslatorII\n return OsTranslatorII(iface)", "def __init__(self):\n raise NotImplementedError('cannot create independent arc')", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .eco_valuator import EcoValuatorPlugin\n return EcoValuatorPlugin()", "def createInterface(self, iType, clsName, addr):\r\n uid = self._endpoint.getUID()\r\n interface = Interface(self._endpoint, self, uid)\r\n self.callRemote('createInterface', uid.bytes, iType, clsName,\r\n addr).chainDeferred(interface)\r\n return interface", "def setup_class(self):\n self.iqcalc = iqcalc_astropy.IQCalc(logger=self.logger)\n self.fwhm_funcs = (self.iqcalc.calc_fwhm_gaussian,\n self.iqcalc.calc_fwhm_moffat,\n self.iqcalc.calc_fwhm_lorentz)\n self.answers = ((2.8551, 2.7732), # Gaussian\n (2.77949, 2.6735), # Moffat\n (1.9570, 1.8113) # Lorentz\n )", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .nuclear_energy_plant_radiation_module import energy_plant_radiation_class\n return energy_plant_radiation_class(iface)", "def __init__(self):\n Cmd.__init__(self)\n self.calc = ReversePolishCalc()", "def __init__(self, interface, iType, clsName):\r\n super(Interface, self).__init__(interface)\r\n\r\n self.iType = iType\r\n self.clsName = clsName", "def __init__(self):\n super().__init__('drvr_06')\n self.comp = SimComp_6()", "def main():\n model = Calculator()", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDSS.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = 
itkIsoDataThresholdCalculatorHDUC.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSingleValuedCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, *args):\n this = _ida_hexrays.new_operand_locator_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, interface):\r\n\r\n self.interface = interface", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .osm_tools import OSMtools\n return OSMtools(iface)", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorIF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkPeakSignalToNoiseRatioCalculatorIF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHFSS.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, osi, fy, fu, es, esh, eps_sh, eps_ult, r1=0.333, r2=18.0, r3=4.0):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.es = float(es)\n self.esh = float(esh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.r1 = float(r1)\n self.r2 = float(r2)\n self.r3 = float(r3)\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.es, self.esh, self.eps_sh, self.eps_ult, '-MPCurveParams', self.r1, self.r2, self.r3]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)" ]
[ "0.55461043", "0.54615575", "0.5398776", "0.53714937", "0.53408873", "0.52831453", "0.5282024", "0.5174737", "0.5168434", "0.5158829", "0.51377535", "0.5136189", "0.5122206", "0.50964415", "0.5093284", "0.5087388", "0.50600004", "0.5042441", "0.5030675", "0.5029526", "0.5009233", "0.50090045", "0.50076795", "0.49936482", "0.49928302", "0.49801266", "0.49704167", "0.4966449", "0.49654427", "0.49528256" ]
0.57148844
0
Create string of the data directory to store a tar file.
def _dir_tar_out(self):
    ens_label = utils.io.ens_label(self.ens_mem)
    return os.path.join(self.proj.tar_direc_out, self.proj.name,
                        self.model.name, self.run.name, ens_label)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tmp_data_directory(tmp_path_factory):\n return str(tmp_path_factory.mktemp(\"datathon-mlapp-starter\"))", "def get_data_dir() -> str:\n os.makedirs(DEFAULT_OUTPUT_DIR, exist_ok=True)\n return DEFAULT_OUTPUT_DIR", "def datadir():\n return '../data/'", "def _create_data_directory(self):\n self.src_data_dir.mkdir(exist_ok=True, parents=True)", "def get_data_dir():\n\n data_dir = Path(get_project_dir() / 'data')\n data_dir.mkdir(parents=True, exist_ok=True)\n return data_dir", "def data_path(path: str, createdir: bool = False) -> str:\n path_obj = Path(path)\n if not path_obj.is_absolute():\n if inside_project():\n path_obj = Path(project_data_dir(), path)\n else:\n path_obj = Path(\".scrapy\", path)\n if createdir and not path_obj.exists():\n path_obj.mkdir(parents=True)\n return str(path_obj)", "def create_str_dir(config):\n\n # radiance directory\n os.makedirs(os.path.dirname(config['str_dir'] + '/radiance/'), exist_ok=True)\n # allsky images directory\n os.makedirs(os.path.dirname(config['str_dir'] + '/allsky/'), exist_ok=True)\n # simulation directory\n os.makedirs(os.path.dirname(config['str_dir'] + '/simulation/'), exist_ok=True)", "def data_dir():\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "def _add_data(self, data):\n _dir = self._add_dir()\n while 1:\n tmp = '%s/%s/%s%s' % (self.path, _dir, _name(self.rndhex),\n TEMPORARY_SUFFIX)\n try:\n if is_bytes(data):\n new_file = _file_create(tmp, umask=self.umask, utf8=False)\n else:\n new_file = _file_create(tmp, umask=self.umask, utf8=True)\n except EnvironmentError:\n error = sys.exc_info()[1]\n if error.errno == errno.ENOENT:\n _special_mkdir('%s/%s' % (self.path, _dir), self.umask)\n continue\n else:\n if new_file:\n break\n new_file.write(data)\n new_file.close()\n return _dir, tmp", "def data_directory(self):\n\n return self.get_raw(\"data_directory\")", "def data_dir():\n return os.path.join(os.path.dirname(_here), 'test', 'data')", "def tmpDir(package):\n\treturn 'debian/'+package", "def _create_user_data_dir(self):\n self._user_data_dir = tempfile.TemporaryDirectory(prefix=f'{self.USER_DATA_DIR_PREFIX}_tmp_')\n return self._user_data_dir.name", "def generate_structure(self):\n\n if self.basedir is None:\n raise ValueError('No base directory set.')\n\n data_folder = Path(self.basedir).joinpath(self.get_data_folder())\n data_folder.mkdir(parents=True, exist_ok=True)\n\n return data_folder", "def bear_data_dir(tmp_path_factory):\n base = tmp_path_factory.mktemp(\"bear_data\")\n db = base.joinpath(\"database.sqlite\")\n files = base.joinpath(\"Local Files\")\n\n if not db.is_file():\n create_bear_db(db)\n\n if not files.is_dir():\n create_bear_files(files)\n\n return base", "def data_dir(self) -> Path:\n return self._data_dir", "def _data_path(data_directory: str, name: str) -> str:\n if not os.path.isdir(data_directory):\n os.makedirs(data_directory)\n\n return os.path.join(data_directory, '{}.tfrecords'.format(name))", "def output_dir(tmpdir):\n return str(tmpdir.mkdir('templates'))", "def destDir(file, package, type='files'):\n\treturn tmpDir(package)+'/etc/univention/templates/'+type+'/'+os.path.dirname(file)", "def createdatafolder(name):\n folder = os.path.join(pathtofolder(),name)\n os.makedirs(folder)\n pass", "def datadir(cls): # pylint: disable=E0213\n mod = sys.modules[cls.__module__]\n return osp.join(osp.dirname(osp.abspath(mod.__file__)), 'data')", "def data_dir():\n return _config.datadir", "def user_data_dir():\n # TODO: hardcoded\n app_name = 'etheroll'\n data_dir = 
os.path.join('/sdcard', app_name)\n data_dir = os.path.expanduser(data_dir)\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n return data_dir", "def get_data_dir(self):\n return self.data_dir", "def tar_job(self):\n curr_dir = os.getcwd()\n\n os.chdir(self.temp_project_path)\n logging.log(level=logging.INFO, msg=\"Tarring up Filesystem and Environment\")\n tar_name = f\"{self.project_id}_fs\"\n tar_persistor = TarPersistor(base_file_name=tar_name,\n folder=\".\",\n paths_to_tar=os.listdir(),\n extract_path=False)\n _ = tar_persistor.persist()\n\n os.chdir(curr_dir)\n\n tar_path = os.path.join(self.temp_project_path, tar_name) + \".tar\"\n return tar_path", "def get_data_path():\n return os.getcwd() + \"/data/\"", "def test_data_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "def data_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\")", "def data_dir(self):\r\n return self._data_dir", "def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path" ]
[ "0.668783", "0.66433996", "0.65262216", "0.64303285", "0.6403059", "0.6379541", "0.63335735", "0.6287801", "0.6276481", "0.6238254", "0.62279934", "0.62135416", "0.6202375", "0.6194497", "0.6191514", "0.618291", "0.6157835", "0.6141365", "0.6122145", "0.6097494", "0.6095356", "0.6094306", "0.60847414", "0.60808706", "0.6078516", "0.6078228", "0.6060987", "0.6055748", "0.604946", "0.6032441" ]
0.6959594
0
Add model grid attributes to a dataset
def _add_grid_attributes(self, ds):
    for name_int, names_ext in self._grid_attrs.items():
        ds_coord_name = set(names_ext).intersection(set(ds.coords) |
                                                    set(ds.data_vars))
        model_attr = getattr(self.model, name_int, None)
        if ds_coord_name and (model_attr is not None):
            # Force coords to have desired name.
            ds = ds.rename({list(ds_coord_name)[0]: name_int})
            ds = ds.set_coords(name_int)
            if not np.array_equal(ds[name_int], model_attr):
                if np.allclose(ds[name_int], model_attr):
                    msg = ("Values for '{0}' are nearly (but not exactly) "
                           "the same in the Run {1} and the Model {2}. "
                           "Therefore replacing Run's values with the "
                           "model's.".format(name_int, self.run, self.model))
                    logging.info(msg)
                    ds[name_int].values = model_attr.values
                else:
                    msg = ("Model coordinates for '{0}' do not match those"
                           " in Run: {1} vs. {2}"
                           "".format(name_int, ds[name_int], model_attr))
                    logging.info(msg)
        else:
            # Bring in coord from model object if it exists.
            ds = ds.load()
            if model_attr is not None:
                ds[name_int] = model_attr
                ds = ds.set_coords(name_int)
    if (self.dtype_in_vert == 'pressure' and
            internal_names.PLEVEL_STR in ds.coords):
        self.pressure = ds.level
    return ds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepopulate(self, model, exclude=[]):\n for col in model.columns():\n if col not in exclude and hasattr(self, col):\n setattr(getattr(self, col), 'data', getattr(model, col))", "def attribute(self, data, model, model_name):", "def _write_attributes_(self):\n #Open the Netcdf GRID file output from PreMOD\n try: dataset = Dataset(self.netcdf_file,'r+',format='NETCDF4')\n except Exception, e:\n print \"ERROR: %s\" % e\n sys.exit()\n\n dataset.title = self.title \n dataset.description = self.description\n dataset.ngh_file = self.ngh_file\n dataset.rtr_file = self.rtr_file\n dataset.netcdf_file = self.netcdf_file\n dataset.epsg = 4326\n dataset.close()", "def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))", "def add_to_dataset(self, dataset: Dataset):\n pass", "def read_attributes(self, dataset):\n if 'attributes' in self.configs:\n for key, value in self.configs['attributes'].items():\n setattr(dataset, key, value)", "def _attach_metadata(self):\n self.dataset.create_metadata(\"watertightness\", \"float\", \"1.0 if the mesh is watertight, 0.0 if it is not\")\n self.dataset.attach_metadata_func(\"watertightness\", DexNet.is_watertight, overwrite=False, store_func=True)\n self.dataset.create_metadata(\"num_con_comps\", \"float\", \"Number of connected components (may not be watertight) in the mesh\")\n self.dataset.attach_metadata_func(\"num_con_comps\", object(), overwrite=False, store_func=True)", "def _load_grid(self):\n\n grid_metrics = ['nbe', 'ntsn', 'nbsn', 'ntve', 'nbve', 'art1', 'art2', 'a1u', 'a2u']\n grid_variables = ['lon', 'lat', 'x', 'y', 'lonc', 'latc', 'xc', 'yc',\n 'h', 'siglay', 'siglev']\n\n # Get the grid data.\n for grid in grid_variables:\n try:\n setattr(self.grid, grid, self.ds.variables[grid][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[grid].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[grid], attribute))\n setattr(self.atts, grid, attributes)\n except KeyError:\n # Make zeros for this missing variable so we can convert from the non-missing data below.\n if grid.endswith('c'):\n setattr(self.grid, grid, np.zeros(self.dims.nele).T)\n else:\n setattr(self.grid, grid, np.zeros(self.dims.node).T)\n except ValueError as value_error_message:\n warn('Variable {} has a problem with the data. Setting value as all zeros.'.format(grid))\n print(value_error_message)\n setattr(self.grid, grid, np.zeros(self.ds.variables[grid].shape))\n\n # Load the grid metrics data separately as we don't want to set a bunch of zeros for missing data.\n for metric in grid_metrics:\n if metric in self.ds.variables:\n setattr(self.grid, metric, self.ds.variables[metric][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[metric].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[metric], attribute))\n setattr(self.atts, metric, attributes)\n\n # Fix the indexing and shapes of the grid metrics variables. 
Only transpose and offset indexing for nbe.\n try:\n if metric == 'nbe':\n setattr(self.grid, metric, getattr(self.grid, metric).T - 1)\n else:\n setattr(self.grid, metric, getattr(self.grid, metric))\n except AttributeError:\n # We don't have this variable, so just pass by silently.\n pass\n\n try:\n self.grid.nv = self.ds.variables['nv'][:].astype(int) # force integers even though they should already be so\n self.grid.triangles = copy.copy(self.grid.nv.T - 1) # zero-indexed for python\n except KeyError:\n # If we don't have a triangulation, make one.\n triangulation = tri.Triangulation(self.grid.lon, self.grid.lat)\n self.grid.triangles = triangulation.triangles\n self.grid.nv = self.grid.triangles.T + 1\n\n # Fix broken triangulations if necessary.\n if self.grid.nv.min() != 1:\n if self._debug:\n print('Fixing broken triangulation. Current minimum for nv is {} and for triangles is {} but they '\n 'should be 1 and 0, respectively.'.format(self.grid.nv.min(), self.grid.triangles.min()))\n self.grid.nv = (self.ds.variables['nv'][:].astype(int) - self.ds.variables['nv'][:].astype(int).min()) + 1\n self.grid.triangles = copy.copy(self.grid.nv.T) - 1\n\n # If we've been given an element dimension to subsample in, fix the triangulation here. We should really do\n # this for the nodes too.\n if 'nele' in self._dims:\n if self._debug:\n print('Fix triangulation table as we have been asked for only specific elements.')\n print('Triangulation table minimum/maximum: {}/{}'.format(self.grid.nv[:, self._dims['nele']].min(),\n self.grid.nv[:, self._dims['nele']].max()))\n # Redo the triangulation here too.\n new_nv = copy.copy(self.grid.nv[:, self._dims['nele']])\n for i, new in enumerate(np.unique(new_nv)):\n new_nv[new_nv == new] = i\n self.grid.nv = new_nv + 1\n self.grid.triangles = new_nv.T\n\n # Update dimensions to match those we've been given, if any. Omit time here as we shouldn't be touching that\n # dimension for any variable in use in here.\n for dim in self._dims:\n if dim != 'time':\n setattr(self.dims, dim, len(self._dims[dim]))\n\n # Add compatibility for FVCOM3 (these variables are only specified on the element centres in FVCOM4+ output\n # files). Only create the element centred values if we have the same number of nodes as in the triangulation.\n # This does not occur if we've been asked to extract an incompatible set of nodes and elements, for whatever\n # reason (e.g. testing). We don't add attributes for the data if we've created it as doing so is a pain.\n for var in 'h_center', 'siglay_center', 'siglev_center':\n try:\n setattr(self.grid, var, self.ds.variables[var][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[var].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[var], attribute))\n setattr(self.atts, var, attributes)\n except KeyError:\n if self.grid.nv.max() == len(self.grid.x):\n try:\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]), self.grid.triangles))\n except IndexError:\n # Maybe the array's the wrong way around. 
Flip it and try again.\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]).T, self.grid.triangles))\n\n # Convert the given W/E/S/N coordinates into node and element IDs to subset.\n if self._bounding_box:\n self._dims['node'] = np.argwhere((self.grid.lon > self._dims['wesn'][0]) &\n (self.grid.lon < self._dims['wesn'][1]) &\n (self.grid.lat > self._dims['wesn'][2]) &\n (self.grid.lat < self._dims['wesn'][3])).flatten()\n self._dims['nele'] = np.argwhere((self.grid.lonc > self._dims['wesn'][0]) &\n (self.grid.lonc < self._dims['wesn'][1]) &\n (self.grid.latc > self._dims['wesn'][2]) &\n (self.grid.latc < self._dims['wesn'][3])).flatten()\n\n # If we've been given dimensions to subset in, do that now. Loading the data first and then subsetting\n # shouldn't be a problem from a memory perspective because if you don't have enough memory for the grid data,\n # you probably won't have enough for actually working with the outputs. Also update dimensions to match the\n # given dimensions.\n if 'node' in self._dims:\n self.dims.node = len(self._dims['node'])\n for var in 'x', 'y', 'lon', 'lat', 'h', 'siglay', 'siglev':\n try:\n node_index = self.ds.variables[var].dimensions.index('node')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[node_index] = self.dims.node\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n elif 'siglev' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n else:\n for ni, node in enumerate(self._dims['node']):\n _temp[..., ni] = self.ds.variables[var][..., node]\n except KeyError:\n if 'siglay' in var:\n _temp = np.empty((self.dims.siglay, self.dims.node))\n elif 'siglev' in var:\n _temp = np.empty((self.dims.siglev, self.dims.node))\n else:\n _temp = np.empty(self.dims.node)\n setattr(self.grid, var, _temp)\n if 'nele' in self._dims:\n self.dims.nele = len(self._dims['nele'])\n for var in 'xc', 'yc', 'lonc', 'latc', 'h_center', 'siglay_center', 'siglev_center':\n try:\n nele_index = self.ds.variables[var].dimensions.index('nele')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[nele_index] = self.dims.nele\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n elif 'siglev' in 
self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n else:\n for ni, nele in enumerate(self._dims['nele']):\n _temp[..., ni] = self.ds.variables[var][..., nele]\n except KeyError:\n # FVCOM3 files don't have h_center, siglay_center and siglev_center, so make var_shape manually.\n if var.startswith('siglev'):\n var_shape = [self.dims.siglev, self.dims.nele]\n elif var.startswith('siglay'):\n var_shape = [self.dims.siglay, self.dims.nele]\n else:\n var_shape = self.dims.nele\n _temp = np.zeros(var_shape)\n setattr(self.grid, var, _temp)\n\n # Check if we've been given vertical dimensions to subset in too, and if so, do that. Check we haven't\n # already done this if the 'node' and 'nele' sections above first.\n for var in 'siglay', 'siglev', 'siglay_center', 'siglev_center':\n short_dim = copy.copy(var)\n # Assume we need to subset this one unless 'node' or 'nele' are missing from self._dims. If they're in\n # self._dims, we've already subsetted in the 'node' and 'nele' sections above, so doing it again here\n # would fail.\n subset_variable = True\n if 'node' in self._dims or 'nele' in self._dims:\n subset_variable = False\n # Strip off the _center to match the dimension name.\n if short_dim.endswith('_center'):\n short_dim = short_dim.split('_')[0]\n if short_dim in self._dims:\n if short_dim in self.ds.variables[var].dimensions and subset_variable:\n _temp = getattr(self.grid, var)[self._dims[short_dim], ...]\n setattr(self.grid, var, _temp)\n\n # Check ranges and if zero assume we're missing that particular type, so convert from the other accordingly.\n self.grid.lon_range = np.ptp(self.grid.lon)\n self.grid.lat_range = np.ptp(self.grid.lat)\n self.grid.lonc_range = np.ptp(self.grid.lonc)\n self.grid.latc_range = np.ptp(self.grid.latc)\n self.grid.x_range = np.ptp(self.grid.x)\n self.grid.y_range = np.ptp(self.grid.y)\n self.grid.xc_range = np.ptp(self.grid.xc)\n self.grid.yc_range = np.ptp(self.grid.yc)\n\n # Only do the conversions when we have more than a single point since the relevant ranges will be zero with\n # only one position.\n if self.dims.node > 1:\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.lon, self.grid.lat = lonlat_from_utm(self.grid.x, self.grid.y, zone=self._zone)\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.x, self.grid.y, _ = utm_from_lonlat(self.grid.lon, self.grid.lat)\n if self.dims.nele > 1:\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.lonc, self.grid.latc = lonlat_from_utm(self.grid.xc, self.grid.yc, zone=self._zone)\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.xc, self.grid.yc, _ = utm_from_lonlat(self.grid.lonc, self.grid.latc)", "def data(self, data):\n if data is None:\n return\n if type(data) != pd.DataFrame:\n logger.warning(\n \"Data is not a pandas data frame, trying to read data frame \"\n \"from csv\")\n try:\n data = pd.read_csv(data)\n except:\n logger.error(\"Could not load pandas data frame from data\")\n raise BaseException('Cannot load data')\n logger.info('Adding data to GeologicalModel with {} data points'.format(len(data)))\n self._data = data.copy()\n self._data['X'] -= self.origin[0]\n self._data['Y'] -= self.origin[1]\n self._data['Z'] -= self.origin[2]\n self._data['X'] /= self.scale_factor\n self._data['Y'] /= self.scale_factor\n 
self._data['Z'] /= self.scale_factor\n if 'type' in self._data:\n logger.warning(\"'type' is depreciated replace with 'feature_name' \\n\")\n self._data.rename(columns={'type':'feature_name'},inplace=True)\n if 'feature_name' not in self._data:\n logger.error(\"Data does not contain 'feature_name' column\")\n raise BaseException('Cannot load data')\n for h in all_heading():\n if h not in self._data:\n self._data[h] = np.nan\n if h == 'w':\n self._data[h] = 1.\n if h == 'coord':\n self._data[h] = 0\n self.data.loc[np.isnan(self.data['w']),'w'] = 1.\n if 'strike' in self._data and 'dip' in self._data:\n logger.info('Converting strike and dip to vectors')\n mask = np.all(~np.isnan(self._data.loc[:, ['strike', 'dip']]),\n axis=1)\n self._data.loc[mask, gradient_vec_names()] = strike_dip_vector(\n self._data.loc[mask, 'strike'], self._data.loc[mask, 'dip'])\n self._data.drop(['strike', 'dip'], axis=1, inplace=True)", "def extend_model_data(self, newdata):\n logger.warning(\"Extend data is untested and may have unexpected consequences\")\n data_temp = newdata.copy()\n data_temp['X'] -= self.origin[0]\n data_temp['Y'] -= self.origin[1]\n data_temp['Z'] -= self.origin[2]\n data_temp['X'] /= self.scale_factor\n data_temp['Y'] /= self.scale_factor\n data_temp['Z'] /= self.scale_factor\n self.data.concat([self.data, data_temp], sort=True)", "def _set_attributes(self):", "def add_attributes(data, **kwargs):\n for key in kwargs:\n data[key] = kwargs[key]", "def set_global_attributes(ds):\n ds.title = \"LPDM CO2 Concentration Footprints\"\n ds.summary = (\"Gridded CO2 concentration footprints from the output \"\n \"of the Lagrangian Particle Dispersion model \"\n \"described in Uliasz 1994.\")\n ds.Conventions = \"CF-1.6 ACDD-1.3\"\n ds.history = (\"{date:{acdd_format}} {user:s} \"\n \"created by {progname:s}\").format(\n date=RUN_DATE, user=os.environ[\"USER\"],\n acdd_format=ACDD_DATE,\n progname=sys.argv[0])\n ds.source = (\"Gridded outputs from LPDM v?.?.? \"\n \"written by Uliasz et al. and modified by Lauvaux\")\n ds.standard_name_vocabulary = \"CF Standard Name Table v32\"\n ds.date_created = \"{date:{acdd_format}}\".format(\n date=RUN_DATE, acdd_format=ACDD_DATE)\n ds.creator_name = \"Daniel Wesloh, Thomas Lauvaux\"\n ds.creator_institution = (\n \"The Pennsylvania State University \"\n \"Department of Meteorology and Atmospheric Science\")\n ds.date_modified = \"{date:{acdd_format}}\".format(\n date=RUN_DATE, acdd_format=ACDD_DATE)\n ds.date_metadata_modified = \"{date:{acdd_format}}\".format(\n date=RUN_DATE, acdd_format=ACDD_DATE)\n ds.product_version = \"Py_v1.0.0\"\n ds.references = \"\"\"Uliasz, M. 1994. Lagrangian particle dispersion modeling in mesoscale applications. 
Environ Model: Comput Methods and Softw for Simulat Environ Pollut and its Adverse Effects (CMP) 2 : 71-.\"\"\"\n\n ds.geospatial_vertical_min = 0\n ds.geospatial_vertical_max = CLOSE_TO_GROUND\n ds.geospatial_vertical_positive = \"up\"\n ds.geospatial_vertical_units = \"km AGL\"\n # Kind of a cross between Grid and Trajectory\n # Grid covers the first and last two axes;\n # trajectory covers third-to-last\n ds.cdm_data_type = \"Grid\"\n\n ds.institution = ds.creator_institution", "def _add_node_attributes(self):\n ensemble_mapping = SankeyLayout._ensemble_map(\n df=self.supergraph.gf.df, nxg=self.nxg, columns=SankeyLayout._COLUMNS\n )\n for idx, key in enumerate(ensemble_mapping):\n nx.set_node_attributes(self.nxg, name=key, values=ensemble_mapping[key])\n\n dataset_mapping = {}\n for run in self.runs:\n dataset_mapping[run] = SankeyLayout._dataset_map(\n df=self.supergraph.gf.df,\n nxg=self.nxg,\n tag=run,\n columns=SankeyLayout._COLUMNS,\n )\n nx.set_node_attributes(\n self.nxg, name=self.supergraph.tag, values=dataset_mapping[run]\n )", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def add_metadata(ds, metadata):\n\n ds.attrs.update(metadata)\n\n return ds", "def _add_metadata_as_attrs(data, units, description, dtype_out_vert):\n if isinstance(data, xr.DataArray):\n return _add_metadata_as_attrs_da(data, units, description,\n dtype_out_vert)\n else:\n for name, arr in data.data_vars.items():\n _add_metadata_as_attrs_da(arr, units, description,\n dtype_out_vert)\n return data", "def fill_attributes(ml_file, other_file):\n with xr.load_dataset(other_file) as other:\n with xr.open_dataset(ml_file) as ml:\n for variable in other.variables:\n if variable in ml.variables:\n other[variable].attrs = ml[variable].attrs\n other.to_netcdf(other_file)", "def transform(self, data, attr):\n data['point'] = torch.from_numpy(data['point'])\n data['feat'] = torch.from_numpy(data['feat'])\n data['label'] = torch.from_numpy(data['label'])\n\n return data", "def add_data(self, d, prop_title):\r\n ac = vtk.vtkDoubleArray()\r\n ac.SetName(prop_title)\r\n for iac in d.flatten(order='C'):\r\n ac.InsertNextTuple1(iac)\r\n self.Grid.GetCellData().AddArray(ac)", "def set_additional_fields(cls, model, data):\n for k, v in data.items():\n if not hasattr(model, k):\n setattr(model, k, v)", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def set_data(self, dataset):\n if dataset is not None:\n self.infoa.setText('%d instances in input data set' % len(dataset))\n self.infob.setText('%d attributes in input data set' % len(dataset.domain.attributes))\n # Limited the batch size between 0.005 to 0.025, in\n # order tk=o make training fats and 
also accurate\n if(len(dataset) >= 200):\n self.batchsize = int(0.005 * len(dataset))\n self.batch_spin.setMinimum(int(0.005 * len(dataset)))\n self.batch_spin.setMaximum(int(0.025 * len(dataset)))\n else:\n # here the dataset is to small, hence fixed the\n # batch size programmatically\n self.batchsize = 1\n self.batch_spin.setMinimum(1)\n self.batch_spin.setMaximum(10)\n self.optionsBox.setDisabled(False)\n self.layerBox.setDisabled(False)\n self.updateLayer()\n self.dataset = dataset\n self.save_button.setDisabled(True)\n\n else:\n self.infoa.setText('No data on input yet, waiting to get something.')\n self.infob.setText('')\n self.optionsBox.setDisabled(True)\n self.layerBox.setDisabled(True)\n self.dataset = None", "def assign_set_data(name,data):\n df = DataFrame(name)\n df.setColumn(name,data)\n ampl.setData(df,name)", "def updateAttributesAfterAdding(self):\n layer = self.sender()\n while self.addedFeatures:\n featureId = self.addedFeatures.pop()\n #begining the edit command\n # layer.beginEditCommand(self.tr(\"DSG Tools reclassification tool: adjusting feature's attributes\"))\n #accessing added features\n editBuffer = layer.editBuffer()\n features = editBuffer.addedFeatures()\n for key in features.keys():\n #just checking the newly added feature, the other I don't care\n if key == featureId:\n feature = features[key]\n #setting the attributes using the reclassification dictionary\n self.setFeatureAttributes(feature, editBuffer)\n layer.endEditCommand()", "def setattrs(self, data, attrlist, id_array=None):\n\t\tassert len(data) == len(attrlist)\n\t\tfor d, attr in zip(data, attrlist):\n\t\t\tif id_array == None: setattr(self, attr, d)\n\t\t\telse:getattr(self, attr)[id_array] = d # Setting 1d array elements", "def add_attributes(self, attrs):\n self.attrs.add_attributes(attrs)", "def add_attributes(self, attrs):\n self.attrs.add_container(attrs)", "def _set_element(self,data):\n self._element._labels = [list(data[0]), list(data[1])]\n self._element._twin = [list(data[2]), list(data[3])]", "def setData(self, data):\n self.data = data\n dagPath, components = self.__getGeometryComponents()\n self.setInfluenceWeights(dagPath, components)\n self.setBlendWeights(dagPath, components)\n\n for attr in ['skinningMethod', 'normalizeWeights']:\n cmds.setAttr('%s.%s' % (self.node, attr), self.data[attr])" ]
[ "0.6093668", "0.60661185", "0.59482807", "0.5856596", "0.5852455", "0.5736706", "0.57010055", "0.5676482", "0.55973953", "0.5530213", "0.5523712", "0.5513159", "0.5505337", "0.5502275", "0.54875046", "0.5467387", "0.5464971", "0.54523236", "0.5370031", "0.53413594", "0.5339471", "0.5326623", "0.5308186", "0.52959245", "0.5290312", "0.5290014", "0.52749383", "0.52721786", "0.52709794", "0.5262952" ]
0.7780318
0
Get pressure or pressure thickness array for data on pcoords.
def _get_pressure_from_p_coords(self, ps, name='p'):
    if np.any(self.pressure):
        pressure = self.pressure
    else:
        pressure = self.model.level
    if name == 'p':
        return pressure
    if name == 'dp':
        return utils.vertcoord.dp_from_p(pressure, ps)
    raise ValueError("name must be 'p' or 'dp':"
                     "'{}'".format(name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pshape(self):\n try:\n return plist([x.pshape() for x in self], root=self.__root__)\n except Exception:\n return plist([len(self)], root=self.__root__)", "def pressure(x, kind=\"geopotential\"):\n\n p = table(x, kind)[2]\n return p", "def _get_pdi(cls, data, windows):\n\t\twindow = cls.get_only_one_positive_int(windows)\n\t\tpdm_column = 'pdm_{}'.format(window)\n\t\ttr_column = 'atr_{}'.format(window)\n\t\tpdi_column = 'pdi_{}'.format(window)\n\t\tdata[pdi_column] = data[pdm_column] / data[tr_column] * 100\n\t\treturn data[pdi_column]", "def getDimensions(self):\n\t\tprint \"Returning\",self.x,self.y,self.slicesPerTimepoint\n\t\treturn (self.x, self.y, self.slicesPerTimepoint)", "def GetPolygons(self):\n if not self.VTKObject.GetPolys():\n return None\n return vtkDataArrayToVTKArray(\n self.VTKObject.GetPolys().GetData(), self)", "def P(self):\n return self.generic_getter(get_pressure, \"p\", \"convert_pressure\")", "def getPTData(*args):\n return args[0].Data.PTData.pt_data", "def points(self):\n return self._arr.T.ravel().view(\n dtype=[('x', self.dtype), ('y', self.dtype), ('z', self.dtype)])", "def _calc_pp_pwl_points(ppc_pwl_points):\n\n def construct_list_of_list(row):\n arr = pts[row, ::2]\n arr = np.concatenate((arr[:1], np.repeat(arr[1:-1], 2), arr[-1:])).reshape((-1, 2))\n arr = np.c_[arr, c[row, :]]\n arr = arr[~np.isnan(arr[:, 2])]\n return arr.tolist()\n\n pts = ppc_pwl_points\n if not (pts.shape[1] % 2) == 0:\n raise ValueError(\"_calc_pp_pwl_points() expects ppc_pwl_points with shape[1] is \"\n f\"multiple of 2. However, ppc_pwl_points.shape[1]={ppc_pwl_points}.\")\n c = (pts[:, 3::2] - pts[:, 1:-2:2]) / (pts[:, 2::2] - pts[:, :-2:2])\n return [construct_list_of_list(row) for row in range(pts.shape[0])]", "def _get_pressure_from_eta_coords(self, ps, name='p'):\n bk = self.model.bk\n pk = self.model.pk\n pfull_coord = self.model.pfull\n if name == 'p':\n return utils.vertcoord.pfull_from_ps(bk, pk, ps, pfull_coord)\n if name == 'dp':\n return utils.vertcoord.dp_from_ps(bk, pk, ps, pfull_coord)\n raise ValueError(\"name must be 'p' or 'dp':\"\n \"'{}'\".format(name))", "def p(self) -> np.ndarray:\n return self._vector[18:20]", "def GetPointToOneRingPointsArray(self, p_int):\n ...", "def _prepare_data(self, coords):\n return np.array([coords])", "def provide_data(self):\n return [(k, v.shape) for k, v in self.data]", "def as_numpy_array_2D(self):\n wx = []\n wy = []\n for wp in self.waypoints:\n wx.append(wp.location.x)\n wy.append(wp.location.y)\n return np.array([wx, wy])", "def get_points(self):\n return self._quadrature_points", "def shape(self):\n for component in ('x', 'y', 'z', 'r', 't'):\n arr = getattr(self, component)\n if arr is not None:\n return arr.shape\n return ()", "def pvalues(self) -> np.ndarray:\n\n poialt = None\n return self.calculator.pvalue(\n poinull=self.poinull, poialt=poialt, qtilde=self.qtilde, onesided=False\n )[0]", "def get_shape(self):\n if not self.channels_first:\n return [None] + self.w + [self.numoffeatures]\n else:\n return [None] + [self.numoffeatures] + self.w", "def dlib_point_to_np_array(point: dlib.point):\n return np.array([point.x, point.y])", "def getDimensions():", "def ppix(self):\n return self._ppix", "def obtain_points(self):\n # Swapaxes makes the output a column rather than a row\n return np.swapaxes(np.array([np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateX\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateY\"][\" data\"][:, 
:, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateZ\"][\" data\"][:, :, :])]), 0, 1)", "def getPureVaporPressures(self,T):\n\t\tanswer = list()\n\t\tfor c in self.components:\n\t\t\tanswer.append( c.getPureComponentVaporPressure(T) )\n\t\treturn numpy.array(answer)", "def curve_to_arrays(p, n, d):\n x_list = []\n y_list = []\n\n for i in range(0, n, 1):\n x_list.append(calc_value(p.x, 0 + i * d))\n y_list.append(calc_value(p.y, 0 + i * d))\n\n return x_list, y_list", "def get_points(self):\r\n return self.nx*self.ny*self.nz", "def _get_shape(t):\n return [x.value for x in t.shape]", "def Pcoord(modName=\"CylR\"):\n\t\tmodelNode = slicer.util.getNode(modName) # Read the node (module)\n\t\tsr = modelNode.GetPolyData() # module turn polygons\n\t\tpxyz = [0, 0, 0]\n\t\tNumP = sr.GetNumberOfPoints() # The number of points in the polygon\n\t\tfor i in range(NumP // 2): # circulate: i=NumP//2\n\t\t\tsr.GetPoint(i, pxyz) # Get the point coordinates in turn\n\t\t\t# becomes a matrix\n\t\t\tif i == 0:\n\t\t\t\tPxyz = np.array([pxyz])\n\t\t\telse:\n\t\t\t\tPxyz = np.append(Pxyz, np.array([pxyz]), axis=0)\n\t\taxisMed0 = (Pxyz[0] + Pxyz[NumP // 4]) / 2\n\t\taxisMed1 = (Pxyz[1] + Pxyz[1 + NumP // 4]) / 2\n\t\tdimeter = np.linalg.norm(Pxyz[0] - Pxyz[NumP // 4])\n\t\treturn np.array([axisMed0, axisMed1]), np.around(dimeter), Pxyz", "def _get_poly_coords(self, geometry, coord_type):\n ext = geometry.exterior\n return self._get_xy_coords(ext, coord_type)", "def shape(data):\n if hasattr(data, \"shape\"):\n return list(data.shape)\n else:\n try:\n length = len(data)\n return [length] + shape(data[0])\n except TypeError:\n return []" ]
[ "0.55917585", "0.5577419", "0.5542342", "0.53993183", "0.5391667", "0.5391056", "0.5374878", "0.5368606", "0.5305264", "0.5304141", "0.5295931", "0.5269179", "0.5235801", "0.52351403", "0.5233223", "0.5221241", "0.5218153", "0.51947767", "0.51916176", "0.51729184", "0.51564145", "0.5133708", "0.5122776", "0.5102254", "0.5088119", "0.50750756", "0.5070271", "0.50611967", "0.50529355", "0.50221646" ]
0.5965626
0
Get the data for a single variable over the desired date range.
def _get_input_data(self, var, start_date, end_date):
    logging.info(self._print_verbose("Getting input data:", var))
    # Pass numerical constants as is.
    if isinstance(var, (float, int)):
        return var
    # aospy.Var objects remain.
    # Pressure handled specially due to complications from sigma vs. p.
    elif var.name in ('p', 'dp'):
        data = self._get_pressure_vals(var, start_date, end_date)
        if self.dtype_in_vert == internal_names.ETA_STR:
            return self._to_desired_dates(data)
        return data
    # Get grid, time, etc. arrays directly from model object
    elif var.name in (internal_names.LAT_STR, internal_names.LON_STR,
                      internal_names.TIME_STR, internal_names.PLEVEL_STR,
                      internal_names.PK_STR, internal_names.BK_STR,
                      internal_names.SFC_AREA_STR):
        data = getattr(self.model, var.name)
    else:
        cond_pfull = ((not hasattr(self, internal_names.PFULL_STR)) and
                      var.def_vert and
                      self.dtype_in_vert == internal_names.ETA_STR)
        data = self.data_loader.load_variable(var, start_date, end_date,
                                              self.time_offset,
                                              **self.data_loader_attrs)
        name = data.name
        data = self._add_grid_attributes(data.to_dataset(name=data.name))
        data = data[name]
        if cond_pfull:
            try:
                self.pfull_coord = data[internal_names.PFULL_STR]
            except KeyError:
                pass
        # Force all data to be at full pressure levels, not half levels.
        bool_to_pfull = (self.dtype_in_vert == internal_names.ETA_STR and
                         var.def_vert == internal_names.PHALF_STR)
        if bool_to_pfull:
            data = utils.vertcoord.to_pfull_from_phalf(data,
                                                       self.pfull_coord)
    if var.def_time:
        # Restrict to the desired dates within each year.
        if self.dtype_in_time != 'av':
            return self._to_desired_dates(data)
    else:
        return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_data(data=pd.DataFrame(), date_initial=\"2005-01-01\", date_final=\"2019-12-31\"):\n data = data[data.index >= date_initial]\n data = data[data.index <= date_final]\n return data", "def get_records_date(start_date, end_date):\n start = minus_one(start_date)\n temp = pd.read_sql_query(_query['by_date'],\n connect(),\n params=[start, end_date])\n return temp", "def get_data(table_name, end, num, start=None):\n if start == None:\n if table_name == \"days\": start = end - timedelta(days=num-1) \n if table_name == \"weeks\": start = end - timedelta(weeks=num-1) \n if table_name == \"months\": start = end - relativedelta(months=+num-1) \n if table_name == \"years\": start = end - relativedelta(years=+num-1) \n else: \n start = days.get_entry(table_name, start).date\n \n dates = []\n data = []\n weather = []\n density = []\n \n while start <= end:\n entry = days.get_entry(table_name, start)\n data.append(entry.sentiment)\n \n if table_name == \"days\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(days=1)\n if table_name == \"weeks\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(weeks=1) \n if table_name == \"months\": \n dates.append(entry.date.strftime(\"%B %Y\"))\n start = start + relativedelta(months=+1) \n if table_name == \"years\": \n dates.append(entry.date.strftime(\"%Y\"))\n start = start + relativedelta(years=+1) \n\n # 7/15/15 is the last entry in the current weather dictionary\n num_days = (min(start, date(2015,7,15)) - entry.date).days\n temp = {entry.date + timedelta(days=i): weather_dict[entry.date + timedelta(days=i)] for i in range(num_days)}\n weather.append(float(sum(temp.values()))/float(len(temp)))\n\n if density_dict != None:\n d = max(entry.date, date(2014,7,1))\n num_days = (min(start, date(2015,7,28)) - d).days\n rho = {d + timedelta(days=i): density_dict[d + timedelta(days=i)] for i in range(num_days)}\n density.append(float(sum(rho.values()))/float(len(rho)))\n\n return dates, data, weather, density", "def get_values_by_date(now, request):\n reg_data = get_reg_data(now, request)\n data = {\n \"is_after_7d_before_last_instruction\":\n is_after_7d_before_last_instruction(now, request),\n \"is_after_grade_submission_deadline\":\n is_before_bof_term(now, request),\n \"is_after_last_day_of_classes\":\n not is_before_last_day_of_classes(now, request),\n \"is_after_start_of_registration_display_period\":\n reg_data[\"after_start\"],\n \"is_after_start_of_summer_reg_display_period1\":\n reg_data[\"after_summer1_start\"],\n \"is_after_start_of_summer_reg_display_periodA\":\n reg_data[\"after_summerA_start\"],\n \"is_before_eof_7days_of_term\":\n is_before_eof_7d_after_class_start(now, request),\n \"is_before_end_of_finals_week\":\n is_before_eof_finals_week(now, request),\n \"is_before_end_of_registration_display_period\":\n reg_data[\"after_start\"],\n \"is_before_end_of_summer_reg_display_periodA\":\n reg_data[\"after_summerA_start\"],\n \"is_before_end_of_summer_reg_display_period1\":\n reg_data[\"after_summer1_start\"],\n \"is_before_first_day_of_term\":\n is_before_bof_term(now, request),\n \"is_before_last_day_of_classes\":\n is_before_last_day_of_classes(now, request),\n \"myplan_peak_load\": during_myplan_peak_load(now, request),\n \"reg_period1_started\": reg_data[\"period1_started\"],\n \"is_summer\": is_in_summer_quarter(request),\n \"is_after_summer_b\": is_in_summer_b_term(request),\n \"in_coursevel_fetch_window\": in_coursevel_fetch_window(request),\n \"within_grading_period\": 
within_grading_period(request),\n \"comparison_date\": get_comparison_datetime(request)\n }\n try:\n last_term = get_previous_quarter(request)\n data[\"current_summer_term\"] = \"{},summer\".format(last_term.year)\n data[\"last_term\"] = \"{},{}\".format(last_term.year, last_term.quarter)\n except Exception:\n log_err(logger, \"get_previous_quarter\", traceback, request)\n return data", "def get_data(self, date_time):\n\n query = \"Select * from {table} where START_DATE <= '{datetime}' and END_DATE > '{datetime}'\"\n query = query.format(table=self.table_name, datetime=date_time)\n return pd.read_sql_query(query, con=self.con)", "def get_data(self, gauge_name, date_key):\n pass", "def get_google_trends_data(keyword, from_date, to_date):\r\n \r\n from_year, from_month = datetime.date.fromisoformat(from_date).year, datetime.date.fromisoformat(from_date).month\r\n to_year, to_month = datetime.date.fromisoformat(to_date).year, datetime.date.fromisoformat(to_date).month\r\n\r\n data = dailydata.get_daily_data(keyword, from_year, from_month, to_year, to_month)\r\n \r\n return data[keyword]", "def get_data_with_strict_range(ts_code: int, begin: str, end: str) -> List:\n data = get_data(ts_code, begin, end)\n \n first_record_date = to_datetime(data[0][\"data\"], \"pt\")\n period_start_date = to_datetime(begin, 'pt')\n \n try:\n is_out_of_range = first_record_date < period_start_date #type: ignore\n if is_out_of_range:\n raise ValueError\n except TypeError:\n print(\"ERROR: Serie \" + str(ts_code) + \" - Please, use 'DD/MM/YYYY' format for date strings.\")\n data = []\n except ValueError:\n print(\"WARNING: Serie \" + str(ts_code) + \" - There is no data for the requested period, but there's previous data.\")\n data = []\n \n return data", "def _get_all_data(self, start_date, end_date):\n return [self._prep_data(self._get_input_data(var, start_date,\n end_date),\n self.var.func_input_dtype)\n for n, var in enumerate(self.variables)]", "def get_data(self, ti=None, tf=None):\n # set date range defaults\n if ti is None:\n ti = self.ti\n if tf is None:\n tf = self.tf\n\n # convert datetime format\n ti = datetimeify(ti)\n tf = datetimeify(tf)\n\n # subset data\n inds = (self.df.index>=ti)&(self.df.index<tf)\n return self.df.loc[inds]", "def get_values(self, dates):\n ret = []\n for d in dates:\n ret.append(self.data[d])\n return ret", "def get_values_between_dates(self, date_start=None, date_end=None, dt_max=0.0, start_strict=False, end_strict=True):\n \n if start_strict:\n start_diff_operator = '>'\n else:\n start_diff_operator = '>='\n if end_strict:\n end_diff_operator = '<'\n else:\n end_diff_operator = '<='\n \n if dt_max < 0.:\n raise Exception('dt_max must be > 0')\n \n if (date_start is not None) and (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) AND datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%(start_diff_operator, end_diff_operator), \\\n params=[self.date2str(date_start-timedelta(dt_max)), self.date2str(date_end+timedelta(dt_max))])\n elif (date_start is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%start_diff_operator, \\\n params=[self.date2str(date_start-timedelta(dt_max))])\n elif (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) 
ORDER BY datetime(date_data)\"%end_diff_operator, \\\n params=[self.date2str(date_end+timedelta(dt_max))])\n else:\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO ORDER BY datetime(date_data)\")", "def get_data(day, unixstart=None, unixend=None):\n global subnet\n df_data = pd.DataFrame([])\n while len(df_data.index)<=0:\n try:\n df_data = pd.read_feather(f'/home/pi/studies/ardmore/homeserver/h{subnet}_{day}.feather')\n except Exception as e:\n # print(f\"Error grid_server.get_data:{e}\")\n pass\n if unixstart!=None:\n df_data = df_data[(df_data['unixtime']>=unixstart)&(df_data['unixtime']<=unixend)]\n float_cols = [x for x in df_data.columns if not x.startswith('timezone')]\n df_data = df_data[float_cols].astype(float)\n return df_data", "def date_search(data, start_date, end_date):\n # change dates for date search\n data['timestamp'] = pd.to_datetime(data['timestamp']).dt.date\n d1 = datetime.datetime.strptime(f'{start_date}', '%Y-%m-%d').date()\n d2 = datetime.datetime.strptime(f'{end_date}', '%Y-%m-%d').date()\n\n # constrict data by date search parameters\n less_data = data[(data['timestamp'] >= d1) & (data['timestamp'] <= d2)]\n\n return less_data", "def get_data(\n begin_date, end_date, stationid, product, datum=None, bin_num=None,\n interval=None, units='metric', time_zone='gmt'):\n # Convert dates to datetime objects so deltas can be calculated\n begin_datetime = parse_known_date_formats(begin_date)\n end_datetime = parse_known_date_formats(end_date)\n delta = end_datetime - begin_datetime\n\n # If the length of our data request is less or equal to 31 days,\n # we can pull the data from API in one request\n if delta.days <= 31:\n data_url = build_query_url(\n begin_datetime.strftime(\"%Y%m%d %H:%M\"),\n end_datetime.strftime(\"%Y%m%d %H:%M\"),\n stationid, product, datum, bin_num, interval, units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is less than 365 days\n # AND the product is hourly_height or high_low, we can pull data directly\n # from the API in one request\n elif delta.days <= 365 and (\n product == 'hourly_height' or product == 'high_low'):\n data_url = build_query_url(\n begin_date, end_date, stationid, product, datum, bin_num, interval,\n units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is greater than 365 days\n # AND the product is hourly_height or high_low, we need to load data from\n # the API in365 day blocks.\n elif product == 'hourly_height' or product == 'high_low':\n # Find the number of 365 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_365day_blocks = int(math.floor(delta.days / 365))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 365 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_365day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 365))\n end_datetime_loop = begin_datetime_loop + timedelta(days=365)\n\n # If end_datetime_loop of the current 365 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build url for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n 
end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_365day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # If the length of the user specified data request is greater than 31 days\n # for any other products, we need to load data from the API in 31 day\n # blocks\n else:\n # Find the number of 31 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_31day_blocks = int(math.floor(delta.days / 31))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 31 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_31day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 31))\n end_datetime_loop = begin_datetime_loop + timedelta(days=31)\n\n # If end_datetime_loop of the current 31 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build URL for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_31day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # Rename output dataframe columns based on requested product\n # and convert to useable data types\n if product == 'water_level':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'q': 'QC', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'QC', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'hourly_height':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'high_low':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'ty': 'high_low',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Separate to high and low dataframes\n df_HH = df[df['high_low'] == \"HH\"].copy()\n df_HH.rename(columns={'date_time': 'date_time_HH',\n 'water_level': 'HH_water_level'},\n inplace=True)\n\n df_H = df[df['high_low'] == \"H \"].copy()\n df_H.rename(columns={'date_time': 'date_time_H',\n 'water_level': 'H_water_level'},\n inplace=True)\n\n df_L = df[df['high_low'].str.contains(\"L \")].copy()\n df_L.rename(columns={'date_time': 'date_time_L',\n 'water_level': 'L_water_level'},\n inplace=True)\n\n df_LL = df[df['high_low'].str.contains(\"LL\")].copy()\n df_LL.rename(columns={'date_time': 'date_time_LL',\n 'water_level': 'LL_water_level'},\n inplace=True)\n\n # Extract dates (without time) for each entry\n dates_HH = [x.date() for x in pd.to_datetime(df_HH['date_time_HH'])]\n 
dates_H = [x.date() for x in pd.to_datetime(df_H['date_time_H'])]\n dates_L = [x.date() for x in pd.to_datetime(df_L['date_time_L'])]\n dates_LL = [x.date() for x in pd.to_datetime(df_LL['date_time_LL'])]\n\n # Set indices to datetime\n df_HH['date_time'] = dates_HH\n df_HH.index = df_HH['date_time']\n df_H['date_time'] = dates_H\n df_H.index = df_H['date_time']\n df_L['date_time'] = dates_L\n df_L.index = df_L['date_time']\n df_LL['date_time'] = dates_LL\n df_LL.index = df_LL['date_time']\n\n # Remove flags and combine to single dataframe\n df_HH = df_HH.drop(\n columns=['flags', 'high_low'])\n df_H = df_H.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_L = df_L.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_LL = df_LL.drop(columns=['flags', 'high_low',\n 'date_time'])\n\n # Keep only one instance per date (based on max/min)\n maxes = df_HH.groupby(df_HH.index).HH_water_level.transform(max)\n df_HH = df_HH.loc[df_HH.HH_water_level == maxes]\n maxes = df_H.groupby(df_H.index).H_water_level.transform(max)\n df_H = df_H.loc[df_H.H_water_level == maxes]\n mins = df_L.groupby(df_L.index).L_water_level.transform(max)\n df_L = df_L.loc[df_L.L_water_level == mins]\n mins = df_LL.groupby(df_LL.index).LL_water_level.transform(max)\n df_LL = df_LL.loc[df_LL.LL_water_level == mins]\n\n df = df_HH.join(df_H, how='outer')\n df = df.join(df_L, how='outer')\n df = df.join(df_LL, how='outer')\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(\n ['date_time', 'date_time_HH', 'date_time_H', 'date_time_L',\n 'date_time_LL'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df.index)\n df['date_time_HH'] = pd.to_datetime(df['date_time_HH'])\n df['date_time_H'] = pd.to_datetime(df['date_time_H'])\n df['date_time_L'] = pd.to_datetime(df['date_time_L'])\n df['date_time_LL'] = pd.to_datetime(df['date_time_LL'])\n\n elif product == 'predictions':\n if interval == 'h':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n\n elif interval == 'hilo':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl',\n 'type': 'hi_lo'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'hi_lo'])\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'currents':\n # Rename columns for clarity\n df.rename(columns={'b': 'bin', 'd': 'direction',\n 's': 'speed', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'wind':\n # Rename columns for clarity\n df.rename(columns={'d': 'dir', 'dr': 'compass',\n 'f': 'flags', 'g': 'gust_spd',\n 's': 'spd', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags', 'compass'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_pressure':\n # Rename columns 
for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_press'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'water_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'water_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n # Set datetime to index (for use in resampling)\n df.index = df['date_time']\n df = df.drop(columns=['date_time'])\n\n # Handle hourly requests for water_level and currents data\n if (product == 'water_level') | (product == 'currents') & (\n interval == 'h'):\n df = df.resample('H').first() # Only return the hourly data\n\n return df", "def get_daily(Data, Y, M, D):\n start = datetime(year=Y, month=M, day=D, hour=0, minute=0)\n end = datetime(year=Y, month=M, day=D, hour=23, minute=59, second=59)\n return Data[start:end][\"clouds\"].map(value_by_cloud)", "def getValuesFromDatetimeRange(self, data_field, start_datetime, end_datetime, additional_where_clause=\"\"):\r\n\r\n start_datetime = start_datetime.strftime(self.options['datetime_field_format'])\r\n end_datetime = end_datetime.strftime(self.options['datetime_field_format'])\r\n datetime_sql_cast = self.options['datetime_sql_cast'] # this is important if the underlying SQL type changes\r\n datetime_field = self.options['datetime_field']\r\n \r\n where_clause = \"%s <= %s \\'%s\\'\" % (datetime_field, datetime_sql_cast, start_datetime)\r\n where_clause += \"AND %s >= %s \\'%s\\'\" % (datetime_field, datetime_sql_cast, end_datetime)\r\n where_clause += additional_where_clause # this is optional. 
It is available for specific queries that do not only contain a datetime range.\r\n print \"where_clause\",where_clause\r\n \r\n return self.arc_table_utils.getValuesFromField(self.fullpath, where_clause, data_field)", "def read_cable_var(fcable, var_name):\n\n print(\"carry on read_cable_var\")\n cable = nc.Dataset(fcable, 'r')\n Time = nc.num2date(cable.variables['time'][:],cable.variables['time'].units)\n if var_name in [\"TVeg\", \"ESoil\", \"Rainf\", \"GPP\"]:\n var = pd.DataFrame(cable.variables[var_name][:,0,0]*1800., columns=['cable'])\n else:\n var = pd.DataFrame(cable.variables[var_name][:,0,0], columns=['cable'])\n var['Date'] = Time\n var = var.set_index('Date')\n if var_name in [\"TVeg\", \"ESoil\", \"Rainf\", \"GPP\"]:\n var = var.resample(\"D\").agg('sum')\n else:\n print(\"is here\")\n var = var.resample(\"D\").agg('mean')\n var.index = var.index - pd.datetime(2011,12,31)\n var.index = var.index.days\n var = var.sort_values(by=['Date'])\n\n return var", "def get_data(end_date, n, local, foreign):\n URL = \"https://api.exchangeratesapi.io/history\"\n PARAMS = {'start_at': str(get_weekday_n_days_ago(end_date, n)),\n 'end_at': str(end_date),\n 'symbols': foreign,\n 'base': local}\n r = requests.get(url=URL, params=PARAMS)\n data = r.json()\n input_data = []\n for day in data['rates']:\n input_data.append([datetime.strptime(day, '%Y-%m-%d').date(),\n float(\"{:.8f}\".format(data['rates'][day][foreign]))])\n input_data.sort(key=lambda x: x[0])\n return input_data[-n:]", "def read_cable_GPP_year(fcable, var_name):\n\n print(\"carry on read_cable_var\")\n cable = nc.Dataset(fcable, 'r')\n Time = nc.num2date(cable.variables['time'][:],cable.variables['time'].units)\n var = pd.DataFrame(cable.variables[var_name][:,0,0]*1800., columns=['cable'])\n var['Date'] = Time\n var = var.set_index('Date')\n var = var.resample(\"Y\").agg('sum')\n\n #var.index = var.index - pd.datetime(2011,12,31)\n #var.index = var.index.days\n var = var.sort_values(by=['Date'])\n\n return var", "def extract_data():\n raw_data = pd.read_csv(\"../../../resource/DataVisualization/vaccinations.csv\")\n raw_data = raw_data[[\"location\", \"date\", \"people_fully_vaccinated_per_hundred\"]]\n raw_data.date = pd.to_datetime(raw_data.date, format=\"%Y-%m-%d\")\n min_date = raw_data.date.min()\n raw_data.date = raw_data.date-min_date\n raw_data.date = pd.Series([x.days for x in raw_data.date])\n raw_data.drop(raw_data.loc[raw_data.people_fully_vaccinated_per_hundred.isnull()].index,\n axis=0, inplace=True)\n raw_data[\"people_fully_vaccinated_per_hundred\"] /= 100\n\n data_dict = dict()\n for country in raw_data.location.unique():\n if len(raw_data.loc[raw_data.location == country]) >= 100:\n tmp_data = raw_data.loc[raw_data.location == country]\n tmp_data.drop(\"location\", axis=1, inplace=True)\n data_dict[country] = {\"data\":tmp_data}\n else:\n raw_data.drop(raw_data.loc[raw_data.location ==\n country].index, inplace=True)\n return data_dict, min_date, raw_data", "def fetch_data(universe='all', start=None, end=None, connection=None, tablename=None, where_clause = None):\n from datetime import datetime, timedelta\n if end is None:\n end = datetime.today().strftime('%Y-%m-%d')\n if start is None:\n start = (datetime.today() - timedelta(days=30)).strftime('%Y-%m-%d')\n q = []\n select = \"SELECT * from {tablename} where \".format(tablename=tablename)\n if universe != 'all':\n q.append(\"symbol in {universe}\")\n q.append(\"timestamp >= '{start}'\")\n q.append(\"timestamp <= '{end}'\")\n if where_clause:\n 
[q.append(x)for x in where_clause]\n order_by = ' ORDER BY timestamp'\n query = ' AND '.join(q).format(universe=tuple(universe), \n start=start, end=end)\n query = select + query + order_by\n # This should be any column\n data = pd.read_sql_query(query, connection, parse_dates=['timestamp'])\n # Delete index column if any\n if 'index' in data.columns:\n del data['index']\n return data", "def temp_data(start_date):\n \"\"\"for all dates greater than and equal to the start date.\"\"\"\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).all()\n\n # Convert list of tuples into normal list\n all_date = list(np.ravel(results))\n\n return jsonify(all_date)", "def fetch_daterange(self, start_date, end_date=None, table='fashion'):\n\n if end_date is None:\n end_date = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n\n end_date_obj = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n end_day = '{:04d}-{:02d}-{:02d}'.format(end_date_obj.year, \n end_date_obj.month, \n end_date_obj.day)\n\n start_date_obj = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day)\n \n record_lookup_stmt = \"SELECT * FROM {} WHERE date=%s AND t>%s and t<%s\".format(table)\n \n record_list = []\n while curr_day <= end_day: \n record_list += self.session.execute(record_lookup_stmt, [curr_day, \n start_date,\n end_date])\n start_date_obj += timedelta(days=1)\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day) \n\n return record_list", "def query_api(self, start_date, end_date):\n headers = {\"token\": self.api_key}\n params = {\n \"datasetid\": \"GHCND\", # Daily weather\n \"stationid\": self.stations,\n \"datatypeid\": \"TMAX\", # Max Temperature\n \"units\": \"standard\", # Fahrenheit\n \"limit\": 1000, # Maximum request size\n \"startdate\": start_date.strftime(\"%Y-%m-%d\"),\n \"enddate\": end_date.strftime(\"%Y-%m-%d\"),\n }\n\n response = requests.get(\n \"https://www.ncdc.noaa.gov/cdo-web/api/v2/data\",\n headers=headers,\n params=params,\n )\n\n if response.ok:\n return response.json()\n else:\n print(response.text)\n return None", "def test_date_range():\n year = 2012\n cres_m = get_curtailment(year, curt_fn='curtailment.json')[0]\n cres_dr = get_curtailment(year, curt_fn='curtailment_date_range.json')[0]\n for df_res, site in cres_m:\n gid = int(site.name)\n assert np.allclose(df_res['windspeed'], cres_dr[gid]['windspeed'])", "def get_data(self, variable):\n return self.data.get(variable)", "def get_val(self, date):\n \n for row in self.data:\n if row['valid_from'] <= date <= row['valid_to']:\n return row['val']\n \n raise Exception, \"No value for the given date: %s, for %s\"% (str(date), self.name)", "def get_data( obj, prm, lev, date, timelevel=0 ):\n \n parameter = obj( name = prm, level = lev, dataDate = date )[ timelevel ]\n print( parameter.dataDate )\n \n #-----Checking grit type----------------------------------------------\n if parameter.gridType == \"sh\":\n lat, lon, data = sh( parameter.values )\n elif parameter.gridType == \"reduced_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. #else it only draws on half the map\n data = parameter.values\n elif parameter.gridType == \"regular_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. 
#else it only draws on half the map\n data = parameter.values\n else: \n print ( parameter.gridType )\n \n return lat, lon, data", "def get_weather_data(filename, dates, highs, lows, date_index, high_index,\n low_index):\n with open(filename) as f:\n reader = csv.reader(f)\n header_row = next(reader)\n\n # Get data temp.\n for row in reader:\n current_date = datetime.strptime(row[date_index], '%Y-%m-%d')\n try:\n high = int(row[high_index])\n low = int(row[low_index])\n except ValueError:\n print(f\"No data for {current_date}\")\n else:\n dates.append(current_date)\n highs.append(high)\n lows.append(low)" ]
[ "0.64027274", "0.60679734", "0.60579973", "0.6029412", "0.6007587", "0.59154254", "0.5905602", "0.58029187", "0.57601243", "0.57441825", "0.5735495", "0.570289", "0.5694353", "0.5680198", "0.5679844", "0.5646513", "0.5644005", "0.5620897", "0.56159776", "0.561455", "0.5593017", "0.55863744", "0.55542445", "0.55508476", "0.55490255", "0.55435884", "0.5532437", "0.55313104", "0.55301857", "0.5530064" ]
0.6285674
1
Perform the specified time reduction on a local timeseries.
def _time_reduce(self, arr, reduction):
    if self.dtype_in_time == 'av':
        return arr
    reductions = {
        'None': lambda xarr: xarr,
        'ts': lambda xarr: xarr,
        'av': lambda xarr: xarr.mean(internal_names.YEAR_STR),
        'std': lambda xarr: xarr.std(internal_names.YEAR_STR),
    }
    try:
        return reductions[reduction](arr)
    except KeyError:
        raise ValueError("Specified time-reduction method '{}' is not "
                         "supported".format(reduction))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _apply_all_time_reductions(self, full_ts, monthly_ts, eddy_ts):\n logging.info(self._print_verbose(\"Applying desired time-\"\n \"reduction methods.\"))\n # Determine which are regional, eddy, time-mean.\n reduc_specs = [r.split('.') for r in self.dtype_out_time]\n reduced = {}\n for reduc, specs in zip(self.dtype_out_time, reduc_specs):\n func = specs[-1]\n if 'eddy' in specs:\n data = eddy_ts\n elif 'time-mean' in specs:\n data = monthly_ts\n else:\n data = full_ts\n if 'reg' in specs:\n reduced.update({reduc: self.region_calcs(data, func)})\n else:\n reduced.update({reduc: self._time_reduce(data, func)})\n return OrderedDict(sorted(reduced.items(), key=lambda t: t[0]))", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 
0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def reduce_time_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax is None:\n print \"WARNING- no time axis in\",mv.id\n return None\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n print \"WARNING- cannot compute climatology for\",mv.id,seasons.seasons\n print \"...probably there is no data for times in the requested season.\"\n return None\n avmv = mvseas\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv", "def utlibr(time):\n\n if time.size == 1:\n return iers.utlibr(time.tt.mjd)\n else:\n # Only loop over unique epochs\n _, idx, r_idx = np.unique(np.asarray(time), return_index=True, return_inverse=True)\n return np.array([iers.utlibr(t.tt.mjd) for t in time[idx]])[r_idx]", "def set_analysis_time(self, t):\n for z in self.zones:\n z.set_demand_rate_per_t(t)", "def reduce_time( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id=='time' ]\n axes_string = '('+')('.join(axis_names)+')'\n if len(axes_string)>2:\n for ax in axes:\n # The averager insists on bounds. 
Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n avmv = averager( mv, axis=axes_string )\n else:\n avmv = mv\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv", "def forecast(time_series, args, session_file=None):\n\n local_time_series = TimeSeries(time_series,\n api=args.retrieve_api_)\n\n output = args.predictions\n # Local forecasts: Forecasts are computed locally\n message = u.dated(\"Creating local forecasts.\\n\")\n u.log_message(message, log_file=session_file, console=args.verbosity)\n input_data = []\n if args.test_set is not None:\n input_data = [u.read_json(args.test_set)]\n elif args.horizon is not None:\n input_data = [{local_time_series.objective_id: { \\\n \"horizon\": args.horizon}}]\n write_forecasts(local_time_series.forecast(*input_data),\n output)", "def aggregate_time(self, temporal_window, aggregationfunction) -> Series :\n pickled_lambda = cloudpickle.dumps(aggregationfunction)\n\n process_id = 'reduce_by_time'\n args = {\n 'imagery':self.graph,\n 'temporal_window': temporal_window,\n 'function': str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def aggregate_time(self, temporal_window, aggregationfunction) -> Series :\n pickled_lambda = cloudpickle.dumps(aggregationfunction)\n\n process_id = 'reduce_by_time'\n args = {\n 'imagery':self.graph,\n 'temporal_window': temporal_window,\n 'function': str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def do_dt(r, t):\n return -o(r,t)/(2*t)", "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt", "def dm_time_behaviour(sps, use_env=True):\n pts = get_envelope(sps) if use_env else sps\n _,_,R,_,_ = linregress(pts.time, pts.dm)\n return R**2", "def _timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs):\n if params[cf.TIME_SERIES_CAL] == 0:\n log.info('Time Series Calculation not required')\n return\n\n if params[cf.TIME_SERIES_METHOD] == 1:\n log.info('Calculating time series using Laplacian Smoothing method')\n elif params[cf.TIME_SERIES_METHOD] == 2:\n log.info('Calculating time series using SVD method')\n\n output_dir = params[cf.TMPDIR]\n total_tiles = len(tiles)\n process_tiles = mpiops.array_split(tiles)\n for t in process_tiles:\n log.debug(\"Calculating time series for tile \"+str(t.index)+\" out of \"+str(total_tiles))\n ifg_parts = [shared.IfgPart(p, t, preread_ifgs, params) for p in ifg_paths]\n mst_tile = np.load(os.path.join(output_dir, 'mst_mat_{}.npy'.format(t.index)))\n res = timeseries.time_series(ifg_parts, params, vcmt, mst_tile)\n tsincr, tscum, _ = res\n np.save(file=os.path.join(output_dir, 'tsincr_{}.npy'.format(t.index)), arr=tsincr)\n np.save(file=os.path.join(output_dir, 'tscuml_{}.npy'.format(t.index)), arr=tscum)\n mpiops.comm.barrier()\n log.debug(\"Finished timeseries calc!\")", "def resample(is_local, baz, ac):\n if is_local == 'local':\n ac.data = ac.data[0: 1800 * ac[0].stats.sampling_rate]\n ac.decimate(factor=2)\n sec = 5\n cutoff = 2.0 # local events\n elif is_local == 'non-local':\n ac.decimate(factor=4)\n sec = 120\n cutoff = 1.0 # nonlocal events\n else:\n ac.data = trr.data[0: 1800 * ac[0].stats.sampling_rate]\n ac.decimate(factor=2)\n sec = 3\n cutoff = 4.0 # close events\n \n return ac, sec, cutoff", "def _compute_full_ts(self, data, monthly_mean=False, zonal_asym=False):\n # Get results at each desired timestep and spatial 
point.\n # Here we need to provide file read-in dates (NOT xarray dates)\n full_ts, dt = self._compute(data, monthly_mean=monthly_mean)\n if zonal_asym:\n full_ts = full_ts - full_ts.mean(internal_names.LON_STR)\n # Vertically integrate.\n vert_types = ('vert_int', 'vert_av')\n if self.dtype_out_vert in vert_types and self.var.def_vert:\n # Here we need file read-in dates (NOT xarray dates)\n full_ts = utils.vertcoord.int_dp_g(\n full_ts, self._get_pressure_vals(dp, self.start_date,\n self.end_date)\n )\n if self.dtype_out_vert == 'vert_av':\n full_ts *= (GRAV_EARTH / self._to_desired_dates(self._ps_data))\n return full_ts, dt", "def __call__ (self, t):\n #if t <= self.last_t:\n #raise SpaceTimeContinuumError(\n #\"We're moving back in time! Last t = {}, now = {}\".format(\n #self.last_t, t))\n\n #samp = self._sample(t)\n #self.last_t = t\n #self.last_samp = samp\n #return samp\n pass", "def run(self, time_or_fn):\n raise NotImplementedError", "def run(self, input_time_series=None, num_iter=None, record=False,\n output=False):\n pass", "def reduce_run():", "def evaluate(self, time) -> float:\n ...", "def convolve_and_sum_slow(loadings, unit_response_functions=None):\n\n loadings = loadings.T\n print(loadings.shape)\n print(\"Convolving\")\n if (\n unit_response_functions is None\n ): # this logic is temporary, but have a safeguard so it's not accidentally used in production\n if settings.DEBUG:\n unit_response_functions = numpy.ones(\n [loadings.shape[0], loadings.shape[1], loadings.shape[2]],\n dtype=numpy.float64,\n )\n else:\n raise ValueError(\"Must provide Unit Response Functions!\")\n\n time_span = loadings.shape[0]\n output_matrix = numpy.zeros(\n [loadings.shape[0], loadings.shape[1], loadings.shape[2]], dtype=numpy.float64\n )\n\n for year in range(time_span):\n print(year)\n URF_length = time_span - year\n\n print(\"Subset\")\n subset_start = arrow.utcnow()\n current_year_loadings = loadings[\n year,\n :,\n :,\n ]\n subset_end = arrow.utcnow()\n print(subset_end - subset_start)\n\n # print(\"Reshape\")\n # reshape_start = arrow.utcnow()\n # reshaped_loadings = current_year_loadings.reshape(current_year_loadings.shape)\n # print(reshaped_loadings.shape)\n # repeated_loadings = numpy.repeat(reshaped_loadings, URF_length, 2)\n # reshape_end = arrow.utcnow()\n # print(reshape_end - reshape_start)\n\n print(\"Multiply\")\n multiply_start = arrow.utcnow()\n new_loadings = numpy.multiply(\n current_year_loadings, unit_response_functions[:URF_length, :, :]\n )\n multiply_end = arrow.utcnow()\n print(multiply_end - multiply_start)\n\n print(\"Add and Insert Back in\")\n add_start = arrow.utcnow()\n numpy.add(output_matrix[year:, :, :], new_loadings, output_matrix[year:, :, :])\n add_end = arrow.utcnow()\n print(add_end - add_start)\n # multiply this year's matrix * URFs matrix sliced to represent size of future\n # then add result to output_matrix\n\n results = numpy.sum(output_matrix, [1, 2]) # sum in 2D space", "def _local_ts(self, *data):\n arr = self.function(*data)\n if self.var.func_input_dtype == 'numpy':\n arr = xr.DataArray(arr, coords=self.coords)\n arr.name = self.name\n return arr", "def evaluate(self, time: Time, task: ComputeTask) -> Time:\n\n return cast(Time, time + self._evaluate_independent(task))", "def <start>/<end>(<start>/<end>)\ndef calc_temps(start_date, end_date):", "def local_to_utc(local: pd.Series, tz: Iterable, **kwargs: Any) -> pd.Series:\n return local.groupby(tz).transform(\n lambda x: x.dt.tz_localize(\n 
datetime.timezone(datetime.timedelta(hours=x.name))\n if isinstance(x.name, (int, float))\n else x.name,\n **kwargs,\n ).dt.tz_convert(None)\n )", "def time_update(self, U):\n self.X = self.runge_kutta(self.process_model, self.X, U, self.dt)\n \n J = self.F.subs({'d_t': self.dt, 'v': U[0], '\\Theta': self.X[2]})\n \n self.P = J*self.P*J.T + self.Q", "def timeseries_list_reduce(data, series_list, reducer, field_spec=None):\n\n if not isinstance(series_list, list):\n msg = 'A list of TimeSeries must be supplied to reduce'\n raise TimeSeriesException(msg)\n\n if not is_function(reducer):\n msg = 'reducer function must be supplied, for example, avg()'\n raise TimeSeriesException(msg)\n\n event_list = list()\n\n for i in series_list:\n for ii in i.events():\n event_list.append(ii)\n\n if field_spec is not None:\n events = reducer(event_list, field_spec)\n else:\n # like when calling Event.merge()\n events = reducer(event_list)\n\n coll = Collection(events)\n if coll.is_chronological() is False:\n coll = coll.sort_by_time()\n\n ret = TimeSeries(dict(collection=coll, **data))\n\n return ret", "def runave_time_correction(xaxis, time_freq):\n \n return rrule(eval(time_freq), dtstart=round_datetime(xaxis[0], time_freq), count=len(xaxis))", "def local_func(f, t, x, w):\n x_func = np.zeros_like(t, dtype='f')\n for i, jd in enumerate(t.jd):\n sel = (t.jd >= (jd - w)) & (t.jd <= (jd + w))\n x_func[i] = f(x[sel])\n return x_func", "def resample_to_delta_t(timeseries, delta_t, method='butterworth'):\n\n if not isinstance(timeseries,TimeSeries):\n raise TypeError(\"Can only resample time series\")\n\n if timeseries.kind is not 'real':\n raise TypeError(\"Time series must be real\")\n\n if timeseries.delta_t == delta_t:\n return timeseries * 1\n\n if method == 'butterworth':\n lal_data = timeseries.lal()\n _resample_func[timeseries.dtype](lal_data, delta_t)\n data = lal_data.data.data \n \n elif method == 'ldas': \n factor = int(delta_t / timeseries.delta_t)\n \n if factor == 8:\n timeseries = resample_to_delta_t(timeseries, timeseries.delta_t * 4.0, method='ldas')\n factor = 2\n elif factor == 16:\n timeseries = resample_to_delta_t(timeseries, timeseries.delta_t * 4.0, method='ldas')\n factor = 4 \n elif factor == 32:\n timeseries = resample_to_delta_t(timeseries, timeseries.delta_t * 8.0, method='ldas')\n factor = 4 \n elif factor == 64:\n timeseries = resample_to_delta_t(timeseries, timeseries.delta_t * 16.0, method='ldas')\n factor = 4 \n\n try:\n filter_coefficients = LDAS_FIR_LP[factor]\n except:\n raise ValueError('Unsupported resample factor, %s, given' %factor)\n \n # apply the filter\n series = scipy.signal.lfilter(filter_coefficients, 1.0, \n timeseries.numpy())\n \n # reverse the time shift caused by the filter\n corruption_length = len(filter_coefficients)\n data = numpy.zeros(len(timeseries))\n data[:len(data)-corruption_length/2] = series[corruption_length/2:]\n \n # zero out corrupted region\n data[0:corruption_length/2] = 0\n data[len(data)-corruption_length/2:] = 0 \n\n # Decimate the time series\n data = data[::factor] * 1\n \n else:\n raise ValueError('Invalid resampling method: %s' % method)\n \n return TimeSeries(data, delta_t = delta_t,\n dtype=timeseries.dtype, \n epoch=timeseries._epoch)" ]
[ "0.5988665", "0.51748985", "0.5174669", "0.51686645", "0.51683694", "0.5122315", "0.5104745", "0.50596696", "0.50596696", "0.5038005", "0.50319225", "0.5011385", "0.49402636", "0.49358612", "0.49357003", "0.49233362", "0.491421", "0.48677415", "0.48568976", "0.4825794", "0.48207754", "0.48194525", "0.47992092", "0.47874346", "0.47690737", "0.4733843", "0.47286698", "0.47081605", "0.4699379", "0.46764344" ]
0.6599274
0
Perform a calculation for all regions.
def region_calcs(self, arr, func): # Get pressure values for data output on hybrid vertical coordinates. bool_pfull = (self.def_vert and self.dtype_in_vert == internal_names.ETA_STR and self.dtype_out_vert is False) if bool_pfull: pfull = self._full_to_yearly_ts(self._prep_data( self._get_input_data(Var('p'), self.start_date, self.end_date, 0), self.var.func_input_dtype ), arr[internal_names.TIME_WEIGHTS_STR]).rename('pressure') # Loop over the regions, performing the calculation. reg_dat = {} for reg in self.region: # Just pass along the data if averaged already. if 'av' in self.dtype_in_time: data_out = reg.ts(arr) # Otherwise perform the calculation. else: method = getattr(reg, func) data_out = method(arr) if bool_pfull: # Don't apply e.g. standard deviation to coordinates. if func not in ['av', 'ts']: method = reg.ts # Convert Pa to hPa coord = method(pfull) * 1e-2 data_out = data_out.assign_coords( **{reg.name + '_pressure': coord} ) reg_dat.update(**{reg.name: data_out}) return xr.Dataset(reg_dat)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_calculate_all(self, **kwargs):\n _return = False\n\n # Calculate all Allocations, skipping the top node in the tree.\n for _node in self.tree.all_nodes()[1:]:\n if _node.identifier != 0:\n self.do_calculate(_node.identifier, **kwargs)\n\n return _return", "def calc(self):\n\t\tfor neuron in self.neurons.items():\n\t\t\tneuron.calculate()", "def output_results(self):\n for ba in self.regions:\n if (ba in self.import_regions) or (ba in self.generation_regions):\n continue\n if ba in BA_930_INCONSISTENCY[self.year]:\n logger.warning(f\"Using D instead of (G-TI) for consumed calc in {ba}\")\n self.results[ba][\"net_consumed_mwh\"] = self.eia930.df[\n KEYS[\"E\"][\"D\"] % ba\n ][self.generation.index]\n else:\n self.results[ba][\"net_consumed_mwh\"] = (\n self.generation[ba] - self.eia930.df[KEYS[\"E\"][\"TI\"] % ba]\n )[self.generation.index]\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n self.results[ba][get_column(pol, adjustment=adj)] = (\n self.results[ba][\n get_rate_column(pol, adjustment=adj, generated=False)\n ]\n * self.results[ba][\"net_consumed_mwh\"]\n )\n\n # Although we directly calculate rates, to calculate annual average rates\n # we sum emissions and generation then divide.\n for time_resolution in TIME_RESOLUTIONS:\n time_dat = self.results[ba].copy(deep=True)\n\n # Get local timezone\n assert not pd.isnull(self.ba_ref.loc[ba, \"timezone_local\"])\n time_dat[\"datetime_local\"] = time_dat.index.tz_convert(\n self.ba_ref.loc[ba, \"timezone_local\"]\n )\n time_dat = time_dat.reset_index() # move datetime_utc to column\n time_dat = time_dat[\n time_dat.datetime_local.dt.year == self.year\n ] # keep year of local data\n\n if time_resolution == \"hourly\":\n # No resampling needed; keep timestamp cols in output\n time_cols = [\"datetime_utc\", \"datetime_local\"]\n missing_hours = time_dat[time_dat.isna().any(axis=1)]\n if len(missing_hours) > 0:\n logger.warning(\n f\"{len(missing_hours)} hours are missing in {ba} consumed data\"\n )\n elif time_resolution == \"monthly\":\n time_dat[\"month\"] = time_dat.datetime_local.dt.month\n # Aggregate to appropriate resolution\n time_dat = (\n time_dat.groupby(\"month\")[EMISSION_COLS + [\"net_consumed_mwh\"]]\n .sum()\n .reset_index() # move \"month\" to column\n )\n time_cols = [\"month\"]\n elif time_resolution == \"annual\":\n time_dat[\"year\"] = time_dat.datetime_local.dt.year\n # Aggregate to appropriate resolution\n time_dat = (\n time_dat.groupby(\"year\")[EMISSION_COLS + [\"net_consumed_mwh\"]]\n .sum()\n .reset_index() # move \"year\" to column\n )\n time_cols = [\"year\"]\n\n # Calculate rates from summed emissions, consumption\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n rate_col = get_rate_column(pol, adj, generated=False)\n emission_col = get_column(pol, adj)\n time_dat[rate_col] = (\n time_dat[emission_col] / time_dat[\"net_consumed_mwh\"]\n )\n\n # Output\n output_to_results(\n time_dat[time_cols + CONSUMED_EMISSION_RATE_COLS],\n ba,\n f\"/carbon_accounting/{time_resolution}/\",\n self.prefix,\n skip_outputs=self.skip_outputs,\n )\n return", "def calc_regional_values(infiles, variable, time_constraint, area_cube):\n\n cube, coord_names, aux_coord_names, grid_type = read_data(infiles, variable, time_constraint)\n\n cube_list = iris.cube.CubeList([])\n for region in ['globe', 'nh', 'sh', 'nhext', 'tropics', 'shext']:\n region_sum = calc_region_sum(cube, coord_names, aux_coord_names, grid_type, area_cube, region)\n region_sum = rename_cube(region_sum, region + ' sum')\n cube_list.append(region_sum)\n\n 
return cube_list", "def process_regions(mask, t1, ignore_vals=[0]):\n regions = np.unique(mask)\n regions = [i for i in regions if i not in ignore_vals]\n colors = map_label_colors(regions)\n images = [process_region(mask, t1, colors, region) for region in regions]\n montage = create_montage(images, direction='v')\n return(montage)", "def calc_region_sum(cube, coord_names, aux_coord_names, grid_type, area_cube, region):\n\n if grid_type == 'curvilinear':\n assert area_cube, \"Must provide an area cube of curvilinear data\"\n\n cube = cube.copy() \n coord_names = coord_names.copy()\n lat_bounds = region_bounds[region]\n\n # Extract region\n if lat_bounds:\n if grid_type == 'curvilinear':\n cube = extract_region_curvilinear(cube, lat_bounds)\n else:\n cube = extract_region_latlon(cube, lat_bounds)\n\n if 'm-2' in str(cube.units):\n # Get area weights \n if area_cube:\n if grid_type == 'latlon' and lat_bounds:\n area_cube = extract_region_latlon(area_cube, lat_bounds)\n area_data = uconv.broadcast_array(area_cube.data, [1, 2], cube.shape)\n else:\n area_data = spatial_weights.area_array(cube)\n\n # Multiply by area\n cube.data = cube.data * area_data\n units = str(cube.units)\n cube.units = units.replace('m-2', '')\n\n assert cube.units == 'J'\n\n coord_names.remove('time')\n spatial_agg = cube.collapsed(coord_names, iris.analysis.SUM)\n \n spatial_agg.remove_coord('latitude')\n spatial_agg.remove_coord('longitude')\n if grid_type == 'curvilinear':\n spatial_agg.remove_coord(coord_names[0])\n spatial_agg.remove_coord(coord_names[1])\n\n return spatial_agg", "def compute_statistics(self, region):\n x = 0.0\n y = 0.0\n n = 1\n for pixel in region:\n n = n + 1\n x = x + pixel[0]\n y = y + pixel[1]\n\n x = x / n\n y = y / n\n k = 1\n print(\"Region: \" + str(k) + \", Centroid: (\" + str(x) + \",\" + str(y) + \"), Area: \" + str(n))\n\n # Please print your region statistics to stdout\n # <region number>: <location or center>, <area>\n # print(stats)\n\n return n", "def walk(self):\n for group in self.all_groups.values():\n yield from group.calculations", "def compute_all(self) -> None:\n self.compute_j_matrix()\n self.compute_outter_distribution()\n self.compute_max_prior()\n self.compute_max_poutter()", "def compute_gradient_for_all(self):\r\n\r\n # YOUR CODE HERE\r\n self.compute_gradient_for_subset(0, self.DATAPOINTS)", "def main():\n region = 'Kanto'\n year = 2000\n # callParallelGA(region)\n callParallelReducedGA(region)\n \n\n region = 'EastJapan'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n\n region = 'Tohoku'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n \n region = 'Kansai'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)", "def calculate(self):", "def island_aging(self):\n for y in self.island_map:\n for cell in y:\n cell.aging()", "def compute(self):\n self.find_n()\n\n # call hotspot field plots\n for scenario in self.scenarios:\n fields_dict = {}\n ancestor_files = []\n for filename in io.get_all_ancestor_files(self.cfg,\n pattern='hotspot_*.nc'):\n key = os.path.basename(os.path.dirname(filename))\n splitname = os.path.basename(filename).split(\"_\")\n if key.split(\"_\")[-1] == scenario:\n fields_dict[(\n f\"{splitname[-1].split('.nc')[0]}_\"\n f\"{splitname[1]}_{key}\")] = iris.load_cube(filename)\n ancestor_files.append(filename)\n fields_dict[\"scenario\"] = scenario\n fields_dict[\"ancestors\"] = ancestor_files\n self.hotspot_fields_plot(fields_dict)\n\n # call scatter plots\n for season in 
self.seasons:\n timeseries_dict = {\"large_scale\": {}, \"regional\": {}}\n for region, value in timeseries_dict.items():\n for filename in io.get_all_ancestor_files(\n self.cfg,\n pattern=f'rolling_mean_{region}_{season}.nc'):\n value[os.path.basename(os.path.dirname(filename))] = (\n iris.load_cube(filename))\n value[os.path.basename(\n os.path.dirname(filename))] = (filename)\n for var_combination in self.var_combinations:\n self.timeseries_scatter_plot(deepcopy(timeseries_dict), season,\n var_combination)", "def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)", "def test_region_selection(ed_fueltype_regs_yh):\n modelled_days = 1\n hours_modelled = modelled_days * 24\n\n _sum_day_selection = 0\n for fuels in ed_fueltype_regs_yh:\n for region_fuel in fuels:\n _sum_day_selection += np.sum(region_fuel[: hours_modelled])\n len_dict = region_fuel.shape[0]\n\n _sum_all = 0\n for fuels in ed_fueltype_regs_yh:\n for region_fuel in fuels:\n _sum_all += np.sum(region_fuel)\n\n return", "def perform_calculations(collector):\n result = {}\n try:\n radius, mass = Calculator.calculate_radius_mass(collector)\n result['radius'] = radius\n result['mass'] = mass\n average_density = Calculator.calculate_average_density(radius,\n mass)\n result['average_density'] = average_density\n escape_velocity = Calculator.calculate_escape_velocity(radius,\n mass)\n result['escape_velocity'] = escape_velocity\n earth_similarity_index = Calculator.calculate_esi_index(\n radius, mass, collector.get_average_temperature())\n result['earth_similarity_index'] = earth_similarity_index\n except NoDataError:\n pass\n\n try:\n avg_atm_molar_mass = Calculator.calculate_molar_mass(collector)\n except NoDataError:\n avg_atm_molar_mass = None\n if avg_atm_molar_mass is not None and avg_atm_molar_mass <= 0:\n logging.getLogger('Analyzer').debug('Molar mass <= 0: %d',\n avg_atm_molar_mass)\n avg_atm_molar_mass = None\n\n if avg_atm_molar_mass is not None:\n result['avg_atm_molar_mass'] = avg_atm_molar_mass\n avg_molecule_mass = avg_atm_molar_mass / Calculator.A\n result['avg_molecule_mass'] = avg_molecule_mass\n specific_gas_const = Calculator.R / avg_atm_molar_mass\n result['specific_gas_const'] = specific_gas_const\n\n try:\n speed_of_sound = Kundt.speed_of_sound(collector.kundt)\n result['speed_of_sound'] = speed_of_sound\n\n if avg_atm_molar_mass is None:\n # All further calculations require valid molar mass\n return result\n\n # Since calculate_molar_mass already uses get_average_temperature\n # and get_ground_pressure, it's safe to use these functions here\n # without worrying about NoDataError\n adiabatic_index = Calculator.calculate_adiabatic_index(\n collector, speed_of_sound, avg_atm_molar_mass)\n result['adiabatic_index'] = adiabatic_index\n\n atmosphere_density = (adiabatic_index *\n collector.get_ground_pressure() /\n speed_of_sound ** 2)\n result['atmosphere_density'] = atmosphere_density\n\n refractive_index = (3 * avg_atm_molar_mass *\n collector.get_ground_pressure() /\n atmosphere_density / Calculator.R /\n collector.get_average_temperature() - 2) ** 0.5\n result['refractive_index'] = refractive_index\n\n molar_refractivity = (avg_atm_molar_mass /\n atmosphere_density *\n (refractive_index ** 2 - 1) /\n (refractive_index ** 2 + 2))\n result['molar_refractivity'] = 
molar_refractivity\n\n atm_speed_of_light = Calculator.C / refractive_index\n result['atm_speed_of_light'] = atm_speed_of_light\n except NoDataError:\n pass\n\n return result", "def regions(self):\n\n class RegionIter(object):\n def __init__(self, region_based):\n self._region_based = region_based\n\n def __len__(self):\n return self._region_based._region_len()\n\n def __iter__(self):\n return self()\n\n def _fix_chromosome(self, regions):\n for r in regions:\n r.fix_chromosome(copy=True)\n\n def __call__(self, key=None, *args, **kwargs):\n fix_chromosome = kwargs.pop('fix_chromosome', False)\n\n if key is None:\n iterator = self._region_based._region_iter(*args, **kwargs)\n else:\n if isinstance(key, string_types) or isinstance(key, GenomicRegion):\n iterator = self._region_based.region_subset(key, *args, **kwargs)\n else:\n iterator = self._region_based._get_regions(key, *args, **kwargs)\n\n if fix_chromosome:\n return self._fix_chromosome(iterator)\n else:\n return iterator\n\n def __getitem__(self, item):\n if isinstance(item, string_types) or isinstance(item, GenomicRegion):\n return self._region_based.region_subset(item)\n return self._region_based._get_regions(item)\n\n return RegionIter(self)", "def process(cls, df):\n\n # Calculate totals for both genders together\n for g in cls.GROUPS[1:]:\n\n # the columns to sum\n cols_to_sum = [f\"{tag}_{g}\" for tag in [\"male\", \"female\"]]\n\n # approximate the sum\n new_cols = [f\"total_{g}\", f\"total_{g}_moe\"]\n df[new_cols] = df.apply(approximate_sum, cols=cols_to_sum, axis=1)\n\n # Calculate custom group sets\n groupsets = collections.OrderedDict(\n {\n \"16_to_21_employed\": [\"16_to_19_employed\", \"20_to_21_employed\"],\n \"22_to_29_employed\": [\"22_to_24_employed\", \"25_to_29_employed\"],\n \"30_to_44_employed\": [\"30_to_34_employed\", \"35_to_44_employed\"],\n \"45_to_64_employed\": [\n \"45_to_54_employed\",\n \"55_to_59_employed\",\n \"60_to_61_employed\",\n \"62_to_64_employed\",\n ],\n \"65_and_over_employed\": [\n \"65_to_69_employed\",\n \"70_to_74_employed\",\n \"75_and_over_employed\",\n ],\n \"16_to_64_employed\": [\n \"16_to_19_employed\",\n \"20_to_21_employed\",\n \"22_to_24_employed\",\n \"25_to_29_employed\",\n \"30_to_34_employed\",\n \"35_to_44_employed\",\n \"45_to_54_employed\",\n \"55_to_59_employed\",\n \"60_to_61_employed\",\n \"62_to_64_employed\",\n ],\n }\n )\n\n # Sum over the custom groups\n for groupset, group_list in groupsets.items():\n for tag in [\"total\", \"male\", \"female\"]:\n\n # cols to sum over\n cols_to_sum = [f\"{tag}_{f}\" for f in group_list]\n\n # do the aggregation\n newcols = [f\"{tag}_{groupset}\", f\"{tag}_{groupset}_moe\"]\n df[newcols] = df.apply(approximate_sum, cols=cols_to_sum, axis=1)\n\n return df", "def __calcSpinors(self):\n self.__allSpinors = self.__evalSpinors(self.__allMomenta)", "def calculate(self):\n pass", "def calculate(self):\n\n return self._calculate_area(self.segmentation, self.slice_number)", "def __iteratively_retain(\n self,\n orf_regions: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n\n ret = []\n\n arr = np.zeros((len(self.seq), ))\n\n for start, end in orf_regions:\n ret.append((start, end))\n arr[start-1:end] = 1\n orf_coverage = np.sum(arr) / len(arr)\n if orf_coverage > self.min_orf_coverage:\n break\n\n return ret", "def calculate(self):\r\n\r\n pass", "def calculate(self):\n self.results['max'] = numpy.max(self.data)\n self.results['min'] = numpy.min(self.data)\n if self.type == 0:\n self.group_discrete_data()\n if self.type == 1:\n 
self.group_continuous_data()\n\n self.results['arithAvg'] = self.average([self.data[i] * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences)\n self.results['quadAvg'] = math.sqrt(\n self.average([(self.data[i] * self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n if self.results['min'] > 0:\n self.results['geoAvg'] = math.exp(\n self.average([numpy.log(self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n self.results['harmAvg'] = 1 / self.average(\n [(self.occurrences[i] / self.data[i]) for i in range(len(self.data))],\n self.totalOccurrences)\n else:\n self.results['geoAvg'] = self.results['harmAvg'] = \"N/A\"\n self.results['momentsR'] = self.moments(self.data, self.occurrences, 4)\n self.results['centralMomentsR'] = self.moments([(i - self.results['arithAvg']) for i in self.data],\n self.occurrences, 4)\n self.results['std'] = self.average(\n [self.occurrences[i] * abs(self.data[i] - self.results['arithAvg']) for i in range(len(self.data))],\n self.totalOccurrences)", "def calculate(self):\r\n pass", "def updateall(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.updateall, params)", "def _do_compute(self, var_map):\n raise Exception(\"Not implemented. Subclass responsibility\")", "def run(self) -> list:\n logger.debug('Fetching date %s', self._day.strftime('%Y/%m/%d'))\n \n regions = [r() for r in regions_list]\n air_quality = list()\n \n # fetch air quality of each region\n for r in regions:\n r.fetch_air_quality(self._day)\n \n # gather results from all regions\n for r in regions:\n # wait until region has fetched his data\n r.wait_for_quality()\n logging.info('Fetched region:%s for day:%s', r.name, self._day)\n air_quality.append({\n 'name': r.name,\n 'provinces': [\n {'name': x.name, 'short': x.short_name, 'quality': x.quality.asdict()} \n for x in r.provinces]\n })\n\n self._fetcher.fetched_result(self._day, air_quality)", "def __call__(self, args):\n if isinstance(self.transform, list) and len(self.transform) > 1:\n result = self.regions_mask[x, y]\n unique_regions = np.unique(result)\n for i in unique_regions:\n indices = result==i\n transform=self.get_forward_transform(i)\n result[indices]=transform(x[indices], y[indices])\n print('resut', result)\n return result\n else:\n return self.transform(x, y)" ]
[ "0.6194143", "0.5827072", "0.5770518", "0.56490785", "0.5578175", "0.5562212", "0.55582315", "0.5550116", "0.5517527", "0.5458629", "0.5393012", "0.5392545", "0.5353598", "0.5342892", "0.5308947", "0.52641714", "0.5259804", "0.523855", "0.52160764", "0.5192724", "0.5178875", "0.5170722", "0.51651585", "0.5161665", "0.5159957", "0.51497835", "0.5148859", "0.5136154", "0.51140255", "0.5106104" ]
0.6156202
1
Apply all requested time reductions to the data.
def _apply_all_time_reductions(self, full_ts, monthly_ts, eddy_ts): logging.info(self._print_verbose("Applying desired time-" "reduction methods.")) # Determine which are regional, eddy, time-mean. reduc_specs = [r.split('.') for r in self.dtype_out_time] reduced = {} for reduc, specs in zip(self.dtype_out_time, reduc_specs): func = specs[-1] if 'eddy' in specs: data = eddy_ts elif 'time-mean' in specs: data = monthly_ts else: data = full_ts if 'reg' in specs: reduced.update({reduc: self.region_calcs(data, func)}) else: reduced.update({reduc: self._time_reduce(data, func)}) return OrderedDict(sorted(reduced.items(), key=lambda t: t[0]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _time_reduce(self, arr, reduction):\n if self.dtype_in_time == 'av':\n return arr\n reductions = {\n 'None': lambda xarr: xarr,\n 'ts': lambda xarr: xarr,\n 'av': lambda xarr: xarr.mean(internal_names.YEAR_STR),\n 'std': lambda xarr: xarr.std(internal_names.YEAR_STR),\n }\n try:\n return reductions[reduction](arr)\n except KeyError:\n raise ValueError(\"Specified time-reduction method '{}' is not \"\n \"supported\".format(reduction))", "def reduce_run():", "def compute_total_times(self):\n rval = {}\n for fgraph, node in self.apply_time:\n if node not in rval:\n self.fill_node_total_time(fgraph, node, rval)\n return rval", "def set_analysis_time(self, t):\n for z in self.zones:\n z.set_demand_rate_per_t(t)", "def apply_all_accumulators(self):\n self._require_state(\"APPLYING\")\n for mi in self._accums.keys():\n self._apply_one_accum_set(mi)", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = 
np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def custom_processing(self, funct: callable, data_tmp: np.ndarray, **kwargs) -> np.ndarray:\n tic = time.time()\n data_tmp = funct(data_tmp, **kwargs)\n self.process_time.append(time.time() - tic)\n return data_tmp", "def scaling_data(self, data):\n df_modif = data.copy()\n np.seterr(divide='ignore')\n if self.one_timestamp:\n summ = data.groupby(['task'])['duration'].max().to_dict()\n dur_act_norm = (lambda x: x['duration']/summ[x['task']]\n if summ[x['task']] > 0 else 0)\n df_modif['dur_act_norm'] = df_modif.apply(dur_act_norm, axis=1)\n else:\n summ = data.groupby(['task'])['processing_time'].max().to_dict()\n proc_act_norm = (lambda x: x['processing_time']/summ[x['task']]\n if summ[x['task']] > 0 else 0)\n df_modif['proc_act_norm'] = df_modif.apply(proc_act_norm, axis=1)\n # ---\n summ = data.groupby(['task'])['waiting_time'].max().to_dict()\n wait_act_norm = (lambda x: x['waiting_time']/summ[x['task']]\n if summ[x['task']] > 0 else 0)\n df_modif['wait_act_norm'] = df_modif.apply(wait_act_norm, axis=1)\n return df_modif", "def minimizeTimes(self):\n from copy import deepcopy as dcp\n tmin = self.get_tmin()\n for t in self.srcData: \n old = dcp(self.srcData[t])\n new_t = t - tmin\n self.outData[new_t] = old", "def __apply_accumulators():\n self.__xdata = np.array([])\n self.__ydata = np.array([])\n for acc in self.signal_accumulators:\n self.__xdata = __array_append(self.__xdata,acc.attempt)\n self.__ydata = __array_append(self.__ydata,acc.count)\n self.__applied = True", "def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))", "def evaluate_multiple_time(self, time=200, save_dir='../multi_eval/INN/'):\r\n tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))\r\n save_dir += self.flags.data_set\r\n for i in range(time):\r\n self.evaluate(save_dir=save_dir, prefix='inference' + str(i))\r\n tk.record(i)", "def time_analysis(self, method='MEAN'):\n new_cubelist = []\n for cube in self.cubelist:\n new_cubelist.append(self.cube_coordinate_analysis(cube, \n self.time_coord, \n method))\n self.cubelist = iris.cube.CubeList(new_cubelist)\n self.processes.append('time_analysis')\n return self.cubelist", "def _reduce_cells(self):\n\n def reduce_cell(result, cell):\n # We assume only _sum aggergation\n # All measures should be prepared so we can to this\n for aggregate in self.aggregate_names:\n result[aggregate] = result.get(aggregate, 0) + \\\n cell.get(aggregate, 0)\n return result\n\n # 1. 
Map cells to reduced time path\n #\n reduced_map = defaultdict(list)\n reduced_len = len(self.time_levels)\n\n for key, cell in self.time_cells.items():\n time_path = key[0]\n reduced_path = time_path[0:reduced_len]\n\n reduced_key = (reduced_path, key[1])\n\n # self.logger.debug(\"reducing %s -> %s\" % (key, reduced_key))\n reduced_map[reduced_key].append(cell)\n\n self.browser.logger.debug(\"response cell count: %s reduced to: %s\" %\n (len(self.time_cells), len(reduced_map)))\n\n # 2. Reduce the cells\n #\n # See the function reduce_cell() above for aggregation:\n #\n reduced_cells = {}\n for key, cells in reduced_map.items():\n # self.browser.logger.debug(\"Reducing: %s -> %s\" % (key, cells))\n cell = reduce(reduce_cell, cells, {})\n\n reduced_cells[key] = cell\n\n self.time_cells = reduced_cells", "def extract_time_expressions(data_dir) -> None:\r\n path = _get_path(data_dir)\r\n data = jsonhandler.load(path)\r\n tweets_df = fileio.CSVHandler(data_dir).read_tweets()\r\n for id_, df in tweets_df.groupby(\"id\", observed=True):\r\n if id_ in data:\r\n continue\r\n info = df.iloc[0].to_dict()\r\n sentence = info[\"full_text\"]\r\n doc_time = info[\"created_at\"] + dt.timedelta(hours=9) # utc to jst\r\n try:\r\n time_expressions = timenormalizer.extract_time(sentence, doc_time)\r\n except requests.HTTPError as e:\r\n print(f\"{id_}: {e}\")\r\n break\r\n data[id_] = time_expressions\r\n jsonhandler.dump(data, path)", "def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i = 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if i > 35000:\n # s.save()\n # reduce_data()", "def variable_time_collate_fn(batch, args, device = torch.device(\"cpu\"), data_type = \"train\", \n\tdata_min = None, data_max = None):\n\tD = batch[0][2].shape[1]\n\tcombined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)\n\tcombined_tt = combined_tt.to(device)\n\n\toffset = 0\n\tcombined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\tcombined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\t\n\tcombined_labels = None\n\tN_labels = 1\n\n\tcombined_labels = torch.zeros(len(batch), N_labels) + torch.tensor(float('nan'))\n\tcombined_labels = combined_labels.to(device = device)\n\t\n\tfor b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n\t\ttt = tt.to(device)\n\t\tvals = vals.to(device)\n\t\tmask = mask.to(device)\n\t\tif labels is not None:\n\t\t\tlabels = labels.to(device)\n\n\t\tindices = 
inverse_indices[offset:offset + len(tt)]\n\t\toffset += len(tt)\n\n\t\tcombined_vals[b, indices] = vals\n\t\tcombined_mask[b, indices] = mask\n\n\t\tif labels is not None:\n\t\t\tcombined_labels[b] = labels\n\n\tcombined_vals, _, _ = utils.normalize_masked_data(combined_vals, combined_mask, \n\t\tatt_min = data_min, att_max = data_max)\n\n\tif torch.max(combined_tt) != 0.:\n\t\tcombined_tt = combined_tt / torch.max(combined_tt)\n\t\t\n\tdata_dict = {\n\t\t\"data\": combined_vals, \n\t\t\"time_steps\": combined_tt,\n\t\t\"mask\": combined_mask,\n\t\t\"labels\": combined_labels}\n\n\tdata_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)\n\treturn data_dict", "def calculate_scaleup_vars(self):\n\n for label, fn in self.scaleup_fns.items():\n self.vars[label] = fn(self.time)", "def set_meas_time_all(instrument, time_meas):\n set_meas_time_current(instrument, time_meas)\n set_meas_time_voltage(instrument, time_meas)\n set_meas_time_resistance(instrument, time_meas)", "def convolve_and_sum_slow(loadings, unit_response_functions=None):\n\n loadings = loadings.T\n print(loadings.shape)\n print(\"Convolving\")\n if (\n unit_response_functions is None\n ): # this logic is temporary, but have a safeguard so it's not accidentally used in production\n if settings.DEBUG:\n unit_response_functions = numpy.ones(\n [loadings.shape[0], loadings.shape[1], loadings.shape[2]],\n dtype=numpy.float64,\n )\n else:\n raise ValueError(\"Must provide Unit Response Functions!\")\n\n time_span = loadings.shape[0]\n output_matrix = numpy.zeros(\n [loadings.shape[0], loadings.shape[1], loadings.shape[2]], dtype=numpy.float64\n )\n\n for year in range(time_span):\n print(year)\n URF_length = time_span - year\n\n print(\"Subset\")\n subset_start = arrow.utcnow()\n current_year_loadings = loadings[\n year,\n :,\n :,\n ]\n subset_end = arrow.utcnow()\n print(subset_end - subset_start)\n\n # print(\"Reshape\")\n # reshape_start = arrow.utcnow()\n # reshaped_loadings = current_year_loadings.reshape(current_year_loadings.shape)\n # print(reshaped_loadings.shape)\n # repeated_loadings = numpy.repeat(reshaped_loadings, URF_length, 2)\n # reshape_end = arrow.utcnow()\n # print(reshape_end - reshape_start)\n\n print(\"Multiply\")\n multiply_start = arrow.utcnow()\n new_loadings = numpy.multiply(\n current_year_loadings, unit_response_functions[:URF_length, :, :]\n )\n multiply_end = arrow.utcnow()\n print(multiply_end - multiply_start)\n\n print(\"Add and Insert Back in\")\n add_start = arrow.utcnow()\n numpy.add(output_matrix[year:, :, :], new_loadings, output_matrix[year:, :, :])\n add_end = arrow.utcnow()\n print(add_end - add_start)\n # multiply this year's matrix * URFs matrix sliced to represent size of future\n # then add result to output_matrix\n\n results = numpy.sum(output_matrix, [1, 2]) # sum in 2D space", "def variable_time_collate_fn3(batch, args, device = torch.device(\"cpu\"), data_type = \"train\", \n data_min = None, data_max = None):\n D = batch[0][2].shape[1]\n len_tt = [ex[1].size(0) for ex in batch]\n maxlen = np.max(len_tt)\n enc_combined_tt = torch.zeros([len(batch), maxlen]).to(device)\n enc_combined_vals = torch.zeros([len(batch), maxlen, D]).to(device)\n enc_combined_mask = torch.zeros([len(batch), maxlen, D]).to(device)\n for b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n currlen = tt.size(0)\n enc_combined_tt[b, :currlen] = tt.to(device) \n enc_combined_vals[b, :currlen] = vals.to(device) \n enc_combined_mask[b, :currlen] = mask.to(device) \n \n 
enc_combined_vals, _, _ = utils.normalize_masked_data(enc_combined_vals, enc_combined_mask, \n att_min = data_min, att_max = data_max)\n\n if torch.max(enc_combined_tt) != 0.:\n enc_combined_tt = enc_combined_tt / torch.max(enc_combined_tt)\n \n data_dict = {\n \"observed_data\": enc_combined_vals, \n \"observed_tp\": enc_combined_tt,\n \"observed_mask\": enc_combined_mask}\n\n return data_dict", "def time_stats(df):", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def preproc_data(base_dir):\n n_users = 182\n user_datas = []\n\n for user in tqdm(range(n_users)): \n #iterate through each trace of user \n user_traces = []\n if (len(str(user)) == 1): \n user_id = '00' + str(user)\n elif (len(str(user)) == 2): \n user_id = '0' + str(user)\n else: \n user_id = str(user)\n dir_name = base_dir + user_id + '/Trajectory/'\n for filename in os.listdir(dir_name): \n #load trajectory\n trajectory_raw = pd.read_csv(\n dir_name + filename,\n delimiter = ',', header = None, skiprows = 6,\n names = ['lat','lon', '0', 'altitude', 't_1899', 'date', 'time' ])\n\n #modify time\n traj = trajectory_raw.copy()\n 
traj.drop(columns = '0', inplace = True)\n\n traj['t_1899'] = (trajectory_raw['t_1899'] - trajectory_raw['t_1899'].values[0]) * 24 * 60 * 60\n traj_newcols = np.array(traj.columns)\n traj_newcols[traj_newcols == 't_1899'] = 'seconds'\n traj.columns = traj_newcols\n\n #get X_vals: \n R_earth = 6.371e6\n deg_to_rad = np.pi / 180\n traj['X'] = R_earth * np.cos(deg_to_rad * traj['lat'].values) * np.cos(deg_to_rad * traj['lon'].values)\n traj['Y'] = R_earth * np.cos(deg_to_rad * traj['lat'].values) * np.sin(deg_to_rad * traj['lon'].values)\n\n #Normalize to first point: \n origin_x = traj['X'].values[0]\n origin_y = traj['Y'].values[0]\n\n traj['X'] = traj['X'] - origin_x\n traj['Y'] = traj['Y'] - origin_y\n\n txy = traj.copy()\n txy = traj[['seconds', 'X', 'Y']]\n txy.columns = ['T', 'X', 'Y']\n if not np.isnan(txy.values).any(): \n user_traces.append(txy.values)\n\n user_datas.append(user_traces)\n return user_datas", "def collapse_using_timeStr(self):\n if self.modified == True:\n raise Exception('Probabilities already modified.\\nCollapsing after modification will lead to incorrect results.')\n timeUnits = np.array(process_time_string(self.timeStr))\n if len(self.timeslices) + 1 == np.sum(timeUnits):\n if timeUnits[-1] == 1:\n timeUnits = timeUnits[:-1]\n else:\n timeUnits[-1] -= 1\n if len(self.timeslices) != np.sum(timeUnits):\n raise Exception('Total number of timeslices is different.')\n ind = 0\n cnt = 0\n curr_rates = np.matrix(np.zeros((np.shape(self.obsRates)[0], len(timeUnits))))\n curr_times = []\n for i in timeUnits:\n curr_rates[:, cnt] = np.sum(self.obsRates[:, ind:ind + i], axis=1)\n curr_times.append(np.sum(self.timeslices[ind:ind + i]))\n ind += i\n cnt += 1\n\n self.obsRates = curr_rates\n self.timeslices = curr_times", "def monitor(data_feeder):\n _total_time = 0.\n _costs = []\n _data_feeder = data_feeder(BATCH_SIZE,\n SEQ_LEN,\n OVERLAP,\n Q_LEVELS,\n Q_ZERO,\n Q_TYPE)\n\n for _seqs, _reset, _mask in _data_feeder:\n _start_time = time.time()\n _cost = test_fn(_seqs, _mask)\n _total_time += time.time() - _start_time\n\n _costs.append(_cost)\n\n return numpy.mean(_costs), _total_time", "def compute_time_full(model, loss_fun):\n test_fw_time = compute_time_eval(model)\n train_fw_time, train_bw_time = compute_time_train(model, loss_fun)\n return {\n \"test_fw_time\": test_fw_time,\n \"train_fw_time\": train_fw_time,\n \"train_bw_time\": train_bw_time,\n \"train_fw_bw_time\": train_fw_time + train_bw_time,\n }", "def _fill_moment_results(self):\n toprocess = [('stock_tom', self.c_stock, 2),\n ('stock_woody', self.c_stock, 3),\n ('stock_non_woody', self.c_stock, 4),\n ('stock_acid', self.c_stock, 5),\n ('stock_water', self.c_stock, 6),\n ('stock_ethanol', self.c_stock, 7),\n ('stock_non_soluble', self.c_stock, 8),\n ('stock_humus', self.c_stock, 9),\n ('change_tom', self.c_change, 2),\n ('change_woody', self.c_change, 3),\n ('change_non_woody', self.c_change, 4),\n ('change_acid', self.c_change, 5),\n ('change_water', self.c_change, 6),\n ('change_ethanol', self.c_change, 7),\n ('change_non_soluble', self.c_change, 8),\n ('change_humus', self.c_change, 9),\n ('co2', self.co2_yield, 2)]\n for (resto, dataarr, dataind) in toprocess:\n # filter time steps\n ts = numpy.unique(dataarr[:,1])\n # extract data for the timestep\n for timestep in ts:\n ind = numpy.where(dataarr[:,1]==timestep)\n mean = stats.mean(dataarr[ind[0], dataind])\n mode_res = stats.mode(dataarr[ind[0], dataind])\n mode = mode_res[0]\n var = stats.var(dataarr[ind[0], dataind])\n skew = stats.skew(dataarr[ind[0], 
dataind])\n kurtosis = stats.kurtosis(dataarr[ind[0], dataind])\n if var>0.0:\n sd2 = 2 * math.sqrt(var)\n else:\n sd2 = var\n res = [[timestep, mean, mode[0], var, skew, kurtosis,\n mean - sd2, mean + sd2]]\n if resto=='stock_tom':\n self.md.stock_tom = numpy.append(self.md.stock_tom,\n res, axis=0)\n elif resto=='stock_woody':\n self.md.stock_woody = numpy.append(self.md.stock_woody,\n res, axis=0)\n elif resto=='stock_non_woody':\n self.md.stock_non_woody = numpy.append(\\\n self.md.stock_non_woody, res, axis=0)\n elif resto=='stock_acid':\n self.md.stock_acid = numpy.append(self.md.stock_acid,\n res, axis=0)\n elif resto=='stock_water':\n self.md.stock_water = numpy.append(self.md.stock_water,\n res, axis=0)\n elif resto=='stock_ethanol':\n self.md.stock_ethanol = numpy.append(self.md.stock_ethanol,\n res, axis=0)\n elif resto=='stock_non_soluble':\n self.md.stock_non_soluble= numpy.append(\n self.md.stock_non_soluble, res, axis=0)\n elif resto=='stock_humus':\n self.md.stock_humus = numpy.append(self.md.stock_humus,\n res, axis=0)\n elif resto=='change_tom':\n self.md.change_tom = numpy.append(self.md.change_tom,\n res, axis=0)\n elif resto=='change_woody':\n self.md.change_woody = numpy.append(self.md.change_woody,\n res, axis=0)\n elif resto=='change_non_woody':\n self.md.change_non_woody = numpy.append(\\\n self.md.change_non_woody, res, axis=0)\n elif resto=='change_acid':\n self.md.change_acid = numpy.append(self.md.change_acid,\n res, axis=0)\n elif resto=='change_water':\n self.md.change_water = numpy.append(self.md.change_water,\n res, axis=0)\n elif resto=='change_ethanol':\n self.md.change_ethanol = numpy.append(\n self.md.change_ethanol, res, axis=0)\n elif resto=='change_non_soluble':\n self.md.change_non_soluble=numpy.append(\n self.md.change_non_soluble, res, axis=0)\n elif resto=='change_humus':\n self.md.change_humus = numpy.append(self.md.change_humus,\n res, axis=0)\n elif resto=='co2':\n self.md.co2 = numpy.append(self.md.co2, res, axis=0)", "def output_results(self):\n for ba in self.regions:\n if (ba in self.import_regions) or (ba in self.generation_regions):\n continue\n if ba in BA_930_INCONSISTENCY[self.year]:\n logger.warning(f\"Using D instead of (G-TI) for consumed calc in {ba}\")\n self.results[ba][\"net_consumed_mwh\"] = self.eia930.df[\n KEYS[\"E\"][\"D\"] % ba\n ][self.generation.index]\n else:\n self.results[ba][\"net_consumed_mwh\"] = (\n self.generation[ba] - self.eia930.df[KEYS[\"E\"][\"TI\"] % ba]\n )[self.generation.index]\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n self.results[ba][get_column(pol, adjustment=adj)] = (\n self.results[ba][\n get_rate_column(pol, adjustment=adj, generated=False)\n ]\n * self.results[ba][\"net_consumed_mwh\"]\n )\n\n # Although we directly calculate rates, to calculate annual average rates\n # we sum emissions and generation then divide.\n for time_resolution in TIME_RESOLUTIONS:\n time_dat = self.results[ba].copy(deep=True)\n\n # Get local timezone\n assert not pd.isnull(self.ba_ref.loc[ba, \"timezone_local\"])\n time_dat[\"datetime_local\"] = time_dat.index.tz_convert(\n self.ba_ref.loc[ba, \"timezone_local\"]\n )\n time_dat = time_dat.reset_index() # move datetime_utc to column\n time_dat = time_dat[\n time_dat.datetime_local.dt.year == self.year\n ] # keep year of local data\n\n if time_resolution == \"hourly\":\n # No resampling needed; keep timestamp cols in output\n time_cols = [\"datetime_utc\", \"datetime_local\"]\n missing_hours = time_dat[time_dat.isna().any(axis=1)]\n if len(missing_hours) > 0:\n 
logger.warning(\n f\"{len(missing_hours)} hours are missing in {ba} consumed data\"\n )\n elif time_resolution == \"monthly\":\n time_dat[\"month\"] = time_dat.datetime_local.dt.month\n # Aggregate to appropriate resolution\n time_dat = (\n time_dat.groupby(\"month\")[EMISSION_COLS + [\"net_consumed_mwh\"]]\n .sum()\n .reset_index() # move \"month\" to column\n )\n time_cols = [\"month\"]\n elif time_resolution == \"annual\":\n time_dat[\"year\"] = time_dat.datetime_local.dt.year\n # Aggregate to appropriate resolution\n time_dat = (\n time_dat.groupby(\"year\")[EMISSION_COLS + [\"net_consumed_mwh\"]]\n .sum()\n .reset_index() # move \"year\" to column\n )\n time_cols = [\"year\"]\n\n # Calculate rates from summed emissions, consumption\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n rate_col = get_rate_column(pol, adj, generated=False)\n emission_col = get_column(pol, adj)\n time_dat[rate_col] = (\n time_dat[emission_col] / time_dat[\"net_consumed_mwh\"]\n )\n\n # Output\n output_to_results(\n time_dat[time_cols + CONSUMED_EMISSION_RATE_COLS],\n ba,\n f\"/carbon_accounting/{time_resolution}/\",\n self.prefix,\n skip_outputs=self.skip_outputs,\n )\n return", "def refresh(self):\n for i in self.data:\n values = self.data[i]\n try:\n if values[\"state\"] == \"Teardown\":\n t_delta = (values[\"t_end\"] or values[\n \"date\"]) - values[\"ts\"]\n else:\n t_delta = values[\"date\"] - values[\"ts\"]\n\n if t_delta.total_seconds() < 0:\n t_delta = values[\"ts\"] - values[\"ts\"]\n values[\"duration\"] = str(t_delta.total_seconds())\n except:\n print sys.exc_info()\n # print values\n values[\"duration\"] = 0" ]
[ "0.6698724", "0.58304536", "0.5740768", "0.57148784", "0.5643352", "0.55625993", "0.55606973", "0.54921097", "0.5462226", "0.5439169", "0.53766143", "0.5346602", "0.5320928", "0.5296981", "0.5267185", "0.52655417", "0.5249101", "0.5232051", "0.5216631", "0.51879865", "0.51837116", "0.518146", "0.51782316", "0.5170916", "0.516767", "0.5146919", "0.51385355", "0.5137504", "0.51271117", "0.51228094" ]
0.75268835
0
Create full, monthly-mean, and eddy timeseries of data.
def _make_full_mean_eddy_ts(self, data): bool_monthly = (['monthly_from' in self.dtype_in_time] + ['time-mean' in dout for dout in self.dtype_out_time]) bool_eddy = ['eddy' in dout for dout in self.dtype_out_time] if not all(bool_monthly): full, full_dt = self._compute_full_ts(data, monthly_mean=False) else: full = False if any(bool_eddy) or any(bool_monthly): monthly, monthly_dt = self._compute_full_ts(data, monthly_mean=True) else: monthly = False if any(bool_eddy): eddy = full - utils.times.monthly_mean_at_each_ind(monthly, full) else: eddy = False # Average within each year. if not all(bool_monthly): full = self._full_to_yearly_ts(full, full_dt) if any(bool_monthly): monthly = self._full_to_yearly_ts(monthly, monthly_dt) if any(bool_eddy): eddy = self._full_to_yearly_ts(eddy, full_dt) return full, monthly, eddy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_full_ts(self, data, monthly_mean=False, zonal_asym=False):\n # Get results at each desired timestep and spatial point.\n # Here we need to provide file read-in dates (NOT xarray dates)\n full_ts, dt = self._compute(data, monthly_mean=monthly_mean)\n if zonal_asym:\n full_ts = full_ts - full_ts.mean(internal_names.LON_STR)\n # Vertically integrate.\n vert_types = ('vert_int', 'vert_av')\n if self.dtype_out_vert in vert_types and self.var.def_vert:\n # Here we need file read-in dates (NOT xarray dates)\n full_ts = utils.vertcoord.int_dp_g(\n full_ts, self._get_pressure_vals(dp, self.start_date,\n self.end_date)\n )\n if self.dtype_out_vert == 'vert_av':\n full_ts *= (GRAV_EARTH / self._to_desired_dates(self._ps_data))\n return full_ts, dt", "def aggregate_data(tdata):\n # extract the unique mean and daily pair\n unique_pair = np.unique(np.vstack((tdata.mean_temp,\n tdata.daily_temp)).T, axis=0)\n mean_temp = unique_pair[:, 0]\n daily_temp = unique_pair[:, 1]\n\n obs_mean = []\n obs_std = []\n\n for p in unique_pair:\n valid_id = (tdata.mean_temp == p[0]) &\\\n (tdata.daily_temp == p[1]) &\\\n (tdata.trimming_weights > 0.5)\n obs_mean_atp = tdata.obs_mean[valid_id]\n obs_std_atp = tdata.obs_std[valid_id]\n\n ivar = 1.0/obs_std_atp**2\n obs_mean_atp = obs_mean_atp.dot(ivar)/np.sum(ivar)\n obs_std_atp = np.sqrt(1.0/np.sum(ivar))\n # obs_std_atp = np.mean(obs_std_atp)\n\n obs_mean.append(obs_mean_atp)\n obs_std.append(obs_std_atp)\n\n obs_mean = np.array(obs_mean)\n obs_std = np.array(obs_std)\n\n study_id = np.arange(obs_mean.size)\n data_id = None\n\n return utils.TempData(mean_temp,\n daily_temp,\n obs_mean,\n obs_std,\n study_id,\n data_id)", "def data_read(variable):\t\t\r\n\tdef day2datetime(scenario,days):\r\n\t\t\"\"\"\r\n\t\t# convert days from a reference into int datetime \r\n\t\t# do not take leap years into account\r\n\t\t\"\"\"\r\n\t\tdate_int = np.empty((len(days)));date_int[:]=np.nan\r\n\t\tif scenario =='T1970C': start_year =1970\r\n\t\telse: start_year =2010\r\n\t\tstart =(start_year*365)\r\n\t\tith=0\t\r\n\t\tfor iday in days:\r\n\t\t\tmonth_days =np.array([31,28,31,30,31,30,31,31,30,31,30,31])\r\n\t\t\tcalendar_days = np.array([0,31,59,90,120,151,181,212,243,273,304,334,365])\r\n\t\t\ttotal_days = int(iday) + start; \r\n\t\t\tyear = total_days//365; \r\n\t\t\tremainder = total_days%365\r\n\t\t\tif remainder ==0: year=year-1;month=12;day=31\r\n\t\t\telse: \r\n\t\t\t\tmonth = 1+[layer for layer in range(len(calendar_days)) if calendar_days[layer]< remainder and calendar_days[layer+1]>=remainder][0]\r\n\t\t\t\tday = int(remainder - calendar_days[month-1])\r\n\t\t\t\tif day == 0: day = month_days[month-1]\r\n\t\t\tdate_int[ith] = year*10000+month*100+day\r\n\t\t\tith=ith+1\r\n\t\treturn date_int.astype(int)\r\n\t\t\r\n\tdef mon_mean2annual_mean(scenario,time,data):\r\n\t\tannual_mean=np.empty((30,192,288));annual_mean[:]=np.nan\r\n\t\tcalendar_day = np.array([31,28,31,30,31,30,31,31,30,31,30,31])\r\n\t\tif scenario=='T1970RCP':\r\n\t\t\tyear_series = range(2020,2050)\r\n\t\telif scenario=='EdgEne':\r\n\t\t\tyear_series = range(2200,2230)\r\n\t\telif scenario=='Edg70GO':\r\n\t\t\tyear_series = range(2070,2100)\r\n\t\telse:\r\n\t\t\tyear_series = range(2130,2160)\r\n\t\tfor iyear in year_series:\r\n\t\t\t\r\n\t\t\tif (iyear == year_series[0] and time[0]//100 >= year_series[0] *100+1):\r\n\t\t\t\tlayer_b=0\r\n\t\t\telse:\r\n\t\t\t\tlayer_b = [layer for layer in range(len(time)) if time[layer]//100 == iyear*100+1][0] #June01\r\n\t\t\tif (iyear == year_series[-1] and 
time[-1]//100 <= year_series[-1] *100+12):\r\n\t\t\t\tlayer_e=-2\r\n\t\t\telse:\r\n\t\t\t\tlayer_e = [layer for layer in range(len(time)) if time[layer]//100 == iyear*100+12][0] #August 31\r\n\t\t\tdata_cache = data[layer_b:layer_e+1,:,:]\r\n\t\t\tannual_mean[iyear-year_series[0],:,:] = stats.nanmean(data_cache,axis=0)\r\n\t\treturn annual_mean\r\n\r\n\tdef data_netcdf(scenario,variable):\r\n\t\tinput_path ='/exports/csce/datastore/geos/users/s1667168/CESM_EDGAR/ModelOutput/FullCp/'\r\n\t\tvar_path = input_path+scenario+'/mon/atm/'+scenario+'.atm.mon.'+variable+'.nc'\r\n\t\t# print var_path\r\n\t\tnc_fid = nc4.Dataset(var_path,mode='r')\r\n\t\tlat = nc_fid.variables['lat'][:]\r\n\t\tlon = nc_fid.variables['lon'][:]\r\n\t\tdays = nc_fid.variables['time'][:]; time = day2datetime(scenario,days);#print time\r\n\t\tif variable ==\"VQ\" or variable == \"VT\":\r\n\t\t\tdata = np.nanmean(nc_fid.variables[variable][:,23:30,:,:],axis=1) # 850hpa\r\n\t\telse:\r\n\t\t\tdata = nc_fid.variables[variable][:]#-273.15\r\n\t\tnc_fid.close()\r\n\t\tvar40map = mon_mean2annual_mean(scenario,time,data)\r\n\t\treturn lon,lat,var40map\r\n\t\r\n\tlon,lat,Edg70GO = data_netcdf('Edg70GO',variable)\r\n\t_,_,T1970 = data_netcdf('T1970RCP',variable)\r\n\t_,_,EdgRef = data_netcdf('EdgRef',variable)\r\n\t_,_,Edg70Oz = data_netcdf('Edg70Oz',variable)\r\n\t_,_,EdgEne = data_netcdf('EdgEne',variable)\r\n\t_,_,EdgTech = data_netcdf('EdgTech',variable)\r\n\treturn lon,lat,T1970,Edg70GO,Edg70Oz,EdgRef,EdgEne,EdgTech", "def obtain_monthly_mean(data=pd.DataFrame()):\n return data.resample(\"MS\").mean()", "def monthly_mean(args_file):\n product, start_date, end_date, variable_name, shape_file = Utility.read_yml_params(args_file)\n stat = Statistic.Mean\n time = TimePeriod.Monthly\n \n\n ds = get_data_set(product, shape_file)\n\n result = Utility.Apply_stat(ds, start_date, end_date, variable_name, stat, time)\n ds.close()\n result.close()\n return result", "def get_series(self,year):\n year_dates, year_dc = self.year_data(year)\n mean_dc = []\n std_dc = []\n for date in year_dates:\n day = date.day\n month = date.month\n idx = [i for i in range(self.dates.shape[0]) \\\n if (self.dates[i].month == month and \\\n self.dates[i].day == day)]\n mean_dc.append(np.ma.mean(self.dc[idx]))\n std_dc.append(np.ma.std(self.dc[idx]))\n\n return np.array(mean_dc), np.array(std_dc)", "def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i = 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if i > 35000:\n # s.save()\n # 
reduce_data()", "def make_data():\n now = int(time())\n n = 20.0\n series1 = [[i*1000,sin(i/n)] for i in range(now-100, now)]\n series2 = [[i*1000,abs(sin(i/n))**((i%(2*n))/n)] for i in range(now-100, now)]\n series3 = [[i*1000,cos(i/(n+1))*1.5] for i in range(now-100, now)]\n series4 = [[series2[i][0], series2[i][1] * series3[i][1]] for i in range(len(series3))]\n data = [series1, series2, series3,series4]\n return data", "def aggregate_qcdata(qcdata):\n\n # DD\n log.debug('Aggregating daily DD QC Data')\n curr_ts, last_ts = datetime.strptime(qcdata['TIMESTAMP_START'][0], '%Y%m%d%H%M'), datetime.strptime(qcdata['TIMESTAMP_START'][-1], '%Y%m%d%H%M')\n curr_y, first_y, last_y = curr_ts.year, curr_ts.year, last_ts.year\n entries_dd = 0\n while curr_y <= last_y:\n days_in_year = 366 if calendar.isleap(curr_y) else 365\n entries_dd += days_in_year\n curr_y += 1\n dtype = [(vlabel, 'f8') for vlabel in NEW_METEO_VARS if vlabel in qcdata.dtype.names]\n dtype_ext = dtype + [(vlabel + PERC_LABEL, 'f8') for vlabel, _ in dtype]\n dtype_dd = TIMESTAMP_DTYPE_BY_RESOLUTION['dd'] + dtype_ext\n data_dd = numpy.empty(entries_dd, dtype=dtype_dd)\n data_dd.fill(-9999)\n day_diff = timedelta(hours=24)\n curr_idx = 0\n while curr_ts <= last_ts:\n if curr_idx % 300 == 0:\n log.debug(\"Aggregating day {d}\".format(d=curr_idx))\n dd = curr_ts.strftime('%Y%m%d')\n data_dd['TIMESTAMP'][curr_idx] = dd\n mask_dd = numpy.char.startswith(qcdata['TIMESTAMP_START'], dd)\n for vlabel, _ in dtype:\n values = qcdata[vlabel][mask_dd]\n perc = float(values[values > -9999].size) / float(values.size)\n if perc > 0.5:\n mean = numpy.mean(values[values > -9999])\n else:\n mean = -9999\n perc = -9999\n data_dd[vlabel][curr_idx] = mean\n data_dd[vlabel + PERC_LABEL][curr_idx] = perc\n curr_idx += 1\n curr_ts += day_diff\n\n # WW\n log.debug('Aggregating weekly WW QC Data')\n recs_ww = 52\n dtype_ww = TIMESTAMP_DTYPE_BY_RESOLUTION['ww'] + dtype_ext\n data_ww = numpy.empty((last_y - first_y + 1) * recs_ww, dtype=dtype_ww)\n data_ww.fill(-9999)\n for idx, year in enumerate(range(first_y, last_y + 1)):\n first_rec = idx * recs_ww\n curr_rec = first_rec\n last_rec = first_rec + recs_ww\n f = datetime(year, 1, 1, 0, 0)\n data_ww['TIMESTAMP_START'][first_rec:last_rec] = [(f + timedelta(days=i * 7)).strftime('%Y%m%d') for i in xrange(0, recs_ww)]\n data_ww['TIMESTAMP_END'][first_rec:last_rec] = [(datetime.strptime(i, '%Y%m%d') + timedelta(days=6)).strftime('%Y%m%d') for i in data_ww['TIMESTAMP_START'][first_rec:last_rec]]\n data_ww['TIMESTAMP_END'][last_rec - 1] = datetime(year, 12, 31, 0, 0).strftime('%Y%m%d')\n while curr_rec < last_rec:\n first_idx, last_idx = numpy.where(data_dd['TIMESTAMP'] == data_ww['TIMESTAMP_START'][curr_rec])[0][0], numpy.where(data_dd['TIMESTAMP'] == data_ww['TIMESTAMP_END'][curr_rec])[0][0]\n for vlabel, _ in dtype:\n values = data_dd[vlabel][first_idx:last_idx]\n values_perc = data_dd[vlabel + PERC_LABEL][first_idx:last_idx]\n values_perc[values_perc <= -9999] = 0.0\n perc = numpy.mean(values_perc)\n if perc > 0.5:\n mean = numpy.mean(values[values > -9999])\n else:\n mean = -9999\n perc = -9999\n data_ww[vlabel][curr_rec] = mean\n data_ww[vlabel + PERC_LABEL][curr_rec] = perc\n curr_rec += 1\n\n\n # MM\n log.debug('Aggregating monthly MM QC Data')\n recs_mm = 12\n dtype_mm = TIMESTAMP_DTYPE_BY_RESOLUTION['mm'] + dtype_ext\n data_mm = numpy.empty((last_y - first_y + 1) * recs_mm, dtype=dtype_mm)\n data_mm.fill(-9999)\n data_mm['TIMESTAMP'] = [str(y) + str(m).zfill(2) for y in range(first_y, last_y + 1) for m 
in range(1, recs_mm + 1)]\n for idx, mm in enumerate(data_mm['TIMESTAMP']):\n mask_mm = numpy.char.startswith(data_dd['TIMESTAMP'], mm)\n for vlabel, _ in dtype:\n values = data_dd[vlabel][mask_mm]\n values_perc = data_dd[vlabel + PERC_LABEL][mask_mm]\n values_perc[values_perc <= -9999] = 0.0\n perc = numpy.mean(values_perc)\n if perc > 0.5:\n mean = numpy.mean(values[values > -9999])\n else:\n mean = -9999\n perc = -9999\n data_mm[vlabel][idx] = mean\n data_mm[vlabel + PERC_LABEL][idx] = perc\n\n # YY\n log.debug('Aggregating yearly YY QC Data')\n dtype_yy = TIMESTAMP_DTYPE_BY_RESOLUTION['yy'] + dtype_ext\n data_yy = numpy.empty((last_y - first_y + 1), dtype=dtype_yy)\n data_yy.fill(-9999)\n data_yy['TIMESTAMP'] = [str(y) for y in range(first_y, last_y + 1)]\n for idx, yy in enumerate(data_yy['TIMESTAMP']):\n mask_yy = numpy.char.startswith(data_dd['TIMESTAMP'], yy)\n for vlabel, _ in dtype:\n values = data_dd[vlabel][mask_yy]\n values_perc = data_dd[vlabel + PERC_LABEL][mask_yy]\n values_perc[values_perc <= -9999] = 0.0\n perc = numpy.mean(values_perc)\n if perc > 0.5:\n mean = numpy.mean(values[values > -9999])\n else:\n mean = -9999\n perc = -9999\n data_yy[vlabel][idx] = mean\n data_yy[vlabel + PERC_LABEL][idx] = perc\n\n return data_dd, data_ww, data_mm, data_yy", "def generate_timeseries(data_list, setname=\"MagneticFields\"):\n full_data = TimeSeriesList()\n for seg in sorted(data_list):\n hfile = h5py.File(data_list[seg], \"r\")\n full_data.append(retrieve_data_timeseries(hfile, \"MagneticFields\"))\n hfile.close()\n return full_data", "def simple_time_series(full_df, test_period, display_graphs=True):\n df = full_df.copy()\n df = df.filter([\"Canteen\"])\n\n train = df.iloc[:-test_period]\n test = df.iloc[-test_period:]\n\n resulting_prediction, predictions = prediction(train, test)\n\n if display_graphs is True:\n plt.figure(figsize=(14, 7))\n plt.plot(train)\n plt.plot(resulting_prediction)\n plt.legend([\"Real values\", \"Prediction\"], loc=\"best\")\n plt.xlabel(\"Date\")\n plt.ylabel(\"Number of people\")\n\n print(\n \"The mean absolute error (MAE) for the Simple Time Series model is {0:.0f} people\".format(\n find_MAE(test, predictions)\n )\n )", "def yearly_mean(args_file):\n product, start_date, end_date, variable_name, shape_file = Utility.read_yml_params(args_file)\n stat = Statistic.Mean\n time = TimePeriod.Yearly\n\n ds = get_data_set(product, shape_file)\n\n result = Utility.Apply_stat(ds, start_date, end_date, variable_name, stat, time)\n return result", "def seasonal_means(t, y, edges=None, hard=False):\n ts, ys = seasonal_series(t, y, edges=edges, hard=hard)\n t_means = [t.jyear.mean() for t in ts]\n t_means = astropy.time.Time(t_means, format='jyear', scale=t.scale)\n y_means = np.array([y.mean() for y in ys])\n y_std = np.array([y.std() for y in ys])\n y_N = np.array([y.size for y in ys])\n return t_means, y_means, y_std, y_N", "def fake_data(sample_rate=512,psd_segment_length=60,nsegs=16):\n epoch = 1153742417.0\n ts_data = numpy.random.normal(0,1,sample_rate*psd_segment_length*nsegs)\n ts_data = types.TimeSeries(ts_data,delta_t=1.0/sample_rate,epoch=epoch)\n return ts_data", "def test_basic_daily_mean(self):\n self.testInst.bounds = self.bounds1\n ans = avg.mean_by_day(self.testInst, 'dummy4')\n assert np.all(ans == 86399 / 2.0)\n\n return", "def temporal_only(\n db_engine, station_id, start, end, sample_size=int(1.0e5),\n balance=None):\n\n data = get_and_adjust_data(\n db_engine, station_id, start, end, sample_size)\n\n # Balance or set to sample_size\n if 
balance is None:\n data = data.sample(n=sample_size)\n else:\n data = bal(data, balance)\n\n # Ensure shuffling.\n data = data.iloc[np.random.permutation(len(data))]\n\n X = []\n yempty = []\n yfull = []\n\n for row in data.iteritems():\n features = [\n (1 if row[0].dayofweek == 0 else 0),\n (1 if row[0].dayofweek == 1 else 0),\n (1 if row[0].dayofweek == 2 else 0),\n (1 if row[0].dayofweek == 3 else 0),\n (1 if row[0].dayofweek == 4 else 0),\n (1 if row[0].dayofweek == 5 else 0),\n (1 if row[0].dayofweek == 6 else 0),\n float(((row[0].hour * 60) + row[0].minute)) / 1440.0,\n float(row[0].month) / 12.0\n ]\n\n X.append(features)\n yempty.append(1 if row[1] == \"empty\" else 0)\n yfull.append(1 if row[1] == \"full\" else 0)\n\n return {'X': X, 'yempty': yempty, 'yfull': yfull}", "def generate_time_series_df(eviction_df):\n evictions_by_month = get_counts_by_month(eviction_df, \"month\", \"total-eviction-filings\")\n timeseries_df = evictions_by_month\n return timeseries_df", "def getDayMeans(gdf,\r\n year_min,month_min,day_min,\r\n year_max,month_max,day_max,\r\n Long_min,Long_max,\r\n Lat_min,Lat_max,\r\n ValueName,Error_name = '',UnCorr_name=''):\r\n output_all = gdf[(gdf.Date >= datetime.date(year_min,month_min,day_min))\r\n & (gdf.Date <= datetime.date(year_max,month_max,day_max))\r\n & (gdf.Long >= Long_min)\r\n & (gdf.Long <= Long_max)\r\n & (gdf.Lat >= Lat_min)\r\n & (gdf.Lat <= Lat_max)].groupby(['Year','Month','Day'])[ValueName].mean().reset_index()\r\n\r\n output = output_all.copy(); print('Caution, min number of mean value = 0')\r\n #output = output_all[(output_all.number >= 10)]\r\n print(len(output_all.Year))\r\n print(len(output.Year))\r\n date = output.apply(lambda x: datetime.date(int(x.Year),int(x.Month),int(x.Day)),axis=1)\r\n output.insert(loc=1,column='Date',value=date)\r\n return output", "def _timeseries_to_dataframe_mean_and_scenarios(timeseries, name):\n width = timeseries.total_values_per_item()\n # Column headers\n columns = [\n [name] * width,\n [timeseries.instance_or_contract_dataframe_column_header()] * width,\n [''] + timeseries.scenario_names\n ]\n # Convert a time series of (date, scenarios[])\n df = pd.DataFrame.from_records(\n ((v.value, *v.scenarios) for v in timeseries.data),\n columns=columns,\n index=[v.date for v in timeseries],\n )\n df.index.name = 'date'\n return df", "def forecast_means(data):\n\t# collect dates\n\tdate_keys = [x.date() for x in list(data)]\n\t# filter out full days\n\tdays = set([x for x in date_keys if date_keys.count(x) == 8])\n\t# group temperature by dates from the filtered list\n\ttemps_grouped = map(lambda x: [v for (k, v) in data.items() if x == k.date()], list(sorted(days)))\n\t# return a dictionary with dates and mean temperature\n\treturn dict([(x, round(statistics.mean(y), 2)) for x, y in zip(list(sorted(days)), list(temps_grouped))])", "def daily_avg(self, run_id):\n time_series = self.get_data(run_id=run_id,\n metric_ids=['00003', '00060', '00001'])\n if len(time_series) == 0:\n return None\n\n precip = time_series[time_series.metric_id == '00003']\n precip['date_time'] = pd.to_datetime(precip['date_time'], utc=True)\n precip.index = precip['date_time']\n precip_daily = precip.resample('D').sum()\n\n flow = time_series[time_series.metric_id == '00060']\n flow['date_time'] = pd.to_datetime(flow['date_time'], utc=True)\n flow.index = flow['date_time']\n flow_daily = flow.resample('D').mean()\n\n temp = time_series[time_series.metric_id == '00001']\n temp['date_time'] = pd.to_datetime(temp['date_time'], utc=True)\n 
temp.index = temp['date_time']\n temp_daily = temp.resample('D').mean()\n\n time_series_daily = temp_daily\\\n .merge(flow_daily,\n how='inner',\n left_index=True,\n right_index=True) \\\n .merge(precip_daily,\n how='inner',\n left_index=True,\n right_index=True)\n time_series_daily.columns = ['temp', 'flow', 'precip']\n time_series_daily = time_series_daily.dropna()\n return time_series_daily", "def _full_to_yearly_ts(self, arr, dt):\n time_defined = self.def_time and not ('av' in self.dtype_in_time)\n if time_defined:\n arr = utils.times.yearly_average(arr, dt)\n return arr", "def resample_timeseries_data(data, frequency, datetime_field, decimal_places):\r\n if not data:\r\n return []\r\n else:\r\n df = pd.DataFrame(data)\r\n df[datetime_field] = pd.to_datetime(df[datetime_field])\r\n time_indexed_data = df.set_index(datetime_field)\r\n resampled_average_concentrations = time_indexed_data.resample(frequency).mean().round(decimal_places) \r\n resampled_timeseries = [{'pollutant_value':row.pollutant_value,\r\n 'time':datetime.strftime(index,'%b,%Y')}\r\n for index, row in resampled_average_concentrations.iterrows() ] \r\n return resampled_timeseries", "def seasonal_mean(args_file):\n product, start_date, end_date, variable_name, shape_file = Utility.read_yml_params(args_file)\n stat = Statistic.Mean\n time = TimePeriod.Seasonal\n\n ds = get_data_set(product, shape_file)\n\n result = Utility.Apply_stat(ds, start_date, end_date, variable_name, stat, time)\n return result", "def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type):\n from .timeseriesdata import TimeSeriesData\n\n # Multiple of resolution\n # We extract just the values_list here because doing it in a\n # separate statement results in django querying the database\n # twice...\n raw = TimeSeriesData.objects.filter(\n ts__gte=data_start,\n ts__lt=data_end,\n sensor=self,\n ).values_list(\"value\", \"ts\")\n\n if not raw:\n # This should raise above but for some reason it doesn't when using\n # values_list\n raise TimeSeriesData.DoesNotExist\n\n # How many samples we would expect if there was no missing data\n expected_samples = (data_end - data_start).total_seconds()/self.resolution\n\n if resolution is AGGREGATE_TO_ONE_VALUE:\n aggregation_factor = expected_samples\n else:\n # Already checked that this divides nicely\n # NOTE\n # should aggregation_factor ALWAYS be expected_samples?\n aggregation_factor = int(resolution//self.resolution)\n\n logger.debug(\"%s objects to aggregate\", len(raw))\n\n aggregation_engine = aggregation_implementations[settings.ZCONNECT_TS_AGGREGATION_ENGINE]\n\n logger.debug(\"Aggregating '%s' with %s, factor %s\",\n aggregation_type, settings.ZCONNECT_TS_AGGREGATION_ENGINE,\n aggregation_factor)\n\n data = aggregation_engine(\n raw,\n aggregation_type,\n aggregation_factor,\n expected_samples,\n data_start,\n data_end,\n self,\n )\n\n return data", "def generate_dateset(data, monthly_size=20):\n data_id = 0\n data_X = []\n data_y = []\n\n for month in range(1, 13):\n start_points_num = len(data[str(month).zfill(2)].columns) - 9\n\n if monthly_size == 'all':\n indexes = list(range(start_points_num))\n else:\n # random sample from each month\n indexes = np.random.choice(start_points_num, monthly_size,\n replace=False)\n\n for ind, i in enumerate(indexes):\n dff = data[str(month).zfill(2)]\n\n X = dff[dff.columns[i:i + 9]].reset_index()\n\n # add id column\n X[0] = 'id_{}'.format(str(data_id))\n\n # change column order\n X = X[[X.columns[-1]] + X.columns[:-1].tolist()]\n 
X.columns = [i for i in range(len(X.columns))]\n\n PM25 = dff[dff.columns[i + 9]].loc['PM2.5']\n y = ['id_{}'.format(str(data_id)), PM25]\n\n data_X.append(X)\n data_y.append(y)\n\n data_id += 1\n\n return data_X, data_y", "def generate_time_series(length, M):\n #standard normal values\n X = np.random.normal(0,1,[length,M])\n return pd.DataFrame(X)", "def export(self, data, **config):\n self.create_timeseries(data, **config)", "def fit_timeseries(xdates, ydata):\n\n pass", "def get_time_series_stats(time_series):\n return pd.Series([np.mean(time_series), np.std(time_series), get_frequency(time_series)])" ]
[ "0.6448815", "0.6376945", "0.62100995", "0.6183168", "0.6156314", "0.60932374", "0.58807534", "0.5847974", "0.58189225", "0.57914835", "0.5777638", "0.57754767", "0.57584393", "0.56558776", "0.5642424", "0.5631541", "0.5627859", "0.5620441", "0.5610955", "0.55953705", "0.55761796", "0.55719113", "0.5568465", "0.55624443", "0.5562061", "0.55595213", "0.5550263", "0.55363727", "0.55292517", "0.55231225" ]
0.74621826
0
Save the data to netcdf files in direc_out.
def _save_files(self, data, dtype_out_time):
    path = self.path_out[dtype_out_time]
    if not os.path.isdir(self.dir_out):
        os.makedirs(self.dir_out)
    if 'reg' in dtype_out_time:
        try:
            reg_data = xr.open_dataset(path)
        except (EOFError, RuntimeError, IOError):
            reg_data = xr.Dataset()
        reg_data.update(data)
        data_out = reg_data
    else:
        data_out = data
    if isinstance(data_out, xr.DataArray):
        data_out = xr.Dataset({self.name: data_out})
    data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_netcdf(self, outfile):", "def save_to_disk(self, filename='ens_state.nc'):\n self.to_netcdf(filename)", "def save_to_netcdf(img, filename):\n filename = os.path.join(datadir, filename + '.nc')\n print('Saving: ' + filename)\n img.to_netcdf(filename)", "def output_netcdf(forecast,proj_dict,grid_dict,start_hour,end_hour,\n stride,size,run_date,target_dataset,smoothing,config):\n for d,date in enumerate(run_date):\n date_outpath = config.forecast_out_path+'20{0}/netcdf/'.format(\n date)\n \n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n \n map_data = make_proj_grids(proj_dict,grid_dict)\n lons = map_data[\"lon\"]\n lats = map_data[\"lat\"]\n \n filtered_forecast = gaussian_filter(forecast[d],smoothing,mode='constant')\n \n filename = date_outpath + \"{0}_{6}_Hail_{1}_Cali_NMEP_{2}mm_{3}_Hours_{4}-{5}.nc\".format(\n config.ensemble_name,\n target_dataset,\n size,\n date,\n start_hour,end_hour,config.forecast_model_names)\n\n \n out_file = Dataset(filename, \"w\")\n out_file.createDimension(\"x\", filtered_forecast.shape[0])\n out_file.createDimension(\"y\", filtered_forecast.shape[1])\n out_file.createVariable(\"Longitude\", \"f4\", (\"x\", \"y\"))\n out_file.createVariable(\"Latitude\", \"f4\",(\"x\", \"y\"))\n out_file.createVariable(\"Data\", \"f4\", (\"x\", \"y\"))\n out_file.variables[\"Longitude\"][:,:] = lons\n out_file.variables[\"Latitude\"][:,:] = lats\n out_file.variables[\"Data\"][:,:] = filtered_forecast\n out_file.projection = proj_dict[\"proj\"]\n out_file.lon_0 = proj_dict[\"lon_0\"]\n out_file.lat_0 = proj_dict[\"lat_0\"]\n out_file.lat_1 = proj_dict[\"lat_1\"]\n out_file.lat_2 = proj_dict[\"lat_2\"]\n out_file.close()\n \n print(\"Writing to \" + filename)\n return", "def Save2Nc(self):\r\n\r\n frameNumber = self.spinBox_FrameNum.value()\r\n\r\n segmentNumber = self.spinBox_SegmentNum.value()\r\n\r\n exposeTime = self.spinBox_ExpTime.value()\r\n width = self.spinBox_Width.value()\r\n xshift = self.spinBox_XShift.value()\r\n hight = self.spinBox_Hight.value()\r\n yshift = self.spinBox_Yshift.value()\r\n\r\n print(\"frameNumber, segmentNumber, width, high is: \", frameNumber, segmentNumber, width, hight)\r\n app = ReadData(noteObj = self.textBrowser_SetMeasureInf, frameNumber=frameNumber, segmentFrame=segmentNumber, width=width, hight=hight)\r\n self.multiFrameData = app.ImageData()\r\n\r\n options = QFileDialog.Options()\r\n options |= QFileDialog.DontUseNativeDialog\r\n # it just provides the name of file that you want to write into\r\n fileName, _= QFileDialog.getSaveFileName(self,\"QFileDialog.getSaveFileName()\",\"\",\"All Files (*);;NC Files (*.nc)\", options=options)\r\n \r\n if fileName:\r\n print(fileName)\r\n\r\n self.multiFrameData.to_netcdf(fileName + '.nc')\r\n self.textBrowser_SetMeasureInf.setTextColor(QtCore.Qt.green)\r\n self.textBrowser_SetMeasureInf.append(\"the data has saved as .nc file! \")", "def save_dataset(self, out_dir, therm_frac=0.):\n dataset = self.get_dataset(therm_frac)\n out_file = os.path.join(out_dir, 'dataset.nc')\n io.log(f'Saving dataset to: {out_file}.')\n try:\n dataset.to_netcdf(os.path.join(out_dir, 'dataset.nc'))\n except ValueError:\n io.log('Unable to save dataset! 
Continuing...')\n\n return dataset", "def save_data(data, filename, *, group=\"posterior\", coords=None, dims=None):\n inference_data = convert_to_inference_data(data, group=group, coords=coords, dims=dims)\n return inference_data.to_netcdf(filename)", "def save_2d_netcdf(lat, lon, vals, varname, units, savename, description, overwrite=False):\n fileexists = os.path.isfile(savename)\n if (fileexists & overwrite) | (not fileexists):\n\n if fileexists: # remove if we want to create a new version\n os.remove(savename)\n\n fout = Dataset(savename, 'w')\n\n nlat = len(lat)\n nlon = len(lon)\n fout.createDimension('lat', nlat)\n fout.createDimension('lon', nlon)\n\n latnc = fout.createVariable('lat', 'f8', ('lat',))\n lonnc = fout.createVariable('lon', 'f8', ('lon',))\n varnc = fout.createVariable(varname, 'f8', ('lat', 'lon'))\n\n fout.description = description\n\n latnc.units = 'degree_north'\n lonnc.units = 'degree_east'\n varnc.units = units\n\n latnc[:] = lat\n lonnc[:] = lon\n varnc[:, :] = vals\n\n fout.close()\n\n return", "def write_netcdf(ncinfo):\r\n\t# ========== Create new netcdf ==========\r\n\tNAME=nc.netcdf_file(ncinfo.fname,'w')\r\n\t\r\n\t# ========== Set up the Dimensions ==========\r\n\tNAME.createDimension('time', None) #Question: Shouldn't time be unlimited?\r\n\t# NAME.createDimension('lev',11)\r\n\tNAME.createDimension('lat',ncinfo.lat)\r\n\tNAME.createDimension('lon',ncinfo.lon)\r\n\t\r\n\t# ========== Setup the Variables ==========\r\n\ttime=NAME.createVariable('time',np.float64,('time',))\r\n\t# lev=NAME.createVariable('lev',np.int32,('lev',))\r\n\tlat=NAME.createVariable('lat',np.float64,('lat',))\r\n\tlon=NAME.createVariable('lon',np.float64,('lon',))\r\n\t# VAR=NAME.createVariable(str(VAR),np.float64,('time','lev','lat','lon'),)\r\n\tVAR=NAME.createVariable(ncinfo.var_name,np.float64,('time','lat','lon'),)\r\n\t# setting the missing value is super important for the file to be cdo readable\r\n\tsetattr(VAR,'missing_value',ncinfo.fill)\r\n\tsetattr(VAR, 'standard_name', ncinfo.var_lname) \r\n\t\r\n\t# ========== Set the units ==========\r\n\ttime.units= 'day as %Y%m%d'\r\n\t# lev.units = '-'\r\n\tlat.units = 'degrees_north'\r\n\tlon.units = 'degrees_east'\r\n\tVAR.units = ncinfo.units\r\n\r\n\t# ========== Add data ==========\r\n\t\r\n\t# creates time vector using the date_range function\r\n\t# time[:]=[t for t in date_range('20110101.5','20111231.5')] \r\n\t# lev[:]=PFT_vector\r\n\tlat[:] = ncinfo.latitudes\r\n\tlon[:] = ncinfo.longitudes\r\n\t# THis is a Bodge for singe variable data\r\n\tVAR[:] = ncinfo.data\r\n\r\n\t#Add global attributes\r\n\tNAME.description = ncinfo.description\r\n\tNAME.history = ncinfo.history\r\n\r\n\t# WHATS MISSING\r\n\t# metadata a whole bunch of metadata\r\n\t# the standard_name and long_name of the variables\r\n\r\n\t# ========== Close the netcdf ==========\r\n\tNAME.close()", "def write_netcdf(file,xc,xc_bnd,yc,yc_bnd,times,hydrographs,fractions,loc,Flist,velocity,diffusion,NODATA,verbose):\n \n f = Dataset(file,'w', format='NETCDF4')\n\n # set dimensions\n time = f.createDimension('time', None)\n x = f.createDimension('x',xc.shape[1])\n y = f.createDimension('y',xc.shape[0])\n nv4 = f.createDimension('nv4',4)\n\n # initialize variables\n time = f.createVariable('time','f8',('time',))\n xcs = f.createVariable('xc','f8',('y','x',))\n ycs = f.createVariable('yc','f8',('y','x',))\n xc_bnds = f.createVariable('xc_bnds','f8',('y','x','nv4',))\n yc_bnds = f.createVariable('yc_bnds','f8',('y','x','nv4',))\n fraction = 
f.createVariable('fraction','f8',('y','x',),fill_value=NODATA)\n UHS = f.createVariable('unit_hydrograph','f8',('time','y','x',),fill_value=NODATA)\n\n # write attributes for netcdf\n f.description = 'Aggregated UH_S and Fraction Vars for full RASM domain'\n f.history = 'Created: {}\\n'.format(tm.ctime(tm.time()))\n f.history += ' '.join(sys.argv) + '\\n'\n f.source = sys.argv[0] # prints the name of script used\n f.velocity = velocity\n f.diffusion = diffusion\n f.outlet_lon = loc[0]\n f.outlet_lat = loc[1]\n f.includes = str(len(Flist))+' files'\n\n ycs.long_name = 'latitude of grid cell center'\n ycs.standard_name = 'latitude'\n ycs.units = 'degrees_north'\n ycs._CoordinateAxisType = 'Lat'\n ycs.bounds = 'yc_bnds'\n\n xcs.long_name = 'longitude of grid cell center'\n xcs.standard_name = 'longitude'\n xcs.units = 'degrees_east'\n xcs._CoordinateAxisType = 'Lon'\n xcs.bounds = 'xc_bnds'\n\n time.standard_name = 'time'\n time.units = 'seconds'\n time.description = 'Seconds since initial impulse'\n time.calendar = 'proleptic_gregorian'\n\n UHS.units = 'unitless'\n UHS.description = 'unit hydrograph for each grid cell with respect to basin outlet location'\n \n fraction.units = 'unitless'\n fraction.description = 'fraction of grid cell contributing to guage location'\n\n # write data to variables initialized above\n time[:]= times\n xcs[:,:] = xc\n ycs[:,:] = yc\n xc_bnds[:,:,:] = xc_bnd\n yc_bnds[:,:,:] = yc_bnd\n UHS[:,:,:] = hydrographs\n fraction[:,:]= fractions\n f.close()\n\n return", "def _write_nc(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n from netCDF4 import Dataset\n grid_nc = Dataset(FN, 'w', format='NETCDF4')\n grid_nc.createDimension('one', 1)\n grid_nc.createDimension('n_cartesian', 3)\n grid_nc.createDimension('n_points', n_points)\n grid_nc.createVariable('origin', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('counts', 'i8', ('one', 'n_cartesian'))\n grid_nc.createVariable('spacing', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('vals', 'f8', ('one', 'n_points'), zlib=True)\n for key in data.keys():\n grid_nc.variables[key][:] = data[key]\n grid_nc.close()", "def write_netcdf(file, lons, lats, times, hydrographs, fractions, loc, grid_id,\n inds, Flist, velocity, diffusion, fill_value, verbose):\n f = Dataset(file,'w', format='NETCDF4')\n\n # set dimensions\n time = f.createDimension('time', None)\n lon = f.createDimension('lon', (len(lons)))\n lat = f.createDimension('lat', (len(lats)))\n\n # initialize variables\n time = f.createVariable('time','f8',('time',))\n lon = f.createVariable('lon','f8',('lon',))\n lat = f.createVariable('lat','f8',('lat',))\n fraction = f.createVariable('fraction','f8',('lat','lon',),fill_value=fill_value)\n UHS = f.createVariable('unit_hydrograph','f8',('time','lat','lon',),fill_value=fill_value)\n\n # write attributes for netcdf\n f.description = 'Aggregated UH_S and Fraction Vars'\n f.history = 'Created: {}\\n'.format(tm.ctime(tm.time()))\n f.history += ' '.join(sys.argv) + '\\n'\n f.source = sys.argv[0] # prints the name of script used\n f.velocity = velocity\n f.diffusion = diffusion\n f.outlet_id = str(grid_id.astype(np.int64))\n f.outlet_y= str(inds[0].astype(np.int64))\n f.outlet_x = str(inds[1].astype(np.int64)) # this is change is a cdo work around. Othewise cdo removes the attribute. 
\n f.outlet_lat = loc[0]\n f.outlet_lon = loc[1]\n f.includes = ', '.join(Flist)\n\n lat.long_name = 'latitude coordinate'\n lat.standard_name = 'latitude'\n lat.units = 'degrees_north'\n\n lon.long_name = 'longitude coordinate'\n lon.standard_name = 'longitude'\n lon.units = 'degrees_east'\n\n time.units = 'seconds since 0001-1-1 0:0:0'\n time.calendar = 'noleap'\n time.longname = 'time'\n time.type_prefered = 'float'\n time.description = 'Seconds since initial impulse'\n\n UHS.units = 'unitless'\n UHS.description = 'unit hydrograph for each grid cell with respect to downstream grid location'\n \n fraction.units = 'unitless'\n fraction.description = 'fraction of grid cell contributing to guage location'\n\n # write data to variables initialized above\n time[:]= times\n lon[:] = lons\n lat[:] = lats\n UHS[:,:,:] = hydrographs\n fraction[:,:]= fractions\n f.close()", "def save_ecco_dataset_to_netcdf(ecco_ds,\n output_dir,\n dataset_name = 'by_variable',\n time_method = 'by_record',\n output_array_precision = np.float32,\n output_freq_code=None):\n\n\n # Create a name of the files if not specified\n # ---------------------------------------------\n if dataset_name =='by_variable':\n # concat all data variables together into a single string\n dataset_name = '_'.join(list(ecco_ds.data_vars))\n\n\n # force load coordinate values in case they are in dask array\n # -----------------------------------------------------------\n for coord in ecco_ds.coords:\n ecco_ds[coord].load()\n\n\n # Define fill values for NaN\n # ---------------------------------------------\n if output_array_precision == np.float32:\n netcdf_fill_value = nc4.default_fillvals['f4']\n\n elif output_array_precision == np.float64:\n netcdf_fill_value = nc4.default_fillvals['f8']\n\n\n # Create NetCDF encoding directives\n # ---------------------------------------------\n print('\\n... creating variable encodings')\n # ... data variable encoding directives\n dv_encoding = dict()\n for dv in ecco_ds.data_vars:\n dv_encoding[dv] = {'zlib':True, \\\n 'complevel':5,\\\n 'shuffle':True,\\\n '_FillValue':netcdf_fill_value}\n\n # ... coordinate encoding directives\n print('\\n... creating coordinate encodings')\n coord_encoding = dict()\n for coord in ecco_ds.coords:\n # set default no fill value for coordinate\n if output_array_precision == np.float32:\n coord_encoding[coord] = {'_FillValue':None, 'dtype':'float32'}\n elif output_array_precision == np.float64:\n coord_encoding[coord] = {'_FillValue':None, 'dtype':'float64'}\n\n # force 64 bit ints to be 32 bit ints\n if (ecco_ds[coord].values.dtype == np.int32) or \\\n (ecco_ds[coord].values.dtype == np.int64) :\n coord_encoding[coord]['dtype'] ='int32'\n\n # fix encoding of time\n if coord == 'time' or coord == 'time_bnds':\n coord_encoding[coord]['dtype'] ='int32'\n\n if 'units' in ecco_ds[coord].attrs:\n # apply units as encoding for time\n coord_encoding[coord]['units'] = ecco_ds[coord].attrs['units']\n # delete from the attributes list\n del ecco_ds[coord].attrs['units']\n\n elif coord == 'time_step':\n coord_encoding[coord]['dtype'] ='int32'\n\n # ... 
combined data variable and coordinate encoding directives\n encoding = {**dv_encoding, **coord_encoding}\n\n\n # Create directory for output files\n # ---------------------------------------------\n filepath = output_dir / dataset_name\n\n if not filepath.exists():\n filepath.mkdir(parents=True, exist_ok=True)\n\n\n # Determine output freqency code.\n # ---------------------------------------------\n # user can specify directory or it can be found if the dataset\n # has the 'time_coverage_resolution' global attribute\n if output_freq_code == None:\n if 'time_coverage_resolution' in ecco_ds.attrs:\n\n print('dataset time averaging from metadata')\n time_coverage_resolution = ecco_ds.attrs['time_coverage_resolution']\n if time_coverage_resolution == 'P1M':\n output_freq_code='AVG_MON'\n elif time_coverage_resolution == 'P1D':\n output_freq_code='AVG_DAY'\n elif time_coverage_resolution == 'P0S':\n output_freq_code='SNAP'\n else:\n print('output_freq_code not defined and not available in dataset metadata')\n print('... using full record time in filename')\n\n\n # Write records to disk as NetCDF\n # ---------------------------------------------\n # one file per time level\n\n if time_method == 'by_record':\n for time_i, rec_time in enumerate(ecco_ds.time):\n\n cur_ds = ecco_ds.isel(time=time_i)\n\n # cast data variables to desired precision (if necessary)\n #for data_var in cur_ds.data_vars:\n # if cur_ds[data_var].values.dtype != output_array_precision:\n # cur_ds[data_var].values = cur_ds[data_var].astype(output_array_precision)\n\n time_date_info =\\\n make_date_str_from_dt64(cur_ds.time.values, output_freq_code)\n\n # sort comments alphabetically\n print('\\n... sorting global attributes')\n cur_ds.attrs = sort_attrs(cur_ds.attrs)\n\n # add one final comment (PODAAC request)\n cur_ds.attrs[\"coordinates_comment\"] = \\\n \"Note: the global 'coordinates' attribute descibes auxillary coordinates.\"\n\n fname = dataset_name + '_' + time_date_info['short'] +\\\n '_' + time_date_info['ppp_tttt'] + '.nc'\n\n print(fname)\n print(cur_ds)\n netcdf_output_filename = filepath / fname\n\n # SAVE\n print('\\n... saving to netcdf ', netcdf_output_filename)\n cur_ds.to_netcdf(netcdf_output_filename, encoding=encoding)\n cur_ds.close()\n\n # one file per year\n elif time_method == 'by_year':\n unique_years = np.unique(ecco_ds.time.dt.year)\n print(unique_years)\n\n for year in unique_years:\n # pull out only records for this year\n cur_ds = ecco_ds.sel(time=slice(str(year), str(year)))\n\n first_time = cur_ds.time.values[0]\n last_time = cur_ds.time.values[-1]\n\n first_time_date_info =\\\n make_date_str_from_dt64(first_time, output_freq_code)\n\n last_time_date_info =\\\n make_date_str_from_dt64(last_time, output_freq_code)\n\n # sort comments alphabetically\n print('\\n... sorting global attributes')\n cur_ds.attrs = sort_attrs(cur_ds.attrs)\n\n # add one final comment (PODAAC request)\n cur_ds.attrs[\"coordinates_comment\"] = \\\n \"Note: the global 'coordinates' attribute descibes auxillary coordinates.\"\n\n fname = dataset_name + '_' +\\\n first_time_date_info['short'] + '_' +\\\n last_time_date_info['short'] + '_' +\\\n first_time_date_info['ppp_tttt']+ '.nc'\n\n print(fname)\n print(cur_ds)\n netcdf_output_filename = filepath / fname\n\n # SAVE\n print('\\n... 
saving to netcdf ', netcdf_output_filename)\n cur_ds.to_netcdf(netcdf_output_filename, encoding=encoding)\n cur_ds.close()", "def write_netcdf(self):\n\n if self.eigr2d_fnames:\n dim_fname = self.eigr2d_fnames[0]\n elif self.gkk_fnames:\n dim_fname = self.gkk_fnames[0]\n elif self.fan_fnames:\n dim_fname = self.fan_fnames[0]\n else:\n raise Exception('Need at least one file to read the dimensions: ' +\n 'EIGR2D, GKK, or FAN. ' +\n 'How did you even get there?')\n\n create_directory(self.nc_output)\n\n # Write on a NC files with etsf-io name convention\n with nc.Dataset(self.nc_output, 'w') as ds:\n\n # FIXME Reading from EIGR2D file is too restrictive\n # Should handle GKK.nc only\n # Read dim from first EIGR2D file\n dim = nc.Dataset(dim_fname, 'r')\n\n # Determine nsppol from reading occ\n nsppol = len(dim.variables['occupations'][:,0,0])\n if nsppol > 1:\n warnings.warn(\"nsppol > 1 has not been tested.\")\n mband = len(dim.dimensions['product_mband_nsppol']) / nsppol\n\n # Create dimension\n ds.createDimension('number_of_atoms',\n len(dim.dimensions['number_of_atoms']))\n ds.createDimension('number_of_kpoints',\n len(dim.dimensions['number_of_kpoints']))\n ds.createDimension('product_mband_nsppol',\n len(dim.dimensions['product_mband_nsppol']))\n\n ds.createDimension('cartesian', 3)\n ds.createDimension('cplex', 2)\n ds.createDimension('number_of_qpoints', self.nqpt)\n ds.createDimension('number_of_spins',\n len(dim.dimensions['number_of_spins']))\n ds.createDimension('max_number_of_states',self.nband_se)\n ds.createDimension('number_of_modes',\n 3*len(dim.dimensions['number_of_atoms']))\n\n ds.createDimension('number_of_temperature', len(self.temperatures))\n ds.createDimension('number_of_frequencies', len(self.omegase))\n\n # Write data on the eigenvalues\n data = ds.createVariable('reduced_coordinates_of_kpoints', 'd',\n ('number_of_kpoints','cartesian'))\n data[:,:] = dim.variables['reduced_coordinates_of_kpoints'][:,:]\n\n data = ds.createVariable(\n 'eigenvalues','d',\n ('number_of_spins','number_of_kpoints','max_number_of_states'))\n data[:,:,:] = dim.variables['eigenvalues'][:,:,:]\n\n data = ds.createVariable(\n 'occupations','i',\n ('number_of_spins','number_of_kpoints','max_number_of_states'))\n data[:,:,:] = dim.variables['occupations'][:,:,:]\n\n data = ds.createVariable(\n 'primitive_vectors', 'd',\n ('cartesian','cartesian'))\n\n data[:,:] = dim.variables['primitive_vectors'][:,:]\n\n dim.close()\n\n # Write epc data\n data = ds.createVariable('renormalization_is_dynamical', 'i1')\n data[:] = self.renormalization_is_dynamical\n\n data = ds.createVariable('broadening_is_dynamical', 'i1')\n data[:] = self.broadening_is_dynamical\n\n data = ds.createVariable('temperatures','d',\n ('number_of_temperature'))\n data[:] = self.temperatures[:]\n\n data = ds.createVariable('smearing', 'd')\n data[:] = self.smearing\n\n data = ds.createVariable('omegase', 'd',\n ('number_of_frequencies'))\n data[:] = self.omegase[:]\n\n # qpt\n data = ds.createVariable(\n 'reduced_coordinates_of_qpoints','d',\n ('number_of_qpoints', 'cartesian'))\n if self.qred is not None:\n data[...] = self.qred[...]\n\n # omega\n data = ds.createVariable(\n 'phonon_mode_frequencies','d',\n ('number_of_qpoints', 'number_of_modes'))\n if self.omega is not None:\n data[...] 
= self.omega[...]\n\n\n # ZPR\n zpr = ds.createVariable(\n 'zero_point_renormalization','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states'))\n\n #fan = ds.createVariable(\n # 'fan_zero_point_renormalization','d',\n # ('number_of_spins', 'number_of_kpoints',\n # 'max_number_of_states'))\n\n #ddw = ds.createVariable(\n # 'ddw_zero_point_renormalization','d',\n # ('number_of_spins', 'number_of_kpoints',\n # 'max_number_of_states'))\n\n if self.zero_point_renormalization is not None:\n # FIXME number of spin\n zpr[0,:,:] = self.zero_point_renormalization[:,:].real\n #fan[0,:,:] = self.fan_zero_point_renormalization[:,:].real\n #ddw[0,:,:] = self.ddw_zero_point_renormalization[:,:].real\n\n # TDR\n data = ds.createVariable(\n 'temperature_dependent_renormalization','d',\n ('number_of_spins','number_of_kpoints',\n 'max_number_of_states','number_of_temperature'))\n\n if self.temperature_dependent_renormalization is not None:\n # FIXME number of spin\n data[0,:,:,:] = (\n self.temperature_dependent_renormalization[:,:,:].real)\n\n # ZPR\n data = ds.createVariable(\n 'zero_point_broadening','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states'))\n\n if self.zero_point_broadening is not None:\n # FIXME number of spin\n data[0,:,:] = self.zero_point_broadening[:,:].real\n\n zpr_modes = ds.createVariable(\n 'zero_point_renormalization_by_modes','d',\n ('number_of_modes', 'number_of_spins', 'number_of_kpoints',\n 'max_number_of_states'))\n\n if self.zero_point_renormalization_modes is not None:\n zpr_modes[:,0,:,:] = (\n self.zero_point_renormalization_modes[:,:,:])\n\n # TDB\n data = ds.createVariable(\n 'temperature_dependent_broadening','d',\n ('number_of_spins','number_of_kpoints',\n 'max_number_of_states','number_of_temperature'))\n\n if self.temperature_dependent_broadening is not None:\n # FIXME number of spin\n data[0,:,:,:] = (\n self.temperature_dependent_broadening[:,:,:].real)\n\n # ZSE\n self_energy = ds.createVariable('self_energy','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies', 'cplex'))\n\n if self.self_energy is not None:\n\n # FIXME number of spin\n self_energy[0,:,:,:,0] = self.self_energy[:,:,:].real\n self_energy[0,:,:,:,1] = self.self_energy[:,:,:].imag\n\n # ZSE fan active\n self_energy_fan_active = ds.createVariable('self_energy_fan_active','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies', 'cplex'))\n\n if self.self_energy_fan_active is not None:\n\n # FIXME number of spin\n self_energy_fan_active[0,:,:,:,0] = self.self_energy_fan_active[:,:,:].real\n self_energy_fan_active[0,:,:,:,1] = self.self_energy_fan_active[:,:,:].imag\n\n # ZSE static\n data = ds.createVariable(\n 'self_energy_static','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states'))\n\n if self.self_energy_static is not None:\n # FIXME number of spin\n data[0,:,:] = self.self_energy_static[:,:].real\n\n # TSE\n self_energy_T = ds.createVariable(\n 'self_energy_temperature_dependent','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies',\n 'number_of_temperature', 'cplex'))\n\n if self.self_energy_T is not None:\n # FIXME number of spin\n self_energy_T[0,:,:,:,:,0] = self.self_energy_T[:,:,:,:].real\n self_energy_T[0,:,:,:,:,1] = self.self_energy_T[:,:,:,:].imag\n\n # TSE static\n data = ds.createVariable(\n 'self_energy_static_T','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 
'number_of_temperature'))\n\n if self.self_energy_static_T is not None:\n # FIXME number of spin\n data[0,:,:,:] = self.self_energy_static_T[:,:,:].real\n\n # ZSF\n spectral_function = ds.createVariable(\n 'spectral_function','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies'))\n\n if self.spectral_function is not None:\n # FIXME number of spin\n spectral_function[0,:,:,:] = self.spectral_function[:,:,:]\n\n spectral_function_T = ds.createVariable(\n 'spectral_function_temperature_dependent','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies',\n 'number_of_temperature'))\n\n # TSF\n if self.spectral_function_T is not None:\n # FIXME number of spin\n spectral_function_T[0,:,:,:,:] = (\n self.spectral_function_T[:,:,:,:])\n return", "def test_34_save_ds(self, tempfile_h5):\n example = Example(groups=7, origins=5, )\n example.save_dataset_to_netcdf(tempfile_h5)", "def save(\n self,\n savepath=\"./boutdata.nc\",\n filetype=\"NETCDF4\",\n variables=None,\n save_dtype=None,\n separate_vars=False,\n pre_load=False,\n ):\n\n if variables is None:\n # Save all variables\n to_save = self.data\n else:\n to_save = self.data[variables]\n\n if savepath == \"./boutdata.nc\":\n print(\n \"Will save data into the current working directory, named as\"\n \" boutdata_[var].nc\"\n )\n if savepath is None:\n raise ValueError(\"Must provide a path to which to save the data.\")\n\n # make shallow copy of Dataset, so we do not modify the attributes of the data\n # when we change things to save\n to_save = to_save.copy()\n\n options = to_save.attrs.pop(\"options\")\n if options:\n # TODO Convert Ben's options class to a (flattened) nested\n # dictionary then store it in ds.attrs?\n warnings.warn(\n \"Haven't decided how to write options file back out yet - deleting \"\n \"options for now. 
To re-load this Dataset, pass the same inputfilepath \"\n \"to open_boutdataset when re-loading.\"\n )\n # Delete placeholders for options on each variable and coordinate\n for var in chain(to_save.data_vars, to_save.coords):\n try:\n del to_save[var].attrs[\"options\"]\n except KeyError:\n pass\n\n # Store the metadata as individual attributes instead because\n # netCDF can't handle storing arbitrary objects in attrs\n def dict_to_attrs(obj, section):\n for key, value in obj.attrs.pop(section).items():\n obj.attrs[section + \":\" + key] = value\n\n dict_to_attrs(to_save, \"metadata\")\n # Must do this for all variables and coordinates in dataset too\n for varname, da in chain(to_save.data_vars.items(), to_save.coords.items()):\n try:\n dict_to_attrs(da, \"metadata\")\n except KeyError:\n pass\n\n if \"regions\" in to_save.attrs:\n # Do not need to save regions as these can be reconstructed from the metadata\n try:\n del to_save.attrs[\"regions\"]\n except KeyError:\n pass\n for var in chain(to_save.data_vars, to_save.coords):\n try:\n del to_save[var].attrs[\"regions\"]\n except KeyError:\n pass\n\n if save_dtype is not None:\n encoding = {v: {\"dtype\": save_dtype} for v in to_save}\n else:\n encoding = None\n\n if separate_vars:\n # Save each major variable to a different netCDF file\n\n # Determine which variables are \"major\"\n # Defined as time-dependent, but not solely time-dependent\n major_vars, minor_vars = _find_major_vars(to_save)\n\n print(\"Will save the variables {} separately\".format(str(major_vars)))\n\n # Save each one to separate file\n # TODO perform the save in parallel with save_mfdataset?\n for major_var in major_vars:\n # Group variables so that there is only one time-dependent\n # variable saved in each file\n minor_data = [to_save[minor_var] for minor_var in minor_vars]\n single_var_ds = xr.merge([to_save[major_var], *minor_data])\n\n # Add the attrs back on\n single_var_ds.attrs = to_save.attrs\n\n if pre_load:\n single_var_ds.load()\n\n # Include the name of the variable in the name of the saved\n # file\n path = Path(savepath)\n var_savepath = (\n str(path.parent / path.stem) + \"_\" + str(major_var) + path.suffix\n )\n if encoding is not None:\n var_encoding = {major_var: encoding[major_var]}\n else:\n var_encoding = None\n print(\"Saving \" + major_var + \" data...\")\n with ProgressBar():\n single_var_ds.to_netcdf(\n path=str(var_savepath),\n format=filetype,\n compute=True,\n encoding=var_encoding,\n )\n\n # Force memory deallocation to limit RAM usage\n single_var_ds.close()\n del single_var_ds\n gc.collect()\n else:\n # Save data to a single file\n print(\"Saving data...\")\n with ProgressBar():\n to_save.to_netcdf(\n path=savepath, format=filetype, compute=True, encoding=encoding\n )\n\n return", "def _write_output_file(\n result_ds: xr.Dataset,\n input_time_encoding: dict | None,\n netcdf_version: NetcdfVersion,\n file_path: str,\n) -> None:\n if input_time_encoding:\n time_encoding = {\n \"calendar\": input_time_encoding.get(\"calendar\"),\n UNITS_KEY: input_time_encoding.get(UNITS_KEY),\n \"dtype\": input_time_encoding.get(\"dtype\"),\n }\n else:\n time_encoding = {UNITS_KEY: \"days since 1850-1-1\"}\n result_ds.to_netcdf(\n file_path,\n format=netcdf_version.name,\n encoding={\"time\": time_encoding},\n )", "def write_to_netCDF(nc_filename, data,\n ncformat='NETCDF4_CLASSIC',\n all_variables=False,\n verbose=True):\n ncfile = Dataset(nc_filename,'w', format=ncformat, clobber=True)\n for dd,dim in enumerate(data['dims']):\n 
ncfile.createDimension(data['dimname'][dd],dim)\n for vv,varname in enumerate(data['varn']):\n if all_variables:\n newvar = ncfile.createVariable(varname,\n data['vardtype'][vv],\n data['vardims'][vv])\n newvar[:] = data['data'][vv]\n newvar.units = data['units'][vv]\n else:\n if varname in core_variables:\n newvar = ncfile.createVariable(varname,\n data['vardtype'][vv],\n data['vardims'][vv],\n fill_value=data['fillValue'])\n newvar[:] = data['data'][vv]\n if verbose:\n print(varname)\n print(newvar[newvar == np.nan])\n newvar[newvar == np.nan] = data['fillValue']\n newvar.units = data['units'][vv]\n ncfile.createDimension('nchars',19)\n newvar[:] = data['time']\n ncfile.description = data['description']\n ncfile.station = data['station']\n ncfile.sensor = data['sensor']\n ncfile.latitude = data['latitude']\n ncfile.longitude = data['longitude']\n ncfile.altitude = data['altitude']\n ncfile.createdon = datetime.now().strftime(standard_datetime_fmt)\n ncfile.createdby = data['author']\n ncfile.close()", "def netcdf_file(self, tmpdir, filename, dim=\"time\", values=[1234],\n units=None, global_attrs=None):\n path = str(tmpdir.join(filename))\n ds = Dataset(path, \"w\")\n ds.createDimension(dim, None)\n var = ds.createVariable(dim, np.float32, (dim,))\n if units:\n var.units = units\n var[:] = values\n if global_attrs:\n for attr, value in global_attrs.items():\n setattr(ds, attr, value)\n ds.close()\n return path", "def save_stats(self, filename, save_full=False, overwrite=True):\n output_ds = self.covs_ds\n if save_full and self.nam_covar_var not in self.covs_ds.data_vars:\n output_ds = self.define_full_ds()\n self.covs_ds.close()\n\n if self.COMPRESS_OUTPUT:\n compress_vars = self.list_of_vars\n if save_full and self.num_cross_covs != 0:\n compress_vars = self.list_of_full_vars\n encoding = {}\n for var in compress_vars:\n if not var in self.exclude_compress:\n encoding.update({var: {\"zlib\": True, \"complevel\": 1}})\n else:\n encoding = None\n\n print(\"Writing stats data to file {}\".format(filename), flush=True)\n if overwrite:\n temp_filename = tempfile.mktemp()\n output_ds.to_netcdf(temp_filename, unlimited_dims=[\"time_counter\"],\n encoding=encoding)\n shutil.move(temp_filename, filename)\n else:\n output_ds.to_netcdf(filename, unlimited_dims=[\"time_counter\"],\n encoding=encoding)", "def save_ncds(vardict,coords,attrs={},filename=None):\n \n if 'time' in coords.keys():\n newtime = [np.double((t-dt(1800,1,1)).total_seconds()/3600) for t in coords['time']['data']]\n delta_t = np.gradient(newtime)[0]\n \n coords['time']['data'] = newtime\n coords['time']['attrs'] = {'long_name':\"Time\",\n 'delta_t':f\"0000-00-{int(delta_t/24):02} {int(delta_t%24):02}:00:00\",\n 'standard_name':\"time\",\n 'axis': \"T\",\n 'units':\"hours since 1800-01-01 00:00:0.0\"}\n \n if 'climo' in vardict.keys():\n long_name = vardict['climo']['attrs']['long_name']\n vardict['climo']['attrs']['long_name'] = 'Climatology of '+long_name\n \n encoding = {k: {'dtype': 'double', '_FillValue': 1e30} for k in coords.keys()}\n for k in vardict.keys():\n encoding.update({k: {'dtype': 'single', '_FillValue': 1e30}})\n \n ds = xr.Dataset.from_dict({\n 'coords':coords,\n 'data_vars':vardict,\n 'dims':[k for k,v in coords.items()],\n 'attrs':attrs,\n })\n \n if isinstance(filename,str):\n ds.to_netcdf(filename,encoding=encoding,mode='w',engine='scipy')\n ds.close()\n else:\n print('filename must be a string')", "def save(self, savepath='./boutdata.nc', filetype='NETCDF4',\n variables=None, save_dtype=None, 
separate_vars=False, pre_load=False):\n\n if variables is None:\n # Save all variables\n to_save = self.data\n else:\n to_save = self.data[variables]\n\n if savepath == './boutdata.nc':\n print(\"Will save data into the current working directory, named as\"\n \" boutdata_[var].nc\")\n if savepath is None:\n raise ValueError('Must provide a path to which to save the data.')\n\n if save_dtype is not None:\n # Workaround to keep attributes while calling astype. See\n # https://github.com/pydata/xarray/issues/2049\n # https://github.com/pydata/xarray/pull/2070\n for da in chain(to_save.values(), to_save.coords.values()):\n da.data = da.data.astype(save_dtype)\n\n # make shallow copy of Dataset, so we do not modify the attributes of the data\n # when we change things to save\n to_save = to_save.copy()\n\n options = to_save.attrs.pop('options')\n if options:\n # TODO Convert Ben's options class to a (flattened) nested\n # dictionary then store it in ds.attrs?\n warnings.warn(\n \"Haven't decided how to write options file back out yet - deleting \"\n \"options for now. To re-load this Dataset, pass the same inputfilepath \"\n \"to open_boutdataset when re-loading.\"\n )\n # Delete placeholders for options on each variable and coordinate\n for var in chain(to_save.data_vars, to_save.coords):\n try:\n del to_save[var].attrs['options']\n except KeyError:\n pass\n\n # Store the metadata as individual attributes instead because\n # netCDF can't handle storing arbitrary objects in attrs\n def dict_to_attrs(obj, section):\n for key, value in obj.attrs.pop(section).items():\n obj.attrs[section + \":\" + key] = value\n dict_to_attrs(to_save, 'metadata')\n # Must do this for all variables and coordinates in dataset too\n for varname, da in chain(to_save.data_vars.items(), to_save.coords.items()):\n try:\n dict_to_attrs(da, 'metadata')\n except KeyError:\n pass\n\n if separate_vars:\n # Save each major variable to a different netCDF file\n\n # Determine which variables are \"major\"\n # Defined as time-dependent, but not solely time-dependent\n major_vars, minor_vars = _find_major_vars(to_save)\n\n print(\"Will save the variables {} separately\"\n .format(str(major_vars)))\n\n # Save each one to separate file\n # TODO perform the save in parallel with save_mfdataset?\n for major_var in major_vars:\n # Group variables so that there is only one time-dependent\n # variable saved in each file\n minor_data = [to_save[minor_var] for minor_var in minor_vars]\n single_var_ds = xr.merge([to_save[major_var], *minor_data])\n\n # Add the attrs back on\n single_var_ds.attrs = to_save.attrs\n\n if pre_load:\n single_var_ds.load()\n\n # Include the name of the variable in the name of the saved\n # file\n path = Path(savepath)\n var_savepath = str(path.parent / path.stem) + '_' \\\n + str(major_var) + path.suffix\n print('Saving ' + major_var + ' data...')\n with ProgressBar():\n single_var_ds.to_netcdf(path=str(var_savepath),\n format=filetype, compute=True)\n\n # Force memory deallocation to limit RAM usage\n single_var_ds.close()\n del single_var_ds\n gc.collect()\n else:\n # Save data to a single file\n print('Saving data...')\n with ProgressBar():\n to_save.to_netcdf(path=savepath, format=filetype, compute=True)\n\n return", "def write_flat_netcdf(outFile,time,frac,uh,x,y,xc,yc,inGlobs,inAttrs):\n f = Dataset(outFile, 'w', format='NETCDF4')\n\n # set dimensions\n times = f.createDimension('time', len(time))\n npoints = f.createDimension('npoints', len(frac))\n \n # initialize variables\n times = 
f.createVariable('time','f8',('time',))\n fracs = f.createVariable('fraction','f8',('npoints',))\n xis = f.createVariable('xi','i4',('npoints',))\n yis = f.createVariable('yi','i4',('npoints',))\n xcs = f.createVariable('xc','f8',('npoints',))\n ycs = f.createVariable('yc','f8',('npoints',))\n uhs = f.createVariable('unit_hydrograph','f8',('time','npoints',))\n \n # deal with attributes\n f.description = 'Flattened uh/fraction grid file'\n f.history = 'Created ' + tm.ctime(tm.time())\n f.velocity = inGlobs['velocity']\n f.diffusion = inGlobs['diffusion']\n f.outlet_lon = inGlobs['outlet_lon']\n f.outlet_lat = inGlobs['outlet_lat']\n f.outlet_y = inGlobs['outlet_y']\n f.outlet_x = inGlobs['outlet_x']\n try:\n f.includes = inGlobs['includes']\n except:\n pass\n \n times.standard_name = inAttrs['time']['standard_name']\n times.units = inAttrs['time']['units']\n times.calendar = inAttrs['time']['calendar']\n \n try:\n fracs.units = inAttrs['fraction']['units']\n except:\n fracs.units = '%'\n fracs.description = inAttrs['fraction']['description']\n \n uhs.units = inAttrs['unit_hydrograph']['units']\n uhs.description = inAttrs['unit_hydrograph']['description']\n \n xis.standard_name = 'x_ind'\n xis.description = 'x index location'\n \n yis.standard_name = 'y_ind'\n yis.description = 'y index location'\n \n xcs.standard_name =inAttrs['xc']['standard_name']\n xcs.long_name = inAttrs['xc']['long_name']\n xcs.units =inAttrs['xc']['units']\n \n ycs.standard_name =inAttrs['yc']['standard_name']\n ycs.long_name = inAttrs['yc']['long_name']\n ycs.units =inAttrs['yc']['units']\n \n times[:] = time\n fracs[:] = frac\n uhs[:,:] = uh\n xis[:] = x\n yis[:] = y\n xcs[:] = xc\n ycs[:] = yc\n\n f.close()\n \n return", "def write_merged_file(self):\n \n #out_name = os.getcwd() + '/FAST_INDEX_merged_' + [ x for x in self.datasets[ list(self.datasets_keys)[0]].split('/') if '.nc' in x ] [0] \n \n \"\"\" Loading the econding of variables created from the harvester script \"\"\"\n encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n \n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n \n logging.info('Writing the observations_tables to the netCDF output via xarray to_netcdf() ')\n #obs_tab = self.MergedObs[ ['date_time' , 'latitude', 'longitude' , 'observation_value' , 'observed_variable' , 'source_id' , 'observation_id', 'z_coordinate' ] ] # including only some columns \n obs_tab = self.MergedObs # including only some columns \n obs_tab = self.add_cdm_missing_columns(obs_tab) \n \n \"\"\" \n # Old using xarray\n obs_tab = obs_tab.to_xarray() \n for v in obs_tab.variables:\n if v == \"index\" or v == \"hdrlen\" or 'string' in v:\n continue\n obs_tab[v].attrs['external_table'] = self.attributes['observations_table'][v]['external_table']\n obs_tab[v].attrs['description'] = self.attributes['observations_table'][v]['description']\n \"\"\"\n\n for k in obs_tab.columns:\n print('Writing the observation table using h5py new method for the variable: ' , k )\n df = obs_tab[ [k] ] # making a 1 column dataframe \n write_dict_h5(out_name, df, k, encodings['observations_table'], var_selection=[], mode='a', attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')})\n \n #obs_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='w' , group = 'observations_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the header_table to the netCDF 
output via xarray ')\n head_tab = self.MergedHead.to_xarray()\n for v in head_tab.variables: \n if v == \"index\" or v == \"hdrlen\" or v == \"string80\":\n continue\n head_tab[v].attrs['external_table'] = self.attributes['header_table'][v]['external_table']\n head_tab[v].attrs['description'] = self.attributes['header_table'][v]['description']\n \n head_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = 'header_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the station_configuration and source_configurations tables to the netCDF output via xarray ') \n for k in self.data.keys():\n if k == 'cdm_tables':\n continue \n group_name = k + '_station_configuration'\n sc = self.data[k]['station_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n group_name = k + '_source_configuration'\n sc = self.data[k]['source_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n \"\"\" To be fixed ! \"\"\"\n #group_name = k + '_source_configuration'\n #sc = self.data[k]['source_configuration'][:1].to_xarray()\n #sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name ) \n \n logging.info('Writing the merged record indices to the netCDF output ') \n di = self.MergedRecordIndex\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n \n logging.info('Writing the merged feedback to the netCDF output ') \n group_name = 'era5fb' \n di = self.MergedFeedback\n di = di.to_xarray()\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n logging.info('Writing the standard cdm tables to the netCDF output ') \n for t in self.data['cdm_tables'].keys(): \n d = self.data['cdm_tables'][t]\n d.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = t )\n \n logging.info('*** Done writing the output netCDF file ')", "def write_grid(self):\n \n self.fout = self.create_savename()\n ncout = Dataset(self.fout, 'w')\n print('Writing: %s' % self.fout)\n \n # Create dimensions\n lon = ncout.createDimension(self.xvar, self.nx)\n lat = ncout.createDimension(self.yvar, self.ny)\n depth = ncout.createDimension(self.zvar, self.nz)\n tdim = ncout.createDimension('time', None)\n bndsDim = ncout.createDimension('bnds', 2)\n\n # Create variables\n varx = ncout.createVariable(self.xvar, 'float64', (self.xvar,))\n vary = ncout.createVariable(self.yvar, 'float64', (self.yvar,))\n varz = ncout.createVariable(self.zvar, 'float64', (self.zvar,))\n\n varx.standard_name = 'longitude'\n varx.units = 'degrees'\n ncout.variables['LONGITUDE'].bounds = 'lon_bnds'\n lonBndsVar = ncout.createVariable('lon_bnds', 'float64', (self.xvar, 'bnds'))\n xboundaries = np.concatenate([self.xminbounds, np.reshape(self.xmaxbounds[-1],(1,1))[0]])\n lonBndsVar[:,:] = np.array([xboundaries[:-1], xboundaries[1:]]).T\n\n vary.standard_name = 'latitude'\n vary.units = 'degrees'\n ncout.variables['LATITUDE'].bounds = 'lat_bnds'\n latBndsVar = ncout.createVariable('lat_bnds', 'float64', (self.yvar, 'bnds'))\n yboundaries = np.concatenate([self.yminbounds, np.reshape(self.ymaxbounds[-1],(1,1))[0]])\n latBndsVar[:,:] = np.array([yboundaries[:-1], yboundaries[1:]]).T\n \n varz.standard_name = 'depth'\n varz.units = 'metres'\n ncout.variables['DEPH_CORRECTED'].bounds = 'depth_bnds'\n depthBndsVar = ncout.createVariable('depth_bnds', 'float64', (self.zvar, 
'bnds'))\n zboundaries = np.concatenate([self.zminbounds, np.reshape(self.zmaxbounds[-1],(1,1))[0]])\n depthBndsVar[:,:] = np.array([zboundaries[:-1], zboundaries[1:]]).T\n\n vartmean = ncout.createVariable('tmean', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmtmean = ncout.createVariable(self.datavar, 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varsum = ncout.createVariable('sum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmsum = ncout.createVariable('meansum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varcount = ncout.createVariable('count', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n# varmax = ncout.createVariable('gmax', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n# varmin = ncout.createVariable('gmin', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n# varmed = ncout.createVariable('median', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n varpcount = ncout.createVariable('pcount', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n vartime = ncout.createVariable('time', 'float64', ('time',))\n vartime.units = 'hours since 0001-01-01 00:00:00'\n vartime.calendar = 'gregorian'\n\n # Write to variables\n varx[:] = self.xgrid\n vary[:] = self.ygrid\n varz[:] = self.zgrid\n vartmean[:] = self.grid_tmean[np.newaxis]\n varmtmean[:] = self.grid_meantmean[np.newaxis]\n varsum[:] = self.grid_sum[np.newaxis]\n varmsum[:] = self.grid_meansum[np.newaxis]\n varcount[:] = self.grid_count[np.newaxis]\n varpcount[:] = self.grid_pcount[np.newaxis]\n# varmax[:] = self.grid_max[np.newaxis]\n# varmin[:] = self.grid_min[np.newaxis]\n# varmed[:] = self.grid_med[np.newaxis]\n vartime[:] = date2num(self.dt, units=vartime.units, calendar=vartime.calendar)\n \n # Add global attributes\n ncout.history = 'Created ' + time.ctime(time.time())\n \n # Save\n ncout.close()", "def create_netcdf(self):\n\n # NetCDF file and global attributes\n file_name = self.output_dir / f\"{author.replace(' ', '_').lower()}_{self.sos_file.split('_')[0]}.nc\"\n ds = Dataset(file_name, 'w')\n ds.author = self.author\n ds.contact = self.email\n ds.sos_file = self.sos_file\n ds.production_date = datetime.now().strftime('%d-%b-%Y %H:%M:%S')\n\n # Groups\n for source in self.priors_dict.keys():\n # Check if the group has data\n if self.priors_dict[source]:\n # Create groups for each prior and populate with data\n for prior, data in self.priors_dict[source].items():\n # Group\n g = ds.createGroup(f\"{source}_{prior}\")\n\n # Attribute\n g.run_type = data[\"run_type\"]\n\n # Dimensions\n if \"reach_ids\" in data.keys(): \n self.create_dimensions(source, g, num_reaches=len(data[\"reach_ids\"]))\n else:\n self.create_dimensions(source, g, num_nodes=len(data[\"node_ids\"]))\n\n # Variables\n self.create_variables(source, prior, data, g)\n\n # Close dataset file\n ds.close()", "def Writefile(self, outfile, verbose=True):\n \n self.outfile = outfile\n \n # Write SUNTANS grid to file\n nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')\n nc.Description = 'SUNTANS subsetted history file'\n nc.Author = ''\n nc.Created = datetime.now().isoformat()\n nc.type = 'SUNTANS HIS file'\n #pdb.set_trace()\n nc.createDimension('Nc', self.Nc)\n nc.createDimension('Np', self.Np)\n nc.createDimension('Ne', self.Ne)\n nc.createDimension('Nk', self.Nk)\n nc.createDimension('numsides', self.numsides)\n \n nc.createDimension('time', None)\n \n def write_nc_var(var, name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n 
nc.variables[name].units = units\n nc.variables[name][:] = var\n if verbose:\n print ' ... wrote ', name\n \n def create_nc_var(name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n if verbose:\n print ' ... wrote ', name\n \n # Grid variables\n write_nc_var(self.xv, 'xv', ('Nc'))\n write_nc_var(self.yv, 'yv', ('Nc'))\n write_nc_var(self.xp, 'xp', ('Np'))\n write_nc_var(self.yp, 'yp', ('Np'))\n write_nc_var(self.xe, 'xe', ('Ne'))\n write_nc_var(self.ye, 'ye', ('Ne'))\n write_nc_var(self.dz, 'dz', ('Nk'))\n write_nc_var(self.dv, 'dv', ('Nc'))\n write_nc_var(self.Ac, 'Ac', ('Nc'))\n write_nc_var(self.Nk, 'Nk', ('Nc'))\n write_nc_var(self.face, 'face', ('Nc','numsides'))\n write_nc_var(self.mark, 'mark', ('Ne'))\n write_nc_var(self.cells, 'cells', ('Nc','numsides'))\n \n \n # Create the data variables\n create_nc_var('time',('time'),'seconds since 1990-01-01 00:00:00')\n create_nc_var('salt',('time','Nk','Nc'),'psu')\n create_nc_var('temp',('time','Nk','Nc'),'degrees C')\n create_nc_var('uc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('vc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('nu_v',('time','Nk','Nc'),'m2 s-1')\n create_nc_var('rho',('time','Nk','Nc'),'kg m-3')\n create_nc_var('tau_x',('time','Nc'),'N m-2')\n create_nc_var('tau_y',('time','Nc'),'N m-2')\n create_nc_var('eta',('time','Nc'),'m')\n \n nc.close()", "def read_netcdf(self,filename):", "def write_file(_data, _label, _clinical, _contour, _type):\n pickle.dump(np.array(_data), open(_type + '_data.pxl', 'wb'))\n pickle.dump(np.array(_label), open(_type + '_label.pxl', 'wb'))\n pickle.dump(np.array(_clinical), open(_type + '_clinical.pxl', 'wb'))\n pickle.dump(np.array(_contour), open(_type + '_contour.pxl', 'wb'))", "def Writedata(self, tstep):\n \n nc = Dataset(self.outfile, 'a')\n \n nc.variables['time'][tstep] = self.time\n nc.variables['salt'][tstep] = self.salt\n nc.variables['temp'][tstep] = self.temp\n nc.variables['uc'][tstep] = self.uc\n nc.variables['vc'][tstep] = self.vc\n nc.variables['nu_v'][tstep] = self.nu_v\n nc.variables['rho'][tstep] = self.rho\n nc.variables['tau_x'][tstep] = self.tau_x\n nc.variables['tau_y'][tstep] = self.tau_y\n nc.variables['eta'][tstep] = self.eta\n \n nc.close()" ]
[ "0.7944753", "0.70882285", "0.69628674", "0.6949173", "0.6941277", "0.6930326", "0.683638", "0.68314844", "0.6766005", "0.67641866", "0.6610118", "0.6586224", "0.65797985", "0.6492392", "0.64117795", "0.6389692", "0.6355074", "0.6350296", "0.63153183", "0.62991965", "0.6270947", "0.6219923", "0.6202825", "0.61927253", "0.61446553", "0.61292684", "0.6103632", "0.6102644", "0.60941434", "0.60799944" ]
0.77034247
1
Add the data to the tar file in tar_out_direc.
def _write_to_tar(self, dtype_out_time): # When submitted in parallel and the directory does not exist yet # multiple processes may try to create a new directory; this leads # to an OSError for all processes that tried to make the # directory, but were later than the first. try: os.makedirs(self.dir_tar_out) except OSError: pass # tarfile 'append' mode won't overwrite the old file, which we want. # So open in 'read' mode, extract the file, and then delete it. # But 'read' mode throws OSError if file doesn't exist: make it first. utils.io.dmget([self.path_tar_out]) with tarfile.open(self.path_tar_out, 'a') as tar: pass with tarfile.open(self.path_tar_out, 'r') as tar: old_data_path = os.path.join(self.dir_tar_out, self.file_name[dtype_out_time]) try: tar.extract(self.file_name[dtype_out_time], path=old_data_path) except KeyError: pass else: # The os module treats files on archive as non-empty # directories, so can't use os.remove or os.rmdir. shutil.rmtree(old_data_path) retcode = subprocess.call([ "tar", "--delete", "--file={}".format(self.path_tar_out), self.file_name[dtype_out_time] ]) if retcode: msg = ("The 'tar' command to save your aospy output " "exited with an error. Most likely, this is due " "to using an old version of 'tar' (especially if " "you are on a Mac). Consider installing a newer " "version of 'tar' or disabling tar output by " "setting `write_to_tar=False` in the " "`calc_exec_options` argument of " "`submit_mult_calcs`.") logging.warn(msg) with tarfile.open(self.path_tar_out, 'a') as tar: tar.add(self.path_out[dtype_out_time], arcname=self.file_name[dtype_out_time])
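For context, the append step at the heart of the method above reduces to the standard-library tarfile pattern sketched here; the helper name and arguments are illustrative assumptions, and it assumes an uncompressed archive, since tarfile's append mode does not support compressed files. The delete-before-append dance in the original exists because appending a member whose name already exists leaves a stale duplicate in the archive.

import os
import tarfile

def append_to_tar(tar_path, src_path, arcname):
    # Append mode adds a new member even if one with the same name is
    # already present, so stale copies should be removed first (the
    # method above shells out to `tar --delete` for exactly that).
    mode = 'a' if os.path.exists(tar_path) else 'w'
    with tarfile.open(tar_path, mode) as tar:
        tar.add(src_path, arcname=arcname)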
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tar_dir(output_path, source_dir):\n with tarfile.open(output_path, \"w:gz\") as tar:\n tar.add(source_dir, arcname=os.path.basename(source_dir))", "def _outside_tar(self):\r\n outside_tar = self.unsafe_common_dir / \"unsafe_file.tar.gz\"\r\n with tarfile.open(outside_tar, \"w:gz\") as tar:\r\n tar.addfile(tarfile.TarInfo(str(self.content_dir / \"a_file\")))\r\n\r\n return outside_tar", "def create_tar(self):\n with tarfile.open(self.tgzfile, \"w:gz\") as tar_handle:\n for root, _, files in os.walk(self.dirname):\n for file in files:\n tar_handle.add(os.path.join(root, file))", "def _outside_tar2(self):\r\n outside_tar = self.unsafe_common_dir / \"unsafe_file.tar.gz\"\r\n with tarfile.open(outside_tar, \"w:gz\") as tar:\r\n tar.addfile(tarfile.TarInfo(str(self.unsafe_common_dir / \"../a_file\")))\r\n\r\n return outside_tar", "def add_file(self, fname):\n if not no_tarball:\n out_tar = os.path.join(outputdir, self.name)\n t_fname = os.path.join(outputdir, fname)\n archive = tarfile.open(out_tar, 'a')\n\n archive.add(t_fname, fname.replace(runID, ''))\n archive.close()\n try:\n if not os.path.isdir(t_fname):\n os.remove(t_fname)\n else:\n shutil.rmtree(t_fname)\n except OSError:\n log.error(\"Added to archive, but could not delete {0}.\".format(t_fname))", "def _taradd(func, tar_file, name):\n with tempfile.NamedTemporaryFile('wb', delete=False) as temp_file:\n func(temp_file)\n temp_file.close()\n tar_file.add(temp_file.name, arcname=name)\n if os.path.isfile(temp_file.name):\n os.remove(temp_file.name)", "def save_tar(self, target_dir):\n # type: (Text) -> None\n raise NotImplementedError(\"\")", "def put_file(self, path, contents):\n data = io.BytesIO()\n with tarfile.open(fileobj=data, mode='w') as tarfile_:\n file_contents = contents.encode() if isinstance(contents, str) else contents\n tarinfo = tarfile.TarInfo(path)\n\n # We set the modification time to now because some systems (e.g. logging) rely upon\n # timestamps to determine whether to read config files.\n tarinfo.mtime = time.time()\n tarinfo.size = len(file_contents)\n tarfile_.addfile(tarinfo, io.BytesIO(file_contents))\n data.seek(0)\n\n self.container.put_archive(path='/', data=data)", "def archive(self, virtual_path_to_tar_files, root, target_name):\n\n\n # TODO: RSYNC and do a diff. 
if there are no changes, we can just skip this part of the dockerfile to maximize layering\n for x in virtual_path_to_tar_files:\n assert os.path.isabs(x)\n\n rel_to_root = [os.path.relpath(x, '/') for x in virtual_path_to_tar_files]\n real_path = [os.path.join(root, x) for x in rel_to_root ]\n\n tup = zip(virtual_path_to_tar_files, real_path)\n\n tar = tarfile.open(os.path.join(self.dir, target_name), 'w')\n\n for vp, rp in tup:\n tar.add(rp, arcname=vp)\n\n tar.close()\n\n self.df.add_docker_cmd('ADD %s /' % target_name)", "def archive_directory(dir_: str, tar_path: str):\n with tarfile.open(tar_path, 'w', encoding='utf-8') as tar:\n tar.add(dir_, arcname=os.path.sep)", "def _place_dataset(self, origin_file_path, out_dir):\n from distutils.dir_util import copy_tree\n\n # copy subdirectory example\n copy_tree(origin_file_path, out_dir)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n else:\n print_info(\"Found previous version at {}\".format(out_dir))\n return\n\n print_info(\"Writing data to {}...\".format(out_dir))", "def tar_file(self, name, contents, mtime=None):\n length = len(contents)\n tar_data = self.tar_file_header(name, length, mtime=mtime)\n tar_data += contents\n tar_data += self.tar_file_padding(length)\n return tar_data", "def save_tar(self, target_dir):\n # type: (Text) -> None\n\n if not os.path.isdir(target_dir):\n raise ValueError(\"Target directory '{}' not found.\".format(target_dir))\n\n base_name = os.path.basename(target_dir)\n base_dir = os.path.dirname(target_dir)\n tarname = shutil.make_archive(base_name, 'gztar', root_dir=base_dir, base_dir=base_name)\n filekey = os.path.basename(tarname)\n self.s3.Object(self.bucket_name, filekey).put(Body=open(tarname, 'rb'))", "def save_tar(self, target_dir):\n # type: (Text) -> None\n if not os.path.isdir(target_dir):\n raise ValueError('target_dir %r not found.' 
% target_dir)\n\n base_name = os.path.basename(target_dir)\n base_dir = os.path.dirname(target_dir)\n tarname = shutil.make_archive(base_name, 'gztar', root_dir=base_dir, base_dir=base_name)\n filekey = os.path.basename(tarname)\n blob = self.bucket.blob(filekey)\n blob.upload_from_filename(tarname)", "def _dir_tar_out(self):\n ens_label = utils.io.ens_label(self.ens_mem)\n return os.path.join(self.proj.tar_direc_out, self.proj.name,\n self.model.name, self.run.name,\n ens_label)", "def _symlink_tar(self):\r\n outsidep = self.unsafe_common_dir / \"unsafe_file.txt\"\r\n symlinkp = self.unsafe_common_dir / \"symlink.txt\"\r\n symlink_tar = self.unsafe_common_dir / \"symlink.tar.gz\"\r\n outsidep.symlink(symlinkp)\r\n with tarfile.open(symlink_tar, \"w:gz\") as tar:\r\n tar.add(symlinkp)\r\n\r\n return symlink_tar", "def make_tar(self, package, input_dir, build_dir, add_args=None):\n tar = self.options.tar_command\n\n # Generate the .tar.gz file\n filename = package + '.tar.gz'\n out_file = open(os.path.join(build_dir, filename), \"w\")\n args = [tar, '--format=gnu', '--exclude-vcs', '-C', build_dir]\n if self.config.get('tar', {}).get('ignore', []):\n for patt in self.config['tar']['ignore']:\n args += ['--exclude', patt]\n if add_args:\n args += add_args\n args += ['-c', input_dir]\n logging.debug(\"Creating %s\", filename)\n tar_proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n gzip_proc = subprocess.Popen(['gzip', '-9'], stdin=tar_proc.stdout,\n stdout=out_file)\n\n if tar_proc.wait() != 0 or gzip_proc.wait() != 0:\n logging.error(\"tar/gzip failed, exiting\")\n sys.exit(1)\n out_file.close()\n logging.info('%s written', filename)\n return filename", "def _add_data(self, data):\n _dir = self._add_dir()\n while 1:\n tmp = '%s/%s/%s%s' % (self.path, _dir, _name(self.rndhex),\n TEMPORARY_SUFFIX)\n try:\n if is_bytes(data):\n new_file = _file_create(tmp, umask=self.umask, utf8=False)\n else:\n new_file = _file_create(tmp, umask=self.umask, utf8=True)\n except EnvironmentError:\n error = sys.exc_info()[1]\n if error.errno == errno.ENOENT:\n _special_mkdir('%s/%s' % (self.path, _dir), self.umask)\n continue\n else:\n if new_file:\n break\n new_file.write(data)\n new_file.close()\n return _dir, tmp", "async def create_tarball(output_path: str) -> bytes:\n file_paths = []\n for root, _, filenames in os.walk(output_path):\n for filename in filenames:\n file_path = os.path.join(root, filename)\n log.info(f'Adding {file_path} to tarball.')\n file_paths.append(file_path)\n\n with ByteStream() as stream:\n with tarfile.TarFile.open(fileobj=stream, mode='w:gz',\n compresslevel=1) as tar:\n for file_path in file_paths:\n tar_info = tar.gettarinfo(name=file_path)\n tar.addfile(tar_info)\n # Yield the header for the tarinfo file.\n yield stream.pop()\n\n with open(file_path, 'rb') as in_fp:\n # Read the input file in chunks of stream.block_size bytes.\n while True:\n data = in_fp.read(stream.block_size)\n if len(data) > 0:\n # Write the data to the buffer.\n tar.fileobj.write(data)\n # Yield a compressed file chunk so the client can receive it.\n yield stream.pop()\n # Write padding if necessary.\n if len(data) < stream.block_size:\n blocks, remainder = divmod(tar_info.size, tarfile.BLOCKSIZE)\n if remainder > 0:\n tar.fileobj.write(tarfile.NUL * (tarfile.BLOCKSIZE - remainder))\n yield stream.pop()\n blocks += 1\n tar.offset += blocks * tarfile.BLOCKSIZE\n break\n\n # Yield end-of-archive marker.\n yield stream.pop()", "def write_tarball(args, tarfilename, archivefiles=[]):\n if not archivefiles:\n 
return None\n \n manifest_filename, manifest_uuid = render_manifest(args, archivefiles)\n try:\n with tarfile.open(tarfilename, f\"{FILE_FLAG}:gz\") as tarball:\n file_count = 0\n for fname in archivefiles:\n LOG.debug(f\"Adding {fname} to {tarfilename}: \")\n if fname.endswith(\".csv\"):\n upload_name = f\"{manifest_uuid}_openshift_usage_report.{file_count}.csv\"\n tarball.add(fname, arcname=upload_name)\n file_count += 1\n tarball.add(manifest_filename, arcname=\"manifest.json\")\n except FileExistsError as exc:\n LOG.critical(exc)\n sys.exit(2)\n LOG.info(f\"Wrote: {tarfilename}\")\n return f\"{tarfilename}\"", "def tar_gz_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with tarfile_open(destination, \"w:gz\") as tar:\n tar.add(self.file)", "def _decompress_tarball(*, in_fileobj, out_fileobj):\n with tarfile.open(fileobj=in_fileobj, mode=\"r\") as it, tarfile.open(\n fileobj=out_fileobj, mode=\"w|\"\n ) as ot:\n for member in it.getmembers():\n extracted = it.extractfile(member)\n ot.addfile(member, extracted)", "def create_backup_file(self, source_dir, archive_file):\n tar_file = tarfile.open(archive_file, 'w|gz')\n try:\n tar_file.add(source_dir)\n finally:\n tar_file.close()", "def _make_tar_gz_file(output_filename, source_dir):\n with tarfile.open(output_filename, \"w:gz\") as tar:\n for f in os.listdir(source_dir):\n tar.add(os.path.join(source_dir, f), arcname=f)", "def add(self, subcmd, tariff, cpu, disk, ram, io):\n\n self.__connect_db().add_tariff(name=tariff, cpu=cpu, disk=disk, ram=ram, io=io)", "def _download_untar(self, dep):\n tar = sh.Command(\"tar\")\n download_url = self.dependency_dict[dep][\"tarball\"]\n dlname = download_url.split(\"/\")[-1]\n download_path = Path(\".\") / dlname\n logger.debug(f\"downloading {dep} at {download_url} to {dlname}\")\n if self.quiet:\n trackers = ()\n else:\n trackers = (ProgressTracker(DataTransferBar()),)\n request_download(download_url, download_path, trackers=trackers)\n logger.debug(\n f\"downloaded file {download_path}, size\"\n f\" {download_path.stat().st_size}\"\n )\n try:\n tar(\"xvf\", download_path, **self.output_kwargs)\n except:\n logger.error(f\"untar of {download_path} failed\")\n sys.exit(1)", "def add(self, data):\n _dir, path = self._add_data(data)\n return self._add_path(path, _dir)", "def _setup_input(self, g):\n tarbytes = io.BytesIO()\n with tempfile.NamedTemporaryFile() as f:\n g.serialize(f.name, format=\"turtle\")\n tar = tarfile.open(name=\"out.tar\", mode=\"w\", fileobj=tarbytes)\n tar.add(f.name, arcname=\"input.ttl\")\n tar.close()\n # seek to beginning so our file is not empty when docker sees it\n tarbytes.seek(0)\n return tarbytes", "def _tar_with_filter(\n path: Path,\n tar_filter: Callable[[tarfile.TarInfo], Optional[tarfile.TarInfo]],\n) -> io.BytesIO:\n tarstream = io.BytesIO()\n with tarfile.TarFile(fileobj=tarstream, mode='w') as tar:\n tar.add(name=str(path), arcname='/', filter=tar_filter)\n tarstream.seek(0)\n\n return tarstream", "def untar(tarfile, outdir):\n tmpdir = tempfile.mkdtemp()\n try:\n untared = _open_archive(tarfile, tmpdir)\n files = [f for f in untared if os.path.isfile(os.path.join(tmpdir, f))]\n dirs = [d for d in untared if os.path.isdir(os.path.join(tmpdir, d))]\n assert len(files) + len(dirs) == len(untared), 'Only files and directories'\n if _files_same(tmpdir, outdir, files) and _dirs_same(tmpdir, outdir, dirs):\n # Nothing new or different in the tarfile.\n return False\n # Some or all of the files / directories are new.\n 
_move_files(tmpdir, outdir, files)\n _move_dirs(tmpdir, outdir, dirs)\n return True\n finally:\n if os.path.isdir(tmpdir):\n shutil.rmtree(tmpdir)" ]
[ "0.6786602", "0.6693681", "0.6670188", "0.6648034", "0.64866704", "0.6451304", "0.6295874", "0.61914855", "0.61045814", "0.6101494", "0.6056289", "0.60557306", "0.604684", "0.6011332", "0.5993754", "0.59463626", "0.58494645", "0.583224", "0.5801141", "0.577348", "0.5759886", "0.575819", "0.5756215", "0.57554144", "0.56715935", "0.5669834", "0.5613171", "0.55839866", "0.55781776", "0.55693096" ]
0.70681113
0
Append the data of the given dtype_out to the data_out attr.
def _update_data_out(self, data, dtype): try: self.data_out.update({dtype: data}) except AttributeError: self.data_out = {dtype: data}
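The method above is a small lazy-initialization idiom: try to update the attribute, and create it on the first AttributeError. A stand-alone version of the same pattern, with illustrative class and attribute names:

class Accumulator:
    def update(self, key, value):
        # Create the dict on first use, update it on every later call.
        try:
            self.store.update({key: value})
        except AttributeError:
            self.store = {key: value}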
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cast(self, dtype):\n self.dtype = np.dtype(dtype)\n self.preprocess = False\n self.set_data(self.data)", "def setRxDataOut(self, rx_data_out):\n self.rx_data_out = rx_data_out", "def _save_files(self, data, dtype_out_time):\n path = self.path_out[dtype_out_time]\n if not os.path.isdir(self.dir_out):\n os.makedirs(self.dir_out)\n if 'reg' in dtype_out_time:\n try:\n reg_data = xr.open_dataset(path)\n except (EOFError, RuntimeError, IOError):\n reg_data = xr.Dataset()\n reg_data.update(data)\n data_out = reg_data\n else:\n data_out = data\n if isinstance(data_out, xr.DataArray):\n data_out = xr.Dataset({self.name: data_out})\n data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT')", "def _add_metadata_as_attrs(data, units, description, dtype_out_vert):\n if isinstance(data, xr.DataArray):\n return _add_metadata_as_attrs_da(data, units, description,\n dtype_out_vert)\n else:\n for name, arr in data.data_vars.items():\n _add_metadata_as_attrs_da(arr, units, description,\n dtype_out_vert)\n return data", "def outputDataType(self):\n raise NotImplementedError()", "def save(self, data, dtype_out_time, dtype_out_vert=False,\n save_files=True, write_to_tar=False):\n self._update_data_out(data, dtype_out_time)\n if save_files:\n self._save_files(data, dtype_out_time)\n if write_to_tar and self.proj.tar_direc_out:\n self._write_to_tar(dtype_out_time)\n logging.info('\\t{}'.format(self.path_out[dtype_out_time]))", "def out(self, out):\n\n self._out = out", "def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):\n if dtype_out_vert == 'vert_int':\n if units != '':\n units = '(vertical integral of {0}): {0} kg m^-2)'.format(units)\n else:\n units = '(vertical integral of quantity with unspecified units)'\n data.attrs['units'] = units\n data.attrs['description'] = description\n return data", "def add(self, node, **offset):\n return self.dtype.add(self, node, **offset)", "def add_data(self, x=1, y=1, dtype=1, name=\"New data\", description=\"New data\"):\n assert 1 <= dtype <= 3, \"Bad value for data type\"\n new_data = _Data(dtype, name=name, description=description)\n new_data.set_data(x=x, y=y, xunits=self.time_units, yunits=self.len_units)\n self.data.append(new_data)", "def test_out_dtype(self):\n byt = bytscl(self.array1)\n dtype = byt.dtype\n self.assertEqual(dtype, 'uint8')", "def put_data(self, var_name, data_class):\n assert(data_class.data.shape == self.var_dict[var_name][1])\n\n if self.writer is not None:\n # retrieve the adios variable from the dict\n adios_var = self.var_dict[var_name][0]\n # Assert that the data is continuous, as implicitly required by ADIOS2.\n # The burden to produce contiguous data is on the data producer.\n assert(data_class.data.flags.contiguous)\n tic = time.perf_counter()\n self.writer.Put(adios_var, data_class.data, adios2.Mode.Sync)\n toc = time.perf_counter()\n\n num_bytes = np.product(data_class.data.shape) * data_class.data.itemsize\n dt = toc - tic\n self.stats.add_transfer(num_bytes, dt)", "def add_data(self, data):\n self._data += data", "def append(self,data_struct,memory_address=None, extra=None):\n \n # Write the header if needed\n \n if self.log_init is False or self.reprint_header:\n keys = [str(x) for x in data_struct.keys()]\n line_list = [\"Timestamp\"]+keys\n\n # Was extra Data Provided?\n if extra is not None:\n line_list += extra.keys()\n \n # Was a memory address provided?\n if memory_address is not None:\n line_list += [\"Memory Address\"]\n \n line = self.separator.join(line_list)\n self.outfd.write(line)\n 
self.log_init = True\n self.outfd.write(\"\\n\")\n \n # Write the actual data\n values = [str(x) for x in data_struct.values()]\n line_list = [str(time.time())]+values\n\n # Was extra data provided?\n if extra is not None:\n for k in extra.keys():\n line_list.append(extra[k])\n \n # Was a memory address provided?\n if memory_address is not None: \n line_list += [\"0x%016X\"%memory_address]\n \n line = self.separator.join(line_list)\n self.outfd.write(line+\"\\n\")\n \n self.outfd.flush()", "def set_dtype(self, dtype):\n self.mean_.set_dtype(dtype)\n for filter_k in self.samples_:\n filter_k.set_dtype(dtype)\n self.dtype = self.mean_.dtype", "def append_data(self, key, data):\n with self.write():\n try:\n self.handle.append(\n key, data, data_columns=True, complevel=5, complib='blosc')\n except AttributeError:\n self.handle.append(key, data, complevel=5, complib='blosc')", "def add_data(self, data):\n self.data = self.data + data", "def data_out(f):\n @functools.wraps(f)\n def dec(*args, **kwargs):\n rtn, dout = f(*args, **kwargs)\n _stash_set_data(dout)\n return rtn\n return dec", "def write(self, p_out, **kwargs):\n nodata = kwargs.pop(\"nodata\", self.nodata_value)\n driver = kwargs.pop(\"driver\", \"GTiff\")\n return ImageIO.gdal_write(driver, self.array, p_out, self.projection, self.geotransform,\n nodata=nodata, **kwargs)", "def addOutputMetaData(nnSpec):\n for i, featureMapDimension in enumerate(featureMapDimensions):\n nnSpec.description.output[i].type.multiArrayType.shape.append(1)\n nnSpec.description.output[i].type.multiArrayType.shape.append(3)\n nnSpec.description.output[i].type.multiArrayType.shape.append(\n featureMapDimension\n )\n nnSpec.description.output[i].type.multiArrayType.shape.append(\n featureMapDimension\n )\n # pc, bx, by, bh, bw, c (no of class class labels)\n nnSpec.description.output[i].type.multiArrayType.shape.append(outputSize)\n nnSpec.description.output[\n i\n ].type.multiArrayType.dataType = (\n ct.proto.FeatureTypes_pb2.ArrayFeatureType.DOUBLE\n )", "def modify_data(self, sim, dat_in, dat_out):\n return dat_in, dat_out, list(range(len(dat_in.dtype.names)))", "def dtype(self) -> np.dtype:\n ...", "def data_out_mux(self):\n return self._data_out_mux", "def write(self, data):\n self._output_list.append(data)", "def define_attribute(self, name, atype, data=None):\n self.attributes.append(name)\n self.attribute_types[name] = atype\n self.attribute_data[name] = data", "def writeNetCDFData(out_nc, hrus, dr_time, hru_type, remapped_data, var_meta, var_attrs, var_encodings, remap_idx):\n\n dataset = xr.Dataset()\n\n for varname, meta in var_meta.items():\n foo = xr.DataArray(remapped_data[varname][:, remap_idx],\n dims=['time', 'basinID'],\n name=varname)\n\n foo.encoding = var_encodings[varname]\n foo.attrs = var_attrs[varname]\n\n dataset[varname] = foo\n\n # HRU ID variables\n dataset['basinID'] = xr.DataArray(hrus[remap_idx], dims=['basinID'])\n dataset['basinID'].encoding = {'dtype': hru_type, '_FillValue': None}\n dataset['basinID'].attrs = {'long_name': 'Basin ID'}\n\n dataset[TIME_DIM_NAME] = dr_time\n\n dataset.to_netcdf(out_nc, unlimited_dims='time')", "def astype(self, dtype):\n return type(self)(self.data.astype(dtype), self.bset)", "def add_data(self, op):\n self.__data += [AssemblerVariable(op)]\n self.refresh_name_label()\n self.refresh_name_end_label()", "def _write(self, h5_group, _) -> None:\n # Convert text from unicode to byte-string to avoid error in h5py\n data = np.asarray(self.data, dtype=np.string_)\n h5_field = 
h5_group.create_dataset(h5_group.attrs[\"fieldname\"], self.data.shape, dtype=data.dtype)\n h5_field[...] = data", "def append(self, output):\n if len(self) != get_key_slice(output.key).start:\n raise ValueError('Appending a non matching slice')\n\n self._delayed_outputs.append(output)\n self._stored_mask.append(False)\n if self._store:\n self._store.write(output, done_callback=self._set_stored)" ]
[ "0.557317", "0.5526732", "0.55255765", "0.54921657", "0.54495966", "0.53745854", "0.5360902", "0.526652", "0.5238675", "0.52305984", "0.51603323", "0.5156865", "0.5149443", "0.5124915", "0.50758576", "0.50730234", "0.50729704", "0.50701445", "0.5064776", "0.50359225", "0.5", "0.49285612", "0.4923693", "0.49147785", "0.4886856", "0.48443", "0.4838261", "0.48113468", "0.48083246", "0.48072147" ]
0.7611866
0
Subset the data array to the specified time/level/lat/lon, etc.
def _get_data_subset(self, data, region=False, time=False, vert=False, lat=False, lon=False): if region: raise NotImplementedError if np.any(time): data = data[time] if 'monthly_from_' in self.dtype_in_time: data = np.mean(data, axis=0)[np.newaxis, :] if np.any(vert): if self.dtype_in_vert == internal_names.ETA_STR: data = data[{PFULL_STR: vert}] else: if np.max(self.model.level) > 1e4: # Convert from Pa to hPa. lev_hpa = self.model.level*1e-2 else: lev_hpa = self.model.level level_index = np.where(lev_hpa == self.level) if 'ts' in self.dtype_out_time: data = np.squeeze(data[:, level_index]) else: data = np.squeeze(data[level_index]) if np.any(lat): raise NotImplementedError if np.any(lon): raise NotImplementedError return data
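One non-obvious step in the method above is the pressure-level lookup: model levels that look like they are in Pa (values above 1e4) are converted to hPa before matching. A minimal NumPy-only sketch of that piece, with an assumed helper name:

import numpy as np

def level_index(levels, target_hpa):
    # Convert Pa to hPa when the coordinate magnitude suggests Pa,
    # then locate the requested pressure level.
    levels = np.asarray(levels, dtype=float)
    if levels.max() > 1e4:
        levels = levels * 1e-2
    matches = np.where(levels == target_hpa)[0]
    if matches.size == 0:
        raise ValueError(f"level {target_hpa} hPa not found")
    return matches[0]

Exact floating-point comparison works here only if the requested level matches the coordinate values exactly, which is what the original assumes.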
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_subset_by_time(self):\n\n this_satellite_dict = satellite_io.subset_by_time(\n satellite_dict=copy.deepcopy(SATELLITE_DICT_ALL_EXAMPLES),\n desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC\n )[0]\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_SUBSET_BY_TIME\n ))", "def get_subset(self, times=None, varids=None):\n if times is None:\n dat = self.ix[:,varids]\n times = self.times\n elif varids is None:\n dat = self.ix[times,:]\n varids = self.varids\n else:\n dat = self.ix[times,varids]\n subtraj = Trajectory(dat=dat, times=times, varids=varids) \n return subtraj", "def remove_data(ds, nh_lim, sh_lim, time_max, lat_name='lat', time_name='time'):\n return xr.where((ds[lat_name] < nh_lim) &\n (ds[lat_name] > sh_lim) &\n (ds[time_name] < pd.to_datetime([time_max]).values),\n np.nan,\n ds)", "def sel(self, time=None, lat=None, lon=None, vmax=None, mslp=None,\n dvmax_dt=None, dmslp_dt=None, stormtype=None, method='exact'):\n\n # create copy of storm object\n new_dict = copy.deepcopy(self.dict)\n new_dict['subset'] = True\n NEW_STORM = Storm(new_dict)\n idx_final = np.arange(len(self.time))\n\n # apply time filter\n if time is None:\n idx = copy.copy(idx_final)\n\n elif isinstance(time, dt):\n time_diff = np.array([(time - i).total_seconds()\n for i in NEW_STORM.time])\n idx = np.abs(time_diff).argmin()\n if time_diff[idx] != 0:\n if method == 'exact':\n msg = f'no exact match for {time}. Use different time or method.'\n raise ValueError(msg)\n elif method == 'floor' and time_diff[idx] < 0:\n idx += -1\n if idx < 0:\n msg = f'no points before {time}. Use different time or method.'\n raise ValueError(msg)\n elif method == 'ceil' and time_diff[idx] > 0:\n idx += 1\n if idx >= len(time_diff):\n msg = f'no points after {time}. Use different time or method.'\n raise ValueError(msg)\n\n elif isinstance(time, (tuple, list)) and len(time) == 2:\n time0, time1 = time\n if time0 is None:\n time0 = min(NEW_STORM.time)\n elif not isinstance(time0, dt):\n msg = 'time bounds must be of type datetime.datetime or None.'\n raise TypeError(msg)\n if time1 is None:\n time1 = max(NEW_STORM.time)\n elif not isinstance(time1, dt):\n msg = 'time bounds must be of type datetime.datetime or None.'\n raise TypeError(msg)\n tmptimes = np.array(NEW_STORM.time)\n idx = np.where((tmptimes >= time0) & (tmptimes <= time1))[0]\n if len(idx) == 0:\n msg = f'no points between {time}. Use different time bounds.'\n raise ValueError(msg)\n\n else:\n msg = 'time must be of type datetime.datetime, tuple/list, or None.'\n raise TypeError(msg)\n\n # update idx_final\n idx_final = list(set(idx_final) & set(listify(idx)))\n\n # apply lat/lon filter\n if lat is None and lon is None:\n idx = copy.copy(idx_final)\n\n elif is_number(lat) and is_number(lon):\n dist = np.array([great_circle((lat, lon), (x, y)).kilometers for x, y in zip(\n NEW_STORM.lon, NEW_STORM.lat)])\n idx = np.abs(dist).argmin()\n if dist[idx] != 0:\n if method == 'exact':\n msg = f'no exact match for {lat}/{lon}. Use different location or method.'\n raise ValueError(msg)\n elif method in ('floor', 'ceil'):\n warnings.warn(\n 'floor and ceil do not apply to lat/lon filtering. 
Using nearest instead.')\n\n elif (isinstance(lat, (tuple, list)) and len(lat) == 2) or (isinstance(lon, (tuple, list)) and len(lon) == 2):\n if not isinstance(lat, (tuple, list)):\n lat = (None, None)\n if not isinstance(lon, (tuple, list)):\n lon = (None, None)\n lat0, lat1 = lat\n lon0, lon1 = lon\n if lat0 is None:\n lat0 = min(NEW_STORM.lat)\n elif not is_number(lat0):\n msg = 'lat/lon bounds must be of type float/int or None.'\n raise TypeError(msg)\n if lat1 is None:\n lat1 = max(NEW_STORM.lat)\n elif not is_number(lat1):\n msg = 'lat/lon bounds must be of type float/int or None.'\n raise TypeError(msg)\n if lon0 is None:\n lon0 = min(NEW_STORM.lon)\n elif not is_number(lon0):\n msg = 'lat/lon bounds must be of type float/int or None.'\n raise TypeError(msg)\n if lon1 is None:\n lon1 = max(NEW_STORM.lon)\n elif not is_number(lon1):\n msg = 'lat/lon bounds must be of type float/int or None.'\n raise TypeError(msg)\n\n tmplat, tmplon = np.array(\n NEW_STORM.lat), np.array(NEW_STORM.lon) % 360\n idx = np.where((tmplat >= lat0) & (tmplat <= lat1) &\n (tmplon >= lon0 % 360) & (tmplon <= lon1 % 360))[0]\n if len(idx) == 0:\n msg = f'no points in {lat}/{lon} box. Use different lat/lon bounds.'\n raise ValueError(msg)\n\n else:\n msg = 'lat and lon must be of the same type: float/int, tuple/list, or None.'\n raise TypeError(msg)\n\n # update idx_final\n idx_final = list(set(idx_final) & set(listify(idx)))\n\n # apply vmax filter\n if vmax is None:\n idx = copy.copy(idx_final)\n\n elif isinstance(vmax, (tuple, list)) and len(vmax) == 2:\n vmax0, vmax1 = vmax\n if vmax0 is None:\n vmax0 = np.nanmin(NEW_STORM.vmax)\n elif not is_number(vmax0):\n msg = 'vmax bounds must be of type float/int or None.'\n raise TypeError(msg)\n if vmax1 is None:\n vmax1 = np.nanmax(NEW_STORM.vmax)\n elif not is_number(vmax1):\n msg = 'vmax bounds must be of type float/int or None.'\n raise TypeError(msg)\n tmpvmax = np.array(NEW_STORM.vmax)\n idx = np.where((tmpvmax >= vmax0) & (tmpvmax <= vmax1))[0]\n if len(idx) == 0:\n msg = f'no points with vmax between {vmax}. Use different vmax bounds.'\n raise ValueError(msg)\n\n else:\n msg = 'vmax must be of type tuple/list, or None.'\n raise TypeError(msg)\n\n # update idx_final\n idx_final = list(set(idx_final) & set(listify(idx)))\n\n # apply mslp filter\n if mslp is None:\n idx = copy.copy(idx_final)\n\n elif isinstance(mslp, (tuple, list)) and len(mslp) == 2:\n mslp0, mslp1 = mslp\n if mslp0 is None:\n mslp0 = np.nanmin(NEW_STORM.mslp)\n elif not is_number(mslp0):\n msg = 'mslp bounds must be of type float/int or None.'\n raise TypeError(msg)\n if mslp1 is None:\n mslp1 = np.nanmax(NEW_STORM.mslp)\n elif not is_number(mslp1):\n msg = 'mslp bounds must be of type float/int or None.'\n raise TypeError(msg)\n tmpmslp = np.array(NEW_STORM.mslp)\n idx = np.where((tmpmslp >= mslp0) & (tmpmslp <= mslp1))[0]\n if len(idx) == 0:\n msg = f'no points with mslp between {mslp}. Use different dmslp_dt bounds.'\n raise ValueError(msg)\n\n else:\n msg = 'vmax must be of type tuple/list, or None.'\n raise TypeError(msg)\n\n # update idx_final\n idx_final = list(set(idx_final) & set(listify(idx)))\n\n # apply dvmax_dt filter\n if dvmax_dt is None:\n idx = copy.copy(idx_final)\n\n elif 'dvmax_dt' not in NEW_STORM.dict.keys():\n msg = 'dvmax_dt not in storm data. 
Create new object with interp first.'\n raise KeyError(msg)\n\n elif isinstance(dvmax_dt, (tuple, list)) and len(dvmax_dt) == 2:\n dvmax_dt0, dvmax_dt1 = dvmax_dt\n if dvmax_dt0 is None:\n dvmax_dt0 = np.nanmin(NEW_STORM.dvmax_dt)\n elif not is_number(dvmax_dt0):\n msg = 'dmslp_dt bounds must be of type float/int or None.'\n raise TypeError(msg)\n if dvmax_dt1 is None:\n dvmax_dt1 = np.nanmax(NEW_STORM.dvmax_dt)\n elif not is_number(dvmax_dt1):\n msg = 'dmslp_dt bounds must be of type float/int or None.'\n raise TypeError(msg)\n\n tmpvmax = np.array(NEW_STORM.dvmax_dt)\n idx = np.where((tmpvmax >= dvmax_dt0) & (tmpvmax <= dvmax_dt1))[0]\n if len(idx) == 0:\n msg = f'no points with dvmax_dt between {dvmax_dt}. Use different dvmax_dt bounds.'\n raise ValueError(msg)\n\n # update idx_final\n idx_final = list(set(idx_final) & set(listify(idx)))\n\n # apply dmslp_dt filter\n if dmslp_dt is None:\n idx = copy.copy(idx_final)\n\n elif 'dmslp_dt' not in NEW_STORM.dict.keys():\n msg = 'dmslp_dt not in storm data. Create new object with interp first.'\n raise KeyError(msg)\n\n elif isinstance(dmslp_dt, (tuple, list)) and len(dmslp_dt) == 2:\n dmslp_dt0, dmslp_dt1 = dmslp_dt\n if dmslp_dt0 is None:\n dmslp_dt0 = np.nanmin(NEW_STORM.dmslp_dt)\n elif not is_number(dmslp_dt0):\n msg = 'dmslp_dt bounds must be of type float/int or None.'\n raise TypeError(msg)\n if dmslp_dt1 is None:\n dmslp_dt1 = np.nanmax(NEW_STORM.dmslp_dt)\n elif not is_number(dmslp_dt1):\n msg = 'dmslp_dt bounds must be of type float/int or None.'\n raise TypeError(msg)\n tmpmslp = np.array(NEW_STORM.dmslp_dt)\n idx = np.where((tmpmslp >= dmslp_dt0) & (tmpmslp <= dmslp_dt1))[0]\n if len(idx) == 0:\n msg = f'no points with dmslp_dt between {dmslp_dt}. Use different dmslp_dt bounds.'\n raise ValueError(msg)\n\n # update idx_final\n idx_final = list(set(idx_final) & set(listify(idx)))\n\n # apply stormtype filter\n if stormtype is None:\n idx = copy.copy(idx_final)\n\n elif isinstance(stormtype, (tuple, list, str)):\n idx = [i for i, j in enumerate(\n NEW_STORM.type) if j in listify(stormtype)]\n if len(idx) == 0:\n msg = f'no points with type {stormtype}. 
Use different stormtype.'\n raise ValueError(msg)\n\n else:\n msg = 'stormtype must be of type tuple/list, str, or None.'\n raise TypeError(msg)\n\n # update idx_final\n idx_final = sorted(list(set(idx_final) & set(listify(idx))))\n\n # Construct new storm dict with subset elements\n for key in NEW_STORM.dict.keys():\n if isinstance(NEW_STORM.dict[key], list):\n NEW_STORM.dict[key] = [NEW_STORM.dict[key][i]\n for i in idx_final]\n else:\n NEW_STORM.dict[key] = NEW_STORM.dict[key]\n\n # Add other attributes to new storm object\n if key == 'realtime':\n continue\n if not isinstance(NEW_STORM.dict[key], list) and not isinstance(NEW_STORM.dict[key], dict):\n NEW_STORM[key] = NEW_STORM.dict[key]\n NEW_STORM.attrs[key] = NEW_STORM.dict[key]\n if isinstance(NEW_STORM.dict[key], list) and not isinstance(NEW_STORM.dict[key], dict):\n NEW_STORM.vars[key] = np.array(NEW_STORM.dict[key])\n NEW_STORM[key] = np.array(NEW_STORM.dict[key])\n\n return NEW_STORM", "def filter_meteo_data(self, startdate, enddate):\n self.all_meteo_data.columns.values[0]='Datum-tijd'\n self.all_meteo_data['datetime']=pd.to_datetime(self.all_meteo_data['Datum-tijd'], format='%Y-%m-%dT%H:%M:%SZ')\n self.all_meteo_data.drop(['Datum-tijd'],axis=1, inplace=True)\n mask = (self.all_meteo_data['datetime'] > startdate) & (self.all_meteo_data['datetime'] <= enddate)\n meteodata = self.all_meteo_data.loc[mask].copy()\n meteodata.set_index('datetime',inplace=True)\n return meteodata", "def spatial_subset(dataset: xr.Dataset,\n bbox: Tuple[float, float, float, float]) -> xr.Dataset:\n x1, y1, x2, y2 = bbox\n gm = GridMapping.from_dataset(dataset)\n x_name, y_name = gm.xy_dim_names\n return dataset.sel({\n x_name: slice(x1, x2),\n y_name: slice(y1, y2) if gm.is_j_axis_up else slice(y2, y1)\n })", "def set_data_subset(self, subset):\n self.data_subset = subset", "def latlonsel(array, lat, lon, latname='lat', lonname='lon'):\n assert latname in array.coords, f\"Coord. {latname} not present in array\"\n assert lonname in array.coords, f\"Coord. 
{lonname} not present in array\"\n\n if isinstance(lat, slice):\n lat1 = lat.start\n lat2 = lat.stop\n elif isinstance(lat, list):\n lat1 = lat[0]\n lat2 = lat[-1]\n if isinstance(lon, slice):\n lon1 = lon.start\n lon2 = lon.stop\n elif isinstance(lon, list):\n lon1 = lon[0]\n lon2 = lon[-1]\n\n lonmask = (array[lonname] < lon2) & (array[lonname] > lon1)\n latmask = (array[latname] < lat2) & (array[latname] > lat1)\n array = array.where(lonmask, drop=True).where(latmask, drop=True)\n return array", "def select_annotation_by_ts(csv_data, lbound=None, rbound=None, by=None):\n if by==None:\n if not lbound:\n lbound = csv_data[st_col].iloc[0] # iloc is faster than head() or tail()\n if not rbound:\n rbound = csv_data[et_col].iloc[-1]\n # start_flags = np.array(csv_data[et_col].apply(lambda x: x>lbound)) ## Note it's too slow\n flags = (csv_data[et_col] > lbound) & (csv_data[st_col] < rbound)\n # end_flags = np.array(csv_data[st_col].apply(lambda x:x<rbound)) ## Note it's too slow\n subset_annotation_data = csv_data[flags]\n # subset_annotation_data = subset_annotation_data.reset_index(drop=True) ## Don't reset index\n subset_annotation_data[st_col].iloc[0] = max(lbound,subset_annotation_data[st_col].iloc[0])\n subset_annotation_data[et_col].iloc[-1] = min(rbound,subset_annotation_data[et_col].iloc[-1])\n else:\n groupby_annotation = csv_data.groupby(by)\n subset_group_datas = []\n for group_name, group_data in groupby_annotation:\n if lbound == None:\n lbound = group_data[st_col].iloc[0]\n if rbound == None:\n rbound = group_data[et_col].iloc[-1]\n # start_flags = np.array(group_data[et_col].apply(lambda x: x>lbound)) ## Note it's too slow\n start_flags = group_data[et_col] > lbound\n # end_flags = np.array(group_data[st_col].apply(lambda x:x<rbound)) ## Note it's too slow\n end_flags = group_data[st_col] < rbound\n subset_group_data = group_data[np.logical_and(start_flags,end_flags)]\n subset_group_data[st_col].iloc[0] = max(lbound,subset_group_data[st_col].iloc[0])\n subset_group_data[et_col].iloc[-1] = min(rbound,subset_group_data[et_col].iloc[-1])\n # subset_group_data = subset_group_data.reset_index(drop=True) ## Don't reset index\n subset_group_datas.append(subset_group_data)\n subset_annotation_data = annotation_data_consolidator(subset_group_datas)\n return subset_annotation_data", "def get_subset(self, tile, band=0):\r\n # access window bounds\r\n bounds = rasterio.windows.bounds(tile, self.dataset.transform)\r\n return (\r\n self.__arr[(band,) + tile.toslices()],\r\n bounds,\r\n ) # Shape of array is announced with (bands, height, width)\r", "def pick_data(eop_data, time, window):\n if time.isscalar:\n start_time = np.floor(time.utc.mjd) - window // 2\n end_time = np.ceil(time.utc.mjd) + window // 2\n else:\n start_time = np.floor(time.min().utc.mjd) - window // 2\n end_time = np.ceil(time.max().utc.mjd) + window // 2\n\n try:\n return {d: eop_data[d].copy() for d in np.arange(start_time, end_time + 1)}\n except KeyError:\n paths = [str(files.path(k)) for k in _EOP_FILE_KEYS]\n raise MissingDataError(\n \"Not all days in the time period {:.0f} - {:.0f} MJD were found in EOP-files {}\"\n \"\".format(start_time, end_time, \", \".join(paths))\n )", "def _trimTime(time,data,tStart,tStop):\t\n\tif tStart is None:\n\t\tiStart=0;\n\t\tiStop=len(time);\n\telse:\n\t\t# determine indices of cutoff regions\n\t\tiStart=_process.findNearest(time,tStart); # index of lower cutoff\n\t\tiStop=_process.findNearest(time,tStop);\t # index of higher cutoff\n\t\t\n\t# trim 
time\n\ttime=time[iStart:iStop];\n\t\n\t# trim data\n\tif type(data) is not list:\n\t\tdata=[data];\n\tfor i in range(0,len(data)):\n\t\tdata[i]=data[i][iStart:iStop];\n\t\t\n\treturn time, data", "def load_subset_data(data_path, subset_name, timesteps):\n\n selected_subset_paths = subset_paths(os.path.join(data_path, subset_name))\n selected_subset_arrays = subset_arrays(selected_subset_paths)\n\n load_selected_timesteps = lambda x: np.load(x)\n\n if timesteps is not None:\n selected_subset_timesteps = load_selected_timesteps(timesteps)\n else:\n selected_subset_timesteps = np.array(range(int(np.sum(selected_subset_arrays[\"seq_lens\"]))))\n\n return selected_subset_arrays, selected_subset_timesteps", "def _filterTimes(self):\n print(self.tRange)\n idT = np.where((self.tRange[0] > np.array(self.rawD['Epoch'][:])) & \n (self.tRange[1] < np.array(self.rawD['Epoch'][:])))[0]\n #print(self.rawD['Epoch'][:100])\n print(idT)\n # Filter data\n for key in filter(lambda x: ('Epoch' in x or \n ('Counts' in x and x[-1] == 's')), self.rawD.keys()):\n self.d[key] = self.rawD[key].copy()[idT]\n return", "def get_subset_by_areas(sess_no, raw_path, \n align_on, from_time, to_time, \n target_areas,\n only_correct_trials = True, renorm = False, elec_type = 'grid' ):\n tinfo_path = raw_path + 'trial_info.mat'\n rinfo_path = raw_path + 'recording_info.mat'\n \n # get all data\n data_filtered = get_preprocessed_from_raw(sess_no, raw_path, \n align_on, from_time, to_time)\n \n # don't keep missing data // keep only_correct_trials if True\n \n responses = io.get_responses(tinfo_path)\n if only_correct_trials == False:\n ind_to_keep = (responses == responses).flatten()\n else:\n ind_to_keep = (responses == 1).flatten()\n \n #data1 =data1[ind_to_keep, :, :] # in the same time\n #data2 =data2[ind_to_keep, :, :]\n \n data_filtered = data_filtered[ind_to_keep,:,:]\n\n \n # select electrode and cut the additionnal time\n \n area_names = io.get_area_names(rinfo_path)\n \n idx = []\n for count, area in enumerate(area_names):\n if area in target_areas:\n idx.append(count) \n \n data_filtered = data_filtered[:, idx, :] \n \n\n ## change type \n data_filtered = data_filtered.astype(np.float32)\n \n if elec_type == 'single':\n data_filtered = data_filtered.reshape(data_filtered.shape[0]*data_filtered.shape[1], data_filtered.shape[2])\n data_filtered = np.expand_dims(data_filtered, axis=1)\n \n\n \n elif elec_type == 'average':\n data_filtered = np.mean(data_filtered, axis=1, keepdims=True)\n\n \n #elif elec_type == 'grid':\n #data_filtered = data_filtered\n\n elif elec_type != 'grid':\n raise ValueError('Type \\'' + elec_type + '\\' not supported. 
Please ' + \n 'choose one of \\'single\\'|\\'grid\\'|\\'average\\'.')\n \n # renorm data : mean = 0 and var = 1\n if renorm == True :\n data_filtered = pp.renorm(data_filtered)\n \n ### variable for shape\n #n_chans1 = len(idx)\n \n #samples_per_trial = data_filtered.shape[2] \n \n return( data_filtered )", "def data_array(self) -> xr.Dataset:\n\n xr_data = xr.open_mfdataset(self.path_to_files,\n chunks=self.chunks,\n parallel=True)\n\n if not all(x in list(xr_data.coords) for x in self.DIMS):\n xr_data = xr_data.rename({\n 'latitude': 'lat',\n 'longitude': 'lon',\n })\n\n if self.subset_dict is not None:\n print(f'Cutting data using {self.subset_dict}')\n xr_data = self.cut(xr_data)\n\n if self.season is not None:\n xr_data = xr_data.where(xr_data.time.dt.season == self.season,\n drop=True)\n\n if self.rescale_longitude is True:\n xr_data = xr_data.assign_coords(lon=(((xr_data.lon + 180) % 360) -\n 180)).sortby('lon')\n\n return xr_data", "def _process_data(data, band):\n\n meta = {key:value for key,value in data[0].items() if key != \"subset\" }\n meta['band'] = band\n data_dict = {'dates': [], 'arrays': [], 'metadata': meta}\n for i in data:\n for j in i['subset']:\n if j['band'] == band:\n data_dict['dates'].append(j['calendar_date'])\n data = []\n for x in j['data']:\n try:\n data.append(float(x))\n except ValueError:\n data.append(np.nan) \n data_dict['arrays'].append(np.array(data).reshape(meta['nrows'], \n meta['ncols'])) \n dtdates = [dt.datetime.strptime(d,\"%Y-%m-%d\") for d in data_dict['dates']]\n xcoordinates = ([float(meta['xllcorner'])] + \n [i * meta['cellsize'] + float(meta['xllcorner']) \n for i in range(1, meta['ncols'])])\n ycoordinates = ([float(meta['yllcorner'])] + \n [i * meta['cellsize'] + float(meta['yllcorner'])\n for i in range(1, meta['nrows'])])\n return xr.DataArray(name = band,\n data = np.flipud(np.dstack(data_dict['arrays'])),\n coords = [np.array(ycoordinates), \n np.array(xcoordinates), dtdates],\n dims = [ \"y\", \"x\", \"time\" ],\n attrs = meta)", "def subset(arr, start, end):\n return [[row_data for row_data in row[start[1]:end[1]]] for row in arr[start[0]:end[0]]]", "def at(self, time_slices):\n\n if self.base is not None:\n return self.base.at(time_slices)\n\n if isinstance(time_slices, TimeSlice):\n time_slices = [time_slices]\n\n # join the time slice values\n timed_data = pd.DataFrame(columns=self.data.columns)\n\n # make the new data\n for slice_t in time_slices:\n slice_index = (slice_t.time <= self.data.index) & (\n self.data.index < slice_t.time + slice_t.duration\n )\n timed_data.loc[slice_t.time] = self.aggregate(\n self.data[slice_index], axis=0\n )\n\n # return the new feature object\n return Feature(\n data=timed_data,\n aggregate=self.aggregate,\n base=self,\n time_slices=time_slices,\n )", "def zoomData(self, factor, neariso=False):\n \n new_data = np.zeros( (self.data.shape[0], self.data.shape[1], self.data.shape[2]*(2**factor), self.data.shape[3]*(2**factor)), dtype=self.data.dtype)\n for time_index in range(self.data.shape[0]):\n for z_index in range(self.data.shape[1]):\n new_data[time_index, z_index, :, :] = np.asarray(self.frombuffer(self.data[time_index, z_index, :, :]).resize([new_data.shape[3], new_data.shape[2]]))\n self.data = new_data", "def select_hours(self, lhours):\n sel = []\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n hour = stime[3]\n for ih in lhours:\n ihour, fhour = ih\n if ihour <= hour < fhour:\n sel.append(i)\n data = STData(self.wpath, self.city, 
self.application)\n data.dataset = self.dataset[sel]\n return data", "def get_data_subsets(t0, t1):\n\n # Iridium data:\n irid = iridium[(iridium.time >= t0) & (iridium.time <= t1)]\n irid_B = np.vstack((irid.B_e.values, irid.B_n.values, irid.B_r.values))\n irid_coords = np.vstack((irid.lon.values, irid.lat.values, irid.r.values))\n\n # SuperMAG data:\n smag = supermag.loc[t0:t1, :]\n smag_B = np.vstack((smag.Be.values, smag.Bn.values, smag.Bu.values))\n smag_coords = np.vstack((smag.lon.values, smag.lat.values))\n\n # SuperDARN data:\n sd = superdarn.loc[t0:t1, :]\n vlos = sd['vlos'].values\n sd_coords = np.vstack((sd['glon'].values, sd['glat'].values))\n los = np.vstack((sd['le'].values, sd['ln'].values))\n\n\n # Make the data objects. The scale keyword determines a weight for the dataset. Increase it to reduce weight\n iridium_data = lompe.Data(irid_B * 1e-9, irid_coords, datatype = 'space_mag_fac', scale = 200e-9)\n supermag_data = lompe.Data(smag_B * 1e-9, smag_coords, datatype = 'ground_mag' , scale = 100e-9)\n superdarn_data = lompe.Data(vlos , sd_coords , LOS = los, datatype = 'convection' , scale = 500 )\n\n return(iridium_data, supermag_data, superdarn_data)", "def subset_data_based_bbox(inps,dataset):\n # metadata\n atr = readfile.read_attribute(\"\".join(inps.file))\n ul_lat = float(atr['Y_FIRST'])\n ul_lon = float(atr['X_FIRST'])\n lat_step = float(atr[\"Y_STEP\"])\n lon_step = float(atr[\"X_STEP\"])\n # bbox\n user_lat0 = float(inps.SNWE[1])\n user_lon0 = float(inps.SNWE[2])\n user_lat1 = float(inps.SNWE[0])\n user_lon1 = float(inps.SNWE[3])\n if user_lat0 < user_lat1:\n parser.print_usage()\n raise Exception('input bounding box error! Wrong latitude order!')\n elif user_lon0 > user_lon1:\n parser.print_usage()\n raise Exception('input bounding box error! 
Wrong longitude order!')\n \n row = int((user_lat0 - ul_lat) / lat_step + 0.5)\n sample = int((user_lon0 - ul_lon) / lon_step + 0.5) \n rows = int((user_lat1 - user_lat0) / lat_step + 0.5) + 1\n samples = int((user_lon1 - user_lon0) / lon_step + 0.5) + 1 \n \n # subset data\n data,atr = readfile.read(dataset)\n atr['LENGTH'] = str(rows)\n atr['WIDTH'] = str(samples)\n writefile.write(data, out_file=dataset, metadata=atr)\n\n return", "def slice_data(xdata, ydata, x_range):\n\tdata = zip(xdata, ydata)\n\tsliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]\n\treturn array(zip(*sliced_data))", "def subset_by_time(prediction_dict, desired_times_unix_sec):\n\n error_checking.assert_is_numpy_array(\n desired_times_unix_sec, num_dimensions=1\n )\n error_checking.assert_is_integer_numpy_array(desired_times_unix_sec)\n\n desired_indices = numpy.array([\n numpy.where(prediction_dict[VALID_TIMES_KEY] == t)[0][0]\n for t in desired_times_unix_sec\n ], dtype=int)\n\n prediction_dict = subset_by_index(\n prediction_dict=prediction_dict, desired_indices=desired_indices\n )\n\n return prediction_dict, desired_indices", "def _select_ds(self, first_date: np.datetime64,\n last_date: np.datetime64) -> xr.Dataset:\n first_ts = self._floor_to_dt(first_date)\n last_ts = self._floor_to_dt(last_date) + self._dt\n first_month = first_ts.astype(\"M8[M]\")\n last_month = last_ts.astype(\"M8[M]\")\n ts = self._ts[\"date\"]\n if first_month < ts[0] or last_month > ts[-1]:\n upper_limit = (ts[-1] + np.timedelta64(1, 'M')).astype(\"M8[s]\")\n raise IndexError(\n f\"period [{first_date}, {last_date}] is out of range: \"\n f\"[{ts[0].astype('M8[s]')}, {upper_limit}[\")\n mask = (ts >= first_month) & (ts <= last_month)\n\n paths = self._ts[\"path\"][mask]\n ds = xr.open_dataset(paths[0]).isel(ocean_time=slice(0, -1, None))\n z0 = ds.sel(ocean_time=slice(first_ts, last_ts))\n\n if len(paths) > 1:\n ds = xr.open_dataset(paths[1]).isel(ocean_time=slice(0, -1, None))\n z1 = ds.sel(ocean_time=slice(first_ts, last_ts))\n return xr.concat([z0, z1], dim=\"ocean_time\")\n return z0", "def truncate_data(self, width):\n times_from_mid = self.time - self.midtime\n idxs = np.abs(times_from_mid) < 0.5 * width * self.duration\n self.time = self.time[idxs]\n self.flux = self.flux[idxs]", "def at(self, t, tol=None):\r\n return self.data[..., self.time.index_at(t)]", "def getDataWithTimeIndex(self, t):\n\n return self.sensorDf.iloc[t,:self.sensorChannels].values", "def subsetlatlon(df, lat_range, lon_range):\n return df.loc[df['lat'].isin(lat_range) & df['lon'].isin(lon_range)]" ]
[ "0.61725897", "0.60649836", "0.5830782", "0.5813046", "0.56072766", "0.55780876", "0.5566176", "0.55291396", "0.5466052", "0.54169613", "0.5414688", "0.5406871", "0.5399697", "0.53718024", "0.5317835", "0.5213868", "0.5211027", "0.51781565", "0.5149405", "0.5148659", "0.51393837", "0.51390374", "0.51226526", "0.51171595", "0.51143813", "0.5114055", "0.5109763", "0.5079562", "0.50746655", "0.5050336" ]
0.659993
0
Add metadata attributes to Dataset or DataArray
def _add_metadata_as_attrs(data, units, description, dtype_out_vert): if isinstance(data, xr.DataArray): return _add_metadata_as_attrs_da(data, units, description, dtype_out_vert) else: for name, arr in data.data_vars.items(): _add_metadata_as_attrs_da(arr, units, description, dtype_out_vert) return data
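For readers unfamiliar with xarray's metadata model: both DataArray objects and the variables of a Dataset carry a plain dict in .attrs, which is what the function above writes to. A short usage sketch with made-up variable names and units:

import numpy as np
import xarray as xr

arr = xr.DataArray(np.zeros((2, 3)), dims=('time', 'lat'), name='precip')
arr.attrs['units'] = 'kg m^-2 s^-1'
arr.attrs['description'] = 'surface precipitation rate'

ds = xr.Dataset({'precip': arr})
for name, var in ds.data_vars.items():
    print(name, var.attrs.get('units'))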
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_metadata(ds, metadata):\n\n ds.attrs.update(metadata)\n\n return ds", "def add_metadata(self, metadata: dict) -> None:", "def set_metadata(self, data):\r\n pass", "def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):\n if dtype_out_vert == 'vert_int':\n if units != '':\n units = '(vertical integral of {0}): {0} kg m^-2)'.format(units)\n else:\n units = '(vertical integral of quantity with unspecified units)'\n data.attrs['units'] = units\n data.attrs['description'] = description\n return data", "def dataset_meta(self, dataset_meta: dict) -> None:\n self._dataset_meta = dataset_meta", "def metadata(self, df):\n raise NotImplementedError(\"missing metadata() method\")", "def _attach_metadata(self):\n self.dataset.create_metadata(\"watertightness\", \"float\", \"1.0 if the mesh is watertight, 0.0 if it is not\")\n self.dataset.attach_metadata_func(\"watertightness\", DexNet.is_watertight, overwrite=False, store_func=True)\n self.dataset.create_metadata(\"num_con_comps\", \"float\", \"Number of connected components (may not be watertight) in the mesh\")\n self.dataset.attach_metadata_func(\"num_con_comps\", object(), overwrite=False, store_func=True)", "def add_metadata(self, metdatadata_list):\n self._metadata.append((metdatadata_list, ))", "def add_metadata(self, metdatadata_list):\n self._metadata.append((metdatadata_list, ))", "def add_metadata (self, name, value):\n self.metadata[name] = value\n return self", "def add_metadata(self, metadata_dict: Dict[str, any]) -> None:\n for key in metadata_dict:\n self.metadata[key] = metadata_dict[key]\n if key == \"label\":\n self.set_label(metadata_dict[key])\n elif key == \"max_offset\":\n self.add_max_offset(metadata_dict[\"max_offset\"])", "def set_dataset_metadata(metadata):\n set_to_db(key='metadata', str_value=json.dumps(metadata))", "def __appendMetaData(self, filename):\n metadata = {'Model': 'LFM',\n 'Source': filename,\n 'Date processed': datetime.datetime.now(),\n 'Start date': self.startDate\n }\n \n self.data.append(key='meta',\n name='Metadata for LFM Solar Wind file',\n units='n/a',\n data=metadata)", "def metadata(self, metadata: Mapping[str, str]):\r\n self._metadata = metadata", "def metadata(self, metadata: Mapping[str, str]):\r\n self._metadata = metadata", "def add_metadata(self, key, value):\n self._h5.attrs[key] = value", "def set_metadata(self, attribute, value):\n self.metadata[attribute] = value", "def set_timeseries_metadata(self, dataset_names):\n for dataset_name in dataset_names:\n if dataset_name in self:\n self[dataset_name].dataset_metadata.update({\n 'version': SCHEMA_VERSION,\n 'units': self.config[dataset_name]['units']\n })\n self[dataset_name].group_metadata.update({'source': 'lmt'})", "def set_metadata(d, metadata):\n for data in metadata:\n d = set_dict_attrs(d, {'.'.join(data.keys()[0].split('.')[1:]): data.values()[0]})\n return d", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def define_metadata(cls, pcm):\n raise NotImplementedError()", "def WriteMetadata(self, metadata, overwrite=True):\n if not overwrite and 'meta' in metadata:\n raise errors.KeyczarError('\"meta\" attribute already 
exists')\n self.dict['meta'] = str(metadata)", "def add_metadata(data):\n data[\"last_downloaded\"] = util.utc_now()\n return data", "def set_metadata_about_dataset(self):\n date=QDateTime(QDate.currentDate(),QTime.currentTime())\n self.dataset_attributes.child('dataset_info','date_time').setValue(date)\n res = self.show_file_attributes('dataset')\n return res" ]
[ "0.7955883", "0.7114015", "0.70907336", "0.70366824", "0.6820789", "0.66713685", "0.6642246", "0.6585722", "0.6585722", "0.65347093", "0.64802325", "0.6474943", "0.6464878", "0.6454285", "0.6454285", "0.64433736", "0.63965815", "0.63449633", "0.6330062", "0.62801987", "0.62801987", "0.62801987", "0.62801987", "0.62801987", "0.62801987", "0.62801987", "0.6269898", "0.6265209", "0.6255382", "0.6228402" ]
0.80313355
0
Find the .whl file in the dist folder.
def _find_wheel(ctx):
    wheel = ctx.path.ant_glob("dist/*-" + VERSION + "-*.whl")
    if not len(wheel) == 1:
        ctx.fatal("No wheel found (or version mismatch)")
    else:
        wheel = wheel[0]
        Logs.info("Wheel %s", wheel)
        return wheel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_wheel():\n wheels = sorted(DIST.glob(\"*.whl\"))\n if not wheels:\n subprocess.check_call([\"pyproject-build\", \".\", \"--wheel\", \"--no-isolation\"], cwd=ROOT)\n wheels = sorted(DIST.glob(\"*.whl\"))\n return wheels[-1]", "def upload_wheels():\n build()\n sh(\"%s -m twine upload dist/*.whl\" % PYTHON)", "def dist(c, wheel=False):\n commands = \"sdist\" if not wheel else \"sdist bdist_wheel\"\n c.run(f\"python {SETUP_FILE} {commands}\")", "def dist(context):\n context.run(\"python setup.py sdist\")\n context.run(\"python setup.py bdist_wheel\")", "def dist_build(c):\n c.run(\"python setup.py sdist\")\n c.run(\"python setup.py bdist_wheel\")\n c.run(\"twine check dist/*\")", "def get_built_wheel_path(link):\n sdist_path = download_file(\n link.url, filename=link.filename, check=link.check_download,\n )\n container = os.path.dirname(sdist_path)\n\n unpacked_dir = tempfile.mkdtemp()\n atexit.register(shutil.rmtree, unpacked_dir, ignore_errors=True)\n unpack_file(sdist_path, unpacked_dir, None, PipLink(link))\n\n wheel_content_dir = tempfile.mkdtemp()\n atexit.register(shutil.rmtree, wheel_content_dir, ignore_errors=True)\n proc = subprocess.Popen(\n [sys.executable, 'setup.py', 'bdist_wheel', '-d', container],\n cwd=unpacked_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n )\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n warnings.warn('failed to build wheel\\n{}'.format(stderr))\n return None\n for fn in os.listdir(container):\n if fn != os.path.basename(sdist_path):\n return os.path.join(container, fn)\n warnings.warn('failed to find built wheel')\n return None", "def test_find_wheel_supported(\n self, data: TestData, monkeypatch: pytest.MonkeyPatch\n ) -> None:\n monkeypatch.setattr(\n pip._internal.utils.compatibility_tags,\n \"get_supported\",\n lambda **kw: [(\"py2\", \"none\", \"any\")],\n )\n\n req = install_req_from_line(\"simple.dist\")\n finder = make_test_finder(find_links=[data.find_links])\n found = finder.find_requirement(req, True)\n assert found is not None\n assert found.link.url.endswith(\"simple.dist-0.1-py2.py3-none-any.whl\"), found", "def wheels():\n with lcd(env.local_path):\n put('./requirements.txt', '/srv/build/wheel_requirements.txt')\n put('./etc/base_image/image_requirements.txt',\n '/srv/build/requirements.txt')\n\n with cd('/srv/build/wheelhouse'):\n run('rm -rf *.whl')\n\n compose(cmd='-f service.yml -p %s run --rm wheel-factory' %\n env.project_name, path='/srv/build')", "def find_build_dir(hw, r):\n os.chdir(hw)\n find_cache(hw, r);\n os.chdir(\"..\")", "def install():\n return {\n \"actions\": [TaskCreator.get_pip() + \" install --upgrade dist/*.whl\"],\n \"verbosity\": 2,\n \"setup\": [\"make_distribution\"],\n }", "def test_wheel_over_sdist_priority(self, data: TestData) -> None:\n req = install_req_from_line(\"priority\")\n finder = make_test_finder(find_links=[data.find_links])\n found = finder.find_requirement(req, True)\n assert found is not None\n assert found.link.url.endswith(\"priority-1.0-py2.py3-none-any.whl\"), found", "def perform_register(path, file_name):\n subprocess.call(\n [sys.executable, 'setup.py', 'sdist', 'bdist_wheel'], cwd=path)\n subprocess.call(['twine', 'register', '-r', 'pypi', os.path.join(\n path, 'dist', file_name + '.tar.gz')])\n subprocess.call(['twine', 'register', '-r', 'pypi', os.path.join(\n path, 'dist', file_name + '-py3-none-any.whl')])", "def check():\n dist_path = Path(DIST_PATH)\n if not dist_path.exists() or not list(dist_path.glob('*')):\n print(\"No distribution files 
found. Please run 'build' command first\")\n return\n\n subprocess.check_call(['twine', 'check', 'dist/*'])", "def get_exe_path(exe):\n for type_, path in get_possible_paths():\n full_path = os.path.join(path, exe)\n if os.path.exists(full_path):\n if type_ == 'bundled':\n bundled_warning()\n return full_path\n return None", "def find_cache(hw, r):\n\n cmd = \"find . -name CMakeCache.txt\"\n p = Popen(cmd, shell=True, stdout=PIPE)\n out = p.stdout.readlines()\n p.stdout.close()\n p.wait()\n \n # Transform the output into something readable.\n for i in out:\n found = i.split(':')\n \n # Scrub the path name\n path = os.path.dirname(found[0])[2:]\n if path == \"__build__\":\n continue\n r.note(\"found build files in '{0}'\".format(path))", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def _maybe_add_setuptools(self, ws, dist):\n pass", "def _maybe_add_setuptools(self, ws, dist):\n pass", "def find_py_atleast( minver ):\n wantver = canon_ver(minver)\n\n for pyexe, info in get_python_verlist():\n \n thisver = canon_ver(info[2])\n\n if thisver >= wantver:\n return pyexe\n\n # can't satisfy requirement\n return None", "def collectstatic(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('bin/django collectstatic --noinput')", "def sdist():\n pass", "def locate_vcredist_dir(plat):\n from setuptools import msvc\n\n vcvars = msvc.msvc14_get_vc_env(plat)\n try:\n vcruntime = vcvars[\"py_vcruntime_redist\"]\n except KeyError:\n warn(f\"platform={plat}, vcvars=\")\n pprint(vcvars, stream=sys.stderr)\n\n warn(\n \"Failed to get py_vcruntime_redist via vcvars, may need to set it in %PATH%\"\n )\n return None\n redist_dir, dll = os.path.split(vcruntime)\n # add redist dir to $PATH so that it can be found\n os.environ[\"PATH\"] += os.pathsep + redist_dir\n return redist_dir", "def find_data(relpath):\n # Because we are in a py_binary, Bazel's wrapper script sets up our\n # $PYTHONPATH to have our resources somewhere on a sys.path entry.\n for one_path in sys.path:\n possible = os.path.join(one_path, relpath)\n if os.path.exists(possible):\n return possible\n raise IOError(\n errno.ENOENT,\n \"Could not find data {}\".format(relpath),\n relpath)", "def find_project(hw, r):\n\n # Search in the current directory for a CMakeLists.txt file that\n # contains something like the given project.\n cmd = \"find . 
-name CMakeLists.txt -exec grep -Hi {0} {{}} \\; | grep -i project\".format(hw)\n p = Popen(cmd, shell=True, stdout=PIPE)\n out = p.stdout.read()\n p.stdout.close()\n p.wait()\n\n # Transform the output into something readable.\n for i in out:\n found = i.split(':')\n \n # Scrub the path name\n path = os.path.dirname(found[0])[2:]\n if not path:\n path = \"top-level directory\"\n else:\n path = \"directory '{0}'\".format(path)\n r.note(\" possible candidate in the {0}\".format(path))", "def _get_next_wb(self) -> Optional[Package]:\n for dist in self.distributions:\n for arch in self.architectures:\n response = self._query_wannabuild(arch, dist,\n '--list=needs-build')\n pending = response.split('\\n')\n if not pending[0]:\n continue\n result = self._take(pending[0])\n if result:\n return result\n return None", "def build():\n local('python' + python_version + ' setup.py bdist_egg')", "def full_build_path(self, version='latest'):\n return os.path.join(self.conf_dir(version), \"_build\", \"html\")", "def get_setup_file():\n repo_fs()\n return SETUP_FILES", "def dist():\n PackCommandExecutor().pack()\n DistCommandExecutor().dist()", "def get_htdocs_dirs(self):\n\t\tfrom pkg_resources import resource_filename\n\t\treturn [('hw', resource_filename(__name__, 'htdocs'))]" ]
[ "0.60192287", "0.58031386", "0.535494", "0.5350329", "0.5279466", "0.5263522", "0.523158", "0.5209095", "0.5129796", "0.5092573", "0.50611657", "0.50155604", "0.5009956", "0.49787986", "0.49689916", "0.49234095", "0.49003977", "0.49003977", "0.48994312", "0.48742357", "0.47858614", "0.47393313", "0.47359252", "0.4715705", "0.47076553", "0.46943313", "0.46922183", "0.46818975", "0.4658748", "0.46368796" ]
0.7416635
0
Add an Inline Auth Helper to a Pluggable Auth Service.
def addInlineAuthHelper(dispatcher, id, title=None, REQUEST=None):
    iah = InlineAuthHelper(id, title)
    dispatcher._setObject(iah.getId(), iah)
    if REQUEST is not None:
        REQUEST['RESPONSE'].redirect('%s/manage_workspace'
                                     '?manage_tabs_message='
                                     'InlineAuthHelper+added.'
                                     % dispatcher.absolute_url())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_virtual_authenticator(self, config):\n pass", "def manage_addBlowfishExtendedCookieAuthHelper(self, id, title='',\n RESPONSE=None, **kw):\n\n self = self.this()\n\n o = BlowfishExtendedCookieAuthHelper(id, title, **kw)\n self._setObject(o.getId(), o)\n o = getattr(aq_base(self), id)\n\n if RESPONSE is not None:\n RESPONSE.redirect('manage_workspace')", "def add_auth(self, http_request):\r\n pass", "def InvocationAddCallerAuthid(builder, callerAuthid):\n return AddCallerAuthid(builder, callerAuthid)", "def register_auth(self):\n\n # pylint: disable=missing-return-doc, missing-return-type-doc\n def decorator(func):\n self.auth_func = func\n return func\n\n return decorator", "def add_auth_opts(options, service_type=None):\n def add_options(opts, opts_to_add):\n for new_opt in opts_to_add:\n for opt in opts:\n if opt.name == new_opt.name:\n break\n else:\n opts.append(new_opt)\n\n opts = copy.deepcopy(options)\n opts.insert(0, ks_loading.get_auth_common_conf_options()[0])\n # NOTE(dims): There are a lot of auth plugins, we just generate\n # the config options for a few common ones\n plugins = ['password', 'v2password', 'v3password']\n for name in plugins:\n plugin = ks_loading.get_plugin_loader(name)\n add_options(opts, ks_loading.get_auth_plugin_conf_options(plugin))\n add_options(opts, ks_loading.get_session_conf_options())\n if service_type:\n adapter_opts = ks_loading.get_adapter_conf_options(\n include_deprecated=False)\n # adding defaults for valid interfaces\n cfg.set_defaults(adapter_opts, service_type=service_type,\n valid_interfaces=DEFAULT_VALID_INTERFACES)\n add_options(opts, adapter_opts)\n opts.sort(key=lambda x: x.name)\n return opts", "def auth(self, user):", "def add_extra_login(request):\n context = RequestContext(request, {\n 'providers': provider_details()})\n return render(request, 'add-login.html', context)", "def auth():\n pass", "def auth():\n pass", "def enable_auth(self):\n\n self._api_manager.enable_auth()", "def authentication_hook(self):\n pass", "def _setup_threat_intel_auth_subparser(subparsers):\n generate_subparser(\n subparsers,\n 'update-auth',\n description='Enable, disable, or configure the threat intel downloader function',\n subcommand=True\n )", "def add_user_auth(user_id, name=None):\n col = ['id']\n add_entry(choose_database(\"auth\"), \"users\", col, [user_id])\n if name: # use username for settings\n settings_col = ['user']\n add_entry(choose_database(\"datadb\"), \"settings\", settings_col, [name])\n if user_id < 0: # group\n settings_col = ['id']\n add_entry(choose_database(\"datadb\"), \"group_settings\", settings_col, [user_id])", "def configure_ext_login(app):\n lm.init_app(app)\n\n @lm.user_loader\n def load_user(userid):\n \"\"\"\n Needed for flask-login.\n \"\"\"\n return models.User.query.get(int(userid))\n\n @app.before_request\n def set_g_user():\n g.user = current_user", "def add_tomcat7_idp():\n pass", "def require_admin(handler_method):\n def Decorate(self):\n if not users.is_current_user_admin():\n self.error(401)\n html = '<html><body><a href=\"%s\">Sign in</a></body></html>'\n self.response.out.write(html % (users.create_login_url(self.request.url)))\n return\n return handler_method(self)\n return Decorate", "def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)", "def enable_aaa_authentication_login(device,auth_list,auth_db1,auth_db2=None):\n\n cmd = f'aaa authentication login {auth_list} {auth_db1}'\n if 
auth_db2:\n cmd += f' {auth_db2}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa authentication login:\\n{e}'\n )", "def manage_addTaskQueueAuthPlugin(dispatcher, id, title=None, REQUEST=None):\n sp = TaskQueueAuthPlugin(id, title=title)\n dispatcher._setObject(id, sp)\n\n if REQUEST is not None:\n REQUEST.response.redirect(\n '%s/manage_workspace?'\n 'manage_tabs_message=TaskQueue+PAS+plugin+created.' %\n dispatcher.absolute_url()\n )", "def authenticator():", "def addAuth(self, hostmask):\n if self.checkHostmask(hostmask, useAuth=False) or not self.secure:\n self.auth.append((time.time(), hostmask))\n else:\n raise ValueError, 'secure flag set, unmatched hostmask'", "def attach_interface(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n return Interface(self.session, self, func(self, *args, **kwargs))\n return wrapper", "def add_helper(self, helper, name=None, static=False, package=None,\n replace=False):\n helper = load_object(helper, package=package)\n if name is None:\n name = helper.__name__\n if static:\n helper = staticmethod(helper)\n self.register('helper', helper, name, replace=replace)\n if abcs.AHelpers not in self:\n helpers_factory = self.get_setting('helpers_factory')\n self.register(abcs.AHelpers, load_object(helpers_factory))", "def add_endpoint_to_sipserver(self, endpoint: str, password: str) -> None:", "def register(app):\n app.register_plugin('jwtvalidate', execute, helptext())", "def authenticate(self, func):\n self._authentication_callback = func\n return func", "def mocked_authenticator(f):\n def internal(name):\n return f(name)\n return internal", "def set_credentials_helper(cls, cred_helper):\n cls.credentials_helper = cred_helper", "def get_service_auth(context, endpoint, service_auth):\n # TODO(pas-ha) use auth plugin from context when it is available\n user_auth = token_endpoint.Token(endpoint, context.auth_token)\n return service_token.ServiceTokenAuthWrapper(user_auth=user_auth,\n service_auth=service_auth)" ]
[ "0.5557414", "0.55137914", "0.53382605", "0.53051865", "0.5261152", "0.50570595", "0.5031519", "0.5024108", "0.50065255", "0.50065255", "0.4975993", "0.49440524", "0.4900964", "0.48620814", "0.48328233", "0.48326203", "0.48200023", "0.48074985", "0.48062065", "0.47664997", "0.4760247", "0.47250843", "0.47204617", "0.47065312", "0.47018167", "0.46772534", "0.4634753", "0.46074268", "0.45920247", "0.45757964" ]
0.74920344
0
Extract credentials from cookie or 'request'.
def extractCredentials(self, request):
    creds = {}
    # Look in the request for the names coming from the login form
    login = request.get('__ac_name', '')
    password = request.get('__ac_password', '')

    if login:
        creds['login'] = login
        creds['password'] = password

    if creds:
        creds['remote_host'] = request.get('REMOTE_HOST', '')
        try:
            creds['remote_address'] = request.getClientAddr()
        except AttributeError:
            creds['remote_address'] = request.get('REMOTE_ADDR', '')

    return creds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extractCredentials(self, request):\n\n cookie = request.cookies.get('.ASPXAUTH')\n creds = {}\n creds['cookie'] = cookie\n creds['plugin'] = self.getId()\n\n return creds", "def extractCredentials( self, request ):\n #log( 'extractCredentials')\n\n creds = {}\n session = request.SESSION\n username = None\n\n tokenTool = getToolByName(self, 'onetimetoken_storage')\n\n ob = session.get(self.session_var)\n if ob is not None and isinstance(ob, UsernameStorage):\n username = ob._getUsername()\n #log( \"session username: %s\" % username )\n \n if username is None: \n loginCode = request.get('logincode')\n\n if not loginCode:\n return None # not authenticated\n\n try:\n username = tokenTool.verifyToken(loginCode)\n except:\n log( \"Error, token tool refused token: %s\" % sys.exc_info()[0] )\n\n if not username:\n return None # not authenticated\n\n #log( \"token username: %s\" % username )\n\n userstorage = UsernameStorage()\n userstorage._setUsername(username)\n session[self.session_var] = userstorage\n\n creds['remote_host'] = request.get('REMOTE_HOST', '')\n try:\n creds['remote_address'] = request.getClientAddr()\n except AttributeError:\n creds['remote_address'] = request.get('REMOTE_ADDR', '')\n\n\n creds['login'] = username\n\n # log( \"returning username: %s\" % username )\n\n return creds", "def extractCredentials(self, request):\n \n creds = {}\n# import pdb\n# pdb.set_trace()\n if self.jid_auth_header in request.keys():\n dn = request.get(self.jid_auth_header, '')\n if not bool(dn):return creds\n # fetch remote ip\n creds['clientip'] = get_ip(request)\n # Looking into the cookie first...\n if self.cookie_name in request.keys():\n try:\n creds[\"cookie\"] = binascii.a2b_base64(\n request.get(self.cookie_name)\n )\n except binascii.Error:\n # If we have a cookie which is not properly base64 encoded it\n # can not be ours.\n return creds\n else:\n ticket = creds[\"cookie\"] \n ticket_data = self._validateTicket(ticket)\n if ticket_data is not None:\n (digest, userid, tokens, user_data, timestamp) = ticket_data\n creds[\"login\"] = userid\n creds[ 'password' ] = userid\n creds['init_login'] = False\n creds[\"source\"] = \"emc.session\" \n return creds \n \n login_pw = self.extractAuthGWInfo(dn) \n if login_pw is not None:\n id, name, idnumber = login_pw\n creds[ 'login' ] = id\n creds[ 'password' ] = idnumber \n creds[\"cookie\"] = \"\"\n creds['init_login'] = True\n creds[\"url\"] = request['URL']\n creds[\"source\"] = \"emc.session\"\n return creds\n\n else:\n if self.cookie_name in request.keys():\n\n try:\n creds[\"cookie\"] = binascii.a2b_base64(\n request.get(self.cookie_name)\n )\n except binascii.Error:\n # If we have a cookie which is not properly base64 encoded it\n # can not be ours.\n return creds\n else:\n ticket = creds[\"cookie\"] \n ticket_data = self._validateTicket(ticket)\n if ticket_data is not None:\n# (digest, userid, tokens, user_data, timestamp) = ticket_data\n #fire a logout event and call resetCredentials\n logging.info(\"logout\")\n from plone import api\n url = \"%s/index.html\" % api.portal.get().absolute_url()\n if url == request['URL']:\n logout(request)\n self.resetCredentials(request, request['RESPONSE']) \n return creds\n else:\n return creds\n \n else:\n return creds", "def load_session_credentials(request_handler):\n session = sessions.LilCookies(request_handler, SESSION_SECRET)\n userid = session.get_secure_cookie(name='userid')\n if userid:\n return userid, StorageByKeyName(Credentials, userid, 'credentials').get()\n else:\n return None, None", 
"def extract_cookie_info():\n\t# setup cookie jar\n\tcj = cookielib.CookieJar()\n\tlogin_data = urllib.urlencode({ID_USERNAME: USERNAME, \n\t\tID_PASSWORD: PASSWORD})\n\t# create url opener\n\topener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n\tresp = opener.open(LOGIN_URL, login_data)\n\n\t# Send login info\n\tfor cookie in cj:\n\t\tprint \"----first time cookie: %s --> %s\" % (cookie.name, cookie.value)\n\tprint \"Headers: %s\" % resp.headers", "def extract_cookie_info():\n # setup cookie jar\n cj = cookielib.CookieJar()\n login_data = urllib.urlencode({ID_USERNAME: USERNAME,ID_PASSWORD: PASSWORD})\n # create url opener\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n resp = opener.open(LOGIN_URL, login_data)\n # send login info\n for cookie in cj:\n print \"----First time cookie: %s --> %s\" % (cookie.name,cookie.value)\n print \"Headers: %s\" % resp.headers\n # now access without any login info\n resp = opener.open(NORMAL_URL)\n for cookie in cj:\n print \"++++Second time cookie: %s --> %s\" % (cookie.name,cookie.value)\n print \"Headers: %s\" % resp.headers", "def extract_credentials(url):\n parts = urlsplit(url)\n netloc = parts[1]\n if '@' in netloc:\n creds, netloc = netloc.split('@')\n credentials = tuple(_unquote(i) for i in creds.split(':'))\n parts = list(parts)\n parts[1] = netloc\n else:\n credentials = None\n return urlunsplit(parts), credentials", "def parse_cookies( headers ):", "def _config_credentials_get():\n user = input(\"username:\")\n password = getpass.getpass()\n url = input(\"url:\")\n return user, password, url", "def get_auth_cookies(self):\n # type: () -> CookiesTupleType\n cookies = []\n if self.request and self.request.http_request:\n for name in [\"Cookie\", \"Set-Cookie\"]:\n headers = get_cookie_headers(self.request.http_request, name)\n cookies.extend([(key, value) for key, value in headers.items()])\n return cookies", "async def authenticate(self, request: web.Request) -> Dict[str, Any]:", "def parse_cookie(request):\n\n raw_cookie = request.message.get('Cookie','')\n return SimpleCookie(raw_cookie)", "def authenticate(request=None, **credentials):\n print request, credentials", "def _get_client_info():\n if hasattr(request.authorization, 'username'):\n auth_user = request.authorization.username\n else:\n auth_user = 'Unknown'\n info = request.headers\n origin_string = info.get(\"User-Agent\", \"\")\n origin_props = {}\n if origin_string:\n try:\n origin_props = dict(\n [_.split(\"/\", 1) for _ in origin_string.split()]\n )\n except ValueError:\n pass\n prog_name = origin_props.get(\"prog_name\", \"Unknown\")\n uuid = origin_props.get(\"uuid\", uuid4())\n host = info.get(\"Host\", \"Unknown\")\n if info.get(\"From\") and \"@\" in info[\"From\"]:\n user = info[\"From\"].split(\"@\")[0]\n else:\n user = (\"Unknown\")\n return auth_user, prog_name, user, host, uuid", "def GetUserCredentials():\n email = options.email\n if email is None:\n email = GetEmail(\"Email (login for uploading to %s)\" % options.server)\n password = getpass.getpass(\"Password for %s: \" % email)\n return (email, password)", "def decode_auth_headers(request: Request) -> Tuple[str, str]:\n authorization = request.headers.get(\"Authorization\", \"\")\n\n headers = CaseInsensitiveDict({\"WWW-Authenticate\": \"Basic\"})\n\n scheme, param = get_authorization_scheme_param(authorization)\n if not authorization or scheme.lower() != \"basic\":\n raise InvalidClientError(request=request, headers=headers)\n\n try:\n data = b64decode(param).decode(\"ascii\")\n 
except (ValueError, UnicodeDecodeError, binascii.Error):\n raise InvalidClientError(request=request, headers=headers)\n\n client_id, separator, client_secret = data.partition(\":\")\n\n if not separator:\n raise InvalidClientError(request=request, headers=headers)\n\n return client_id, client_secret", "def parse_auth(header):\r\n try:\r\n method, data = header.split(None, 1)\r\n if method.lower() == 'basic':\r\n #TODO: Add 2to3 save base64[encode/decode] functions.\r\n user, pwd = touni(base64.b64decode(tob(data))).split(':',1)\r\n return user, pwd\r\n except (KeyError, ValueError):\r\n return None", "def identify(self, environ):\n \n path_info = environ['PATH_INFO']\n script_name = environ.get('SCRIPT_NAME') or '/'\n query = parse_dict_querystring(environ)\n \n if path_info == self.login_handler_path:\n ## We are on the URL where repoze.who processes authentication. ##\n # Let's append the login counter to the query string of the\n # \"came_from\" URL. It will be used by the challenge below if\n # authorization is denied for this request.\n form = parse_formvars(environ)\n form.update(query)\n try:\n credentials = {\n 'login': form['login'].lower(),\n 'password': form['password']\n }\n except KeyError:\n credentials = None\n referer = environ.get('HTTP_REFERER', script_name)\n destination = form.get('came_from', referer)\n \n if self.post_login_url:\n # There's a post-login page, so we have to replace the\n # destination with it.\n destination = self._get_full_path(self.post_login_url,\n environ)\n if 'came_from' in query:\n # There's a referrer URL defined, so we have to pass it to\n # the post-login page as a GET variable.\n destination = self._insert_qs_variable(destination,\n 'came_from',\n query['came_from'])\n failed_logins = self._get_logins(environ, True)\n new_dest = self._set_logins_in_url(destination, failed_logins)\n environ['repoze.who.application'] = HTTPFound(new_dest)\n return credentials\n\n elif path_info == self.logout_handler_path:\n ## We are on the URL where repoze.who logs the user out. ##\n form = parse_formvars(environ)\n form.update(query)\n referer = environ.get('HTTP_REFERER', script_name)\n came_from = form.get('came_from', referer)\n # set in environ for self.challenge() to find later\n environ['repoze.who.application'] = HTTPUnauthorized()\n return None\n \n elif path_info == self.login_form_url or self._get_logins(environ):\n ## We are on the URL that displays the from OR any other page ##\n ## where the login counter is included in the query string. 
##\n # So let's load the counter into the environ and then hide it from\n # the query string (it will cause problems in frameworks like TG2,\n # where this unexpected variable would be passed to the controller)\n environ['repoze.who.logins'] = self._get_logins(environ, True)\n # Hiding the GET variable in the environ:\n if self.login_counter_name in query:\n del query[self.login_counter_name]\n environ['QUERY_STRING'] = urlencode(query, doseq=True)", "def get_account_credentials(call):\n account = call.data.get(CONF_SPOTIFY_ACCOUNT)\n user = username\n pwd = password\n if account is not None:\n _LOGGER.debug('setting up with different account than default %s', account)\n user = accounts.get(account).get(CONF_USERNAME)\n pwd = accounts.get(account).get(CONF_PASSWORD)\n return user, pwd", "def extract_token_from_cookie(request):\n try:\n token = request.headers.cookie['csrf_token'].value\n except KeyError:\n token = None\n else:\n token = _sanitize_token(token)\n\n # Don't set a CSRF cookie on assets, to avoid busting the cache due to the\n # Vary header we set below. Don't set it on callbacks, because we use IP\n # filtering there.\n\n if request.path.raw.startswith('/assets/') or request.path.raw.startswith('/callbacks/'):\n token = None\n else:\n token = token or _get_new_token()\n\n return {'csrf_token': token}", "def user_credentials(self):\r\n credentials = {}\r\n if EMAIL_AUTHENTICATION:\r\n credentials[\"email\"] = self.cleaned_data[\"email\"]\r\n else:\r\n credentials[\"username\"] = self.cleaned_data[\"username\"]\r\n credentials[\"password\"] = self.cleaned_data[\"password\"]\r\n return credentials", "def __init__(self, request, field='staff_token'):\n self.request = request\n self.token_input = request.cookies.get(field)", "def user_credentials(self):\r\n credentials = {}\r\n if EMAIL_AUTHENTICATION:\r\n credentials[\"email\"] = self.cleaned_data[\"email\"]\r\n else:\r\n credentials[\"username\"] = self.cleaned_data[\"username\"]\r\n credentials[\"password\"] = self.cleaned_data[\"password1\"]\r\n return credentials", "def get_headers(req):\n user = req.headers.get('X-User-ID', None)\n tenant = req.headers.get('X-Tenant-ID', None)\n return user, tenant", "def decode_username_and_password():\n try:\n # cherrypy.log.error(\"decoding username and password\")\n user_name = str(base64_decode(cherrypy.request.json[\"user_name\"]).decode())\n password = str(base64_decode(cherrypy.request.json[\"password\"]).decode())\n except Exception as e:\n cherrypy.log.error(str(e))\n # cherrypy.log.error(\"username and password could not be decoded\")\n cherrypy.log.error(\"slycat-standard-authentication.py authenticate\", \"cherrypy.HTTPError 400\")\n raise cherrypy.HTTPError(400)\n return user_name, password", "def get_cookies2():\n cookies = {\n\n }\n\n return cookies", "def get_cookie():\n response = requests.get(base_url)\n cookie = response.url.split('/')[3]\n return cookie", "def decode(self, response, request):\n log.debug(\"Decoding authorization.\")\n auth = self._parseAuth(response)\n try:\n self._verifyChallenge(auth[\"challenge\"], request)\n creds = self.buildCredentials(auth, request)\n except KeyError, ke:\n raise LoginFailed(\"{0!r} not in authorization\".format(*ke.args))\n except LoginFailed, lf:\n log.warn(lf)\n raise\n log.debug(\"Decoded credentials: {0}\".format(creds))\n return creds", "def get_secure_cookie( name, value=None ):", "def credentials(self) -> Mapping:" ]
[ "0.7835554", "0.7634402", "0.7204632", "0.65182984", "0.6486379", "0.6455087", "0.61710817", "0.61479807", "0.611163", "0.60100037", "0.594791", "0.5943985", "0.5915417", "0.5796954", "0.5777253", "0.5771008", "0.57167053", "0.57153827", "0.5690814", "0.5690609", "0.56588995", "0.5610726", "0.55961496", "0.5573668", "0.5549058", "0.5541878", "0.55139095", "0.55115503", "0.55036163", "0.5487126" ]
0.779748
1
Get an example list_cluster call (for mocking)
def list_cluster_response():
    return {
        "clusters": [
            EXAMPLE_NAME
        ]
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_cluster_list(self):\n return self.__cluster_list", "def test_get_hyperflex_cluster_list(self):\n pass", "def test_list_cluster_network(self):\n pass", "def cluster_list():\n request_debug(r, logger)\n json_body = r.get_json(force=True, silent=True) or {}\n result = cluster_handler.list(filter_data=json_body)\n response_ok[\"data\"] = result\n return jsonify(response_ok), CODE_OK", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def list(args, config):\n\n api = config['API']\n headers = {}\n if args.stack_name:\n headers = {'stack-name': args.stack_name} # put stack name in headers\n r = requests.get(api['list'], headers=headers) # send the GET request\n print('\\nThe following clusters exist:\\n{}\\n'.format(r.json()))\n return", "def ListClusters(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))", "def test_list_cluster_policy(self):\n pass", "def run_example_cluster_cmd(example_module_name, example_argv):\n run_example_cluster(example_module_name, example_argv)", "def test_read_cluster_network(self):\n pass", "def describe_cluster_response():\n return {\n \"cluster\": {\n \"status\": \"ACTIVE\",\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def test_get_hyperflex_cluster_profile_list(self):\n pass", "def test_list_cluster_role(self):\n pass", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def list_clusters(ctx, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n clusters = ctx.obj.groups[project.id].clusters.get()\n pprint(clusters.data)", "def list_namespaced_cluster_network(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n 
all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_cluster_network\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/clusternetworks'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ClusterNetworkList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def list_vsan_clusters(self, detail=False, params=None, return_body=False):\n url = 'clusters'\n if detail:\n url += '/detail'\n if params:\n url += '?%s' % self._prepare_params(params)\n\n key = None if return_body else 'clusters'\n return self._ext_get(url, key)", "def get_clusters(cluster_name: Optional[str] = None,\n cluster_states: Optional[Sequence[str]] = None,\n cluster_types: Optional[Sequence[str]] = None,\n ids: Optional[Sequence[str]] = None,\n max_results: Optional[int] = None,\n name_regex: Optional[str] = None,\n next_token: Optional[str] = None,\n output_file: Optional[str] = None,\n payment_types: Optional[Sequence[str]] = None,\n resource_group_id: Optional[str] = None,\n tags: Optional[Mapping[str, Any]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClustersResult:\n __args__ = dict()\n __args__['clusterName'] = cluster_name\n __args__['clusterStates'] = cluster_states\n __args__['clusterTypes'] = cluster_types\n __args__['ids'] = ids\n __args__['maxResults'] = max_results\n __args__['nameRegex'] = name_regex\n __args__['nextToken'] = next_token\n __args__['outputFile'] = output_file\n __args__['paymentTypes'] = payment_types\n __args__['resourceGroupId'] = resource_group_id\n __args__['tags'] = tags\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('alicloud:emrv2/getClusters:getClusters', __args__, opts=opts, typ=GetClustersResult).value\n\n return AwaitableGetClustersResult(\n cluster_name=pulumi.get(__ret__, 'cluster_name'),\n cluster_states=pulumi.get(__ret__, 'cluster_states'),\n cluster_types=pulumi.get(__ret__, 'cluster_types'),\n clusters=pulumi.get(__ret__, 'clusters'),\n id=pulumi.get(__ret__, 'id'),\n ids=pulumi.get(__ret__, 'ids'),\n max_results=pulumi.get(__ret__, 'max_results'),\n 
name_regex=pulumi.get(__ret__, 'name_regex'),\n names=pulumi.get(__ret__, 'names'),\n next_token=pulumi.get(__ret__, 'next_token'),\n output_file=pulumi.get(__ret__, 'output_file'),\n payment_types=pulumi.get(__ret__, 'payment_types'),\n resource_group_id=pulumi.get(__ret__, 'resource_group_id'),\n tags=pulumi.get(__ret__, 'tags'),\n total_count=pulumi.get(__ret__, 'total_count'))", "def _load_cluster(self):", "def list_cluster(self, ip, x_api_session):\n log.log_debug(\"cluster object list is started\")\n list_object = ListModule.ListModule()\n object_list = list_object.listing(\"uom\", ip,\n self.root, self.content_type,\n \"Cluster\", x_api_session)\n log.log_debug(\"cluster object list is returned\")\n return object_list", "def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))", "def list(**kwargs):\n cluster_call(\"secret_list\", **kwargs)", "def test_get_hyperflex_cluster_by_moid(self):\n pass", "def atlas_clusters():\n pass", "def get_clusters(self):\n fields = ['name', ]\n return self.get_data(\"clusters\", fields)", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")", "def test_create_cluster_network(self):\n pass", "def describe_cluster_creating_response():\n return {\n \"cluster\": {\n \"status\": \"CREATING\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {},\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def get_cluster_list():\n\n cluster_list_command = [\"pcluster\", \"list\",\n \"--region\", AWS_REGION]\n\n cluster_list_returncode, cluster_list_stdout, cluster_list_stderr = run_subprocess_proc(cluster_list_command,\n capture_output=True)\n\n cluster_columns = [\"Name\", \"Status\", \"Version\"]\n\n if cluster_list_stdout is not None and not cluster_list_stdout.strip() == \"\":\n clusters_as_df = pd.DataFrame([row.split()\n for row in cluster_list_stdout.strip().split(\"\\n\")],\n columns=cluster_columns)\n else:\n logger.info(\"No clusters found\")\n sys.exit(0)\n\n return clusters_as_df" ]
[ "0.7539311", "0.7504863", "0.7477707", "0.7102973", "0.69092697", "0.6877012", "0.6751242", "0.6587598", "0.65839416", "0.65274453", "0.6473518", "0.64674175", "0.64640313", "0.6455506", "0.6433894", "0.6427904", "0.63871676", "0.6361137", "0.63391733", "0.63274634", "0.6314185", "0.62967765", "0.62958705", "0.6291808", "0.6287367", "0.62625223", "0.6237339", "0.623475", "0.6227805", "0.620256" ]
0.7792407
0
Get an example describe_cluster call during creation
def describe_cluster_creating_response():
    return {
        "cluster": {
            "status": "CREATING",
            "name": EXAMPLE_NAME,
            "certificateAuthority": {},
            "roleArn": "arn:aws:iam::111222333444/eksRole",
            "resourcesVpcConfig": {
                "subnetIds": [
                    "subnet-00000000000000000",
                    "subnet-00000000000000001",
                    "subnet-00000000000000002"
                ],
                "vpcId": "vpc-00000000000000000",
                "securityGroupIds": [
                    "sg-00000000000000000"
                ]
            },
            "version": "1.10",
            "arn": "arn:aws:eks:region:111222333444:cluster/" + EXAMPLE_NAME,
            "createdAt": 1500000000.000
        }
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_cluster_response():\n return {\n \"clusters\": [\n EXAMPLE_NAME\n ]\n }", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def describe_cluster_response():\n return {\n \"cluster\": {\n \"status\": \"ACTIVE\",\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def run_example_cluster_cmd(example_module_name, example_argv):\n run_example_cluster(example_module_name, example_argv)", "def _setup_test_cluster(self, return_cluster, name, create_args):\n stack_name = '{0}_stack'.format(name)\n templ, self.stack = self._setup_test_stack(stack_name, TEMPLATE)\n cluster_instance = cbd.CloudBigData('%s_name' % name,\n templ.resource_definitions(\n self.stack)['cbd_cluster'],\n self.stack)\n self._stubout_create(return_cluster)\n return cluster_instance", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def describe_cluster_no_status_response():\n return {\n \"cluster\": {\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def get_cluster_def():\n if settings.NO_OP:\n return None\n\n ensure_in_custer()\n\n cluster = os.getenv('POLYAXON_CLUSTER', None)\n try:\n return json.loads(cluster) if cluster else None\n except (ValueError, TypeError):\n print('Could get cluster definition, '\n 'please make sure this is running inside a polyaxon job.')\n return None", "def test_create_cluster_network(self):\n pass", "def test_get_hyperflex_cluster_by_moid(self):\n pass", "def cluster_description(self):\n if self._cluster_description is None:\n if self._parsed_globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._parsed_globals.region,\n endpoint_url=self._parsed_globals.endpoint_url,\n verify=self._parsed_globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description", "def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))", "def test_create_hyperflex_cluster_profile(self):\n pass", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def __str__(self):\n return \"Cluster\"", "def test_get_hyperflex_cluster_list(self):\n pass", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def describe(self) -> None:\n 
return {\n 'cluster_metadata': self.cluster_metadata,\n 'master_url': self.master_url\n }", "def create_cluster(module, switch_list):\n global CHANGED_FLAG\n output = ''\n new_cluster = False\n\n node1 = switch_list[0]\n node2 = switch_list[1]\n\n name = node1 + '-' + node2 + '-cluster'\n\n cli = pn_cli(module)\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = run_cli(module, cli)\n\n if cluster_list is not None:\n cluster_list = cluster_list.split()\n if name not in cluster_list:\n new_cluster = True\n\n if new_cluster or cluster_list is None:\n cli = pn_cli(module)\n cli += ' switch %s cluster-create name %s ' % (node1, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n output += '%s: Created cluster %s\\n' % (node1, name)\n\n return output", "def test_list_cluster_network(self):\n pass", "def createCluster(method, n_clust=3, min_samples=5):\n if method == 'SpectralClustering':\n clust = SpectralClustering(n_clusters=n_clust)\n clust.fit(PC)\n scat = plt.scatter(-100, -100, zorder=2)\n elif method == 'OPTICS':\n clust = OPTICS(min_samples=min_samples)\n clust.fit(PC)\n scat = plt.scatter(PC[clust.labels_ == -1, 0],\n PC[clust.labels_ == -1, 1], c='k')\n return clust, scat", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def create_cluster():\n config = get_kube_config()\n command = CLUSTER_CREATE_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n machine_type=config['machine_type'],\n disk_size=config['disk_size'],\n nodes=config['nodes'],\n zone=config['zone'])\n print \"Creating cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))\n command = AUTH_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n zone=config['zone'])\n print \"Authenticating with cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))", "def _load_cluster(self):", "def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])", "def test_eks_cluster_exists(self) -> None:\n cluster = self.eks.describe_cluster(name='andrew-jarombek-eks-v2')\n\n cluster_name = cluster.get('cluster').get('name')\n kubernetes_version = cluster.get('cluster').get('version')\n platform_version = cluster.get('cluster').get('platformVersion')\n cluster_status = cluster.get('cluster').get('status')\n\n self.assertEqual('andrew-jarombek-eks-v2', cluster_name)\n self.assertEqual('1.24', kubernetes_version)\n self.assertEqual('eks.6', platform_version)\n self.assertEqual('ACTIVE', cluster_status)", "def __str__(self):\n return \"Clustering\"", "def Cluster(request, io_loop):\n\n def ClusterConstructor(**kwargs):\n log = logging.getLogger(__file__)\n log.setLevel(logging.DEBUG)\n log.handlers = [logging.StreamHandler(sys.stdout)]\n 
kwargs['log'] = log\n engine_launcher_class = kwargs.get(\"engine_launcher_class\")\n\n if (\n isinstance(engine_launcher_class, str)\n and \"MPI\" in engine_launcher_class\n and shutil.which(\"mpiexec\") is None\n ):\n pytest.skip(\"requires mpiexec\")\n\n cfg = kwargs.setdefault(\"config\", Config())\n cfg.EngineLauncher.engine_args = ['--log-level=10']\n cfg.ControllerLauncher.controller_args = ['--log-level=10']\n kwargs.setdefault(\"controller_args\", ['--ping=250'])\n\n c = cluster.Cluster(**kwargs)\n assert c.config is cfg\n request.addfinalizer(c.stop_cluster_sync)\n return c\n\n yield ClusterConstructor", "def get_cluster_output(cluster_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetClusterResult]:\n ..." ]
[ "0.69354606", "0.680464", "0.6495944", "0.64750975", "0.6322822", "0.6270452", "0.62655216", "0.62579936", "0.62399894", "0.6176743", "0.6172865", "0.615929", "0.6146113", "0.61262864", "0.61135745", "0.6100122", "0.60850996", "0.60792947", "0.60370797", "0.5967636", "0.5960124", "0.59288204", "0.5926981", "0.59158", "0.58648115", "0.58603954", "0.582353", "0.5822605", "0.58224833", "0.5821543" ]
0.7227927
0
Get an example describe_cluster call during deletion
def describe_cluster_deleting_response(): return { "cluster": { "status": "DELETING", "endpoint": "https://endpoint.amazonaws.com", "name": EXAMPLE_NAME, "certificateAuthority": { "data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t" }, "roleArn": "arn:aws:iam::111222333444/eksRole", "resourcesVpcConfig": { "subnetIds": [ "subnet-00000000000000000", "subnet-00000000000000001", "subnet-00000000000000002" ], "vpcId": "vpc-00000000000000000", "securityGroupIds": [ "sg-00000000000000000" ] }, "version": "1.10", "arn": "arn:aws:eks:region:111222333444:cluster/" + EXAMPLE_NAME, "createdAt": 1500000000.000 } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_cluster(self):", "def Run(self, args):\n cluster_ref = args.CONCEPTS.cluster.Parse()\n items = [command_util.ClusterMessage(name=cluster_ref.vmwareClustersId)]\n\n if not args.validate_only:\n command_util.ConfirmationPrompt('cluster', items, 'deleted')\n\n client = apis.ClustersClient()\n operation = client.Delete(args)\n\n if args.validate_only:\n return None\n\n # when using --allow-missing without --async on a non-existing resource,\n # it would return an operation object with an empty name.\n # return early to avoid potential polling error.\n if operation.name is None:\n return None\n\n if args.async_:\n log.DeletedResource(cluster_ref, 'Anthos Cluster on VMware', args.async_)\n return operation\n else:\n operation_client = operations.OperationsClient()\n response = operation_client.Wait(operation)\n log.DeletedResource(cluster_ref, 'Anthos Cluster on VMware', args.async_)\n return response", "def delete_cluster(ctx, project_name, cluster_name):\n project = ctx.obj.groups.byName[project_name].get().data\n ctx.obj.groups[project.id].clusters[cluster_name].delete().data\n click.echo(\"DONE!\")", "def test_delete_cluster_network(self):\n pass", "def test_delete_hyperflex_cluster_profile(self):\n pass", "def cluster_delete():\n logger.info(\"/cluster action=\" + r.method)\n request_debug(r, logger)\n if not r.form[\"id\"] or not r.form[\"col_name\"]:\n logger.warning(\"cluster operation post without enough data\")\n response_fail[\"error\"] = \"cluster delete without enough data\"\n response_fail[\"data\"] = r.form\n return jsonify(response_fail), CODE_BAD_REQUEST\n else:\n logger.debug(\"cluster delete with id={0}, col_name={1}\".format(\n r.form[\"id\"], r.form[\"col_name\"]))\n if r.form[\"col_name\"] == \"active\":\n result = cluster_handler.delete(id=r.form[\"id\"])\n else:\n result = cluster_handler.delete_released(id=r.form[\"id\"])\n if result:\n return jsonify(response_ok), CODE_OK\n else:\n logger.debug(\"cluster deletion failed\")\n response_fail[\"error\"] = \"Failed to delete cluster {}\".format(\n r.form[\"id\"])\n return jsonify(response_fail), CODE_BAD_REQUEST", "def delete_cluster(self):\n cf_namespace_id = self.create_or_fetch_namespace()\n self.delete_action(cf_namespace_id)\n self.create_action(cf_namespace_id)\n self.invoke_action(cf_namespace_id)", "def list_cluster_response():\n return {\n \"clusters\": [\n EXAMPLE_NAME\n ]\n }", "def delete_cluster(self, cluster, *args, **kwargs):\n raise NotImplementedError", "def delete_cluster(t2_url, t2_token, id):\n response = requests.delete(f\"{t2_url}/api/clusters/{id}\", headers={ \"t2-token\": t2_token })\n if(response.status_code != 200):\n log(f\"API call to delete cluster returned error code {response.status_code}\")\n return None\n return response.json()", "def delete(id):\n cluster = clusters.get_by_id(id)\n\n if cluster is None:\n raise HTTPException(status_code=404, detail=\"Cluster not found for ID: {0}\".format(id))\n\n results = clusters.delete(cluster)\n\n if results.acknowledged:\n return {\"message\" : \"cluster deleted\"}\n raise HTTPException(status_code=400, detail=results.raw_result)", "def test_cluster_delete(self, mock_is_service_available):\n\n mock_is_service_available.return_value = True\n fake_cluster = FakeCluster(**RETURN_CLUSTER_1)\n cluster = self._create_test_cluster(\n fake_cluster, 'stack_delete', CREATE_CLUSTER_ARG_1)\n scheduler.TaskRunner(cluster.create)()\n self.m.UnsetStubs()\n self.setup_cluster_delete(cluster)\n scheduler.TaskRunner(cluster.delete)()\n 
self.assertEqual((cluster.DELETE, cluster.COMPLETE), cluster.state)\n self.m.VerifyAll()\n self.m.UnsetStubs()", "def delete(self):\n logger.info(\"/cluster action=\" + r.method)\n # request_data = r.get_json(force=True, silent=True)\n # if r.form:\n # cluster_id = r.form[\"id\"]\n # col_name = r.form[\"col_name\"]\n # else:\n # cluster_id = request_data.get(\"id\")\n # col_name = request_data.get(\"col_name\")\n # request_debug(r, logger)\n args = cluster_delete_parser.parse_args()\n cluster_id = args.get('cluster_id')\n # col_name = args.get('state')\n if not cluster_id:\n error_msg = \"缺少参数\"\n logger.warning(error_msg)\n return make_fail_resp(error=error_msg)\n else:\n logger.debug(\"cluster delete with id={0}\".format(\n cluster_id))\n try:\n cluster = ClusterModel.objects.get(id=cluster_id)\n except Exception as e:\n logger.error(e)\n return {'stat': 400, 'msg': '不存在'}\n # status = cluster.state\n delete_cluster(cluster_id=cluster_id, status='active')\n return make_ok_resp()", "def cluster_destroy(extra_args=None):\n cmd = [\"pcs\", \"cluster\", \"destroy\"]\n\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n log.debug(\"Running cluster destroy: %s\", cmd)\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)", "def test_delete_cluster_policy(self):\n pass", "def test_delete_collection_cluster_network(self):\n pass", "def clear_cluster(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Clearing cluster statistics\"\n return ret\n\n __salt__[\"trafficserver.clear_cluster\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Cleared cluster statistics\"\n return ret", "def remove(self):\n method = \"remove_cluster\"\n params = {\n \"cluster_id\": self.id\n }\n make_request = self._client.connection.make_request\n return make_request(method, params)", "def delete(self):\n # delete the named cluster\n # don't wait for operation to finish\n print(\"+ Deleting cluster {} (async).\".format(self.name_hyphenated))\n util.syscall(\"gcloud container clusters delete {} --quiet --async\".\n format(self.name))\n self.started = False\n self.deleted = True", "def describe_cluster_no_status_response():\n return {\n \"cluster\": {\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n 
\"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def test_delete_cluster_role(self):\n pass", "def test_delete_cluster_resource_quota(self):\n pass", "def describe_cluster_response():\n return {\n \"cluster\": {\n \"status\": \"ACTIVE\",\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def describe_cluster_creating_response():\n return {\n \"cluster\": {\n \"status\": \"CREATING\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {},\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def cleanup(self) -> None:\n try:\n self._cluster_client.delete_cluster(\n request={\n 'project_id': self.cluster_metadata.project_id,\n 'region': self.cluster_metadata.region,\n 'cluster_name': self.cluster_metadata.cluster_name,\n })\n except Exception as e:\n if e.code == 403:\n _LOGGER.error(\n 'Due to insufficient project permissions, '\n 'unable to clean up the default cluster: %s',\n self.cluster_metadata.cluster_name)\n raise ValueError(\n 'You cannot delete a cluster in project: {}'.format(\n self.cluster_metadata.project_id))\n elif e.code == 404:\n _LOGGER.error(\n 'Cluster does not exist: %s', self.cluster_metadata.cluster_name)\n raise ValueError(\n 'Cluster was not found: {}'.format(\n self.cluster_metadata.cluster_name))\n else:\n _LOGGER.error(\n 'Failed to delete cluster: %s', self.cluster_metadata.cluster_name)\n raise e", "def clear_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"metric\", \"clear\", \"--cluster\")\n else:\n cmd = _traffic_line(\"-C\")\n\n return _subprocess(cmd)", "def delete_cluster(cluster_id: str, sg_id: str = None):\n print(\"INFO: Deleting cluster %s\" % cluster_id)\n emr = get_emr_client()\n emr.terminate_job_flows(JobFlowIds=[cluster_id])\n print(\"INFO: Cluster 
deleted.\")\n\n print(\"INFO: Waiting before deleting SG. . .\")\n sleep(300)\n if sg_id is not None:\n delete_sg(sg_id)\n\n os.remove(META_FILE)\n os.remove(\"connection.bash\")", "def deregister_ecs_cluster(EcsClusterArn=None):\n pass", "def Run(self, args):\n cli = self.context['clusteradmin']\n msg = (self.context['clusteradmin-msgs'].\n BigtableclusteradminProjectsZonesClustersDeleteRequest(\n name=util.ClusterUrl(args)))\n result = cli.projects_zones_clusters.Delete(msg)\n log.DeletedResource(args.cluster, kind='cluster',\n details='in zone [{0}]'.format(args.zone))\n return result", "def test_delete_collection_cluster_policy(self):\n pass" ]
[ "0.75909144", "0.68696296", "0.6740679", "0.6721878", "0.66755325", "0.6675259", "0.6641943", "0.66240346", "0.6619215", "0.6527228", "0.6468657", "0.6459234", "0.64044845", "0.6308383", "0.61893874", "0.61806893", "0.6159998", "0.61259925", "0.6114047", "0.607198", "0.60609525", "0.60534066", "0.6045324", "0.5994704", "0.5987493", "0.59568727", "0.5805806", "0.5790834", "0.5782352", "0.57555944" ]
0.75442225
1
Return a string representing a presigned url
def presigned_url(): return 'https://presignedurl.test.com'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def url(self, url):\n return self.presigned_url(url)", "def presigned_url(self, url, expiration=3600, force_download=False):\n force_download = \"?force_download=1\" if force_download else \"\"\n public_url = Path(self.config.get(\"public_url\", \"\"))\n resource_url = public_url / url\n return resource_url.as_posix() + force_download", "def sign_url(self, url, expiration=None):\n if not expiration:\n expiration = self._s3_presigned_url_expiration\n\n bucket, key = self.split_url(url)\n url = self.client.generate_presigned_url(\n 'get_object',\n ExpiresIn=int(expiration),\n Params={\n 'Bucket': bucket,\n 'Key': key\n }\n )\n\n return url", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n pass", "def presign(self, s3uri, **kwargs):\n return self.exec_command('presign %s' % (s3uri), **kwargs)[0].strip()", "def create_presigned_url(bucket_name, bucket_key, expiration=3600, signature_version=s3_signature['v4']):\n s3_client = boto3.client('s3',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n config=Config(signature_version=signature_version),\n region_name=AWS_DEFAULT_REGION\n )\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket_name,\n 'Key': bucket_key},\n ExpiresIn=expiration)\n print(s3_client.list_buckets()['Owner'])\n for key in s3_client.list_objects(Bucket=bucket_name, Prefix=bucket_key)['Contents']:\n print(key['Key'])\n except ClientError as e:\n logging.error(e)\n return None\n # The response contains the presigned URL\n return response", "def generate_presigned_url(file_path):\n\n session = boto3.session.Session(\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n region_name='eu-central-1')\n s3Client = session.client('s3', config=Config(signature_version='s3v4'))\n\n # Create a URL valid for 30 
seconds.\n return s3Client.generate_presigned_url('get_object',\n Params={\n 'Bucket':\n AWS_STORAGE_BUCKET_NAME,\n 'Key':\n file_path},\n ExpiresIn=30)", "def get_presigned_url2(self, timeout):\n # requestbuilder 0.3\n self.preprocess()\n if self.__should_use_sigv4():\n # UNSIGNED-PAYLOAD is a magical string used for S3 V4 query auth.\n auth = requestbuilder.auth.aws.QueryHmacV4Auth.from_other(\n self.auth, timeout=timeout, payload_hash='UNSIGNED-PAYLOAD')\n else:\n auth = requestbuilder.auth.aws.QueryHmacV1Auth.from_other(\n self.auth, timeout=timeout)\n return self.service.get_request_url(\n method=self.method, path=self.path, params=self.params,\n auth=auth)", "def create_presigned_url_expanded(objName):\n\n # Generate a presigned URL for the S3 client method\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url('get_object',\n Params={\n 'Bucket': 'ece1779-a3-bucket',\n 'Key': objName,\n },\n ExpiresIn=30)\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL\n return response", "def get_presigned_url_for_download(file):\n url = S3_CLIENT.generate_presigned_url(\n ClientMethod='get_object',\n Params={\n 'Bucket': runtime_context.BUCKET_NAME,\n 'Key': file['id'],\n 'ResponseContentDisposition': 'attachment; filename=\"{}\"'.format(file['name']),\n 'ResponseContentType': file['type']\n },\n ExpiresIn=runtime_context.EXPIRATION\n )\n LOGGER.debug('Presigned URL generated. service=s3 method=get_object id={}'.format(file['id']))\n return url", "def generate_presigned_GET_url(\n self,\n bucket: str,\n object_name: str,\n **kwargs) -> str:\n # TODO: things like http ranges need to be explicit parameters.\n # users of this API should not need to know the argument names presented\n # to the cloud API.\n raise NotImplementedError()", "def create_presigned_url(s3_uri, expiration=86400):\n\n bucket_name, object_name = split_uri(s3_uri)\n\n # Generate a presigned URL for the S3 object\n s3_client = boto3.client(\"s3\", config=botocore.config.Config(signature_version=\"s3v4\"))\n try:\n response = s3_client.generate_presigned_url(\n \"get_object\",\n Params={\"Bucket\": bucket_name, \"Key\": object_name},\n ExpiresIn=expiration,\n )\n except botocore.exceptions.ClientError as err:\n # Soft failure.\n logger.error(\"failed to generate presigned url: %s\", err)\n return None\n\n # The response contains the presigned URL\n return response", "def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):\n pass", "def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):\n pass" ]
[ "0.82590914", "0.778759", "0.72649896", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7126", "0.7020326", "0.6789058", "0.67851526", "0.67817545", "0.6712114", "0.67071176", "0.6671495", "0.66579384", "0.6611054", "0.6611054" ]
0.83870244
0
The exception decorator adds an exception method to the decorated input_fn that catches any exception raised by the function. If an exception is caught, the output from sys.exc_info() is stored in a .exception member. If no exception is caught, input_fn.exception will be None. Call input_fn.exception after calling input_fn() to get the last call's exception.
def exception(input_fn, *args, **kwargs): if hasattr(input_fn, "exception"): raise AttributeError("Cannot decorate input_fn because it already has and 'exception' attribute") def new(*args, **kwargs): from sys import exc_info try : new.exception = None ret = input_fn(*args, **kwargs) except: new.exception = exc_info() raise return ret new.__dict__ = input_fn.__dict__ new.exception = None return new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wrap_function(self, fn):\n\n @functools.wraps(fn)\n def wrapper(*args):\n fname = fn.__name__\n if len(args) > len(self._insigs):\n raise TypeError(\n (\"{}() takes {} positional arguments but {} were \" \"given\").format(\n fname, len(self._insigs), len(args)\n )\n )\n try:\n newargs = [self._input(self._insigs[i], arg) for i, arg in enumerate(args)]\n except Exception as e:\n message = \"{}: converting input arguments for function '{}\".format(e, fname)\n raise type(e)(message) from None\n try:\n result = fn(*newargs)\n except Exception as e:\n message = \"{}: raised by {}()\".format(e, fname)\n raise type(e)(message) from e\n try:\n return self._output(self._outsig, result)\n except Exception as e:\n raise type(e)(\"{} for output of {}()\".format(e, fname)) from None\n\n return wrapper", "def _log_exception(fn):\n\n @functools.wraps(fn)\n def decorated(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception: # pylint: disable=broad-except\n err_str = traceback.format_exc()\n logging.error(f'Exception occured for {args}, {kwargs}:\\n' + err_str)\n raise\n\n return decorated", "def log_error(fn: Callable) -> Any:\n def wrapper(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n logger.error(str(e))\n raise e\n return wrapper", "def _raise_ex(fn):\n\n def _decorated(*args, **kwargs):\n v = fn(*args, **kwargs)\n if isinstance(v, Exception): raise v\n return v\n\n return _decorated", "def log_exceptions(*args, **attrs):\n\n def decorator(func):\n if not _is_enabled:\n return func\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if _context.get().call_stack:\n # we're already inside usage context\n # let it handle exception\n return func(*args, **kwargs)\n\n fn_call = FnCall(\n id=uuid.uuid4().hex, fn_name=_fn_fullname(func), start=datetime.utcnow()\n )\n try:\n return func(*args, **kwargs)\n except Exception:\n _, exc, traceback = sys.exc_info()\n\n fn_call.end = datetime.utcnow()\n\n ctx = UsageContext()\n ctx.exception = exc\n ctx.traceback = _trace_to_log(traceback)\n ctx.attributes = attrs\n ctx.completed_calls.append(fn_call)\n _produce_event(ctx)\n\n if traceback:\n raise exc.with_traceback(traceback)\n\n raise exc\n\n return wrapper\n\n if args:\n return decorator(args[0])\n\n return decorator", "def try_except(fn):\n def wrapped(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception, e:\n et, ei, tb = sys.exc_info()\n raise SerialException, \"Failed to '%s': %s\" % (fn.__name__, SerialException(e)), tb\n return wrapped", "def catch_errors(self, function: Callable) -> Callable:\n\n def wrapper(*args, **kwargs) -> Any:\n result = None\n try:\n result = function(*args, **kwargs)\n except BaseException:\n self._new_error(ExceptionInfo(*sys.exc_info()))\n return result\n\n return wrapper", "def handle_exception(function):\n def wrapper(*args, **kwargs):\n \"\"\" The wrapper function \"\"\"\n try:\n return function(*args, **kwargs)\n except Exception as ex:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n error(\"The traceback is ::::\\n\"+\"\" \\\n .join(traceback.format_exception(exc_type, exc_value,\n exc_traceback)))\n error (\"Exception Occurred: %s\" %str(ex))\n try:\n img = args[0].take_screenshot()\n info(\"check screen shot %s\" % img)\n except Exception as exc:\n info(\"not able to take screen shot : %s\" % str(exc))\n try:\n args[0].exit_app()\n except Exception as exc :\n info(\"not able to exit the app : %s\" % str(exc))\n raise Exception(str(ex))\n return wrapper", "def wrap_exceptions(fun):\n 
@functools.wraps(fun)\n def wrapper(self, *args, **kwargs):\n try:\n return fun(self, *args, **kwargs)\n except OSError as err:\n raise convert_oserror(err, pid=self.pid, name=self._name)\n return wrapper", "def 报错(自身, func):\n 自身.错误处理 = func\n return func", "def _log_exception(func):\n\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except KeyboardInterrupt:\n self = args[0]\n self.log(\"KeyboardInterrupt detected abort process\")\n except Exception:\n self = args[0]\n exc_type, exc_value, exc_traceback = sys.exc_info()\n self.log(\"\\n\", \"\".join(traceback.format_tb(exc_traceback)))\n\n return wrapper", "def exception(logger):\n def decorator(func):\n \n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except:\n # log the exception\n err = \"There was an exception in \"\n err += func.__name__\n logger.exception(err)\n \n # re-raise the exception\n raise\n return wrapper\n return decorator", "def None_if_exception(f, val):\n def decorated_f(*args, **kwargs):\n try:\n x = f(*args, **kwargs)\n except Exception:\n return None\n else:\n return x\n return decorated_f", "def exception_logger(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n logger = get_logger()\n try:\n # try to call the function\n return function(*args, **kwargs)\n except:\n # log the exception\n err = \"There was an exception in: {0:s}\".format(function.__name__)\n\n if logger is not None:\n logger.exception(err)\n else:\n print err\n return wrapper", "def try_decorator(func):\n @wraps(func)\n def wrap(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception:\n logging.exception('%r with %r and %r', func.__name__, args, kwargs)\n return wrap", "def catcher(fn):\n def wrap(*args,**kw):\n try:\n return fn(*args, **kw)\n except Exception:\n retl = []\n exc_type, exc_value, exc_traceback = sys.exc_info()\n \n retl.append(\" EEE %s failed: \" % fn.__name__)\n\n for exline in traceback.format_exception(exc_type, exc_value,\n exc_traceback):\n for line in exline.split('\\n'):\n retl.append(\" EEE %s\" % str(line))\n return retl\n return wrap", "def try_decorator(func):\n\n @wraps(func)\n def wrap(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception:\n logging.exception('%r with %r and %r', func.__name__, args, kwargs)\n\n return wrap", "def exception(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except requests.exceptions.HTTPError as e:\n log.error(e)\n except requests.exceptions.RequestException as e: \n log.error(e)\n return wrapper", "def raise_on_request_error(\n func: Callable[Concatenate[_T, _P], Awaitable[None]]\n) -> Callable[Concatenate[_T, _P], Coroutine[Any, Any, None]]:\n\n async def decorator(self: _T, *args: _P.args, **kwargs: _P.kwargs) -> None:\n \"\"\"Decorate.\"\"\"\n try:\n await func(self, *args, **kwargs)\n except RainMachineError as err:\n raise HomeAssistantError(\n f\"Error while executing {func.__name__}: {err}\",\n ) from err\n\n return decorator", "def raises(self, exception_type, function, *args, **kwargs):\n try:\n result = function(*args, **kwargs)\n self.log_error(\"{} did not throw exception {}\".format(\n function.__name__,\n exception_type.__name__\n ), None)\n return result\n except Exception as e:\n if type(e) != exception_type:\n self.log_error(\"{} did raise {}: {}\".format(\n function.__name__,\n type(e).__name__, e\n ), None)", "def proc_wrap(func, *args, **kwargs):\n try:\n return func(*args, **kwargs)\n except 
Exception as e:\n logging.exception(e)\n raise", "def safe_run(self, function: Callable) -> Callable:\n\n def wrapper(*args, **kwargs) -> Any:\n result = None\n try:\n result = function(*args, **kwargs)\n except BaseException:\n self._new_error(ExceptionInfo(*sys.exc_info()))\n\n self._show_info()\n return result\n\n return wrapper", "def __call__(self, function: BaseException):\n self._add_attr(function)\n return function", "def make_wrapped_function(self):\n decorator = self.decorator()\n\n @decorator\n def passthrough(argument):\n if isinstance(argument, Exception):\n raise argument\n return argument\n\n return passthrough", "def ipdb_on_exception(fun):\n @wraps(fun)\n def wrapper(args):\n \"\"\"Wraps function into ipdb exception handler.\"\"\"\n if args[\"--ipdb\"]:\n from ipdb import launch_ipdb_on_exception\n\n with launch_ipdb_on_exception():\n fun(args)\n else:\n try:\n result = fun(args)\n except Exception as exception:\n LOGGER.error(\n \"%s\", exception, exc_info=True\n )\n raise SystemExit(1) from exception\n else:\n return result\n\n return wrapper", "def log_traceback(fn):\n functools.wraps(fn)\n def wrapper(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as ex:\n rule_name = None\n if hasattr(fn, 'log'):\n fn.log.error(traceback.format_exc())\n rule_name = fn.name\n elif len(args) > 0 and hasattr(args[0], 'log'):\n args[0].log.error(traceback.format_exc())\n rule_name = args[0].name\n else:\n logging.getLogger(LOG_PREFIX).error(traceback.format_exc())\n import core.actions\n if hasattr(core.actions, 'NotificationAction'):\n import configuration\n if hasattr(configuration, 'admin_email') and configuration.admin_email != \"admin_email@some_domain.com\":\n core.actions.NotificationAction.sendNotification(configuration.admin_email, \"Exception: {}: [{}]\".format(rule_name, traceback.format_exc()))\n else:\n core.actions.NotificationAction.sendBroadcastNotification(\"Exception: {}: [{}]\".format(rule_name, traceback.format_exc()))\n return wrapper", "def wrapit(fn):\n def inside(dummy, *args):\n try:\n return fn(*args)\n except Exception as e:\n print(\"Error in XSLT extension: %s\" % e)\n raise\n return inside", "def bye_on_error(fn):\n def wrapper(*args, **kwargs):\n try:\n fn(*args, **kwargs)\n except Exception:\n logging.error(\n \"Hit unhandled exception. Cleaning up then rethrowing.\"\n )\n\n # This is nasty.\n args[0]._bye()('')\n\n raise\n\n return wrapper", "def picklable_exception_safe_function(function):\n if is_testing():\n setattr(function, _ATTRIBUTE_EXCEPTION_SAFE, True)\n\n return update_wrapper_extended(functools.partial(_safe_function, function), function)", "def wrap_method(self, fn):\n\n @functools.wraps(fn)\n def wrapper(self_, *args):\n fname = fn.__name__\n if len(args) > len(self._insigs):\n raise TypeError(\n (\"{}() takes {} positional arguments but {} were \" \"given\").format(\n fname, len(self._insigs) + 1, len(args) + 1\n )\n )\n try:\n newargs = [self._input(self._insigs[i], arg) for i, arg in enumerate(args)]\n except Exception as e:\n message = \"{}: converting input arguments for function '{}\".format(e, fname)\n raise type(e)(message) from None\n\n try:\n result = fn(self_, *newargs)\n except Exception as e:\n message = \"{}: raised by {}()\".format(e, fname)\n raise type(e)(message) from e\n try:\n return self._output(self._outsig, result)\n except Exception as e:\n raise type(e)(\"{} for output of {}()\".format(e, fname)) from None\n\n return wrapper" ]
[ "0.6175209", "0.61053234", "0.60425055", "0.6041987", "0.5936048", "0.5906888", "0.5871319", "0.5841565", "0.58280474", "0.58030313", "0.57741934", "0.57514983", "0.57445973", "0.56957304", "0.5662573", "0.56570697", "0.5612495", "0.5603439", "0.55952275", "0.5540759", "0.5539783", "0.55392855", "0.5522768", "0.55133784", "0.5508554", "0.5483758", "0.5468117", "0.5410502", "0.5360824", "0.5351039" ]
0.81485933
0
Encrypts some data (aligns to 64 bytes, if needed).
def encryptData(self, key, iv, data, align = True): if((len(data) % self.align) != 0 and align): return AES.new(key, AES.MODE_CBC, iv).encrypt(data + ("\x00" * (self.align - (len(data) % self.align)))) else: return AES.new(key, AES.MODE_CBC, iv).encrypt(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _encrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n\n # Pad to 16 bytes for AES CBC\n for i in range(16 - (len(data) % 16)):\n data += b'\\0'\n\n return cipher.encrypt(data)", "def Encrypt(self, data):\n\n if len(data) % 16 != 0:\n data += ' ' * (16 - len(data) % 16)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n data = es.encrypt(data)\n data = base64.b64encode(data)\n return data", "def encrypt_data ( aes_key, data ) :\n salt = Crypto.Random.new( ).read( Crypto.Cipher.AES.block_size )\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n encrypted_data = cipher.encrypt( data )\n\n return encode_data( salt + encrypted_data )", "def encrypt(self, data):\n if not data:\n return ''\n data = self._pad_data(data)\n return self._crypt(data, self.ENCRYPT)", "def Encrypt(self, data):\n data = self.__Pad(data)\n iv_bytes = util.RandBytes(self.block_size)\n ciph_bytes = AES.new(self.key_bytes, AES.MODE_CBC, iv_bytes).encrypt(data)\n msg_bytes = self.Header() + iv_bytes + ciph_bytes\n sig_bytes = self.hmac_key.Sign(msg_bytes) # Sign bytes\n return msg_bytes + sig_bytes", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def aes_encrypt(data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = pad(data)\r\n return cipher.encrypt(padded_data)", "def encrypt_data(data, encryption_key):\n assert isinstance(data, str)\n obj = AES.new(encryption_key, AES.MODE_CBC, 'This is an IV456')\n padded = Pad.pad(data.encode())\n ciphertext = obj.encrypt(padded)\n return ciphertext.hex()", "def pad(data):\r\n bytes_to_pad = AES.block_size - len(data) % AES.block_size\r\n return data + (bytes_to_pad * chr(bytes_to_pad))", "def encrypt(self, data):\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return b64encode(key_public.Encrypt(data))", "def encrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_encrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def __pad(self, data):\n return data + (AES.block_size - len(data) % AES.block_size) * \\\n chr(AES.block_size - len(data) % AES.block_size)", "def encrypt(data, key, iv, save_path=None):\n if isinstance(data, str):\n with open(data, 'rb') as f:\n data = f.read()\n length = str(len(data))\n length = _pad16(length)\n\n key = _pad16(key)\n iv = _pad16(iv)\n data = _pad16(data)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.encrypt(data)\n data = length + data\n if save_path:\n with open(save_path, 'wb') as f:\n f.write(data)\n return data", "def encrypt_data(data, encryption_key, iv=None):\n # Generate a random iv\n if iv is None:\n iv = get_random_bytes(IV_SIZE)\n generate_iv = True\n iv_length = IV_SIZE\n else:\n generate_iv = False\n iv_length = len(iv)\n cipher = AES.new(encryption_key, AES.MODE_GCM, iv)\n ciphered_data, tag = cipher.encrypt_and_digest(bytes(data))\n if generate_iv:\n # if iv passed by user is None, random iv generated\n # above is prepended in encrypted data\n # iv + Cipher + Tag\n result = iv + ciphered_data + tag\n else:\n # Cipher + Tag\n result = ciphered_data + tag\n return result", "def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data", "def encrypt(self, sensor_data):\r\n \r\n # set encryption parameters\r\n encryption1 = aes(self.ivkey, 2, self.staticiv)\r\n encryption2 = aes(self.datakey, 2, self.iv)\r\n # 
encrypt data\r\n self.encrypted_data = encryption2.encrypt(sensor_data) \r\n self.encrypted_iv = encryption1.encrypt(self.iv)\r\n self.encrypted_nodeid = encryption2.encrypt(self.nodeid)\r\n \r\n self.iv = bytes(random.getrandbits(8) for _ in range(16)) # changes every time\r", "def Encrypt(self, data):\n data = self.__Encode(data)\n ciph_bytes = self.key.encrypt(data, None)[0] # PyCrypto returns 1-tuple\n return self.Header() + ciph_bytes", "def encrypt(self):\n # Generate a randomized initialization vector\n iv = Random.new().read(AES.block_size)\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n # Add a buffer so that the plaintext is a multiple of 16 characters in length\n pt_len = len(self.plaintext)\n buffer_size = AES.block_size - pt_len % AES.block_size\n strmsg = self.plaintext + \" \" * buffer_size\n return cipher.encrypt(str.encode(strmsg)), iv", "def encrypt(self, key, data, mode, padding):\n # this can be disabled by _disable_encryption, so pylint: disable=method-hidden\n try:\n block_size = self.cipher.block_size\n iv_len = block_size // 8\n iv = os.urandom(iv_len)\n\n encryptor = Cipher(self.cipher(key), mode.build(iv), backend=default_backend()).encryptor()\n padder = padding.build(block_size).padder()\n\n padded_data = padder.update(data) + padder.finalize()\n return iv + encryptor.update(padded_data) + encryptor.finalize()\n except Exception:\n error_message = \"Encryption failed\"\n _LOGGER.exception(error_message)\n raise EncryptionError(error_message)", "def encrypt_data(self, params):\n raise NotImplementedError", "def _EncryptData(self, data):\n if isinstance(data, str):\n data = data.encode('utf-8')\n encrypted_data = self._gpg.encrypt(\n data,\n self.args.target_key,\n sign=self._gpg.list_keys(True)[0]['fingerprint'],\n always_trust=False)\n if not encrypted_data.ok:\n raise Exception('Failed to encrypt data! Log: %s' % encrypted_data.stderr)\n return encrypted_data.data", "def Encrypt(key, value):\n key = key.zfill(32)[:32]\n value = Pad(value, 16)\n aes = AES.new(key, AES.MODE_ECB)\n encrypted = aes.encrypt(value)\n return base64.b64encode(encrypted)", "def encrypt(self, plaintext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def encrypt_data(self, filename, data, master_pass, website): \n\n \"\"\"Concatenated extra characters in the case that the master password\n is less than 16 characters. However, this isn't a big safety trade off\n as the full length master password is hashed and checked for.\"\"\"\n concatenated_master = master_pass + \"================\"\n\n key = concatenated_master[:16].encode(\"utf-8\")\n\n cipher = AES.new(key, AES.MODE_EAX)\n\n \"\"\"A value that must never be reused for any other encryption done with\n this key saved alongside encrypted password. Converted to hexadecimal\n to be saved in DB. 
Later converted back to bytes to decode data\"\"\"\n nonce = cipher.nonce.hex()\n\n data_to_encrypt = data.encode(\"utf-8\")\n # again, bytes is invalid data for JSON so we convert it\n encrypted_data = cipher.encrypt(data_to_encrypt).hex()\n\n self.__save_password(filename, encrypted_data, nonce, website)", "def encrypt(algorithm, key, plaintext, associated_data, iv):\n encryptor = Encryptor(algorithm, key, associated_data, iv)\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return EncryptedData(encryptor.iv, ciphertext, encryptor.tag)", "def encrypt(plaintext):\n # Pad plaintext\n plaintext = pad(plaintext)\n\n # AES encrypt\n iv = Random.new().read(BS)\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return iv + aes.encrypt(plaintext)", "def __WriteEncrypted(self, data, pad=False):\n if pad:\n data = self.__key._Pad(data)\n\n encrypted_bytes = self.__cipher.encrypt(data)\n self.__output_stream.write(encrypted_bytes)\n self.__hmac_stream.Update(encrypted_bytes)", "def encrypt(self, raw, use_base64=True, pad=True):\n encryptor = self.cipher.encryptor()\n if pad:\n raw = self._pad(raw)\n crypted_text = encryptor.update(raw) + encryptor.finalize()\n return base64.b64encode(crypted_text) if use_base64 else crypted_text", "def encrypt(self, message):\n\n message = self._pad(message)\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return base64.b64encode(iv + cipher.encrypt(message)).decode('utf-8')", "def encrypt(self, message):\n message = self._padding(message, self._block_size)\n initialization_vector = Random.new().read(self._block_size)\n cipher = AES.new(self._key, AES.MODE_CBC, initialization_vector)\n return base64.b64encode(initialization_vector +\n cipher.encrypt(message))" ]
[ "0.7885185", "0.7712841", "0.7654952", "0.7337602", "0.7299068", "0.7281445", "0.71950537", "0.7059179", "0.7026982", "0.69452465", "0.68957233", "0.684373", "0.6826722", "0.67560023", "0.6729065", "0.6721459", "0.66718626", "0.6671148", "0.66647303", "0.66416234", "0.66137403", "0.66007113", "0.65541214", "0.654768", "0.65416074", "0.6526841", "0.6521634", "0.65056735", "0.64678514", "0.6452309" ]
0.77255154
1
Return the current font.
def font(self): return self.m_font
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def GetFont(self):\r\n\r\n return self._font", "def GetFont(self):\r\n\r\n return self._font", "def GetFont(self):\r\n\r\n return self._font", "def _font(self):\n\treturn self.m_gdfont", "def GetActiveFontInfo(self):\n\t\treturn self.acad.ActiveDocument.ActiveTextStyle.GetFont()", "def GetSelectedFont(self):\r\n\r\n return self._selected_font", "def GetSelectedFont(self):\r\n\r\n return self._selected_font", "def get_text_font ( self, object ):\n return self.text_font", "def get_font(self, option):\n return get_font(option=option)", "def selectFont():\n font,ok = QtGui.QFontDialog.getFont()\n if ok:\n return font\n else:\n return None", "def get_text_font ( self, object ):\n if self._is_selected( object ):\n return self.selected_text_font\n return self.text_font", "def getFontFace(self):\n return self.fontFace", "def getfont(self):\r\n if self.font is None:\r\n self.font = cv2.FONT_HERSHEY_SIMPLEX\r\n return self.font", "def get_font(self, font_family: str, mode: str) -> PDFFont:\n family = self.fonts[font_family]\n return family['n'] if mode not in family else family[mode]", "def base_font(self) -> str:\n pass", "def GetFont(*args, **kwargs):\n return _gdi_.StockGDI_GetFont(*args, **kwargs)", "def GetDefaultFont(self):\n return wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL)", "def get_font_dict(f):\n return tk_font.Font(font=f).actual()", "def getFont( self, par, path ):\n\n return self.db.getFontPar( par, path )", "def GetMeasuringFont(self):\r\n\r\n return self._measuring_font", "def GetMeasuringFont(self):\r\n\r\n return self._measuring_font", "def GetNormalFont(self):\r\n\r\n return self._normal_font", "def GetNormalFont(self):\r\n\r\n return self._normal_font", "def _set_default_font(cls):\n if platform.system() == \"Linux\":\n for family in (\"DejaVu Sans\", \"Noto Sans\", \"Nimbus Sans\"):\n if family in tk.font.families():\n logger.debug(\"Setting default font to: '%s'\", family)\n tk.font.nametofont(\"TkDefaultFont\").configure(family=family)\n tk.font.nametofont(\"TkHeadingFont\").configure(family=family)\n tk.font.nametofont(\"TkMenuFont\").configure(family=family)\n break\n return tk.font.nametofont(\"TkDefaultFont\").configure()[\"family\"]", "def get_fonts():\r\n return pygame.font.get_fonts()", "def GetFont(*args, **kwargs):\n return _gdi_.DC_GetFont(*args, **kwargs)", "def from_wx_font ( self, font ):\n return font" ]
[ "0.8364022", "0.8364022", "0.8364022", "0.83319396", "0.83319396", "0.83319396", "0.7937106", "0.784561", "0.78324264", "0.78324264", "0.75864154", "0.75253946", "0.74566376", "0.73828804", "0.7366602", "0.728598", "0.7264327", "0.72018814", "0.71568465", "0.71465045", "0.70899373", "0.7089153", "0.7045824", "0.7045824", "0.69870746", "0.69870746", "0.6945323", "0.69394535", "0.6898372", "0.68705183" ]
0.85015863
0
Return the low-level gd font.
def _font(self): return self.m_gdfont
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def font(self):\n\treturn self.m_font", "def font(self):\n\treturn self.m_font", "def get_font(self, option):\n return get_font(option=option)", "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def GetDefaultFont(self):\n return wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL)", "def GetFont(*args, **kwargs):\n return _gdi_.StockGDI_GetFont(*args, **kwargs)", "def getFont(fontsize):\n\t\t\n\tfontPath = os.path.join(GATEWAYPATH, \"pilfonts\", \"FreeSans.ttf\")\n\ttry:\n\t\tfont = ImageFont.truetype ( fontPath, fontsize )\t\n\texcept:\n\t\tfont = ImageFont.load('%s/pilfonts/B%0.2d.pil' % (GATEWAYPATH, 24) )\n\treturn font", "def GetFont(*args, **kwargs):\n return _gdi_.DC_GetFont(*args, **kwargs)", "def base_font(self) -> str:\n pass", "def get_font_options(self): # real signature unknown; restored from __doc__\n pass", "def GetFont(self):\r\n\r\n return self._font", "def GetFont(self):\r\n\r\n return self._font", "def GetFont(self):\r\n\r\n return self._font", "def get_text_font ( self, object ):\n return self.text_font", "def font(size=20, name=None):\n name = name or \"regular\"\n path = ROOT_DIR / \"wclib\" / \"assets\" / (name + \".ttf\")\n return pygame.font.Font(path, size)", "def get_fonts():\r\n return pygame.font.get_fonts()", "def getfont(self):\r\n if self.font is None:\r\n self.font = cv2.FONT_HERSHEY_SIMPLEX\r\n return self.font", "def get_font_map(self): # real signature unknown; restored from __doc__\n pass", "def GetNormalFont(self):\r\n\r\n return self._normal_font", "def GetNormalFont(self):\r\n\r\n return self._normal_font", "def get_font_dict(f):\n return tk_font.Font(font=f).actual()", "def loadSystemFont(name, size):\n\n try:\n f = pygame.font.SysFont(name,size)\n except error, message:\n print \"Cannot load font: \", name\n raise SystemExit, message\n return f", "def setHardwareFont():\n dislin.hwfont()", "def defaultFont(self, p_int=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def loadDefaultFont(size):\n\n try:\n f = pygame.font.Font(None,size)\n except error, message:\n print \"Cannot load the default font\"\n raise SystemExit, message\n return f", "def GetNativeFontInfo(*args, **kwargs):\n return _gdi_.Font_GetNativeFontInfo(*args, **kwargs)", "def from_wx_font ( self, font ):\n return font", "def get_named_font(*a, **kw):\n return get_named_font(*a, **kw)" ]
[ "0.7086381", "0.7086381", "0.6938595", "0.69367456", "0.69367456", "0.69367456", "0.6928394", "0.6858252", "0.6838323", "0.67379063", "0.6708655", "0.66984797", "0.663425", "0.663425", "0.663425", "0.6609488", "0.6609394", "0.65845054", "0.65330255", "0.65114707", "0.64931756", "0.64931756", "0.647657", "0.64658386", "0.6453607", "0.6434244", "0.6399949", "0.63742113", "0.6323113", "0.6265222" ]
0.80946225
0
Construct a new truetype font. The `font' parameter specifies the file name of a truetype font, and `pointsize' specifies the point size to use.
def __init__(self, font, pointsize): self.m_font = font self.m_pointsize = pointsize
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(font_name, point):\n return pygame.font.SysFont(font_name, int(point))", "def truetype(font=None, size=10, index=0, encoding=\"\",\r\n layout_engine=None):\r\n if not freetype_installed:\r\n raise NotImplementedError(\"freetype-py is not installed or the libfreetype.dll/dylib/so is missing, if freetype-py is not installed, install it with pip install freetype-py\")\r\n fontpath = font\r\n font = FreeTypeFont(font, size)\r\n if font.font is not None:\r\n return font.font\r\n else:\r\n ttf_filename = os.path.basename(fontpath)\r\n dirs = []\r\n if sys.platform == \"win32\":\r\n # check the windows font repository\r\n # NOTE: must use uppercase WINDIR, to work around bugs in\r\n # 1.5.2's os.environ.get()\r\n windir = os.environ.get(\"WINDIR\")\r\n if windir:\r\n dirs.append(os.path.join(windir, \"Fonts\"))\r\n elif sys.platform in ('linux', 'linux2'):\r\n lindirs = os.environ.get(\"XDG_DATA_DIRS\", \"\")\r\n if not lindirs:\r\n # According to the freedesktop spec, XDG_DATA_DIRS should\r\n # default to /usr/share\r\n lindirs = '/usr/share'\r\n dirs += [os.path.join(lindir, \"fonts\")\r\n for lindir in lindirs.split(\":\")]\r\n elif sys.platform == 'darwin':\r\n dirs += ['/Library/Fonts', '/System/Library/Fonts',\r\n os.path.expanduser('~/Library/Fonts')]\r\n ext = os.path.splitext(ttf_filename)[1]\r\n first_font_with_a_different_extension = None\r\n for directory in dirs:\r\n for walkroot, walkdir, walkfilenames in os.walk(directory):\r\n for walkfilename in walkfilenames:\r\n if ext and walkfilename == ttf_filename:\r\n fontpath = os.path.join(walkroot, walkfilename)\r\n font = FreeTypeFont(fontpath, size)\r\n return font.font\r\n elif (not ext and\r\n os.path.splitext(walkfilename)[0] == ttf_filename):\r\n fontpath = os.path.join(walkroot, walkfilename)\r\n if os.path.splitext(fontpath)[1] == '.ttf':\r\n font = FreeTypeFont(fontpath, size)\r\n return font.font\r\n raise IOError(\"cannot find font file\")", "def _create_font(cls, font, size):\n if font[-4:] in (\".ttf\", \".otf\"):\n return pygame.font.Font(font, size)\n else:\n return pygame.font.SysFont(font, size)", "def named_font(self, point):\n return Font.create(self.name, point * self.scale)", "def load(filename, size=12):\r\n # face = Face('./VeraMono.ttf')\r\n face = freetype.Face(filename)\r\n face.set_char_size(size*size)\r\n return face", "def generate_new_font(self, font_file_name, prepend=\"gap_\"):\n\n f = open(font_file_name)\n out_font_filename = prepend + font_file_name\n fo = open(out_font_filename, \"wb\")\n\n fo.write(f.readline())\n fo.write(f.readline())\n fo.write(f.readline())\n\n line = f.readline().split(\" \")\n out_texture_filename = prepend + line[0]\n fo.write(\"%s %s %s\\n\" % (out_texture_filename, self.w, self.h))\n texture_filename = line[0]\n texture_size = ( int(line[1]), int(line[2]) )\n self.open_images(texture_filename, texture_size[0], texture_size[1])\n for i in range(256):\n line = f.readline().split(\" \")\n # ascii, char_x, char_y, byteWidth, byteHeight, xOffset, yOffset, screenWidth, screenHeight\n if i != int(line[0]): raise ValueError, \"font loading error\"\n x, y = (int(line[1]), int(line[2]))\n w, h = (int(line[3]), int(line[4]))\n\n newpos = self.copy_char(x, y, w, h)\n line[1] = str(newpos[0])\n line[2] = str(newpos[1])\n fo.write(\" \".join(line))\n\n line = f.readline()\n fo.write(line)\n line = line.split(\" \")\n\n self.image_out.save(out_texture_filename)\n print \"wrote '%s' and '%s'\" % (out_font_filename, out_texture_filename)", "def load_font(fontSize):\n 
f1='/usr/share/fonts/corefonts/arialbd.ttf' \n f2='/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'\n if os.path.isfile(f1): font=ImageFont.truetype(f1,fontSize)\n if os.path.isfile(f2): font=ImageFont.truetype(f2,fontSize)\n return font", "def CreateFont(name, size):\r\n\ttry:\r\n\t\tf = pygame.font.Font(name, size)\r\n\t\treturn f\r\n\texcept IOError:\r\n\t\treturn pygame.font.SysFont(name, size)", "def FFontFromPixelSize(*args, **kwargs):\n if kwargs.has_key('faceName'): kwargs['face'] = kwargs['faceName'];del kwargs['faceName']\n val = _gdi_.new_FFontFromPixelSize(*args, **kwargs)\n return val", "def FontFromPixelSize(*args, **kwargs):\n if kwargs.has_key('faceName'): kwargs['face'] = kwargs['faceName'];del kwargs['faceName']\n val = _gdi_.new_FontFromPixelSize(*args, **kwargs)\n return val", "def fromTeXStyle(cls, style):\n name = style.get('FontName', None)\n if name is None:\n return None\n if name.startswith(\"[\") and name.endswith(\"]\") and os.path.exists(name[1:-1]):\n f = TTFont(None, filename=name[1:-1])\n name = f.family\n styles = []\n res = cls(name.strip(), \" \".join(styles), isCtxtSpace=(style.get(\"ztexFontGrSpace\", \"0\")!=\"0\"))\n res.updateTeXFeats(style.get(\"ztexFontFeatures\", \"\"))\n return res", "def FFont(*args, **kwargs):\n if kwargs.has_key('faceName'): kwargs['face'] = kwargs['faceName'];del kwargs['faceName']\n val = _gdi_.new_FFont(*args, **kwargs)\n return val", "def load_font(self, filename: str) -> None:\n try:\n from fontTools import ttLib\n except:\n raise ImportError(\n 'You need to install library fonttools to add new fonts: '\n 'pip install fonttools'\n )\n self.filename = str(Path(filename))\n self.font = ttLib.TTFont(self.filename)\n\n # TODO: cmap needs to be modifiedfor this to work\n self.cmap = self.font['cmap'].getcmap(3,1).cmap\n self.glyph_set = self.font.getGlyphSet()\n\n self.font_descriptor = self._get_font_descriptor()", "def create_font(font_name, fit = True):\n font = {}\n try:\n numbers = Image.open(fonts_path + font_name + \".jpg\")\n if fit:\n numbers = images.fit_to_display(numbers, True)\n width, height = numbers.size\n font[\"d\"] = Image.open(fonts_path + \"degree.jpg\")\n font[\"d\"] = images.fit_to_display(font[\"d\"])\n font[\"p\"] = Image.open(fonts_path + \"percent.jpg\")\n font[\"p\"] = images.fit_to_display(font[\"p\"])\n font[\"m\"] = Image.open(fonts_path + \"am.jpg\")\n font[\"m\"] = images.fit_to_display(font[\"m\"], True)\n font[\"a\"] = Image.open(fonts_path + \"pm.jpg\")\n font[\"a\"] = images.fit_to_display(font[\"a\"], True)\n d_w, d_h = font[\"d\"].size\n font[\"d\"] = font[\"d\"].crop((10,0,d_w-10,d_w))\n box_width = float(width)/10 \n #Crop out each character in the provided image and save that to a dictionary\n for i in range(0, 10):\n box = [int(round(i*(box_width))), 0, int(round((i + 1)*(box_width))), height]\n #Checks if a subrectangle passes the width of the image, and shortens it if necessary\n if box[3] > width:\n box[3] = width\n \n box = tuple(box)\n font[str(i)] = numbers.crop(box) \n return font\n except IOError:\n print(\"Specified font file: %s.jpg cannot be found at: %s\" % (font_name,fonts_path))", "def font(size=20, name=None):\n name = name or \"regular\"\n path = ROOT_DIR / \"wclib\" / \"assets\" / (name + \".ttf\")\n return pygame.font.Font(path, size)", "def getFont(fontsize):\n\t\t\n\tfontPath = os.path.join(GATEWAYPATH, \"pilfonts\", \"FreeSans.ttf\")\n\ttry:\n\t\tfont = ImageFont.truetype ( fontPath, fontsize )\t\n\texcept:\n\t\tfont = 
ImageFont.load('%s/pilfonts/B%0.2d.pil' % (GATEWAYPATH, 24) )\n\treturn font", "def __init__(self, *args, **kwargs):\n if kwargs.has_key('faceName'): kwargs['face'] = kwargs['faceName'];del kwargs['faceName']\n _gdi_.Font_swiginit(self,_gdi_.new_Font(*args, **kwargs))", "def __init__(self, absoluteSize=False, size='', style='', family='', weight='', GmlTextSymbols=None, GmlSvgParameters=None, GmlColour=None, *args, **kw_args):\n #: True if 'size' is expressed in absolute values. Default is false.\n self.absoluteSize = absoluteSize\n\n #: The size to use for the font in pixels. The default is defined to be 10 pixels, though various systems may have restrictions on what sizes are available.\n self.size = size\n\n #: The style to use for a font. The allowed values are 'normal', 'italic', and 'oblique'.\n self.style = style\n\n #: Family name of a font to use. Allowed values are system-dependent. Any number of font-family attributes may be given and they are assumed to be in preferred order.\n self.family = family\n\n #: The amount of weight or boldness to use for a font. Allowed values are 'normal' and 'bold'.\n self.weight = weight\n\n self._GmlTextSymbols = []\n self.GmlTextSymbols = [] if GmlTextSymbols is None else GmlTextSymbols\n\n self._GmlSvgParameters = []\n self.GmlSvgParameters = [] if GmlSvgParameters is None else GmlSvgParameters\n\n self._GmlColour = None\n self.GmlColour = GmlColour\n\n super(GmlFont, self).__init__(*args, **kw_args)", "def fl_set_font(fontnum, size):\n _fl_set_font = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_font\",\\\n None, [cty.c_int, cty.c_int],\\\n \"\"\"void fl_set_font(int numb, int size)\"\"\")\n library.check_if_flinitialized()\n i_fontnum = library.convert_to_intc(fontnum)\n i_size = library.convert_to_intc(size)\n library.keep_elem_refs(fontnum, i_fontnum, size, i_size)\n _fl_set_font(i_fontnum, i_size)", "def loadDefaultFont(size):\n\n try:\n f = pygame.font.Font(None,size)\n except error, message:\n print \"Cannot load the default font\"\n raise SystemExit, message\n return f", "def setTTFont(font='default'):\n if font == 'default':\n font = 'Times New Roman' \n dislin.winfnt(font)", "def _create_encoding(font, format, base):\n font = font.label(codepoint_from=font.encoding)\n enc = base.Struct(**_ENCODING_TABLE)(\n default_char=int(font.get_default_glyph().codepoint or 0)\n )\n codepoints = font.get_codepoints()\n if not codepoints:\n raise ValueError('No storable code points in font.')\n byte_length = len(max(codepoints))\n if byte_length > 2:\n logging.warning(\n 'Code points greater than 2 bytes cannot be stored in PCF.'\n )\n elif byte_length == 1:\n enc.min_byte1 = enc.max_byte1 = 0\n enc.min_char_or_byte2 = ord(min(codepoints))\n enc.max_char_or_byte2 = ord(max(codepoints))\n elif byte_length == 2:\n byte2 = [_cp[0] for _cp in codepoints if len(_cp) == 2]\n if any(len(_cp) == 1 for _cp in codepoints):\n byte2.append(0)\n enc.min_char_or_byte2 = min(byte2)\n enc.max_char_or_byte2 = max(byte2)\n byte1 = tuple(_cp[-1] for _cp in codepoints)\n enc.min_byte1 = min(byte1)\n enc.max_byte1 = max(byte1)\n glyph_indices = []\n for cp in _generate_codepoints(enc):\n try:\n index = font.get_index(cp)\n except KeyError:\n # -1 means 'not used'\n index = -1\n glyph_indices.append(index)\n glyph_indices = (base.int16 * len(glyph_indices))(*glyph_indices)\n table_bytes = (\n bytes(le.uint32(format))\n + bytes(enc)\n + bytes(glyph_indices)\n )\n return table_bytes, format", "def clone_font(font):\n \n point_size = font.GetPointSize()\n 
family = font.GetFamily()\n style = font.GetStyle()\n weight = font.GetWeight()\n underline = font.GetUnderlined()\n face_name = font.GetFaceName()\n\n clone = wx.Font(\n point_size, family, style, weight, underline, face_name,\n )\n\n return clone", "def CreateFont(*args):\n return _gdi_.GraphicsContext_CreateFont(*args)", "def load_font(self, path: str, font_family: str, mode: str='n') -> None:\n font = PDFTrueTypeFont('F'+str(self.index), path)\n if not font_family in self.fonts:\n self.fonts[font_family] = {'n': font}\n self.fonts[font_family][mode] = font\n self.index += 1", "def loadCustomFont(path,name,size):\n\n fullname = os.path.join(path,name)\n f = pygame.font.Font(fullname,size)\n return f", "def label_maker(string, size, font='Courier'):\n label = GLabel(string)\n label.font = str(font) + '-' + str(size)\n return label", "def __init__(self, font, color=(255,255,255,255)):\r\n if not font.endswith('.png'):\r\n font += '.png'\r\n super(Pngfont, self).__init__(\"fonts/%s\" % font)\r\n self.font = font\r\n pixels = self.im.load()\r\n\r\n self.glyph_table = {}\r\n # Extract font information from top scanline of font image; create width,\r\n # height, tex_coord and vertices for each character.\r\n for v in range(95):\r\n x = (pixels[v * 2, 0][0] * 2.0) / self.ix\r\n y = ((pixels[v * 2, 0][1] + 8) * 2.0) / self.iy\r\n width = float(pixels[v * 2 + 1, 0][0])\r\n height = float(pixels[v * 2 + 1, 0][1])\r\n width_scale = width / self.ix\r\n height_scale = height / self.iy\r\n\r\n self.glyph_table[v] = [width, height,\r\n [(x + width_scale, y - height_scale),\r\n (x, y - height_scale),\r\n (x, y),\r\n (x + width_scale, y)],\r\n [(width, 0, 0), (0, 0, 0), (0, -height, 0), (width, -height, 0)]]\r\n\r\n alph = self.im.split()[-1] #keep alpha\r\n draw = ImageDraw.Draw(self.im)\r\n draw.rectangle((0, 1, self.ix, self.iy), fill=color)\r\n self.im.putalpha(alph)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = self.im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()", "def str_font ( self, font ):\n weight = { wx.LIGHT: ' Light',\n wx.BOLD: ' Bold' }.get( font.GetWeight(), '' )\n style = { wx.SLANT: ' Slant',\n wx.ITALIC:' Italic' }.get( font.GetStyle(), '' )\n return '%s point %s%s%s' % (\n font.GetPointSize(), font.GetFaceName(), style, weight )", "def _instantiateFont(self, path):\n return self._fontClass(path,\n libClass=self._libClass,\n kerningClass=self._kerningClass,\n groupsClass=self._groupsClass,\n infoClass=self._infoClass,\n featuresClass=self._featuresClass,\n glyphClass=self._glyphClass,\n glyphContourClass=self._glyphContourClass,\n glyphPointClass=self._glyphPointClass,\n glyphComponentClass=self._glyphComponentClass,\n glyphAnchorClass=self._glyphAnchorClass)" ]
[ "0.65943176", "0.6507712", "0.6302644", "0.6169613", "0.60154456", "0.5916379", "0.58347595", "0.5834142", "0.5751763", "0.55977577", "0.55713534", "0.55640525", "0.5546145", "0.5519924", "0.55184734", "0.54587924", "0.54574084", "0.54460585", "0.5384534", "0.53706086", "0.536755", "0.5332757", "0.5308122", "0.5280713", "0.52312136", "0.5222864", "0.52128273", "0.5183736", "0.51745236", "0.5171841" ]
0.6511013
1
Return the current font.
def font(self): return self.m_font
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def GetFont(self):\r\n\r\n return self._font", "def GetFont(self):\r\n\r\n return self._font", "def GetFont(self):\r\n\r\n return self._font", "def _font(self):\n\treturn self.m_gdfont", "def GetActiveFontInfo(self):\n\t\treturn self.acad.ActiveDocument.ActiveTextStyle.GetFont()", "def GetSelectedFont(self):\r\n\r\n return self._selected_font", "def GetSelectedFont(self):\r\n\r\n return self._selected_font", "def get_text_font ( self, object ):\n return self.text_font", "def get_font(self, option):\n return get_font(option=option)", "def selectFont():\n font,ok = QtGui.QFontDialog.getFont()\n if ok:\n return font\n else:\n return None", "def get_text_font ( self, object ):\n if self._is_selected( object ):\n return self.selected_text_font\n return self.text_font", "def getFontFace(self):\n return self.fontFace", "def getfont(self):\r\n if self.font is None:\r\n self.font = cv2.FONT_HERSHEY_SIMPLEX\r\n return self.font", "def get_font(self, font_family: str, mode: str) -> PDFFont:\n family = self.fonts[font_family]\n return family['n'] if mode not in family else family[mode]", "def base_font(self) -> str:\n pass", "def GetFont(*args, **kwargs):\n return _gdi_.StockGDI_GetFont(*args, **kwargs)", "def GetDefaultFont(self):\n return wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL)", "def get_font_dict(f):\n return tk_font.Font(font=f).actual()", "def getFont( self, par, path ):\n\n return self.db.getFontPar( par, path )", "def GetMeasuringFont(self):\r\n\r\n return self._measuring_font", "def GetMeasuringFont(self):\r\n\r\n return self._measuring_font", "def GetNormalFont(self):\r\n\r\n return self._normal_font", "def GetNormalFont(self):\r\n\r\n return self._normal_font", "def _set_default_font(cls):\n if platform.system() == \"Linux\":\n for family in (\"DejaVu Sans\", \"Noto Sans\", \"Nimbus Sans\"):\n if family in tk.font.families():\n logger.debug(\"Setting default font to: '%s'\", family)\n tk.font.nametofont(\"TkDefaultFont\").configure(family=family)\n tk.font.nametofont(\"TkHeadingFont\").configure(family=family)\n tk.font.nametofont(\"TkMenuFont\").configure(family=family)\n break\n return tk.font.nametofont(\"TkDefaultFont\").configure()[\"family\"]", "def get_fonts():\r\n return pygame.font.get_fonts()", "def GetFont(*args, **kwargs):\n return _gdi_.DC_GetFont(*args, **kwargs)", "def from_wx_font ( self, font ):\n return font" ]
[ "0.8364022", "0.8364022", "0.8364022", "0.83319396", "0.83319396", "0.83319396", "0.7937106", "0.784561", "0.78324264", "0.78324264", "0.75864154", "0.75253946", "0.74566376", "0.73828804", "0.7366602", "0.728598", "0.7264327", "0.72018814", "0.71568465", "0.71465045", "0.70899373", "0.7089153", "0.7045824", "0.7045824", "0.69870746", "0.69870746", "0.6945323", "0.69394535", "0.6898372", "0.68705183" ]
0.85015863
1
Set the current font.
def set_font(self, font): self.m_font = font
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_font_family(self, font):\n self.parent.setCurrentFont(font)", "def set_font(self, font: str):\n self.font = font", "def SetFont(self, font):\r\n \r\n self._font = font", "def SetFont(self, font):\r\n\r\n self._font = font", "def set_font(self, font):\n\ttry:\n\t self.m_gdfont = self._fonts[font.lower()]\n\t self.m_font = font\n\texcept KeyError:\n\t raise ValueError, 'Illegal font name.'", "def SetFont(self, font):\r\n\r\n self._font = font\r\n return self", "def set_font(self, font):\n this.font = font\n # Signal to the application that we need a resize\n this.chsize()", "def setFont(self, font):\n self.edit.document().setDefaultFont(font)\n self.edit.setFont(font)\n super(BaseConsole, self).setFont(font)", "def SetFont(self, font):\r\n \r\n wx.PyPanel.SetFont(self, font)\r\n\r\n selectedFont = wx.Font(font.GetPointSize(), font.GetFamily(),\r\n font.GetStyle(), wx.BOLD, font.GetUnderlined(),\r\n font.GetFaceName(), font.GetEncoding())\r\n\r\n self.SetNormalFont(font)\r\n self.SetSelectedFont(selectedFont)\r\n self.SetMeasuringFont(selectedFont)\r\n\r\n return True", "def SetFont(self, font): \r\n\r\n res = wx.PyControl.SetFont(self, font)\r\n\r\n if self._art:\r\n self._art.SetFont(font)\r\n \r\n return res", "def SetSelectedFont(self, font):\r\n\r\n self._art.SetSelectedFont(font)", "def set_font(self, font='A'):\n upper = font.upper()\n if upper == 'B':\n self._set_print_mode(self.FONT_MASK)\n elif upper == 'A':\n self._unset_print_mode(self.FONT_MASK)\n else:\n self._unset_print_mode(self.FONT_MASK)", "def SetSelectedFont(self, font):\r\n \r\n self._selected_font = font", "def set_font(self, font):\n q_font = q_font_from_font(font)\n self.widget.setFont(q_font)", "def set_font_family_default(self):\n font = QFont('Arial', 12)\n self.parent.setCurrentFont(font)", "def SetSelectedFont(self, font):\r\n\r\n self._selected_font = font", "def SetFont(*args):\n return _gdi_.GraphicsContext_SetFont(*args)", "def shell_font_changed(self, font):\n self.set_font(font)", "def SetSelectedFont(self, font):\r\n\r\n self._selected_font = font\r\n self.GetArtProvider().SetSelectedFont(font)", "def replace(self, font):\n self._font = font", "def setFont(font='default',hardware=1):\n if font == 'default' and hardware:\n setHardwareFont()\n return\n currfmt = getFileFormat()\n if isPostscript(currfmt):\n setPSFont(font)\n elif isWMF(currfmt):\n setTTFont(font)\n else:\n setDislinFont(font)", "def SetFont(*args, **kwargs):\n return _gdi_.DC_SetFont(*args, **kwargs)", "def font(self, font='a'):\n if font not in self.__class__.__fontMap.keys():\n raise ValueError('font must be \\'a\\', \\'b\\', \\'c\\'')\n elif self._usePrintMode:\n self._textFont = font\n self._updatePrintMode()\n else:\n self._write(self.__class__.__ESC + 'M' + self.__class__.__fontMap[font])", "def setTTFont(font='default'):\n if font == 'default':\n font = 'Times New Roman' \n dislin.winfnt(font)", "def set_font(self, font, option):\n # Update fonts in all plugins\n set_font(font, option=option)\n plugins = self.main.widgetlist + self.main.thirdparty_plugins\n for plugin in plugins:\n plugin.update_font()", "def setFont(fontKey, update=False, **opts):\n if not hasFont(fontKey) or update:\n globals()[fontKey] = tkFont.Font(**opts)\n \n return globals()[fontKey]", "def set_font(self, font=UNSPECIFIED, fontsize=UNSPECIFIED, fontshadow=UNSPECIFIED):\n if font is not UNSPECIFIED:\n self.font = font\n if fontsize is not UNSPECIFIED:\n self.fontsize = fontsize\n if fontshadow is not UNSPECIFIED:\n self.fontshadow = fontshadow\n 
self.bar.draw()", "def SetFont(self, font):\r\n\r\n wx.PyScrolledWindow.SetFont(self, font)\r\n\r\n self._normalFont = font \r\n family = self._normalFont.GetFamily()\r\n if family == wx.FONTFAMILY_UNKNOWN:\r\n family = wx.FONTFAMILY_SWISS\r\n self._boldFont = wx.Font(self._normalFont.GetPointSize(), family,\r\n self._normalFont.GetStyle(), wx.BOLD, self._normalFont.GetUnderlined(),\r\n self._normalFont.GetFaceName(), self._normalFont.GetEncoding())\r\n self._italicFont = wx.Font(self._normalFont.GetPointSize(), family,\r\n wx.FONTSTYLE_ITALIC, wx.NORMAL, self._normalFont.GetUnderlined(),\r\n self._normalFont.GetFaceName(), self._normalFont.GetEncoding())\r\n\r\n return True", "def setHardwareFont():\n dislin.hwfont()", "def SetFont(self, font):\r\n \r\n if self._header_win:\r\n self._header_win.SetFont(font)\r\n self.CalculateAndSetHeaderHeight()\r\n self._header_win.Refresh()\r\n \r\n if self._main_win:\r\n return self._main_win.SetFont(font)\r\n else:\r\n return False" ]
[ "0.86504996", "0.85895497", "0.85511273", "0.8487338", "0.82593066", "0.81806946", "0.8180615", "0.80901104", "0.78574944", "0.7839135", "0.783008", "0.77893543", "0.7788783", "0.777804", "0.77533484", "0.77471095", "0.7734195", "0.77050644", "0.76914483", "0.76196116", "0.7601553", "0.7587357", "0.7577595", "0.75737923", "0.75517577", "0.75013393", "0.7484995", "0.7483358", "0.74147284", "0.73689556" ]
0.8616047
1
Return the current point size.
def pointsize(self): return self.m_pointsize
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def size(self) -> Point:\n\t\treturn self._size", "def getPointSize(self):\n l = [point.size for point in self.points]\n if l.count(l[0]) == len(l):\n return l[0]\n else:\n raise ValueError(\"The sizes of the points must be the same otherwise it makes no sense.\")", "def get_size(self):\n return self._surf.get_size()", "def get_pixel_size(self):\n p0 = core.PointF(0, 0)\n p1 = core.PointF(1, 1)\n tr = self.transform().inverted()[0]\n p01 = tr.map(p0)\n p11 = tr.map(p1)\n return core.PointF(p11 - p01)", "def size(self):\n bbox = self.bbox\n return bbox[1] - bbox[0]", "def size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"size\")", "def getSize(self):\n\n return self.size", "def getSize(self):\r\n return self.size", "def GetPointSize(*args, **kwargs):\n return _gdi_.Font_GetPointSize(*args, **kwargs)", "def getsize_pt(self):\n # The factor 16L/16777216L=2**(-20) converts a fix_word (here self.q)\n # to the corresponding float. Furthermore, we have to convert from TeX\n # points to points, hence the factor 72/72.27.\n return 72/72.27 * 16*self.q/16777216", "def get_num_points(self):\n dimensions = self.data.shape\n return dimensions[0]", "def getSize(self):\n return self.size", "def __get_size(self):\n return self.__size", "def get_size(self):\n return self.__size", "def get_size(self):\n return self.__size", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def get_size(self):\n return self._size", "def get_size(self):\n return self._size", "def get_size(self):\r\n return self.__size", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def get_size(self):\r\n\r\n return self._size", "def getSize(self):\n return self.__size" ]
[ "0.8636495", "0.82199454", "0.75747615", "0.74275225", "0.7424574", "0.7419275", "0.73445594", "0.73120743", "0.7296886", "0.7276074", "0.72740424", "0.7266581", "0.7259557", "0.7255099", "0.7255099", "0.72550595", "0.72550595", "0.72550595", "0.7226226", "0.7226226", "0.7221523", "0.72102857", "0.72102857", "0.72102857", "0.72102857", "0.72102857", "0.72102857", "0.72102857", "0.7210141", "0.7204613" ]
0.90453184
0
Set the current point size.
def set_pointsize(self, pointsize): self.m_pointsize = pointsize
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_point_size(self, pointSize):\n self.pointSize = pointSize", "def setPointSize(self, size):\n for point in self.points:\n point.size = size", "def set_point_size(self, point_size=0.0):\r\n for b in self.buf:\r\n b.unib[8] = point_size", "def set_size(self, size):\n \n self.width = size[0]\n self.height = size[1]", "def SetPointSize(*args, **kwargs):\n return _gdi_.Font_SetPointSize(*args, **kwargs)", "def changeSize(self, value):\n self.layer.brush_size = value", "def setsize(self, size):\n self.__size = size", "def size(self, size):\n self.width = size\n self.height = size", "def size(self, size):\n self.width = size\n self.height = size", "def set_pixel_size(self, pixel_size):\n raise NotImplementedError", "def set_size(self, size):\n self.dtSize = size", "def size(self, size):\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, value):\n self.width = value\n self.height = value", "def set_size(self, new_bunch_size):\n self.bunch_size = new_bunch_size", "def __button_marker_size_clicked(self):\n val, okPressed = QInputDialog.getDouble(self, \"Set marker size\",\"Value:\", self.marker_size, 0, 100, 4)\n if okPressed:\n self.vis.change_marker_size(val)\n self.marker_size = val", "def set_size(self, w, h):\n\t\tpass", "def size(self, val):\n self.width = val\n self.height = val", "def setPointWidth(self, width):\n for point in self.points:\n point.width = width", "def setSize_0(self, size):\n self.setSize(size.getWidth(), size.getHeight())", "def setFilmSize(self, size_x, size_y):\n self.lens.setFilmSize(size_x, size_y)\n self.rebuildMatrixCache()", "def setSize(self, size, units=POINT_UNITS):\n assert units in (Label.PIXEL_UNITS, Label.POINT_UNITS)\n if self.Y_POINTS_UP:\n self.y_sign = 1\n else:\n self.y_sign = -1\n if units == Label.PIXEL_UNITS:\n scale = size / self.font.table['units_per_EM']\n else:\n scale = size * self.DPI / (\n 72 * self.font.table['units_per_EM'])\n self.transform.setScale(scale, self.y_sign * scale)\n self.size = size\n self.sizeUnits = units", "def size(self, size: int):\n\n self._size = size", "def size(self, value):\n self.width = value", "def setFrameSize(self, frame_size):\n \n self.frame_size = frame_size", "def set_size(self, size=None):\n if not size:\n size = self.output_size\n self.img = cv2.resize(self.img, size)\n self.update_image()\n self.update_size()", "def set_size(self, new_size: int):\n self.__tab_size = new_size\n self.__check_interpreter()\n self.__vals = [0 for _ in range(self.__tab_size)]" ]
[ "0.8869498", "0.8786114", "0.8302565", "0.7436967", "0.7383736", "0.7337534", "0.7333951", "0.7167893", "0.7167893", "0.7124836", "0.70554525", "0.6935905", "0.68554765", "0.68554765", "0.68554765", "0.68554765", "0.670467", "0.66977566", "0.66885006", "0.66778666", "0.66609937", "0.662851", "0.6587521", "0.6581645", "0.65765053", "0.6559255", "0.6549362", "0.6514397", "0.6490688", "0.64840287" ]
0.88312125
1
Traverse `graph` with BFS starting from `source`, up to `size` nodes. Return an iterator of subgraph nodes (including source node).
def _bfs_nodes(cls, graph, source, size, **kwargs):
    if size < 1:
        return iter(())

    return itertools.chain(
        (source,),
        itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pfs_nodes(cls, graph, source, size, priority):\n if size < 1:\n return iter(())\n\n # use min-heap to implement (max) priority queue\n # use insertion order to break priority tie\n queue = []\n counter = itertools.count()\n push = lambda priority, node: heappush(queue, (-priority, next(counter), node))\n pop = partial(heappop, queue)\n\n visited = set()\n enqueued = set()\n push(priority(source), source)\n\n while queue and len(visited) < size:\n _, _, node = pop()\n\n if node in visited:\n continue\n\n visited.add(node)\n\n for neighbor in graph[node]:\n if neighbor not in enqueued:\n enqueued.add(neighbor)\n push(priority(neighbor), neighbor)\n\n return iter(visited)", "def bfs_nodes_generator(graph, source, reverse=...):\n ...", "def bfs_edges_generator(graph, source, reverse=...):\n ...", "def dfs_edges_generator(graph, source, reverse=...):\n ...", "def bfs(graph, source):\n visited = [False] * len(graph.graph)\n print(visited)\n\n result = \"\"\n queue = []\n\n queue.append(source)\n visited[source] = True\n\n while queue:\n source = queue.pop(0)\n result += str(source)\n\n while graph.graph[source] is not None:\n data = graph.graph[source].vertex\n if not visited[data]:\n queue.append(data)\n visited[data] = True\n graph.graph[source] = graph.graph[source].next\n return result", "def traverse_breadth_first(self, src: int = 0, graph: GraphInterface = None):\n if not isinstance(graph, DiGraph) or graph is None or self._graph.get_node(src) is None:\n return\n curr = graph.get_node(src)\n\n q = Queue()\n\n q.put(curr)\n curr.tag += 1\n\n while not q.empty():\n\n curr = q.get()\n out_edges = graph.all_out_edges_of_node(curr.key)\n\n for i in out_edges:\n out_edge = out_edges[i]\n neighbor = graph.get_node(out_edge.dest) # Get curr's neighbor\n if neighbor.tag == curr.tag - 1:\n neighbor.tag += 1 # If un-tagged -> tag it.\n q.put(neighbor) # and enqueue it", "def BFS(self, start_vertex):\n yield from self._search(start_vertex, kind='BFS')", "def traverse(self, source):\r\n key = self.d.keys()\r\n #check for source in graph\r\n if source not in key:\r\n raise KeyError(str(source) + \" is not in graph!\")\r\n #initialize V, Q and M\r\n V = []\r\n Q = deque()\r\n Q.append(source)\r\n M = set(source)\r\n #while Q is not empty\r\n while Q:\r\n #take first element of queue\r\n current = Q.popleft()\r\n #add it to V\r\n V.append(current)\r\n neighbors = self.d[current]\r\n #for each value associated with this key\r\n for n in neighbors:\r\n #if it isn't in M, add it to M and end of Q\r\n if n not in M:\r\n Q.append(n)\r\n M.add(n)\r\n return V", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def bfsSample(G, source=None, k = 50):\n\twarn(\"networkit.sampling.bfsSample is deprecated, will be removed in future updates.\")\n\tif not source:\n\t\tsource = GraphTools.randomNode(G)\n\tn = G.numberOfNodes()\n\tvisited = [False]*n\n\tQ = [source]\n\tclosest = set([source])\n\tglobal found\n\tfound = 0\n\twhile len(Q) > 0 and found < k:\n\t\tu = Q.pop(0)\n\t\tdef enqueue(u,v,weight, eid):\n\t\t\tglobal found\n\t\t\tif not visited[v] and found < 
k:\n\t\t\t\tfound += 1\n\t\t\t\tvisited[v] = True\n\t\t\t\tQ.append(v)\n\t\t\t\tclosest.add(v)\n\t\tG.forEdgesOf(u, enqueue)\n\tprint(\"found {0} nodes\".format(len(closest)))\n\tG1 = GraphTools.subgraphFromNodes(G, closest)\n\treturn G1", "def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo", "def BFS(self, start_vertex, verbose=True):\n if not self.contains(start_vertex):\n return None\n traversal = []\n visited = set()\n for vertex in self.vertices():\n if vertex not in visited:\n self._BFS(vertex, visited, traversal.append)\n if verbose:\n print('BFS(Graph) =', traversal)\n return traversal", "def bounded_bfs_paths(G,sources,max_depth=None):\n\t# Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py\n\t# by D. 
Eppstein, July 2004.\n\tvisited=set(sources)\n\tdepth=0\n\tstack = []\n\tfor source in sources:\n\t\tstack.append((source,iter(G[source]),depth,[source]))\n\twhile stack:\n\t\t# print stack\n\t\tparent,children,last_depth,path = stack[0]\n\t\tdepth=last_depth+1\n\t\tif (max_depth) and (depth > max_depth):\n\t\t\tbreak\n\t\ttry:\n\t\t\tchild = next(children)\n\t\t\t# if child not in visited:\n#\t\t\tyield parent,child\n#\t\t\tvisited.add(child)\n\t\t\tif child not in path :\n\t\t\t\tyield path+[child]\n\t\t\t\tstack.append((child,iter(G[child]),depth,path+[child]))\n\t\texcept StopIteration:\n\t\t\tstack.pop(0)", "def bfs_path(G, source, destination):\n vertex_dict = dict(nx.bfs_predecessors(G, source))\n queue = deque()\n queue.append(destination)\n while queue[-1] != source:\n queue.append(vertex_dict[queue[-1]])\n queue.reverse()\n return queue", "def ss_breadth_first_search_tree(\n graph: ScipyGraph, source_node: NodeID, depth_limit: int\n ) -> Tuple[NumpyNodeMap, NumpyNodeMap]:\n\n is_directed = ScipyGraph.Type.compute_abstract_properties(\n graph, {\"is_directed\"}\n )[\"is_directed\"]\n node_list: np.ndarray = graph.node_list\n depth_limit = len(node_list) - 1 if depth_limit == -1 else depth_limit\n source_node_position = np.flatnonzero(node_list == source_node).item()\n bfs_tree_csr = ss.csgraph.breadth_first_tree( # depth_limit is not used here!\n graph.value, source_node_position, directed=is_directed\n ).astype(bool)\n\n # Calcuate Depths\n depths = np.full(len(node_list), depth_limit + 1, dtype=int)\n depths[source_node_position] = 0\n current_node_positions = np.array([source_node_position], dtype=int)\n for depth in range(1, depth_limit + 1):\n selector = np.zeros(len(node_list), dtype=bool)\n selector[current_node_positions] = True\n current_node_positions = selector @ bfs_tree_csr\n if not current_node_positions.any():\n break\n depths[current_node_positions] = depth\n\n # Calculate Parents\n parents = np.empty(len(node_list), dtype=int)\n bfs_tree_coo = bfs_tree_csr.tocoo()\n parents[source_node_position] = source_node\n parents[bfs_tree_coo.col] = bfs_tree_coo.row\n\n # Ensure depth_limit\n valid_nodes = graph.node_list\n valid_depths_selector = depths <= depth_limit\n depths = depths[valid_depths_selector]\n parents = parents[valid_depths_selector]\n valid_nodes = valid_nodes[valid_depths_selector]\n depths_nodes = valid_nodes.copy()\n parents_nodes = valid_nodes.copy()\n\n node2depth = NumpyNodeMap(depths, depths_nodes)\n node2parent = NumpyNodeMap(parents, parents_nodes)\n\n return node2depth, node2parent", "def _plain_bfs(G, source):\n Gsucc = G.succ\n Gpred = G.pred\n\n seen = set()\n nextlevel = {source}\n while nextlevel:\n thislevel = nextlevel\n nextlevel = set()\n for v in thislevel:\n if v not in seen:\n yield v\n seen.add(v)\n nextlevel.update(Gsucc[v])\n nextlevel.update(Gpred[v])", "def bfs(get_neighbors, source, target):\n\n parents = {}\n visited = set()\n queue = collections.deque()\n queue.append(source)\n while queue:\n vertex = queue.popleft()\n if vertex == target:\n return _backtrack(target, lambda v: parents.get(v))\n if vertex not in visited:\n visited.add(vertex)\n for neighbor in filter(lambda n: n not in visited, get_neighbors(vertex)):\n queue.append(neighbor)\n parents[neighbor] = vertex\n return []", "def bfs_path(G, source, destination):\n vertex_dict = dict(nx.bfs_predecessors(G, source))\n queue = deque()\n queue.append(destination)\n while queue[-1] != source:\n try:\n queue.append(vertex_dict[queue[-1]])\n except KeyError:\n 
print(f\"Source: {source}, Dest: {destination}\")\n print(f\"Key {queue[-1]} not found in\")\n print_dict(\"bfs\", vertex_dict)\n break\n queue.reverse()\n return queue", "def single_source_subgraph(g, node):\n return g.subgraph(nx.single_source_shortest_path(g, node).keys())", "def BFS(self,s,t,parent):\n #mark all vertices as not visited\n visited = [False]*(self.ROWS);\n # initialize a queue\n queue = []\n # add source to q and mark it visited\n queue.append(s)\n visited[s] = True\n #Breadth-first-search\n while queue:\n n = queue.pop(0)\n for index,val in enumerate(self.graph[n]):\n if visited[index] == False and val>0:\n queue.append(index)\n visited[index] = True\n parent[index] = n\n #return True if sink was visted\n if visited[t]:\n return True\n else:\n return False", "def progressive_widening_search(G, source, value, condition, initial_width=1):\n # Check for the special case in which the source node satisfies the\n # termination condition.\n if condition(source):\n return source\n # The largest possible value of `i` in this range yields a width at\n # least the number of nodes in the graph, so the final invocation of\n # `bfs_beam_edges` is equivalent to a plain old breadth-first\n # search. Therefore, all nodes will eventually be visited.\n #\n # TODO In Python 3.3+, this should be `math.log2(len(G))`.\n log_m = math.ceil(math.log(len(G), 2))\n for i in range(log_m):\n width = initial_width * pow(2, i)\n # Since we are always starting from the same source node, this\n # search may visit the same nodes many times (depending on the\n # implementation of the `value` function).\n for u, v in nx.bfs_beam_edges(G, source, value, width):\n if condition(v):\n return v\n # At this point, since all nodes have been visited, we know that\n # none of the nodes satisfied the termination condition.\n raise nx.NodeNotFound('no node satisfied the termination condition')", "def bfs (graph, src, tgt):\n\n if not graph.has_key(src):\n raise AttributeError(\"The source '%s' is not in the graph\" % src)\n if not graph.has_key(tgt):\n raise AttributeError(\"The target '%s' is not in the graph\" % tgt)\n\n parents = {src: None}\n queue = deque([src])\n while queue:\n node = queue.popleft()\n for neighbor in graph[node]:\n if neighbor not in parents:\n parents[neighbor] = node\n queue.append(neighbor)\n if node == tgt:\n break\n\n path = [tgt]\n while parents[tgt] is not None:\n path.insert(0, parents[tgt])\n tgt = parents[tgt]\n\n return path", "def bfsPath(graph, start, end, toPrint=False):\n return BFS(graph, start, end, toPrint)", "def _search(self, start_vertex, kind='BFS'):\n \n if kind == 'BFS':\n pop_name = 'pop'\n append_name = 'appendleft'\n if kind == 'DFS':\n pop_name = 'pop'\n append_name = 'append'\n \n # Initialize set of visited vertices and a queue\n visited = set()\n queue = collections.deque([start_vertex])\n \n # While the queue is not empty\n while queue:\n \n # Get the vertex, abandon it if it has been seen before\n vertex = getattr(queue, pop_name)()\n if vertex in visited:\n continue\n visited.add(vertex)\n yield vertex\n \n # Go through neighbors, add unseen to the queue\n for neighbor in self.neighbors(vertex):\n if neighbor not in visited:\n getattr(queue, append_name)(neighbor)", "def bfs(graph,start):\n #keeps track of nodes to be visited\n queue = []\n #keeps track of nodes already visited\n explored = []\n queue.append(start)\n while queue:\n #remove first node from queue\n curr_node = queue.pop(0)\n #check if node is visited\n if curr_node not in explored:\n 
explored.append(curr_node)\n adjacent_nodes = graph[curr_node]\n #add adjacent nodes to queue\n for i in adjacent_nodes:\n queue.append(i)\n return explored", "def bfs(graph, start_vertex):\n\n queue = deque()\n queue.appendleft(start_vertex)\n explored_vertices = [start_vertex]\n\n while len(queue) != 0:\n vertex = queue.pop()\n neighbours = graph.neighbours(vertex)\n for neighbour in neighbours:\n if neighbour not in explored_vertices:\n explored_vertices.append(neighbour)\n queue.appendleft(neighbour)\n\n return explored_vertices", "def BFS(graph, start, end, toPrint=False):\n initPath = [start]\n pathQueue = [initPath]\n if toPrint:\n print('current BFS path:', printPath(pathQueue))\n while len(pathQueue) != 0:\n # get and remove oldest element in pathQueue\n tmpPath = pathQueue.pop(0)\n print('Current BFS path:', printPath(tmpPath))\n lastNode = tmpPath[-1]\n if lastNode == end:\n return tmpPath # Explore all paths with n hops \n # before exploring any path with >n hops\n for nextNode in graph.childrenOf(lastNode):\n if next not in tmpPath:\n newPath = tmpPath + [nextNode]\n pathQueue.append(newPath)\n return None", "def _BFS(self, start_vertex, visited, callback):\n queue = []\n queue.insert(0, start_vertex)\n visited.add(start_vertex)\n while queue:\n curr_vertex = queue.pop()\n callback(curr_vertex)\n for vertex in self.neighbors(curr_vertex):\n if vertex not in visited:\n queue.insert(0, vertex)\n visited.add(vertex)", "def bfs(graph, initial_node, dest_node):\n return queue_search(graph, initial_node, dest_node, queue.Queue())", "def bfs(self, starting_vertex, destination_vertex): # great if you know to result is somewhere close to the root/start\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue\n q.enqueue([starting_vertex]) # set the starting_vertex with enqueue \n\n while q.size() > 0:\n path = q.dequeue() # dequeue and store first path\n v = path[-1] # store the vertex from the end of path \n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n\n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors \n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n q.enqueue(path_copy) # enqueue the path copy to the queue " ]
[ "0.697801", "0.61683583", "0.6095612", "0.5985303", "0.5921379", "0.588585", "0.58339345", "0.5821681", "0.56726986", "0.5672395", "0.56356573", "0.5611731", "0.5600283", "0.55612266", "0.5560045", "0.5480478", "0.54231256", "0.5411031", "0.54024166", "0.53965765", "0.53898805", "0.5360847", "0.53506166", "0.53115433", "0.53081745", "0.53038746", "0.5299414", "0.52811617", "0.5247372", "0.5230441" ]
0.7747768
0
Priority-first traversal of `graph` starting from `source` node, returning an iterable of up to `size` nodes. Node priority is determined by the `priority(node)` callable. Nodes with higher priority value are traversed before nodes with lower priority.
def _pfs_nodes(cls, graph, source, size, priority):
    if size < 1:
        return iter(())

    # use min-heap to implement (max) priority queue
    # use insertion order to break priority tie
    queue = []
    counter = itertools.count()
    push = lambda priority, node: heappush(queue, (-priority, next(counter), node))
    pop = partial(heappop, queue)

    visited = set()
    enqueued = set()
    push(priority(source), source)

    while queue and len(visited) < size:
        _, _, node = pop()

        if node in visited:
            continue

        visited.add(node)

        for neighbor in graph[node]:
            if neighbor not in enqueued:
                enqueued.add(neighbor)
                push(priority(neighbor), neighbor)

    return iter(visited)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )", "def traverse_breadth_first(self, src: int = 0, graph: GraphInterface = None):\n if not isinstance(graph, DiGraph) or graph is None or self._graph.get_node(src) is None:\n return\n curr = graph.get_node(src)\n\n q = Queue()\n\n q.put(curr)\n curr.tag += 1\n\n while not q.empty():\n\n curr = q.get()\n out_edges = graph.all_out_edges_of_node(curr.key)\n\n for i in out_edges:\n out_edge = out_edges[i]\n neighbor = graph.get_node(out_edge.dest) # Get curr's neighbor\n if neighbor.tag == curr.tag - 1:\n neighbor.tag += 1 # If un-tagged -> tag it.\n q.put(neighbor) # and enqueue it", "def ss_breadth_first_search_tree(\n graph: ScipyGraph, source_node: NodeID, depth_limit: int\n ) -> Tuple[NumpyNodeMap, NumpyNodeMap]:\n\n is_directed = ScipyGraph.Type.compute_abstract_properties(\n graph, {\"is_directed\"}\n )[\"is_directed\"]\n node_list: np.ndarray = graph.node_list\n depth_limit = len(node_list) - 1 if depth_limit == -1 else depth_limit\n source_node_position = np.flatnonzero(node_list == source_node).item()\n bfs_tree_csr = ss.csgraph.breadth_first_tree( # depth_limit is not used here!\n graph.value, source_node_position, directed=is_directed\n ).astype(bool)\n\n # Calcuate Depths\n depths = np.full(len(node_list), depth_limit + 1, dtype=int)\n depths[source_node_position] = 0\n current_node_positions = np.array([source_node_position], dtype=int)\n for depth in range(1, depth_limit + 1):\n selector = np.zeros(len(node_list), dtype=bool)\n selector[current_node_positions] = True\n current_node_positions = selector @ bfs_tree_csr\n if not current_node_positions.any():\n break\n depths[current_node_positions] = depth\n\n # Calculate Parents\n parents = np.empty(len(node_list), dtype=int)\n bfs_tree_coo = bfs_tree_csr.tocoo()\n parents[source_node_position] = source_node\n parents[bfs_tree_coo.col] = bfs_tree_coo.row\n\n # Ensure depth_limit\n valid_nodes = graph.node_list\n valid_depths_selector = depths <= depth_limit\n depths = depths[valid_depths_selector]\n parents = parents[valid_depths_selector]\n valid_nodes = valid_nodes[valid_depths_selector]\n depths_nodes = valid_nodes.copy()\n parents_nodes = valid_nodes.copy()\n\n node2depth = NumpyNodeMap(depths, depths_nodes)\n node2parent = NumpyNodeMap(parents, parents_nodes)\n\n return node2depth, node2parent", "def traverse(self, source):\r\n key = self.d.keys()\r\n #check for source in graph\r\n if source not in key:\r\n raise KeyError(str(source) + \" is not in graph!\")\r\n #initialize V, Q and M\r\n V = []\r\n Q = deque()\r\n Q.append(source)\r\n M = set(source)\r\n #while Q is not empty\r\n while Q:\r\n #take first element of queue\r\n current = Q.popleft()\r\n #add it to V\r\n V.append(current)\r\n neighbors = self.d[current]\r\n #for each value associated with this key\r\n for n in neighbors:\r\n #if it isn't in M, add it to M and end of Q\r\n if n not in M:\r\n Q.append(n)\r\n M.add(n)\r\n return V", "def progressive_widening_search(G, source, value, condition, initial_width=1):\n # Check for the special case in which the source node satisfies the\n # termination condition.\n if condition(source):\n return source\n # The largest possible value of `i` in this range yields a width at\n # least the number of nodes in the graph, so the final invocation of\n # `bfs_beam_edges` is equivalent to a plain old breadth-first\n # search. 
Therefore, all nodes will eventually be visited.\n #\n # TODO In Python 3.3+, this should be `math.log2(len(G))`.\n log_m = math.ceil(math.log(len(G), 2))\n for i in range(log_m):\n width = initial_width * pow(2, i)\n # Since we are always starting from the same source node, this\n # search may visit the same nodes many times (depending on the\n # implementation of the `value` function).\n for u, v in nx.bfs_beam_edges(G, source, value, width):\n if condition(v):\n return v\n # At this point, since all nodes have been visited, we know that\n # none of the nodes satisfied the termination condition.\n raise nx.NodeNotFound('no node satisfied the termination condition')", "def bfs_nodes_generator(graph, source, reverse=...):\n ...", "def dijkstra(graph, source):\n\n if not isinstance(graph, WeightedGraph):\n raise TypeError('dijkstra_heap(graph, source): graph must be a WeightedGraph object')\n if source not in graph.Keys:\n raise ValueError('dijkstra_heap(graph, source): source must be a key of the graph but {} is not a key of the graph'.format(source))\n \n # initialize framework\n init_sssp(graph)\n graph.Dictionary[source].distance = 0\n\n H = prepare_heap(graph, vertice_order)\n\n while not H.is_empty():\n vertice_min = H.remove_min()\n for _, vertice, weight in vertice_min.adj_list:\n relax(H, vertice_min, graph.Dictionary[vertice], weight)", "def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. 
Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo", "def _iterative_graph_search(cls, bqm, sample, ordered_priority, visited, size, method):\n graph = bqm.to_networkx_graph()\n graph.remove_nodes_from(visited)\n\n variables = set()\n order = iter(ordered_priority)\n\n while len(variables) < size and len(graph):\n # find the next untraversed variable in (energy) order\n try:\n source = next(order)\n while source in visited or source in variables:\n source = next(order)\n except StopIteration:\n break\n\n # get a subgraph induced by source\n nodes = list(\n method(graph, source, size - len(variables), priority=ordered_priority.get))\n variables.update(nodes)\n\n # in next iteration we traverse a reduced BQM graph\n graph.remove_nodes_from(nodes)\n\n return variables", "def topological_nodes_generator(graph, reverse=...):\n ...", "def dfs_edges_generator(graph, source, reverse=...):\n ...", "def dijkstra_heap(graph, source_vertex):\n x = set()\n heap = []\n shortest_key = {source_vertex: 0}\n heapq.heappush(heap, (0, 
source_vertex))\n\n for v in graph.keys():\n if v != source_vertex:\n shortest_key[v] = float('inf')\n heapq.heappush(heap, (float('inf'), v))\n\n while heap:\n length_w, w = heapq.heappop(heap)\n x.add(w)\n for y in graph[w].dist.keys():\n idx_to_remove = search_vertex(heap, y)\n\n #################################################################################\n ### Since the graph given it not strictly directive ###\n ### idx_to_remove is None only if it comes from the opposite direction ###\n ### e.g. 1 -> 8 (already processed), but in future, we have to process 8 -> 1 ###\n #################################################################################\n if idx_to_remove is None:\n continue\n #################################################################################\n # Swap the index to be remove to the last and remove it\n heap[idx_to_remove], heap[-1] = heap[-1], heap[idx_to_remove]\n length_y, y = heap.pop()\n\n # Calculate the new length (dijkstra score)\n new_length_y = min(length_y, length_w + graph[w].dist[y])\n heapq.heappush(heap, (new_length_y, y))\n\n # Maintaining the heap invariant\n heapq.heapify(heap)\n\n # Update shortest_key\n shortest_key[y] = new_length_y\n\n return [shortest_key[i] for i in NEXT_TO_VERTICES]", "def prim_algorithm(self, start):\r\n if not self.is_vertex_in_graph(start):\r\n raise GraphException(f\"The vertex {start} does not exist in the graph.\")\r\n q = PriorityQueue()\r\n prev = {node: None for node in self.get_all_vertices()}\r\n dist = {node: numpy.inf for node in self.get_all_vertices()}\r\n processed = {node: False for node in self.get_all_vertices()}\r\n tree_edges = []\r\n\r\n dist[start] = 0\r\n processed[start] = True\r\n\r\n for neighbour in self.get_neighbours(start):\r\n dist[neighbour] = self.get_cost_of_edge(start, neighbour)\r\n prev[neighbour] = start\r\n q.put((dist[neighbour], neighbour))\r\n while not q.empty():\r\n top = q.get()\r\n top_vertex = top[1]\r\n if not processed[top_vertex]:\r\n tree_edges.append((prev[top_vertex], top_vertex))\r\n processed[top_vertex] = True\r\n for neighbour in self.get_neighbours(top_vertex):\r\n if not processed[neighbour] and self.get_cost_of_edge(top_vertex, neighbour) < dist[neighbour]:\r\n dist[neighbour] = self.get_cost_of_edge(top_vertex, neighbour)\r\n q.put((dist[neighbour], neighbour))\r\n prev[neighbour] = top_vertex\r\n return tree_edges", "def breadth_first_traversal(self, start_node, visitor_function=None, max_depth=None):\n self._reset_traversal_state()\n\n if isinstance(start_node, str):\n start_node = self.nodes[start_node]\n\n if not isinstance(start_node, ProcessNode):\n raise TypeError('Expect start_node to either be a string or a ProcessNode. 
Got [{}] instead'.format(\n str(type(start_node))))\n\n start_node.discovery_time = 1\n queue = collections.deque()\n queue.appendleft(start_node)\n\n while len(queue) > 0:\n node = queue.pop()\n assert NodeColor.WHITE == node.color\n\n if node.predecessor is not None:\n node.discovery_time = node.predecessor.discovery_time + 1\n\n self._visit_enter(node, visitor_function)\n\n node.color = NodeColor.GRAY\n\n if max_depth is None or node.discovery_time + 1 < max_depth:\n for descendant in self.edges[node]:\n if NodeColor.WHITE == descendant:\n descendant.predecessor = node\n queue.appendleft(descendant)\n\n node.finishing_time = self.time\n node.color = NodeColor.BLACK\n\n self._visit_exit(node, visitor_function)", "def least_cost_path(G, start, dest, cost):\n\n # Create a priority queue\n todo = pqueue.PQueue()\n todo.update(start, 0);\n\n # v in visited when the vertex v's least cost from start has been determined\n visited = set()\n\n # parent[v] is the vertex that just precedes v in the path from start to v\n parent = {}\n\n while todo and (dest not in visited):\n\n # priority queue operation\n # remove smallest estimated cost vertex from todo list\n (cur, c) = todo.pop_smallest()\n\n # it is now visited, and will never have a smaller cost\n visited.add(cur)\n\n for n in G.adj_to(cur):\n if n in visited: continue\n if todo.update(n, c+cost((cur,n))):\n parent[n] = cur\n\n # now, if there is a path, extract it. The graph may be disconnected\n # so in that case return None\n if dest not in visited:\n return None\n\n path = [dest]\n cur = dest\n while start not in path:\n cur = parent[cur]\n path.append(cur)\n\n path.reverse()\n return path", "def bfs_edges_generator(graph, source, reverse=...):\n ...", "def greedy_variable_order(primal_graph:PrimalGraph, pvo:List[List[int]]=None, pool_size=8, cutoff=INF):\n def fill_count(nid):\n \"\"\"\n count number of fill-in edges after removing nid\n number of combinations of nhd - existing edges (nodes in the subgraph of nhd)\n \"\"\"\n n_edges = G.subgraph(G.neighbors(nid)).number_of_edges()\n deg = G.degree[nid]\n n_fill = deg*(deg-1)//2 - n_edges\n return n_fill\n\n def remove_fill_in_edges(nid):\n G.add_edges_from(itertools.combinations(G.neighbors(nid), 2)) # adding edge twice? 
no effect\n G.remove_node(nid)\n\n G = primal_graph.copy() # G = copy.deepcopy(primal_graph)\n if pvo is None:\n pvo = [list(G.nodes())] #[ [all in one block] ]\n ordering = []\n induced_width = 0\n for each_block in pvo:\n processing_nodes = SortedList( [(fill_count(nid), nid) for nid in each_block] ) # ascending order\n while processing_nodes:\n fill, selected_nid = processing_nodes[0]\n if fill != 0: # don't add any edge\n # pick a node in random from a pool of best nodes; each node has prob 1/(fill_in edges)\n scores, candidates = zip(*processing_nodes[:pool_size])\n probs = np.power(np.array(scores), -1.0)\n selected_ind = np.random.choice(len(probs), p=probs/(np.sum(probs)))\n selected_nid = candidates[selected_ind]\n ordering.append(selected_nid)\n # current_width = len(G.neighbors(selected_nid))\n current_width = G.degree[selected_nid]\n if current_width > cutoff:\n return None, induced_width\n if current_width > induced_width:\n induced_width = current_width\n remove_fill_in_edges(selected_nid)\n # recompute score after removing the selected node from primal graph\n processing_nodes = SortedList( [(fill_count(nid), nid) for _, nid in processing_nodes if nid != selected_nid] )\n return ordering, induced_width", "def bfsSample(G, source=None, k = 50):\n\twarn(\"networkit.sampling.bfsSample is deprecated, will be removed in future updates.\")\n\tif not source:\n\t\tsource = GraphTools.randomNode(G)\n\tn = G.numberOfNodes()\n\tvisited = [False]*n\n\tQ = [source]\n\tclosest = set([source])\n\tglobal found\n\tfound = 0\n\twhile len(Q) > 0 and found < k:\n\t\tu = Q.pop(0)\n\t\tdef enqueue(u,v,weight, eid):\n\t\t\tglobal found\n\t\t\tif not visited[v] and found < k:\n\t\t\t\tfound += 1\n\t\t\t\tvisited[v] = True\n\t\t\t\tQ.append(v)\n\t\t\t\tclosest.add(v)\n\t\tG.forEdgesOf(u, enqueue)\n\tprint(\"found {0} nodes\".format(len(closest)))\n\tG1 = GraphTools.subgraphFromNodes(G, closest)\n\treturn G1", "def TopologicallySorted(graph, get_edges):\n get_edges = memoize(get_edges)\n visited = set()\n visiting = set()\n ordered_nodes = []\n def Visit(n):\n if n in visiting:\n raise CycleError(visiting)\n if n in visited:\n return\n visited.add(n)\n visiting.add(n)\n for neighbor in get_edges(n):\n Visit(neighbor)\n visiting.remove(n)\n ordered_nodes.insert(0, n)\n for node in sorted(graph):\n Visit(node)\n return ordered_nodes", "def shortest_path_lengths(self, g, src):\n d = {} # d[v] is upper bound from s to v\n cloud = {} # map reachable v to its d[v] value\n pq = AdaptableHeapPriorityQueue() # vertex v will have key d[v]\n pqlocator = {} # map from vertex to its pq locator\n\n # for each vertex v of the graph, add an entry to the priority queue, with\n # the source having distance 0 and all others having infinite distance\n for v in g.vertices():\n if v is src:\n d[v] = 0\n else:\n d[v] = float('inf') # syntax for positive infinity\n pqlocator[v] = pq.add(d[v], v) # save locator for future updates\n\n while not pq.is_empty():\n key, u = pq.remove_min()\n cloud[u] = key # its correct d[u] value\n del pqlocator[u] # u is no longer in pq\n for e in g.incident_edges(u): # outgoing edges (u,v)\n v = e.opposite(u)\n if v not in cloud:\n # perform relaxation step on edge (u,v)\n wgt = e.element()\n if d[u] + wgt < d[v]: # better path to v?\n d[v] = d[u] + wgt # update the distance\n pq.update(pqlocator[v], d[v], v) # update the pq entry\n\n return cloud # only includes reachable vertices", "def single_source_subgraph(g, node):\n return g.subgraph(nx.single_source_shortest_path(g, node).keys())", 
"def std_bfs(graph, src_vertex):\n # this sssp yields (node, level) in a breadth first search\n res = nx.single_source_shortest_path_length(graph, src_vertex)\n\n return [dist+1 for _, dist in sorted(res.items())]", "def select(self, batch_size):\n\n #if self.tree.filled_size() < batch_size:\n # print('CALLING REPLAY SAMPLING WHEN NOT FULL ENOUGH')\n # #return None, None\n\n out = []\n indices = []\n #weights = []\n priorities = []\n avoid_resampling = False\n for _ in range(batch_size):\n r = random.random()\n #return (idx, self.tree[idx], self.data[dataIdx])\n data, priority, index = self.tree.find(r)\n #index, priority, data = self.tree.find(r)\n #print(index)\n #print(\"d: {}, \\n priority: {}, \\n index: {}\".format(data, priority, index))\n priorities.append(priority)\n #weights.append((1. / self.memory_size / priority) ** beta if priority > 1e-16 else 0)\n indices.append(index)\n out.append(data)\n if avoid_resampling: self.priority_update([index], [self.epsilon_priority]) # To avoid resampling same transition too much\n\n for i in range(len(priorities)):\n if priorities[i] >= self.bonus_priority: # remove priority bonus\n priorities[i] -= self.bonus_priority\n self.priority_update([indices[i]],[priorities[i]])\n\n # avoid resampling part self.priority_update(indices, priorities) # Revert priorities\n #weights /= max(weights) # Normalize for stability\n return out, indices", "def top(iterable, max_size=1, key=None):\n\n # An import is done here to avoid polluting the namespace\n from heapq import heapreplace, heapify\n\n top_k_values = []\n\n for idx, item in enumerate(iterable):\n if idx < max_size:\n top_k_values.append(item)\n elif idx == max_size:\n heapify(top_k_values)\n elif idx > max_size:\n heapreplace(top_k_values, item)\n\n return iter(sorted(top_k_values, key=key, reverse=True))", "def dijkstra(self, source=None, destination=None):\n for vertex in self.vertices():\n vertex.d = sys.maxint\n if not source:\n source = self.vertices()[0]\n q = simply_python.data_structures.FIFO_dict()\n source.d = 0\n q.append(source)\n while not q.isempty():\n source = q.pop()\n print source\n print source.d\n d = source.d\n for out_vertex in self.out_vertices(source):\n if out_vertex.d == sys.maxint:\n out_vertex.d = d + 1\n q.append(out_vertex)\n if out_vertex == destination:\n return out_vertex.d\n return d", "def shortest_path__dijkstra__priority_queue(self, source, target=None):\n dist = {} # best distances to `v` from `source`\n prev = {} # predecessors of `v`\n Q = PriorityQueue()\n\n dist[source] = 0\n Q.add_with_priority(source, 0)\n\n for v in self.vertices:\n if v != source:\n dist[v] = self.INFINITY # unknown distance from source to `v`\n prev[v] = None # predecessor of `v`\n\n # the main loop\n reached_target = False\n while not Q.is_empty and not reached_target:\n priority, u = Q.extract_min() # remove and return best vertex\n\n # go through all `v` neighbors of `u`\n for v, edge_weight in self.neighbors_of(u):\n alt = dist[u] + edge_weight\n if alt < dist[v]:\n # current known shortest path to `v` is...\n dist[v] = alt # with distance `alt`\n prev[v] = u # through vertex `u`\n\n if not Q.contains(v):\n Q.add_with_priority(v, alt)\n\n if target is not None and u == target:\n # break as soon as `target` is reached\n # no need to calculate shortest path between every pair of vertices\n reached_target = True\n\n if target is not None and reached_target:\n S = [] # holds the shortest path, or empty if None\n u = target\n if u in prev or u == source:\n while u is not None:\n 
S.append(u)\n u = prev.get(u)\n\n path = S[::-1]\n distance = sum([v.weight for v in S])\n else:\n path = None\n distance = None\n\n return path, distance, dist", "def predecessor_traverse(p,s,g):\n L = []\n v = g\n while v is not None:\n L.append(v)\n v = p.get(v,None)\n #rather than prepending, we appended and now we'll reverse. This is a more efficient than prepending\n return L[::-1]", "def ExpandTopInto(src_queue, trg_queue, cached_states, min_bound=1.0):\n _, best_state = src_queue[0]\n # Produce more candidate items.\n new_states = best_state.ProduceNewStates()\n for new_state in new_states:\n if new_state.state_id not in cached_states:\n score = new_state.score * min_bound\n heapq.heappush(trg_queue, (score, new_state))\n cached_states.add(new_state.state_id)", "def greedy(initial_state, heuristic, dimension=3):\n\n\tdef add_cost(node):\n\t\tnode.cost = heuristic(node.state)\n\n\treturn search(initial_state, Frontier(PriorityQueue), dimension, cost_fn=add_cost)", "def dijkstra_generalized(graph, source, weight='weight'):\n # Removed min=None\n import math\n\n # Definitions consistent with Kurose & Ross\n u = source\n\n def c(x, y):\n return graph[x][y][weight]\n\n N = frozenset(graph.nodes())\n NPrime = {u} # i.e. \"set([u])\"\n D = dict.fromkeys(N, math.inf)\n P = dict.fromkeys(N, [])\n\n # Initialization\n for v in N:\n if graph.has_edge(u, v):\n D[v] = c(u, v)\n P[v] = [u]\n D[u] = 0 # over-write inf entry for source\n\n # Loop through all nodes\n while NPrime != N:\n candidates = {w: D[w] for w in N if w not in NPrime}\n w, Dw = min(candidates.items(), key=lambda item: item[1])\n NPrime.add(w)\n for v in graph[w]:\n if v not in NPrime:\n DvNew = D[w] + c(w, v)\n if DvNew < D[v]:\n D[v] = DvNew\n P[v] = [w]\n\n return P, D" ]
[ "0.6303713", "0.53427094", "0.51598406", "0.5158019", "0.513834", "0.51348895", "0.5099508", "0.5099216", "0.5047503", "0.49562794", "0.48239127", "0.4789225", "0.47295532", "0.47219703", "0.47005248", "0.46776277", "0.46731353", "0.4671026", "0.46589205", "0.4648572", "0.46139494", "0.46081966", "0.45995075", "0.4564305", "0.45569578", "0.45354715", "0.45275024", "0.45264116", "0.44914797", "0.4490469" ]
0.74001116
0
Traverse `bqm` graph using multi-start graph search `method`, until `size` variables are selected. Each subgraph is seeded from the `ordered_priority` ordered dictionary.
def _iterative_graph_search(cls, bqm, sample, ordered_priority, visited, size, method):
    graph = bqm.to_networkx_graph()
    graph.remove_nodes_from(visited)

    variables = set()
    order = iter(ordered_priority)

    while len(variables) < size and len(graph):
        # find the next untraversed variable in (energy) order
        try:
            source = next(order)
            while source in visited or source in variables:
                source = next(order)
        except StopIteration:
            break

        # get a subgraph induced by source
        nodes = list(
            method(graph, source, size - len(variables), priority=ordered_priority.get))
        variables.update(nodes)

        # in next iteration we traverse a reduced BQM graph
        graph.remove_nodes_from(nodes)

    return variables
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_node(self, priority_q, query, debug=False):\n popped = priority_q.pop(0)\n node = list(popped.keys())[0]\n node_label = node.split('.') \n if node[0] == \"C\": \n if debug:\n logging.info(f\"L{len(node_label) - 2} found bucket {node}\") \n return priority_q, node\n \n if debug:\n logging.info(f\"Popped {node}\")\n\n predictions = self.collect_probs_for_node(node_label, query)\n priority_q = self.add_to_priority_queue(priority_q, predictions, len(node_label) - 1, [node])\n priority_q = sorted(priority_q, key=(lambda i: list(i.values())), reverse=True)\n if debug:\n logging.info(f\"L{len(node_label) - 1} added - PQ (Top 5): {priority_q[:5]}\\n\") \n \n return priority_q, node", "def quickbb(graph, fast=True):\n\n \"\"\"Given a permutation of the nodes (called an elimination ordering),\n for each node, remove the node and make its neighbors into a clique.\n The maximum degree of the nodes at the time of their elimination is\n the width of the tree decomposition corresponding to that ordering.\n The treewidth of the graph is the minimum over all possible\n permutations.\n \"\"\"\n\n best = Solution() # this gets around the lack of nonlocal in Python 2\n best.count = 0\n\n def bb(graph, order, f, g):\n best.count += 1\n if len(graph) < 2:\n if f < best.ub:\n assert f == g\n best.ub = f\n best.order = list(order) + list(graph)\n\n else:\n vs = []\n for v in graph:\n # very important pruning rule\n if simplicial(graph, v) or almost_simplicial(graph, v) and len(graph[v]) <= lb:\n vs = [v]\n break\n else:\n vs.append(v)\n\n for v in vs:\n graph1 = copy_graph(graph)\n eliminate_node(graph1, v)\n order1 = order + [v]\n # treewidth for current order so far\n g1 = max(g, len(graph[v]))\n # lower bound given where we are\n f1 = max(g, lower_bound(graph1))\n if f1 < best.ub:\n bb(graph1, order1, f1, g1)\n return\n\n graph = {u: set(graph[u]) for u in graph}\n\n order = []\n best.ub, best.order = upper_bound(graph)\n lb = lower_bound(graph)\n\n # This turns on the branch and bound algorithm that\n # gets better treewidth results, but takes a lot\n # longer to process\n if not fast:\n if lb < best.ub:\n bb(graph, order, lb, 0)\n\n # Build the tree decomposition\n tree = defaultdict(set)\n\n def build(order):\n if len(order) < 2:\n bag = frozenset(order)\n tree[bag] = set()\n return\n v = order[0]\n clique = graph[v]\n eliminate_node(graph, v)\n build(order[1:])\n for tv in tree:\n if clique.issubset(tv):\n break\n bag = frozenset(clique | {v})\n tree[bag].add(tv)\n tree[tv].add(bag)\n\n build(best.order)\n return tree", "def greedy_variable_order(primal_graph:PrimalGraph, pvo:List[List[int]]=None, pool_size=8, cutoff=INF):\n def fill_count(nid):\n \"\"\"\n count number of fill-in edges after removing nid\n number of combinations of nhd - existing edges (nodes in the subgraph of nhd)\n \"\"\"\n n_edges = G.subgraph(G.neighbors(nid)).number_of_edges()\n deg = G.degree[nid]\n n_fill = deg*(deg-1)//2 - n_edges\n return n_fill\n\n def remove_fill_in_edges(nid):\n G.add_edges_from(itertools.combinations(G.neighbors(nid), 2)) # adding edge twice? 
no effect\n G.remove_node(nid)\n\n G = primal_graph.copy() # G = copy.deepcopy(primal_graph)\n if pvo is None:\n pvo = [list(G.nodes())] #[ [all in one block] ]\n ordering = []\n induced_width = 0\n for each_block in pvo:\n processing_nodes = SortedList( [(fill_count(nid), nid) for nid in each_block] ) # ascending order\n while processing_nodes:\n fill, selected_nid = processing_nodes[0]\n if fill != 0: # don't add any edge\n # pick a node in random from a pool of best nodes; each node has prob 1/(fill_in edges)\n scores, candidates = zip(*processing_nodes[:pool_size])\n probs = np.power(np.array(scores), -1.0)\n selected_ind = np.random.choice(len(probs), p=probs/(np.sum(probs)))\n selected_nid = candidates[selected_ind]\n ordering.append(selected_nid)\n # current_width = len(G.neighbors(selected_nid))\n current_width = G.degree[selected_nid]\n if current_width > cutoff:\n return None, induced_width\n if current_width > induced_width:\n induced_width = current_width\n remove_fill_in_edges(selected_nid)\n # recompute score after removing the selected node from primal graph\n processing_nodes = SortedList( [(fill_count(nid), nid) for _, nid in processing_nodes if nid != selected_nid] )\n return ordering, induced_width", "def bk(g,r,p,x, depth=0):\n # if p and x are empty:\n if not p and not x:\n print('Maximal Clique found: ', r)\n\n while p:\n # choose and remove a node from p\n node = p.pop()\n neighbors = list(g.neighbors(node))\n bk(g, r.union([node]), p.intersection(neighbors), x.intersection(neighbors), depth=depth+1)\n x = x.union([node])", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n 
queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def select(self, batch_size):\n\n #if self.tree.filled_size() < batch_size:\n # print('CALLING REPLAY SAMPLING WHEN NOT FULL ENOUGH')\n # #return None, None\n\n out = []\n indices = []\n #weights = []\n priorities = []\n avoid_resampling = False\n for _ in range(batch_size):\n r = random.random()\n #return (idx, self.tree[idx], self.data[dataIdx])\n data, priority, index = self.tree.find(r)\n #index, priority, data = self.tree.find(r)\n #print(index)\n #print(\"d: {}, \\n priority: {}, \\n index: {}\".format(data, priority, index))\n priorities.append(priority)\n #weights.append((1. 
/ self.memory_size / priority) ** beta if priority > 1e-16 else 0)\n indices.append(index)\n out.append(data)\n if avoid_resampling: self.priority_update([index], [self.epsilon_priority]) # To avoid resampling same transition too much\n\n for i in range(len(priorities)):\n if priorities[i] >= self.bonus_priority: # remove priority bonus\n priorities[i] -= self.bonus_priority\n self.priority_update([indices[i]],[priorities[i]])\n\n # avoid resampling part self.priority_update(indices, priorities) # Revert priorities\n #weights /= max(weights) # Normalize for stability\n return out, indices", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n q_p=util.PriorityQueue()\n #nodes=[]\n visited=[]\n \n node=dict()\n start=problem.getStartState()\n node['parent']=None\n node['direction']=None\n node['state']=start\n node['cost']=0\n \n q_p.push(node,node['cost'])\n lis.append(node)\n \n while q_p.isEmpty()!=True:\n node=q_p.pop()\n print node\n state=node['state']\n visited.append(state)\n #lis.append(node)\n if problem.isGoalState(state):\n print \"found\"\n return getPath(problem,node)\n \n suc=problem.getSuccessors(state)\n if suc ==None:\n continue \n for child in suc:\n if child[0] not in visited:\n childnode={}\n childnode['parent']=state\n childnode['direction']=child[1]\n childnode['state']=child[0]\n childnode['cost']=node['cost']+1\n q_p.push(childnode,childnode['cost'])\n lis.append(childnode)\n \n\n \n\n\n\n\n \n\n \n \n #util.raiseNotDefined()", "def solve(self):\n # a stack of queries (aka subproblems to be solved)\n stack = []\n initial_query = (len(self.items), self.knapsack_size)\n stack.append(initial_query)\n # Run as long as there are subproblems that need to be solved.\n # - this might not pass through all possible subproblems; in fact, \n # we're counting on it\n # - it will only pass through the subproblems that the initial \n # problem needs solved\n while len(stack) > 0:\n (end, ksize) = stack[-1]\n # this is the subproblem where we have only items self.items[:end]\n # and the knapsack size is ksize\n if self.items[end - 1].size > ksize:\n # item end-1 does not fit\n try:\n # retrieve subproblem result from the cache\n self._cache[(end, ksize)] = self._cache[(end - 1, ksize)]\n except KeyError:\n # subproblem hasn't been solved yet, put it on the stack\n stack.append((end - 1, ksize))\n continue\n else:\n # item end-1 fits; we get two subproblems:\n # - one if we don't include item end-1 in the knapsack\n # - one if we do include it\n sub1 = (end - 1, ksize)\n sub2 = (end - 1, ksize - self.items[end - 1].size)\n try:\n # retrieve 1st subproblem's result from the cache and \n # compute max value if we don't include item end-1\n val1 = self._cache[sub1]\n except KeyError:\n # subproblem hasn't been solved yet, put it on the stack\n stack.append(sub1)\n continue\n try:\n # retrieve 2nd subproblem's result from the cache and\n # compute max value if we do include item end-1\n val2 = self.items[end - 1].value + self._cache[sub2]\n except KeyError:\n # subproblem hasn't been solved yet, put it on the stack\n stack.append(sub2)\n continue\n # is it better to include item end-1 or not?\n self._cache[(end, ksize)] = max(val1, val2)\n # done with this subproblem\n stack.pop()\n return self._cache[(initial_query)]", "def genGraphPrior(graphs, items, fitinfo=Fitinfo({}), mincount=1, undirected=True, returncounts=False):\n a_start = fitinfo.prior_a\n b_start = fitinfo.prior_b\n method = fitinfo.prior_method\n p = fitinfo.zibb_p\n \n priordict={}\n \n #def 
betabinomial(a,b):\n # return (b / (a + b))\n \n def zeroinflatedbetabinomial(a,b,p):\n return (b / ((1-p)*a+b))\n\n # tabulate number of times edge does or doesn't appear in all of the graphs when node pair is present\n for graphnum, graph in enumerate(graphs): # for each graph\n itemdict=items[graphnum]\n for inum, i in enumerate(graph): # rows of graph\n for jnum, j in enumerate(i): # columns of graph\n if (inum > jnum) or ((undirected==False) and (inum != jnum)):\n item1 = itemdict[inum]\n item2 = itemdict[jnum]\n if undirected:\n pair = np.sort((item1,item2))\n else:\n pair = (item1,item2)\n if pair[0] not in list(priordict.keys()):\n priordict[pair[0]]={}\n if pair[1] not in list(priordict[pair[0]].keys()):\n priordict[pair[0]][pair[1]] = [a_start, b_start]\n if j==1:\n priordict[pair[0]][pair[1]][1] += 1.0 # increment b counts\n elif j==0:\n priordict[pair[0]][pair[1]][0] += 1.0 # increment a counts\n \n if not returncounts:\n for item1 in priordict:\n for item2 in priordict[item1]:\n a, b = priordict[item1][item2] # a=number of participants without link, b=number of participants with link\n if (a+b) >= (mincount + a_start + b_start):\n #if method == \"zeroinflatedbetabinomial\":\n priordict[item1][item2] = zeroinflatedbetabinomial(a,b,p) # zero-inflated beta-binomial\n #elif method == \"betabinomial\":\n # priordict[item1][item2] = betabinomial(a,b) # beta-binomial\n else:\n priordict[item1][item2] = 0.0 # if number of observations is less than mincount, make edge prior 0.0\n if 'DEFAULTPRIOR' in list(priordict.keys()):\n raise ValueError('Sorry, you can\\'t have a node called DEFAULTPRIOR. \\\n Sure, I should have coded this better, but I really didn\\'t think this situation would ever occur.')\n else:\n #if method == \"zeroinflatedbetabinomial\":\n priordict['DEFAULTPRIOR'] = zeroinflatedbetabinomial(a_start, b_start, p)\n #elif method==\"betabinomial\":\n # priordict['DEFAULTPRIOR'] = betabinomial(a_start, b_start)\n \n return priordict", "def iter_func(root_name, root, set_traverse, list_funcs, G, strings,\n plot_nodes, cur_pos, xgrain, min_weight, max_weight):\n set_traverse.append(root)\n nbs = G.neighbors(root)\n nbs = G[root]\n\n plot_nodes.append(cur_pos)\n xgrain = xgrain/2.0\n\n flag_pn = -1\n for nb in nbs.keys():\n if nb in set_traverse:\n continue\n\n next_pos = [0, 0, 0]\n if root.name == root_name:\n next_pos[0] = cur_pos[0]\n else:\n next_pos[0] = cur_pos[0] + xgrain*flag_pn*( 0.8+0.2*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight) ) #* (nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[1] = cur_pos[1] + 3.0*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[2] = nb.name\n\n flag_pn = flag_pn*(-1)\n\n strings.append([root, nb])\n set_traverse, strings, plot_nodes = iter_func(root_name, nb, set_traverse, list_funcs, G, strings, plot_nodes, next_pos, xgrain, min_weight, max_weight)\n\n return set_traverse, strings, plot_nodes", "def search(start):\n\n '''\n Create a class named nodeClass which contains 4 elements: \n state: The puzzle object containing the puzzle board at the node \n misplaced: num of misplaced tiles\n depth: depth of the node in the tree \n prev: parent node\n '''\n nodeClass = namedtuple('nodeClass', 'state, misplaced, depth, prev')\n\n #instantiate object from class creating the root node\n node = nodeClass(start, 0, 0, None)\n\n #stores the nodes that are going to be explored. 
\n #the node with lower f-score is explored first\n frontier = q.PriorityQueue()\n frontier.put((0,node))\n\n # frontier_set keep track of the nodes in the frontier queue\n frontier_set = {node}\n #contains the board states already explored\n explored_states = set()\n for ite in range(1,max_iterations+2):#while True:\n #Retrieve the node in the frontier with lowest value\n node = frontier.get()[1]\n\n #get the puzzle board obj from the node object\n state = node.state\n\n #Check if the game has ben solved\n if state.solved or ite==max_iterations:\n Result = namedtuple('Result', 'board, depth, nodesExpanded, max_depth, isSolved')\n return Result(state, node.depth, ite, max(no.depth for no in frontier_set), state.solved)\n\n # expanded nodes are added to explored set\n explored_states.add(state)\n\n #EXPANDING\n for mov in state.possible_moves:\n new_state=state.move(mov)\n new_node = nodeClass(new_state, new_state.score,\n node.depth + 1, node)\n\n #compute f-score of the node\n f_score=new_state.score + new_node.depth\n\n if new_state not in explored_states and new_node not in frontier_set:\n frontier.put((f_score,new_node))\n frontier_set.add(new_node)", "def partition(data, s, b, u, res, points, size, depth):\r\n\t# depth is just for demonstration purposes, terminating the recursion early\r\n\t\r\n\t# termination conditions\r\n\tif size > 1 and depth > 0:\r\n\r\n\t\t# variables that keep track of the scope of \"points\" for iteration purposes\r\n\t\trlen = []\r\n\t\tclen = len(points)\r\n\t\tfor i in range(clen):\r\n\t\t\trlen.append(len(points[i]))\r\n\t\t\r\n\t\t# keeps track of which point defines the maximal set\r\n\t\tmax = -10000\r\n\t\tmax_index = [0,0]\r\n\r\n\t\t# each point on the grid defines a potentially maximal set (including that point and the best \r\n\t\t# choice for higher rows) s[x][y] tracks the value of the set defined by (x, y)\r\n\t\tfor i in range(len(points)):\r\n\t\t\t# calculating s based on current row\r\n\t\t\ts[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]] = data[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]]\r\n\t\t\tfor j in range(rlen[i] - 2, -1, -1):\r\n\t\t\t\ts[points[i][j][0]][points[i][j][1]] = s[points[i][j + 1][0]][points[i][j + 1][1]] + data[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\r\n\t\t\t# if below the first row, factoring in the optimal set from above rows\r\n\t\t\tif i != 0:\r\n\t\t\t\tprev_end = points[i-1][rlen[i-1]-1]\r\n\t\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\t\tu[points[i][j][0]][points[i][j][1]] = b[prev_end[0]][np.minimum(prev_end[1], points[i][j][1])]\r\n\t\t\t\t\ts[points[i][j][0]][points[i][j][1]] += s[prev_end[0]][u[points[i][j][0]][points[i][j][1]]]\r\n\t\t\t\r\n\t\t\t# keeping track of the best sets from the new row for later use (what b and u are for)\r\n\t\t\trow_max = -10000\r\n\t\t\trow_max_index = -1\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tcurr = s[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\tif curr > row_max:\r\n\t\t\t\t\trow_max = curr\r\n\t\t\t\t\trow_max_index = points[i][j][1]\r\n\t\t\t\tb[points[i][j][0]][points[i][j][1]] = row_max_index\r\n\r\n\t\t\t# updating the global optimal set\r\n\t\t\tif row_max > max:\r\n\t\t\t\tmax = row_max\r\n\t\t\t\tmax_index[0] = i\r\n\t\t\t\tmax_index[1] = row_max_index\r\n\t\t\r\n\t\t# finding the set of points that generated the global optimum\r\n\t\tpointers = []\r\n\t\tpointers.append(max_index[1])\r\n\t\tfor i in range(max_index[0], 0, -1):\r\n\t\t\tpointers.append(u[points[i][0][0]][pointers[max_index[0]-i]])\r\n\t\tpointers = np.flip(pointers, 
axis=0)\r\n\t\t\r\n\t\t# finding the set of points of the upper and lower partitions defined by the optimal set\r\n\t\tupper_points = []\r\n\t\tlower_points = []\r\n\t\tup_num = 0\r\n\t\tlow_num = 0\r\n\t\tfor i in range(clen):\r\n\t\t\turow = []\r\n\t\t\tlrow = []\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tif i <= max_index[0] and points[i][j][1] >= pointers[i]:\r\n\t\t\t\t\turow.append(points[i][j])\r\n\t\t\t\t\tup_num += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tlrow.append(points[i][j])\r\n\t\t\t\t\tlow_num += 1\r\n\t\t\tif len(urow) > 0:\r\n\t\t\t\tupper_points.append(tuple(urow))\r\n\t\t\tif len(lrow) > 0:\r\n\t\t\t\tlower_points.append(tuple(lrow))\r\n\r\n\t\t# updating the final result and prepping the new datasets to have mean 0\r\n\t\tfor i in range(len(upper_points)):\r\n\t\t\tfor j in range(len(upper_points[i])):\r\n\t\t\t\tres[upper_points[i][j][0]][upper_points[i][j][1]] += max/up_num\r\n\t\t\t\tdata[upper_points[i][j][0]][upper_points[i][j][1]] -= max/up_num\r\n\t\tfor i in range(len(lower_points)):\r\n\t\t\tfor j in range(len(lower_points[i])):\r\n\t\t\t\tres[lower_points[i][j][0]][lower_points[i][j][1]] -= max/low_num\r\n\t\t\t\tdata[lower_points[i][j][0]][lower_points[i][j][1]] += max/low_num\r\n\t\t\r\n\t\t# recursion (if the optimal set is the current one, stop since at this point \r\n\t\t# the mean of the selected elements is optimal over them)\r\n\t\tif up_num != size:\r\n\t\t\tpartition(data, s, b, u, res, upper_points, up_num, depth-1)\r\n\t\tif low_num != size:\r\n\t\t\tpartition(data, s, b, u, res, lower_points, low_num, depth-1)\r\n\telse:\r\n\t\treturn", "def astar_multi(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n MSTLengths = {}\n edges = {}\n\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives)\n gFunction[start] = 0\n frontier.put(start) \n getEdgeWeights(maze, objectives, edges) # init edge weights for MST\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n objectivesLeft.remove(currentCell)\n\n # all objectives found, initialise backtrace and exit loop\n if len(objectivesLeft) == 0:\n path.clear()\n ret.clear()\n path.append(currentState)\n ret.append(currentCell)\n break\n \n # if we have already calculated MST length we can reuse value\n # else calculate MST length for this state and store it.\n length = 0\n if str(objectivesLeft) in MSTLengths:\n length = MSTLengths[str(objectivesLeft)]\n else:\n length = getMSTLength(objectivesLeft.copy(), maze, edges)\n MSTLengths[str(objectivesLeft)] = length\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n gVal= gFunction[currentState] + 1\n\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n\n hFunction = []\n for j in objectivesLeft:\n hFunction.append(abs(j[0] - i[0]) + abs(j[1] - i[1]) + length) # use MST length + manhatten distance to nearest objective as heuristic.\n\n hVal = min(hFunction)\n\n neighbor.setfFunction(gFunction[neighbor] + hVal)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def test_weight_based_ties(self, dim):\r\n graph = 
nx.barbell_graph(dim, 0)\r\n subgraph = graph.nodes()\r\n weights = [1] * dim + [2] * dim\r\n\r\n c = clique.shrink(subgraph, graph, node_select=weights)\r\n assert c == list(range(dim, 2 * dim))", "def evalPart(origTopo, capacities, subTopos):\r\n # print \"\\t===== Evaluate Partitioning =====\"\r\n numTopos = len(subTopos)\r\n numPM = len(capacities)\r\n if (numTopos > numPM):\r\n logger.error(\"Number of sub topologies does not match number of PMs\")\r\n exit()\r\n \r\n weights = {x:0 for x in range(numPM)}\r\n cutWeights = {x:0 for x in range(numPM)}\r\n subLinks = list(itertools.chain(*[subTopos[x].links(sort=True) for x in range(numTopos)]))\r\n cuts = [x for x in origTopo.links(sort=True) if x not in subLinks]\r\n\r\n for i in range(numTopos):\r\n weights[i] = calcTopoWeight(subTopos[i])\r\n cutWeights[i] = 0\r\n\r\n # for i in range(numTopos):\r\n # weights[i] = 0.0\r\n # for link in subTopos[i].links():\r\n # if origTopo.isSwitch(link[0]) and origTopo.isSwitch(link[1]):\r\n # weights[i] = weights[i] + subTopos[i].linkInfo(link[0], link[1])[\"bw\"]\r\n \r\n for link in cuts:\r\n for i in range(numTopos):\r\n if link[0] in subTopos[i].switches() or link[1] in subTopos[i].switches():\r\n weights[i] = weights[i] + origTopo.linkInfo(link[0], link[1])[\"bw\"]\r\n cutWeights[i] = cutWeights[i] + origTopo.linkInfo(link[0], link[1])[\"bw\"]\r\n\r\n return [weights, cutWeights]\r\n # return sorted(weights.values(), reverse=True)\r\n # wSum = sum(weights.values())\r\n # print \"\\tPart\\tCap\\tWeight\\tFraction\"\r\n # for x in range(numPM):\r\n # print \"\\t%d\\t%.1f\\t%.1f\\t%.4f\" % (x, capacities[x], weights[x], weights[x]/wSum)\r", "def mine_subgraph(database, projection, dfs_codes, minsup, length, mapper, feature_selection_model):\n\t# test min_support for this pattern *projection*\n\tnsupport = count_support(projection)\n\tif nsupport < minsup:\n\t\treturn dfs_codes\n\tif not is_min(dfs_codes):\n\t\treturn dfs_codes\n\tstopping = evaluate_and_prune(dfs_codes, mapper, projection, length, feature_selection_model)\n\tif stopping:\n\t\treturn dfs_codes\n\n\t# show_subgraph(dfs_codes, nsupport, mapper)\n\tright_most_path = build_right_most_path(dfs_codes)\n\tmin_label = dfs_codes[0].from_label\t# dfs_codes[0] is the starting pattern of this search (root),\n\t# it has the minimum node label (because reversed sorted before starting search)\n\t\n\tpm_backward, pm_forward = genumerate(projection, right_most_path, dfs_codes, min_label, database, mapper)\n\n\tfor pm in sorted(pm_backward, key=dfs_code_backward_compare):\n\t\tdfs_codes.append(pm)\n\t\tdfs_codes = mine_subgraph(database, pm_backward[pm], dfs_codes, minsup, length, mapper, feature_selection_model)\n\t\tdfs_codes.pop()\n\n\tfor pm in reversed(sorted(pm_forward, key=dfs_code_forward_compare)):\n\t\tdfs_codes.append(pm)\n\t\tdfs_codes = mine_subgraph(database, pm_forward[pm], dfs_codes, minsup, length, mapper, feature_selection_model)\n\t\tdfs_codes.pop()\n\n\treturn dfs_codes", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # fringe priority queue\n fringe = util.PriorityQueue()\n fringe.push([problem.getStartState()],1) # fringe will have (priority, order, [s0,s1,..])\n\n # closed set\n closed = []\n\n i = 0\n while not fringe.isEmpty():\n\n # get highest priority path for expansion e.g. [s0,s2,s4]\n path_exp = fringe.pop()\n\n # take last node in path e.g. 
s4\n node_exp = path_exp[-1]\n\n # check goal state\n if problem.isGoalState(node_exp): # check if goal\n actions = actions_for_path(problem,path_exp)\n #import pdb; pdb.set_trace()\n return actions\n\n # add expanded node into closed set e.g. [s0,s1,s2]\n if node_exp not in closed:\n closed.append(node_exp)\n else:\n # if it's in the closed set, don't expand\n continue\n\n # get sucessors to expand fringe\n successors = problem.getSuccessors(node_exp)\n for successor in successors:\n # unpack states, actions\n ss,aa,_ = successor\n if ss not in closed:\n path = path_exp+[ss]\n # expand fringe by adding candidate paths, prioritize by len of path\n fringe.push(path,len(path))\n\n #i+=1\n if i==1000:\n import pdb; pdb.set_trace()\n\n util.raiseNotDefined()", "def greedy(initial_state, heuristic, dimension=3):\n\n\tdef add_cost(node):\n\t\tnode.cost = heuristic(node.state)\n\n\treturn search(initial_state, Frontier(PriorityQueue), dimension, cost_fn=add_cost)", "def greedy(constraint, indexes, m_l, parallel=False):\n\n selected = np.array([])\n plot = False\n choices = np.array(indexes)\n bar = ChargingBar(\"Calculating index set with greedy method\", max=m_l)\n\n for i in range(len(selected), m_l):\n # print(\"i = %d\" % i)\n start = time.time()\n\n def calc(node):\n return constraint(np.union1d(selected, node))\n\n if parallel:\n pickle_fix.calc = calc\n available_cores = odin.config.get(\"available_cores\", 4)\n pool = multiprocessing.Pool(processes=available_cores)\n values = pool.map(pickle_fix, choices)\n pool.close()\n else:\n # values: [float]\n values = list(map(calc, choices))\n\n greedy_choice = choices[np.argmax(values)]\n\n if plot:\n values = np.sort(values)\n oplt.plot(values)\n oplt.show()\n # current_best = np.max(values)\n\n selected = np.union1d(selected, [greedy_choice])\n choices = np.setdiff1d(choices, [greedy_choice])\n logging.debug(\"selected = %s; choice = %s; time = %.5f\" % (\n selected, greedy_choice, time.time() - start))\n bar.next()\n bar.finish()\n\n return selected", "def create_q(size, b, order, m):\n s = np.matrix(np.zeros(size))\n for i in order:\n s[0,i] = 1\n a = np.dot(np.dot(s, b), s.T)\n a = a * (1. / (2. 
* m))\n return a.item(0)", "def bk_p(g,p,r,x, counter):\n print(\"counter:\\t\", counter)\n print(\"p:\\t\", p)\n print(\"r:\\t\", r)\n print(\"x:\\t\", x)\n result = []\n pux = set(p).union(set(x))\n if len(pux) == 0:\n print(\"return r: \", r)\n return r\n else:\n pivot = list(pux)[0]\n pN = [n for n in g.neighbors(pivot)]\n p_copy = copy.deepcopy(p)\n print(\"P_COPY\",p_copy)\n print(\"P_N\",pN)\n for n in pN:\n p_copy.remove(n)\n for v in p_copy:\n print(\"v: \", v)\n vNeighbors = [a for a in g.neighbors(v)]\n print(\"vNeighbors: \\t\", vNeighbors)\n # pnnv, ruv, xnnv\n print(\"================================\")\n result.append(bk_p(g, intersection(p,vNeighbors), r+[v], intersection(x, vNeighbors), counter+1))\n print(\"================================\")\n print(\"result:\\t\", result, \"\\tv: \", v)\n p.remove(v)\n x.append(v)\n print(\"fp:\\t\", p)\n print(\"fr:\\t\", r)\n print(\"fx:\\t\", x)\n return result\n\n def bk_p2(g,r,p,x, counter=0):\n \"\"\"\n Bron-Kerbosch algorithm without pivots (implemented with python sets)\n g: an nx graph\n r: disjoint set of vertices of graph g\n p: disjoint set of vertices of graph g\n x: disjoint set of vertices of graph g\n \"\"\"\n pux = p.union(x)\n if not pux:\n print('Maximal clique found: ', r)\n\n # choose an pivot from pux\n pivot = next(iter(pux))\n neighborsP = list(g.neighbors(pivot))\n for v in p.difference(neighborsP):\n neighborsV = list(g.neighbors(v))\n bk_p(g, r.union([v]), p.intersection(neighborsV), x.intersection(neighborsV), counter+1)\n p.remove(v)\n x.add(v)", "def test_expected_growth(self):\r\n\r\n graph = nx.lollipop_graph(4, 1)\r\n graph.add_edge(4, 2)\r\n\r\n c = [3, 4]\r\n result = clique.search(c, graph, iterations=100)\r\n assert result == [0, 1, 2, 3]", "def bfs(initial_state, dimension=3):\n\t\n\treturn search(initial_state, Frontier(Queue), dimension)", "def __call__(self, graph: Data, n_min: int, nodes_to_keep: List[int] = None, exhaustive: bool = False):\n nodes_to_keep = nodes_to_keep if nodes_to_keep is not None else []\n mcts = self._get_mcts(graph, n_min, nodes_to_keep, exhaustive)\n\n for iteration in range(self.m):\n mcts.search_one_iteration()\n\n explanation = mcts.best_leaf_node()\n\n return explanation.node_set, mcts", "def __call__(self, circuit: Circuit):\n gates_qubits_pairs = find_gates_qubits_pairs(circuit)\n if len(gates_qubits_pairs) < 3:\n raise_error(\n ValueError, \"Circuit must contain at least two two qubit gates to implement subgraph placement.\"\n )\n circuit_subgraph = nx.Graph()\n circuit_subgraph.add_nodes_from(range(self.connectivity.number_of_nodes()))\n matcher = nx.algorithms.isomorphism.GraphMatcher(self.connectivity, circuit_subgraph)\n i = 0\n circuit_subgraph.add_edge(gates_qubits_pairs[i][0], gates_qubits_pairs[i][1])\n while matcher.subgraph_is_monomorphic() == True:\n result = matcher\n i += 1\n circuit_subgraph.add_edge(gates_qubits_pairs[i][0], gates_qubits_pairs[i][1])\n matcher = nx.algorithms.isomorphism.GraphMatcher(self.connectivity, circuit_subgraph)\n if (\n self.connectivity.number_of_edges() == circuit_subgraph.number_of_edges()\n or i == len(gates_qubits_pairs) - 1\n ):\n keys = list(result.mapping.keys())\n keys.sort()\n return {i: result.mapping[i] for i in keys}\n return dict(sorted(result.mapping.items()))", "def FuzzyBallGraph(partition, q):\n from sage.graphs.generators.basic import CompleteGraph\n if len(partition)<1:\n raise ValueError(\"partition must be a nonempty list of positive integers\")\n n=q+sum(partition)\n g=CompleteGraph(n)\n 
curr_vertex=0\n for e,p in enumerate(partition):\n g.add_edges([(curr_vertex+i, 'a{0}'.format(e+1)) for i in range(p)])\n curr_vertex+=p\n return g", "def solve(list_of_kingdom_names, starting_kingdom, adjacency_matrix, params=[]):\n\n #A = adjacency matrix, u = vertex u, v = vertex v\n def weight(A, u, v):\n return A[u][v]\n\n #A = adjacency matrix, u = vertex u\n def adjacent(A, u):\n L = []\n for x in range(len(A)):\n if A[u][x] > 0 and x != u and A[u][x] != 'x':\n L.insert(0,x)\n return L\n\n #Q = min queue\n def extractMin(Q):\n q = Q[0]\n Q.remove(Q[0])\n return q\n\n #Q = min queue, V = vertex list\n def decreaseKey(Q, K):\n for i in range(len(Q)):\n for j in range(len(Q)):\n if K[Q[i]] < K[Q[j]]:\n s = Q[i]\n Q[i] = Q[j]\n Q[j] = s\n\n #V = vertex list, A = adjacency list, r = root\n def prim(V, A, r):\n u = 0\n v = 0\n\n # initialize and set each value of the array P (pi) to none\n # pi holds the parent of u, so P(v)=u means u is the parent of v\n P=[None]*len(V)\n\n # initialize and set each value of the array K (key) to some large number (simulate infinity)\n K = [999999]*len(V)\n\n # initialize the min queue and fill it with all vertices in V\n Q=[0]*len(V)\n for u in range(len(Q)):\n Q[u] = V[u]\n\n # set the key of the root to 0\n K[r] = 0\n decreaseKey(Q, K) # maintain the min queue\n\n # loop while the min queue is not empty\n while len(Q) > 0:\n u = extractMin(Q) # pop the first vertex off the min queue\n\n # loop through the vertices adjacent to u\n Adj = adjacent(A, u)\n for v in Adj:\n w = weight(A, u, v) # get the weight of the edge uv\n\n # proceed if v is in Q and the weight of uv is less than v's key\n if Q.count(v)>0 and w < K[v]:\n # set v's parent to u\n P[v] = u\n # v's key to the weight of uv\n K[v] = w\n decreaseKey(Q, K) # maintain the min queue\n return P\n\n\n # graph is a list of kingdoms that previous i is the parent of j where j = i + 1 \n graph = prim(adjacency_matrix, list_of_kingdom_names, starting_kingdom)\n\n # key = parent, value = children\n g = {}\n\n for x in range(len(list_of_kingdom_names)):\n g[x] = []\n\n for x in range(len(graph)):\n for y in range(len(graph)):\n if x == graph[y]:\n g[x].append(y) \n\n\n def path(k):\n if not g[k]:\n return [k]\n\n lst = [k]\n\n for child in g[k]:\n lst += path(child) + [k]\n # print(lst)\n\n return lst\n\n\n full_path = path(starting_kingdom)\n\n # print(full_path)\n\n\n\n # return closed_walk, conquered_kingdoms", "def _recursive_cutting(g, p, res=[]):\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res", "def _fill_graph_score(graph: GraphDatabase, session: Session) -> None:\n _LOGGER.info(\"Computing graph score for each package\")\n\n subgraphs = deque()\n\n # The very first walk will mark down libraries that do not have any dependencies.\n for package_name in graph.get_python_package_version_names_all(distinct=True):\n dependencies = graph.get_depends_on_package_names(package_name)\n subgraphs.append(SubGraphEntity(subgraph_name=package_name, to_visit=set(dependencies)))\n if not dependencies:\n entry = session.query(Package).filter(Package.package_name == package_name).first()\n if not entry:\n # Might be ingesting in the mean time, do not mark down and continue.\n continue\n\n entry.subgraph_size = entry.version_count\n session.commit()\n else:\n 
subgraphs.append(SubGraphEntity(subgraph_name=package_name, to_visit=set(dependencies)))\n\n while subgraphs:\n subgraph = subgraphs.popleft()\n\n for package_name in subgraph.to_visit:\n entry = session.query(Package).filter(Package.package_name == package_name).first()\n if not entry:\n _LOGGER.warning(\"Cannot score subgraph %r as not all the dependencies were resolved\", package_name)\n break\n\n if entry.subgraph_size is None:\n # Scheduling for the next round.\n subgraphs.append(subgraph)\n break\n\n subgraph.subgraph_size *= entry.subgraph_size * entry.version_count\n subgraph.subgraphs_seen.add(package_name)\n else:\n entry = session.query(Package).filter(Package.package_name == subgraph.subgraph_name).first()\n if not entry:\n _LOGGER.error(\"No subgraph for %r found, this looks like a programming error\")\n continue\n\n entry.subgraph_size = subgraph.subgraph_size\n session.commit()\n\n subgraph.to_visit -= subgraph.subgraphs_seen", "def bfs(level):\n length = len(queue)\n print \"Length of queue: \" + str(length) + \" at level \" + str(level)\n if length <= 0 or level <= 0:\n return\n i = 0\n while i < length:\n try:\n text = req_obj.get_html_text(queue[0])\n if text is None:\n raise requests.RequestException()\n add_links_to_queue(text, queue[0])\n\n # summary generated using summarizer1\n sum_obj.create_and_index_summary(\n req_obj.get_base_url(), text)\n\n # summary generated using summarizer2\n sum_obj2.create_and_index_summary(\n req_obj.get_base_url(), text)\n on_pg_sum.index_on_page_summary(text, queue[0])\n except requests.RequestException as trace:\n print str(trace) + '\\n'\n er_file.write(queue[0] + '\\n')\n er_file.write(str(trace) + '\\n\\n')\n queue.pop(0)\n i += 1\n bfs(level - 1)" ]
[ "0.53520167", "0.5279833", "0.522712", "0.5184884", "0.5166358", "0.513863", "0.5096856", "0.5012815", "0.49687812", "0.49585232", "0.4911081", "0.48797333", "0.48594335", "0.4851185", "0.4849475", "0.48467082", "0.48397836", "0.48288202", "0.4805648", "0.4793294", "0.47791773", "0.47486714", "0.47409627", "0.4721213", "0.4697416", "0.4695774", "0.46881223", "0.46845886", "0.46609658", "0.46558526" ]
0.695425
0
Show the back button only if we were already on the gites website
def backButtonAvailable(self):
    referer = self.request.get('HTTP_REFERER')
    if not referer:
        return False
    portalUrl = getToolByName(self.context, 'portal_url')()
    if referer and referer.startswith(portalUrl):
        return True
    return False
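The same referer check can be exercised outside of Plone; the sketch below is an illustrative stand-alone version (the function name and URLs are made up for the example and are not part of the record above):

def back_button_available(referer, portal_url):
    # Show a "back" link only when the previous page was on our own site.
    if not referer:
        return False
    return referer.startswith(portal_url)

# illustrative checks
assert back_button_available('https://example.org/gites/detail', 'https://example.org') is True
assert back_button_available('https://elsewhere.net/page', 'https://example.org') is False
assert back_button_available(None, 'https://example.org') is False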
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def back_button(self):\r\n self.update_settings()\r\n self.is_action = True\r\n if self.back_call is not None:\r\n self.back_call()", "def go_back(self):\n self.hide()", "def go_back(self):\n self.hide()", "def back(self):\n self.log_info(f\"Browser.back: Telling browser to return to previous page\")\n self.CORE.back()\n return", "def back_click(self):\n self.controller.show_user_menu_screen(self.us)", "def back_click(self):\n self.controller.show_account_display_screen(self.us)", "def goBack(self):\n self.hide()", "def go_home(self):\r\n if self.home_url is not None:\r\n self.set_url(self.home_url)", "def i_go_back(self):\n if not world.using_selenium:\n assert False, (\"this step needs to be implemented for the \"\n + \"django test client\")\n world.browser.back()", "def __select_back_btn(self):\n for _ in range(2):\n self.fc.select_back()\n if self.printers.verify_printers_screen(raise_e=False):\n return True\n raise AssertionError(\"printers screen didn't display after clicking back button 2 times\")", "def back(self, gui):\n # parameter: gui -> The GUI that is being used.\n gui.show_frame(home_gui.HomePage)", "def page_back(self):\n self.password_entry.hide_password()\n self.pager_frame.change_to_page(Welcome)", "def back(self,**kwargs):\n self.mm.loadPreviousMenu()", "def ishome(self) -> bool:\n pass", "def back(self):\r\n if self.phone.isFullBlackBox():\r\n self.__navigateToIdle()\r\n return True\r\n\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n if currentApplication != 'evo-home':\r\n self.phone.comment('exit.back()')\r\n if currentApplication == 'ntf-drawer':\r\n self.__backToIdleWithBackPress()\r\n else:\r\n self.__backToIdleWithSwipe()\r\n self.phone.delay(300, False)\r\n self.phone.uiState.getCurrentState(True)\r\n else:\r\n self.phone.comment('exit.back() is not done for %s' % currentApplication)", "def is_on_home_page(self):\n current_url_path = urlparse(self.driver.current_url).path\n if current_url_path == \"/opencart.com/\":\n return True\n return False", "def press_back_navigation(self):\n back_navigation = self.driver.find_element_by_name(self.BACK_NAVIGATION_NAME)\n back_navigation.click()", "def skip(self):\n self.click_back_button()", "def back(self):", "def go_back(self):\n app = App.get_running_app()\n app.sm.current = 'menu'", "def goBack(self):\n self.displayUi = LoginScreen()\n self.hide()\n self.displayUi.show()", "def press_back_button(self):\n self.driver.back()", "def isTopHomePage(self):\n domain = self.getDomain()\n if self.url == \"http://\" + domain + \"/\":\n return True\n if self.url == \"http://www.\" + domain + \"/\":\n return True\n if self.url == \"http://\" + domain:\n return True\n if self.url == \"http://www.\" + domain:\n return True\n return False", "def back_link(context, request):\n\n referrer_path = urlparse.urlparse(request.referrer or \"\").path\n current_username = request.user.username\n\n if referrer_path == request.route_path(\n \"activity.user_search\", username=current_username\n ):\n back_label = _(\"Back to your profile page\")\n elif _matches_route(referrer_path, request, \"group_read\"):\n back_label = _(\"Back to group overview page\")\n else:\n back_label = None\n\n return {\"back_label\": back_label, \"back_location\": request.referrer}", "def home(event: EventType, widget: WidgetType) -> bool:\n return event.key == _locals.K_HOME", "def back(self):\n\n\t\tself.controller.showFrame(self.prevFrame)", "def goToPrevLink():\n if wikiPageStackTrace[-2].getUrl() != \"\":\n oldpage = 
wikiPageStackTrace[-2]\n print(\"going back to \", oldpage.getUrl())\n titleStackTrace.append(oldpage.getTitle())\n urlStackTrace.append(oldpage.getUrl())\n del wikiPageStackTrace[-1]\n update()\n else:\n update()", "def home_checkin():\n\tcheckpremenu()", "def go_back(self):\n self.master.switch_frame(MainView)", "def go_back(self):\n self.master.switch_frame(MainView)" ]
[ "0.66009057", "0.65096796", "0.65096796", "0.6471661", "0.6471332", "0.64020437", "0.6306402", "0.62755764", "0.6258319", "0.6226828", "0.6185653", "0.61637855", "0.61317366", "0.61177963", "0.60919166", "0.6067055", "0.6042377", "0.6030426", "0.5962144", "0.58853775", "0.588251", "0.5881792", "0.5879635", "0.5873712", "0.58735865", "0.5866517", "0.58605695", "0.58601815", "0.5850208", "0.5850208" ]
0.7582404
0
This function predicts the label for a trained one-vs-all classifier. The labels are in the range 1..K, where K = all_theta.shape[0].
def predict_one_vs_all(all_theta, X):
    m = X.shape[0]
    num_labels = all_theta.shape[0]
    X_add = np.append(np.ones((m, 1)), X, axis=1)
    # compute the class probability for each class on each training instance
    h = sigmoid(np.dot(X_add, all_theta.T))
    p = np.argmax(h, axis=1)
    # because our array was zero-indexed we need to add one for the true label prediction
    return p + 1
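A quick numeric check of the argmax-plus-one logic above, assuming the usual logistic sigmoid; the theta matrix and inputs below are made-up toy values, not taken from the record:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

all_theta = np.array([[0.0, 2.0, -1.0],   # row k holds the parameters of class k+1
                      [0.0, -1.0, 2.0],
                      [1.0, 0.5, 0.5]])
X = np.array([[3.0, 0.0],    # should score highest for class 1
              [0.0, 3.0]])   # should score highest for class 2
X_add = np.append(np.ones((X.shape[0], 1)), X, axis=1)
h = sigmoid(np.dot(X_add, all_theta.T))
print(np.argmax(h, axis=1) + 1)   # -> [1 2]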
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_one_vs_all(all_theta, X):\n m = X.shape[0]\n num_labels = all_theta.shape[0]\n\n # You need to return the following variables correctly\n p = np.zeros((m, 1))\n\n # Add ones to the X data matrix\n X = np.hstack((np.ones((m, 1)), X))\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Complete the following code to make predictions using\n # your learned logistic regression parameters (one-vs-all).\n # You should set p to a vector of predictions (from 1 to\n # num_labels).\n #\n # Hint: This code can be done all vectorized using the np.argmax method.\n\n # =========================================================================\n return p", "def predictOneVsAll(all_theta, X):\n\n\tm = X.shape[0]\n\n\t# You need to return the following variables correctly\n\tp = np.zeros((m, 1))\n\n\t# ====================== YOUR CODE HERE ======================\n\t# Instructions: Complete the following code to make predictions using\n\t# your learned logistic regression parameters (one-vs-all).\n\t# You should set p to a vector of predictions (from 1 to\n\t# num_labels).\n\t#\n\t# Hint: This code can be done all vectorized using the max function.\n\t# In particular, the np.argmax function can return the index of the max \n\t# element, for more information see 'numpy.argmax' on the numpy website. \n\t# If your examples are in rows, then, you can use \n\t# np.argmax(probs, axis=1) to obtain the max for each row.\n\n\tprobas = X @ all_theta.T\n\tp = np.argmax(probas, axis=1).reshape(m, 1) + np.ones((m, 1))\n\treturn p.flatten()", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def predict(theta, X):\n label_array = np.array([]) # initial array\n result_array = sigmoid(X.dot(theta))\n for i in result_array:\n if i >= 0.5:\n label_array= np.append(label_array, 1)\n else:\n label_array= np.append(label_array, 0)\n return label_array", "def predict_one_vs_all(all_theta, X):\n m, n = X.shape\n X = np.hstack((np.ones((m, 1)), X))\n p = np.argmax(X.dot(all_theta.T), axis=1)\n p[p == 0] = 10\n\n return p", "def predict(theta1, theta2, X):\n # Useful values\n m = X.shape[0]\n num_labels = theta2.shape[0]\n\n # compute the class probability for each class on each training instance\n a1 = np.append(np.ones((m,1)), X, axis = 1)\n a2 = np.append(np.ones((m,1)), sigmoid(np.dot(a1, theta1.T)), axis = 1)\n a3 = sigmoid(np.dot(a2, theta2.T))\n \n # because our array was zero-indexed we need to add one for the true label prediction\n p = np.argmax(a3, axis=1)\n return p+1", "def predict(self, unknown):\n for title in unknown:\n for ind in range(len((unknown[list(unknown.keys())[0]]))):\n unknown[title][ind] = (unknown[title][ind] - self.normalization_n[ind]) / (self.normalization_d[ind])\n print(unknown)\n unknown_labels = {}\n for title in unknown:\n neighbors = self.k_neighbors(unknown[title], self.dataset, self.k)\n unknown_labels[title] = self.rate(neighbors, self.labels)\n return unknown_labels", "def predict(self, X_test):\n\n # Predict Label 0\n i = 0\n X = X_test\n\n # Retrieve trained classifier for label 0\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n result = np.zeros((X_test.shape[0], self.label_dim))\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n # iterator = tqdm.trange(1, self.label_dim)\n iterator = range(1, self.label_dim)\n for i in iterator:\n # Predict Label i\n\n # Retrieve trained classifier for label i\n clf = self.trained[i]\n\n 
# Make prediction\n y = clf.predict(X)\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n return result", "def predict_with_tree(self, X, ball_tree):\n predicted_y = np.zeros(X.shape[0])\n for i in range(X.shape[0]):\n neighbours = []\n ball_tree.knn_search(X[i], self.k_nearest, neighbours)\n votes = {}\n for neighbour in neighbours:\n if neighbour.classification in votes:\n votes[neighbour.classification] += 1\n else:\n votes[neighbour.classification] = 1\n predicted_y[i] = max(votes, key=votes.get)\n return predicted_y", "def predict(self, X): \n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n # Input validation\n X = check_array(X)\n\n j= 0\n predicted_labels = np.array([])\n while(j < X.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < X.shape[0] else X.shape[0]\n current_batch = X[j:current_batch_end]\n self._feedforward(current_batch)\n predicted_labels = np.append(predicted_labels, np.take(self.map_labels, self.bmu_indices))\n j = current_batch_end\n \n return predicted_labels", "def _predict(self, classify: np.array, n_preds=1):\r\n tmp = classify.argsort()[:, :n_preds] # Return the index of the best label classification\r\n preds = copy(tmp) # allow to copy tmp\r\n for index, target in enumerate(self.targets):\r\n preds = np.where(tmp == index, target, preds) # Return the target label corresponding to the index\r\n self.preds = preds", "def one_vs_all(X: np.ndarray, y: np.ndarray, num_labels: int, l: float) -> np.ndarray:\n\n # Some useful variables\n m, n = X.shape\n\n # You need to return the following variables correctly\n all_theta = np.zeros((num_labels, n + 1))\n\n # Add ones to the X data matrix\n X = np.hstack((np.ones((m, 1)), X))\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: You should complete the following code to train num_labels\n # logistic regression classifiers with regularization\n # parameter l.\n\n # =============================================================\n\n return all_theta", "def infer_data_labels(X_labels, cluster_labels):\r\n #Empty array of len(X)\r\n predicted_labels = np.zeros(len(X_labels)).astype(np.uint8)\r\n \r\n for i, cluster in enumerate(X_labels):\r\n for key, value in cluster_labels.items():\r\n if cluster in value:\r\n predicted_labels[i] = key\r\n \r\n return predicted_labels", "def _predict_label(self, df_train, df_test, label=None):\n #train k-nearest neighbors classifier \n neigh = KNeighborsClassifier(n_neighbors=5)\n X, y = df_train[['longitude', 'latitude']], df_train[label]\n neigh.fit(X, y)\n #predict the label for wildfire incidents\n pred_label = neigh.predict(df_test[['longitude', 'latitude']])\n return pred_label", "def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_", "def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def predict_label(examples_set):\n all_labels = list(('yes', 'no'))\n prediction = 'no'\n\n for label in all_labels:\n all_same_label = True\n for example in examples_set:\n if example[14] != label:\n all_same_label = False\n break\n if all_same_label:\n prediction = label\n break\n return prediction", "def predict(self, features):\n vec = 
vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n label = self.clf.predict(vec)\n # print label\n return self.labelmap[label[0]]", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X, k=1):\n dists = self.compute_distances(X)\n return self.predict_labels(dists, k=k)", "def predict(self):\n train_array = np.array(self.labels != 0, dtype=float)\n if not self.ising:\n labels_logit = self.ising_weights['vertices']\n else:\n neigh_num = self.adj.dot(train_array)\n neigh_num = np.where(neigh_num == 0, 1, neigh_num)\n neigh_weights = self.ising_weights['edges'] * self.labels\n labels_logit = (np.multiply(neigh_weights, neigh_num**(-1))\n + self.ising_weights['vertices'])\n self.prediction = np.where(labels_logit > 0, 1, -1)\n return self", "def predict(self, X):", "def predict(self, X):", "def _predict(self, X):\n predictions = np.asarray([clf.predict(X) for clf in self.clfs_]).T\n predicted_labels = self.combiner.combine(predictions)\n return predicted_labels", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def oneVsAll(X, y, num_labels, lbd):\n m, n = X.shape\n y = y.ravel()\n\n # You need to return the following variables correctly \n all_theta = np.zeros((num_labels, n + 1))\n print(all_theta.shape)\n\n # Add ones to the X data matrix\n X = np.column_stack((np.ones((m,1)), X))\n print(X.shape)\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: You should complete the following code to train num_labels\n # logistic regression classifiers with regularization\n # parameter lambda. \n\n for c in range(num_labels):\n\n # initial theta for c/class\n initial_theta = np.zeros((n + 1, 1))\n print(initial_theta.shape)\n\n print(\"Training {:d} out of {:d} categories...\".format(c+1, num_labels))\n\n myargs = (X, (y%10==c).astype(int), lbd)\n theta = minimize(lrcf.lrCostFunction, x0=initial_theta, args=myargs, options={'disp': True, 'maxiter':13}, method=\"Newton-CG\", jac=True)\n\n all_theta[c,:] = theta[\"x\"]\n\n return all_theta", "def predict(self, x):\n pred_labels = np.zeros((x.shape[0], 10))\n\n N = len(self.NET)\n for i in range(N):\n\n inputs = self.apply_dct_permutation(x.copy(), self.permutation[i])\n pred_labels += self.NET[i].model.predict(inputs)\n\n return pred_labels" ]
[ "0.7766958", "0.73554176", "0.72547424", "0.71412677", "0.71165025", "0.6996137", "0.69605386", "0.6868233", "0.685735", "0.6855751", "0.6845887", "0.6840902", "0.6798085", "0.678235", "0.67749465", "0.67749465", "0.67486465", "0.6730217", "0.6724619", "0.66748214", "0.66748214", "0.66748214", "0.6666309", "0.6661703", "0.66593283", "0.66593283", "0.66425955", "0.6639574", "0.6636604", "0.6619634" ]
0.8260868
0
Goes through the output queue and calculates an answer based on RPN (reverse Polish notation). This is achieved by using a stack to store intermediate results while checking each item in the output queue.
def RPN(self):
    stack = Stack()
    while not self.output_queue.is_empty():
        item = self.output_queue.pop()
        if isinstance(item, numbers.Number):
            stack.push(item)
        elif isinstance(item, Function):
            stack.push(item.execute(stack.pop()))
        elif isinstance(item, Operator):
            num2 = stack.pop()
            num1 = stack.pop()
            stack.push(item.execute(num1, num2))
    return stack.pop()
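A self-contained sketch of the same stack-based evaluation, using a plain Python list and operator functions in place of the Stack, Queue, Function and Operator classes assumed above (the example expression is illustrative):

import operator

def eval_rpn(output_queue):
    stack = []
    for item in output_queue:
        if callable(item):            # binary operator: pop two operands, push the result
            num2 = stack.pop()
            num1 = stack.pop()
            stack.append(item(num1, num2))
        else:                         # number: push it
            stack.append(item)
    return stack.pop()

print(eval_rpn([3, 4, operator.add, 2, operator.mul]))   # (3 + 4) * 2 -> 14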
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\r\n eq = input(\"Input an equation: \")\r\n splitList = (mysplit(eq))\r\n operandsList = []\r\n #This loop takes in the split list and adds to a list without operators\r\n for operand in splitList:\r\n if operand == '+' or operand == '-' or operand == '*' or operand == '/':\r\n continue\r\n operandsList.append(operand)\r\n operatorsList = []\r\n #This loop takes in the split list and adds to a list without digits\r\n for operator in splitList:\r\n if operator.isdigit() is True:\r\n continue\r\n operatorsList.append(operator)\r\n #variable to check if the operator is allowed\r\n operatorChecker = False\r\n for sign in operatorsList:\r\n if sign == '+' or sign == '-' or sign == '/' or sign == '*':\r\n operatorChecker = True\r\n else:\r\n operatorChecker = False\r\n operandsDigits = ''.join(operandsList)\r\n #this checks if the operands are digits\r\n operandsChecker = str.isdigit(operandsDigits)\r\n #check if equation contains division with 0\r\n if '/ 0' in eq:\r\n zeroChecker = False\r\n else:\r\n zeroChecker = True\r\n\r\n #if conditions for the\r\n if operandsChecker is False or operatorChecker is False or zeroChecker is False:\r\n print(\"Invalid Input\")\r\n else:\r\n stack, queue = parseNumbers(eq)\r\n stackAnswer = calculateStack(stack)\r\n queueAnswer = calculateQueue(queue)\r\n print(\"Queue total:\", queueAnswer)\r\n print(\"Stack total:\", stackAnswer)\r\n if queueAnswer == stackAnswer:\r\n print(\"They do match!\")\r\n else:\r\n print(\"They do not match!\")", "def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))", "def calculateQueue(queue):\r\n temp = int(front(queue))\r\n dequeue(queue)\r\n while not emptyQueue(queue):\r\n operation = front(queue)\r\n dequeue(queue)\r\n operandTwo = int(front(queue))\r\n dequeue(queue)\r\n temp = calculate(temp, operandTwo, operation)\r\n return temp", "def inference(self, assignment, queue):\n # Do this as long as there is elements in the queue\n # e.g there is still more arcs to check \n while queue:\n # Pop the first element in the queue\n xi, xj = queue.pop(0)\n # Do the revise check \n if self.revise(assignment, xi, xj):\n # IF zero, CSP has no consistent soluton and AC-3 returns failure \n if len(assignment[xi]) == 0:\n return False\n # If NOT ZERO loop throuh the neighboring arcs of node\n # and append the neighbor and this node to the queue for further checking.\n # We do this so that we keep checking after we do changes and make sure \n # all is gucci gang\n for n in self.get_all_neighboring_arcs(xi):\n if n[0] != xj:\n queue.append((n[0], xi))\n return True", "def final_result(self, board):\n if board.myMarbles>board.opMarbles:\n reward = self.win_value + self.myMarbles - self.opMarbles\n elif board.myMarbles == board.opMarbles:\n reward = self.draw_value\n else:\n reward = self.loss_value + self.myMarbles-self.opMarbles\n self.game_counter += 1\n self.add_game_to_replay_buffer(reward)\n\n # If we are in training mode we run the optimizer.\n if self.training and (self.game_counter > self.pre_training_games):\n\n batch_third = self.batch_size // 3\n train_batch = self.replay_buffer_win.sample(batch_third)\n train_batch.extend(self.replay_buffer_loss.sample(batch_third))\n train_batch.extend(self.replay_buffer_draw.sample(batch_third))\n train_batch = np.array(train_batch)\n\n #\n # Let's compute the target q values for all non terminal move\n # We extract the resulting state, run it through the target net work and\n # get the maximum q value (of all 
valid moves)\n next_states = [s[2] for s in train_batch if s[2] is not None]\n # print('current board\\n', board)\n # print('next_states', next_states)\n target_qs = []\n\n if len(next_states) > 0:\n firstInput = [self.board_state_to_nn_input(s) for s in next_states]\n # print(firstInput)\n firstInput = np.asarray(firstInput).reshape(20, 1,2,6)[0]\n # print(firstInput.shape)\n # for i in next_states:\n # print(i[0])\n # print(i[1])\n # input()\n probs, qvals = self.get_valid_probs(firstInput,\n self.target_net, [Board(s[0], s[1]) for s in next_states], True)\n # print(probs)\n # print(qvals)\n # input()\n probs=probs[0]\n qvals=qvals[0]\n # print(qvals)\n i = 0\n for t in train_batch:\n if t[2] is not None:\n # print(t[2])\n # print(probs)\n # input()\n max_move = np.argmax(probs)\n max_qval = qvals[max_move]\n target_qs.append(max_qval * self.reward_discount)\n i += 1\n else:\n target_qs.append(t[3])\n\n if i != len(next_states):\n (\"Something wrong here!!!\")\n else:\n target_qs.extend(train_batch[:, 6])\n\n # We convert the input states we have recorded to feature vectors to feed into the training.\n nn_input = [self.board_state_to_nn_input(x[0]) for x in train_batch]\n actions = train_batch[:, 1]\n\n # We run the training step with the recorded inputs and new Q value targets.\n # print(self.q_net.merge.shape)\n # print(self.q_net.train_step.shape)\n # print(np.asarray([self.q_net.merge, self.q_net.train_step]).shape)\n # print(self.q_net.input_positions.shape)\n # print(nn_input.shape)\n # print(self.q_net.target_q.shape)\n # print(target_qs.shape)\n # print(self.q_net.actions.shape)\n # print(actions.shape)\n # print(type(nn_input))\n summary, _ = TFSN.get_session().run([self.q_net.merge, self.q_net.train_step],\n feed_dict={self.q_net.input_positions: np.asarray(nn_input).reshape(20,1,2,6),\n self.q_net.target_q: target_qs,\n self.q_net.actions: actions})\n self.random_move_prob *= self.random_move_decrease\n\n if self.writer is not None:\n self.writer.add_summary(summary, self.game_counter)\n summary = tf.Summary(value=[tf.Summary.Value(tag='Random_Move_Probability',\n simple_value=self.random_move_prob)])\n self.writer.add_summary(summary, self.game_counter)\n\n TFSN.get_session().run(self.graph_copy_op)", "def algorithm(self):\n convergence_threshold = 50\n reward_num_threshold = 300\n alpha = 1\n gamma = 0.5\n while (self.reward_num < reward_num_threshold) and (self.count<convergence_threshold):\n print('------')\n print('Iteration', self.reward_num, '/', reward_num_threshold)\n print('Iterations w/out Q-update:', self.count, '/', convergence_threshold)\n # select a possible action (any of them; all are valid)\n s = self.get_state_num()\n print(\"Initial state:\", s)\n a = random.choice(np.arange(3))\n self.apply_action(a)\n while self.reward == None:\n #print(\"Sleeping to wait for reward\")\n rospy.sleep(0.5)\n reward = self.reward\n print(\"REWARD =\", reward)\n self.reward = None\n if reward == 0:\n next_state = self.get_state_num()\n mx = np.amax(self.Q[next_state])\n else:\n ## There is no next state if nonzero reward seen\n mx = 0\n update = self.Q[s][a] + alpha*(reward+gamma*mx-self.Q[s][a])\n if self.Q[s][a] != update:\n print(\"Update Q matrix\")\n self.Q[s][a] = update\n self.count = 0\n else:\n self.count += 1\n\n print(\"Finished calculating Q-Matrix\\n\\n\\n\\n\\n\\n\\n\")", "def evaluate_postfix(list_input):\n stack_values = []\n\n for item in list_input:\n # debug stuff\n # print \"item\", item\n try:\n item_value = float(item)\n has_value = True\n except 
ValueError:\n has_value = False\n\n # value, operand, put on stack\n if has_value:\n stack_values.append(item_value)\n has_value = False\n\n # operator, pull two operands from stack\n elif (has_value == False\n and len(stack_values) >= 2):\n second_value = stack_values.pop()\n first_value = stack_values.pop()\n result = evaluate_op(item,\n first_value,\n second_value)\n stack_values.append(result)\n # debug stuff\n # print \"midstep\", result\n\n return stack_values.pop()", "def evaluatePostfix(postfix, variableList, variableLocation, methodVariables, output):\n\n stack = [] # Stack that will contain our pushed operands from the postfix expression\n immediateCount = 0 # Keeps count of how many immediate values are being expressed (not variables)\n sourceRegister = 1 # Source register starts at 1: \"B\", and increments as needed\n destRegister = 0 # Destination register starts at 0: 'A\" and increments as needed\n immFlag = 0 # Used to determine whether source or destination register holds an immediate\n\n for element in postfix:\n # Evaluate each postfix element one by one to determine appropriate action\n\n if sourceRegister > 6 or destRegister > 6:\n # We cap the total amount of registers used to 7 (0-6)\n raise ValueError(\"Too many operands in formula.\")\n\n if element in OPERATIONS:\n # Here, our element is an operator. This means we need to pop the top two values from the stack and\n # execute the given operation.\n operand1, operand2 = stack.pop(), stack.pop()\n\n if operand1 in variableList:\n # The operand is in the list of local variables, so we read the value from memory\n output.write(\" MEMR [4] #\" + str(variableLocation[operand1]) + \" $\" + REGISTERS[sourceRegister] + \"\\n\")\n operand1 = REGISTERS[sourceRegister]\n\n elif operand1 in methodVariables:\n # The operand is in the list of arguments passed into the method. We consult the methodVariables list\n # to determine the appropriate offset from the stack pointer register S2.\n output.write(\" MOV $A2 $S2\\n\")\n output.write(\" ADD #\" + str(int(methodVariables[operand1][1]) * 4) + \" $A2\\n\")\n output.write(\" MEMR [4] $A2 $\" + REGISTERS[sourceRegister] + \"\\n\")\n operand1 = REGISTERS[sourceRegister]\n\n elif operand1 in REGISTER_NAMES:\n # This is simply a register that was pushed onto the stack. We can keep it as is\n pass\n\n else:\n # The operand is an immediate value. We test to see if it's a valid integer\n try:\n isinstance(operand1, int)\n immediateCount += 1\n immFlag = 1\n except ValueError as e:\n raise ValueError(\"Invalid operand\")\n\n if operand2 in variableList:\n # The operand is in the list of local variables, so we read the value from memory\n output.write(\" MEMR [4] #\" + str(variableLocation[operand2]) + \" $\" + REGISTERS[destRegister] + \"\\n\")\n operand2 = REGISTERS[destRegister]\n\n elif operand2 in methodVariables:\n # The operand is in the list of arguments passed into the method. We consult the methodVariables list\n # to determine the appropriate offset from the stack pointer register S2.\n output.write(\" MOV $B2 $S2\\n\")\n output.write(\" ADD #\" + str(int(methodVariables[operand2][1]) * 4) + \" $B2\\n\")\n output.write(\" MEMR [4] $B2 $\" + REGISTERS[destRegister] + \"\\n\")\n operand2 = REGISTERS[destRegister]\n\n elif operand2 in REGISTER_NAMES:\n # This is simply a register that was pushed onto the stack. We can keep it as is\n pass\n\n else:\n # The operand is an immediate value. 
We test to see if it's a valid integer\n try:\n isinstance(operand2, int)\n immediateCount += 1\n immFlag = 2\n except ValueError as e:\n raise ValueError(\"Invalid operand\")\n\n if immediateCount == 2:\n # If we have two immediate values, we don't really need to calculate the arithmetic in Capua ASM.\n # We discretely do the calculations in the background and push the value to the stack. This avoids\n # unnecessary processing.\n try:\n stack.append(int(OPERATIONS[element]['function'](float(operand2), float(operand1))))\n\n except ZeroDivisionError:\n raise ValueError(\"Error: Division by zero! - {} {} {}\".format(operand2, element, operand1))\n\n else:\n if immediateCount == 1:\n # only one of the operands was an immediate value. We determine which one is the immediate value,\n # as the correct instruction output depends on it.\n if immFlag == 1:\n output.write(\" MOV #\" + str(int(operand1)) + \" $\" + REGISTERS[sourceRegister] + \"\\n\")\n operand1 = REGISTERS[sourceRegister]\n\n elif immFlag == 2:\n output.write(\" MOV #\" + str(int(operand2)) + \" $\" + REGISTERS[destRegister] + \"\\n\")\n operand2 = REGISTERS[destRegister]\n\n else:\n # No operands were immediate values. We can do the arithmetic operation as is.\n # We move the source and destination registers up one letter for the next operation\n sourceRegister += 1\n destRegister += 1\n\n output.write(\" \" + INSTRUCTIONS[element] + \" $\" + str(operand1) + \" $\" + str(operand2) + \"\\n\")\n stack.append(operand2)\n\n immediateCount = 0\n\n else:\n # We have an operand to push onto the stack\n stack.append(element)\n\n if len(stack) != 1:\n # If the stack has more than or less than one element, the expression is incorrect.\n raise ValueError(\"invalid expression.\")\n\n # our result is then \"saved\" into register A. 
The assignment can now be completed.\n result = stack.pop()\n\n if result in REGISTER_NAMES:\n # If we just have a register at the bottom of the stack, we assume the result is already in register A\n pass\n\n else:\n try:\n isinstance(int(result), int)\n output.write(\" MOV #\" + str(result) + \" $A\\n\")\n except ValueError as e:\n raise ValueError(\"Invalid mathematical expression\")", "def outer_loop_lp(self, profile, missed_winners):\r\n\r\n # Initialize\r\n stats = self.Stats()\r\n\r\n wmg = profile.getWmg()\r\n known_winners = set()\r\n I = list(wmg.keys())\r\n\r\n G = nx.DiGraph()\r\n G.add_nodes_from(I)\r\n\r\n E = nx.DiGraph()\r\n E.add_nodes_from(I)\r\n for cand1, cand2 in itertools.permutations(wmg.keys(), 2):\r\n if wmg[cand1][cand2] > 0:\r\n E.add_edge(cand1, cand2, weight=wmg[cand1][cand2])\r\n\r\n # print(wmg)\r\n # self.output_graph(E)\r\n\r\n # Add any bridge edges from any tier in E\r\n # These are guaranteed to never be in a cycle, so will always be in the final graph after RP procedure\r\n Gc = G.copy()\r\n Gc.add_edges_from(E.edges())\r\n scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if len(g.edges()) != 0]\r\n bridges = set(Gc.edges()) - set(itertools.chain(*scc))\r\n G.add_edges_from(bridges)\r\n E.remove_edges_from(bridges)\r\n\r\n stats.num_initial_bridges = len(bridges)\r\n\r\n\r\n # Each node contains (G, E, T, P)\r\n # P is path, where each item is of form (G, E, K, a)\r\n # root = Node(value=(self.edges2string(G.edges(), I), self.edges2string(E.edges(), I)))\r\n root = Node(value=(G, E, [], []))\r\n stackNode = []\r\n stackNode.append(root)\r\n\r\n hashtable = set()\r\n\r\n END = self.BEGIN + self.TIMEOUT\r\n\r\n self.missed_winners = set(missed_winners)\r\n\r\n self.data = {}\r\n for w in missed_winners:\r\n self.data[w] = []\r\n\r\n while stackNode:\r\n # Pop new node to explore\r\n node = stackNode.pop()\r\n (G, E, T, P) = node.value\r\n\r\n if time.perf_counter() > END:\r\n print(\"TIMEOUT\")\r\n return sorted(known_winners), stats\r\n\r\n # Check hash\r\n hash_state = self.edges2string(G.edges(), I) + self.edges2string(E.edges(), I) + self.edges2string(T, I)\r\n if hash_state in hashtable:\r\n stats.num_hashes += 1\r\n if self.debug_mode == 3:\r\n print(\"hashed in outer hashtable\")\r\n continue\r\n hashtable.add(hash_state)\r\n\r\n stats.num_nodes += 1\r\n\r\n if self.debug_mode == 3:\r\n print(\"Popped new node: \")\r\n print(\"G:\", sorted(G.edges()))\r\n print(\"E:\", sorted(E.edges()))\r\n print(\"T:\", sorted(T))\r\n\r\n # Flag for whether expanding the current tier required finding max children\r\n f_found_max_children = 0\r\n\r\n # Continue performing RP on this state as long as tie-breaking order doesn't matter\r\n while len(E.edges()) != 0 or len(T) != 0:\r\n if self.stop_conditions(G, E, T, P, I, known_winners, stats) != -1:\r\n # Stop condition hit\r\n break\r\n\r\n if len(T) == 0:\r\n # Get a new tier\r\n (max_weight, max_edge) = max([(d['weight'], (u, v)) for (u, v, d) in E.edges(data=True)])\r\n T = [(u, v) for (u, v, d) in E.edges(data=True) if d['weight'] == max_weight]\r\n E.remove_edges_from(T)\r\n\r\n if self.debug_mode == 3:\r\n print(\"New tier =\", T)\r\n\r\n if len(T) == 1:\r\n # Tier only has one edge, just add it\r\n if self.debug_mode == 3:\r\n print(\"Only 1 edge in tier\")\r\n\r\n if nx.has_path(G, max_edge[1], max_edge[0]) is False:\r\n E.add_edges_from(T)\r\n P.append((self.edges2string(G.edges(), I), self.edges2string(E.edges(), I), known_winners.copy(), max_edge))\r\n 
E.remove_edges_from(T)\r\n G.add_edges_from(T)\r\n continue\r\n\r\n\r\n # Perform reductions every step:\r\n\r\n # Compute \"bridge edges\" which are not in any cycle\r\n Gc = G.copy()\r\n Gc.add_edges_from(T)\r\n scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if len(g.edges()) != 0]\r\n bridges = set(Gc.edges()) - set(itertools.chain(*scc))\r\n G.add_edges_from(bridges)\r\n T = list(set(T) - bridges)\r\n\r\n G_tc = nx.transitive_closure(G)\r\n\r\n # Remove \"inconsistent edges\" that cannot be added to G without causing cycle\r\n reverse_G = nx.DiGraph.reverse(G_tc)\r\n T = list(set(T) - set(reverse_G.edges()))\r\n\r\n # Remove \"redundant edges\": if there is already path from e[0] to e[1], can immediately add e\r\n redundant_edges = set()\r\n for e in T:\r\n if G_tc.has_edge(e[0], e[1]):\r\n redundant_edges.add(e)\r\n G.add_edges_from([e])\r\n stats.num_redundant_edges += len(redundant_edges)\r\n T = list(set(T) - redundant_edges)\r\n\r\n if len(T) == 0:\r\n # No need to find further children, as tier is now empty\r\n if self.debug_mode == 3:\r\n print(\"Tier empty\")\r\n continue\r\n\r\n # Used to break ties\r\n index = 0\r\n\r\n # Add each edge to stack by priority\r\n children = dict()\r\n T = sorted(T)\r\n for e in T:\r\n if not G_tc.has_edge(e[1], e[0]):\r\n f_found_max_children = 1\r\n\r\n Gc = G.copy()\r\n Gc.add_edges_from([e])\r\n Ec = E.copy()\r\n Tc = copy.deepcopy(T)\r\n Tc.remove(e)\r\n Pc = copy.deepcopy(P)\r\n\r\n EUT = E.copy()\r\n EUT.add_edges_from(T)\r\n Pc.append((self.edges2string(G.edges(), I), self.edges2string(EUT.edges(), I), known_winners.copy(), e))\r\n child_node = Node(value=(Gc,Ec,Tc,Pc))\r\n\r\n # LPwinners\r\n G_in_degree = Gc.in_degree(I)\r\n potential_winners = set([x[0] for x in G_in_degree if x[1] == 0])\r\n priority = len(potential_winners - known_winners)\r\n\r\n children[child_node] = (priority, index)\r\n index = index + 1\r\n\r\n if self.debug_mode == 3:\r\n print(\"added edge\", e)\r\n\r\n children_items = sorted(children.items(), key=lambda x: (x[1][0], x[1][1]))\r\n sorted_children = [key for key, value in children_items]\r\n stackNode += sorted_children\r\n break\r\n\r\n if len(E.edges()) == 0 and f_found_max_children == 0:\r\n # E is empty\r\n if self.debug_mode >= 2:\r\n print(\"E is empty\")\r\n self.add_winners(G, P, I, known_winners, stats)\r\n\r\n return sorted(known_winners), stats, self.data", "def solve(self):\n # a stack of queries (aka subproblems to be solved)\n stack = []\n initial_query = (len(self.items), self.knapsack_size)\n stack.append(initial_query)\n # Run as long as there are subproblems that need to be solved.\n # - this might not pass through all possible subproblems; in fact, \n # we're counting on it\n # - it will only pass through the subproblems that the initial \n # problem needs solved\n while len(stack) > 0:\n (end, ksize) = stack[-1]\n # this is the subproblem where we have only items self.items[:end]\n # and the knapsack size is ksize\n if self.items[end - 1].size > ksize:\n # item end-1 does not fit\n try:\n # retrieve subproblem result from the cache\n self._cache[(end, ksize)] = self._cache[(end - 1, ksize)]\n except KeyError:\n # subproblem hasn't been solved yet, put it on the stack\n stack.append((end - 1, ksize))\n continue\n else:\n # item end-1 fits; we get two subproblems:\n # - one if we don't include item end-1 in the knapsack\n # - one if we do include it\n sub1 = (end - 1, ksize)\n sub2 = (end - 1, ksize - self.items[end - 1].size)\n try:\n # retrieve 1st 
subproblem's result from the cache and \n # compute max value if we don't include item end-1\n val1 = self._cache[sub1]\n except KeyError:\n # subproblem hasn't been solved yet, put it on the stack\n stack.append(sub1)\n continue\n try:\n # retrieve 2nd subproblem's result from the cache and\n # compute max value if we do include item end-1\n val2 = self.items[end - 1].value + self._cache[sub2]\n except KeyError:\n # subproblem hasn't been solved yet, put it on the stack\n stack.append(sub2)\n continue\n # is it better to include item end-1 or not?\n self._cache[(end, ksize)] = max(val1, val2)\n # done with this subproblem\n stack.pop()\n return self._cache[(initial_query)]", "def _evaluate_branch(self, branch, remaining_nodes):\n current_eq = None\n next_nodes = []\n\n # Get the player that we need to evaluate\n for n in remaining_nodes:\n # Node are both equations and variables. We just want the\n # equations.\n if current_eq is None:\n if isinstance(n, Equation):\n current_eq = n\n else:\n # Leave whatever is left for the next layer of evaluation.\n next_nodes.append(n)\n\n # No more equations! We're done\n if current_eq is None:\n return\n\n # Go through each of the branches and evaluate the state.\n for b in branch.branches:\n # Let the player assign output\n outputs = current_eq.calculate(b.assignments)\n\n # Construct a distribution of these outputs\n distn = JointDist(outputs)\n\n # Add the branches, and then evaluate using the next set of\n # remaining nodes.\n b.add_branches(distn)\n self._evaluate_branch(b, next_nodes)", "def dequeue_loop():\n while True:\n result = dequeue_function()\n if not result:\n break\n print(result)", "def shunting_yard(self, input_queue):\n operator_stack = Stack()\n for item in input_queue:\n if isinstance(item, numbers.Number):\n self.output_queue.push(item)\n\n elif isinstance(item, Function):\n operator_stack.push(item)\n\n elif item == '(':\n operator_stack.push(item)\n\n elif item == ')':\n while not operator_stack.peek() == '(':\n self.output_queue.push(operator_stack.pop())\n operator_stack.pop()\n\n elif isinstance(item, Operator):\n while not (operator_stack.is_empty() or operator_stack.peek() == '(' or\n operator_stack.peek().strength < item.strength):\n self.output_queue.push(operator_stack.pop())\n operator_stack.push(item)\n\n while not operator_stack.is_empty():\n self.output_queue.push(operator_stack.pop())", "def iterate_days(results_queue, idx=0):\n # Declaration of learners and results' vectors\n ucb1_learner = UCB1Learner(len(prices))\n ucb1_old_learner = UCB1Learnerold(len(prices))\n vector_daily_price_ucb1_loc = []\n vector_daily_price_ucb1_old_loc = []\n print('Starting execution ' + str(idx))\n # For every day:\n for t in range(T):\n if t % 20 == 0:\n log(\"Iteration day: {:3d} - execution: {:3d}\".format(t, idx))\n\n # Get new users in the day t and their costs\n [new_user_1, new_user_2, new_user_3] = env.get_all_new_users_daily(bids[0])\n new_users = [new_user_1, new_user_2, new_user_3]\n [cost1, cost2, cost3] = env.get_all_cost_per_click(bids[0])\n cost = [cost1, cost2, cost3]\n\n # Get the total cost\n total_cost = 0\n for user in range(len(new_users)):\n total_cost += new_users[user] * cost[user]\n\n # Choose the arm and thus the price for UCB1\n daily_arm_ucb1 = ucb1_learner.pull_arm()\n daily_price_ucb1 = prices[daily_arm_ucb1]\n vector_daily_price_ucb1_loc.append(daily_price_ucb1)\n\n # Choose the arm and thus the price for ucb1_old\n daily_arm_ucb1_old = ucb1_old_learner.pull_arm()\n daily_price_ucb1_old = 
prices[daily_arm_ucb1_old]\n vector_daily_price_ucb1_old_loc.append(daily_price_ucb1_old)\n\n # Calculate the number of bought items\n daily_bought_items_per_class_ucb1 = [0, 0, 0]\n daily_bought_items_per_class_ucb1_old = [0, 0, 0]\n\n for user in range(len(new_users)):\n for c in range(new_users[user]):\n daily_bought_items_per_class_ucb1[user] += env.buy(daily_price_ucb1, user + 1)\n daily_bought_items_per_class_ucb1_old[user] += env.buy(daily_price_ucb1_old, user + 1)\n\n # Sum up the n. of bought items\n daily_bought_items_ucb1 = sum(daily_bought_items_per_class_ucb1)\n daily_bought_items_ucb1_old = sum(daily_bought_items_per_class_ucb1_old)\n\n # Calculate the revenue\n daily_revenue_ucb1 = daily_bought_items_ucb1 * env.get_margin(daily_price_ucb1) - total_cost\n daily_revenue_ucb1_old = daily_bought_items_ucb1_old * env.get_margin(daily_price_ucb1_old) - total_cost\n\n # Get delayed rewards UCB1\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ucb1[user - 1], daily_price_ucb1,\n user)))\n\n ucb1_learner.update_observations(daily_arm_ucb1, daily_revenue_ucb1, next_30_days)\n\n # Get delayed rewards UCB1 old\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ucb1_old[user - 1], daily_price_ucb1_old,\n user)))\n\n ucb1_old_learner.update_observations(daily_arm_ucb1_old, daily_revenue_ucb1_old, next_30_days)\n\n print('Ending execution ' + str(idx))\n\n # put results in the given queue\n results_queue.put((ucb1_learner.collected_rewards, ucb1_old_learner.collected_rewards, vector_daily_price_ucb1_loc, vector_daily_price_ucb1_old_loc))", "def retrieve_results(self):\n # return the results of the last calculation\n last_calc = self.ctx.calculations[-1]\n for name, port in self.spec().outputs.items():\n if port.required and name not in last_calc.outputs:\n self.report('the spec specifies the output {} as required '\n 'but was not an output of {}<{}>'.format(name, self._calculation.__name__,\n last_calc.pk))\n\n if name in last_calc.outputs:\n # node = last_calc.outputs[name]\n self.out(name, last_calc.outputs[name])\n # self.report(\"attaching the node {}<{}> as '{}'\".format(node.__class__.__name__, node.pk, name))\n return", "def rnaseq():\n pass", "def solve_rr(self, initial_game_state):\n self.queue.append(initial_game_state)\n\n # For loop to go through queue\n while len(self.queue) > 0:\n game = self.queue.pop(0)\n if utils.at_goal(game):\n return game\n self.generate_possible_moves_rr(game)\n return None", "def iterate_days(results_queue, idx=0):\n # Declaration of learners and results' vectors\n ucb1_learner = UCB1Learner(len(prices))\n tsgauss_learner = TSLearnerGauss(len(prices))\n vector_daily_price_ucb1_loc = []\n vector_daily_revenue_ucb1_loc = []\n vector_daily_price_ts_loc = []\n vector_daily_revenue_ts_loc = []\n\n print('Starting execution ' + str(idx))\n\n # For every day:\n for t in range(T):\n if t % 20 == 0:\n log(\"Iteration day: {:3d} - execution: {:3d}\".format(t, idx))\n # Get new users in the day t and their costs\n [new_user_1, new_user_2, new_user_3] = env.get_all_new_users_daily(bids[0])\n new_users = [new_user_1, new_user_2, new_user_3]\n [cost1, cost2, cost3] = env.get_all_cost_per_click(bids[0])\n cost = [cost1, cost2, cost3]\n\n # Get the total cost\n total_cost = 0\n for user in range(len(new_users)):\n total_cost += new_users[user] * cost[user]\n\n # Choose 
the arm and thus the price for UCB1\n daily_arm_ucb1 = ucb1_learner.pull_arm()\n daily_price_ucb1 = prices[daily_arm_ucb1]\n vector_daily_price_ucb1_loc.append(daily_price_ucb1)\n\n # Choose the arm and thus the price for Thomson Sampling\n daily_arm_ts = tsgauss_learner.pull_arm()\n daily_price_ts = prices[daily_arm_ts]\n vector_daily_price_ts_loc.append(daily_price_ts)\n\n # Calculate the number of bought items\n daily_bought_items_per_class_ucb1 = [0, 0, 0]\n daily_bought_items_per_class_ts = [0, 0, 0]\n\n for user in range(len(new_users)):\n for c in range(new_users[user]):\n daily_bought_items_per_class_ucb1[user] += env.buy(daily_price_ucb1, user + 1)\n daily_bought_items_per_class_ts[user] += env.buy(daily_price_ts, user + 1)\n\n # Sum up the n. of bought items\n daily_bought_items_ucb1 = sum(daily_bought_items_per_class_ucb1)\n daily_bought_items_ts = sum(daily_bought_items_per_class_ts)\n\n # Calculate the revenue\n daily_revenue_ucb1 = daily_bought_items_ucb1 * env.get_margin(daily_price_ucb1) - total_cost\n daily_revenue_ts = daily_bought_items_ts * env.get_margin(daily_price_ts) - total_cost\n\n # Add to the vector the daily revenue\n vector_daily_revenue_ucb1_loc.append(daily_revenue_ucb1)\n vector_daily_revenue_ts_loc.append(daily_revenue_ts)\n\n # Get delayed rewards\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ucb1[user - 1], daily_price_ucb1,\n user)))\n\n ucb1_learner.update_observations(daily_arm_ucb1, daily_revenue_ucb1, next_30_days)\n\n # Get delayed rewards\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ts[user - 1], daily_price_ts,\n user)))\n tsgauss_learner.update_observations(daily_arm_ts, daily_revenue_ts, next_30_days)\n\n if plot_l_t == True and t>=29:\n plot_learned_curve(tsgauss_learner.mu, tsgauss_learner.tau, real, tsgauss_learner.n_pulled_arms, plots_folder, t)\n\n print('Ending execution ' + str(idx))\n\n # put results in the given queue\n results_queue.put((ucb1_learner.collected_rewards, tsgauss_learner.collected_rewards, vector_daily_price_ucb1_loc,\n vector_daily_revenue_ucb1_loc, vector_daily_price_ts_loc, vector_daily_revenue_ts_loc, tsgauss_learner.mu, tsgauss_learner.tau, tsgauss_learner.n_pulled_arms))", "def postfixCalc(self,tokens):\n if len(tokens) == 0:\n return 0\n stack = []\n # while expr is not empty\n while len(tokens)>0:\n toke = tokens.pop(0)\n # if token is a number push it onto the stack\n if isFloat(toke):\n stack.append(float(toke))\n # if token is a special number push it onto the stack\n elif toke in Calculator.specialNumbers:\n stack.append(Calculator.specialNumbers[toke])\n else:\n # Operators take 2 inputs, functions take 1 input except root which takes 2\n if toke in Calculator.operators or toke == 'root':\n n = 2\n elif toke in Calculator.functions:\n n = 1\n # If the length of the stack is less than the required number of operators the user has not \n # input enough values.\n if len(stack)<n:\n return \"Too Few Error\"\n # Pop the top n numbers from the stack\n popedVals = []\n for i in range(n):\n popedVals.append(stack.pop())\n # Evaluate the operator using the number(s) that were popped, and push back onto the stack\n if n == 2 and toke in Calculator.operators:\n stack.append(Calculator.operators[toke][0](popedVals[1], popedVals[0]))\n elif n == 2:\n stack.append(Calculator.functions[toke](popedVals[1], 
popedVals[0]))\n elif n == 1:\n stack.append(Calculator.functions[toke](popedVals[0]))\n # If there is more than one value left on the stack the user has input too many values\n if len(stack) > 1:\n return \"Too Many Error\"\n # Return the value on the stack (should only be 1 value left)\n return stack[-1]", "def results(self):\n node = self.ctx.children[self.ctx.iteration - 1]\n\n # We check the `is_finished` attribute of the work chain and not the successfulness of the last process\n # because the error handlers in the last iteration can have qualified a \"failed\" process as satisfactory\n # for the outcome of the work chain and so have marked it as `is_finished=True`.\n max_iterations = self.inputs.max_iterations.value # type: ignore[union-attr]\n if not self.ctx.is_finished and self.ctx.iteration >= max_iterations:\n self.report(\n f'reached the maximum number of iterations {max_iterations}: '\n f'last ran {self.ctx.process_name}<{node.pk}>'\n )\n return self.exit_codes.ERROR_MAXIMUM_ITERATIONS_EXCEEDED # pylint: disable=no-member\n\n self.report(f'work chain completed after {self.ctx.iteration} iterations')\n\n # Simply attach the output of the last children\n self.out_many({key: node.outputs[key] for key in node.outputs})\n return None", "def multi_stack_buster(self, in_queue, out_queue):\n while not in_queue.empty():\n task = in_queue.get()\n if task == POISON_PILL:\n out_queue.put(POISON_PILL)\n return\n\n print(\"Starting task:\", task)\n self.total_calls = 0\n self.max_stack_frames = 0\n next_already_used_candidates = dict()\n next_already_used_candidates[task.name] = None\n # Don't call multi version for now\n result = self.fast_pokemon_stack_buster(task, next_already_used_candidates, False)\n print(\"Finishing task:\", task)\n out_queue.put((result, self.total_calls, self.max_stack_frames))", "def run(self):\n\n # keep track of counter\n counter = 0\n\n while self.queue:\n\n # print depth of tree every 10000 steps\n if counter % 10000 == 0:\n print(len(self.queue[0]))\n\n # get first moves set from queue\n moves_set = self.get_moves_set()\n\n # move all moves from set\n self.try_moves(moves_set)\n\n # continue branch (add to queue) if layout is not in archive\n if self.not_in_archive():\n self.add_to_queue(moves_set)\n \n # check for win\n if self.won_game():\n\n # return winning set of moves\n return moves_set\n \n # reverse moves to original layout\n self.reverse_moves(moves_set)\n \n # add to counter\n counter += 1", "def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n # Fetch answer from task\n answer = next_task()\n self.task_queue.task_done()\n # Put into result queue\n self.result_queue.put(answer)\n return", "def worker_func(queue_in, queue_out, model_type, hidden_size, novelty_use, env_name, noise_std, action_type):\r\n env = gym.make(env_name)\r\n \r\n cache = {} # to store population / networks\r\n \r\n while True:\r\n parents_seeds = queue_in.get()\r\n if parents_seeds == None:\r\n break\r\n new_cache = {}\r\n # for each network seeds \r\n for seeds in parents_seeds:\r\n # if seed history exist\r\n if len(seeds) > 1:\r\n net = cache.get(seeds[:-1])#\r\n # check if network already exists\r\n if net is not None:\r\n # if exist mutate on the new given seed -> the last in the list\r\n net = mutate(net, seeds[-1], noise_std)\r\n else:\r\n # if not exist build the net with the seed history\r\n net = build_net(env, seeds, model_type, hidden_size, noise_std, 
action_type)\r\n else:\r\n # since no seed history exist -> build network\r\n net = build_net(env, seeds, model_type, hidden_size, noise_std, action_type)\r\n \r\n # saves the networks in a cache \r\n new_cache[seeds] = net\r\n # evaluate new network mutation\r\n reward, steps, bc = evaluate(env, net)\r\n queue_out.put(OutputItem(seeds=seeds, reward=reward, steps=steps, bc=bc))\r\n # after evaluating all seeds the worker sets the new_cache with saved nets to the current cache\r\n cache = new_cache", "def run(self):\n while self.proteins:\n \n protein = self.get_next_protein()\n \n # Get the next amino acid in the chain.\n amino_position = protein.get_unplaced_amino_position()\n if amino_position is not None:\n self.build_children(protein, amino_position)\n else:\n self.check_solution(protein)\n\n # Set's the output to be the protein with the highest score.\n protein = self.best_solution\n\n return self.best_solution", "def evaluate_node(self):\n # p, v = np.random.random(225).astype(np.float16), np.random.random()\n socket = zmq.Context().socket(zmq.DEALER)\n socket.setsockopt_string(zmq.IDENTITY, self.player_id)\n socket.connect('ipc://./tmp/oracle_%s' % self.tree.model_name)\n print('start to evaluate', self.tree.model_name)\n while True:\n # print(self.tree.to_evaluate.qsize())\n batch = []\n states = []\n colors = []\n size = self.tree.to_evaluate.qsize()\n if size > config.INFERENCE_BATCHSIZE:\n size = config.INFERENCE_BATCHSIZE\n elif size == 0:\n time.sleep(0.001)\n continue\n for _ in range(size):\n t, black, white = self.tree.to_evaluate.get()\n mine, yours = posswap(t, black, white)\n batch.append((str(mine), str(yours), t % 2))\n states.append((black, white))\n colors.append(t % 2)\n socket.send(msgpack.dumps((batch, self.player_id)))\n result = msgpack.loads(socket.recv())\n assert len(states) == len(result[0])\n assert len(states) == len(result[1])\n for ind, state in enumerate(states):\n with self.lock:\n self.tree.nodes[state].p = result[0][ind]\n if colors[ind] == 0:\n self.tree.nodes[state].v = result[1][ind]\n else:\n self.tree.nodes[state].v = -result[1][ind]\n self.tree.nodes[state].updated = True", "def radioLoop(self, grid):\n commandInQueue = [0 , 0 , 0 , 0 , 0 , 0]\n q = Queue.Queue()\n while(1):\n # temp\n receivePipeNum = [-1]\n dataRx = []\n if self.radio.available(receivePipeNum):\n self.radio.read(dataRx, self.radio.getDynamicPayloadSize())\n string = \"\"\n print(\"Pipe Number: {}\".format(receivePipeNum))\n # for n in dataRx:\n # # Decode into standard unicode set\n # if (n >= 32 and n <= 126):\n # string += chr(n)\n # print(\"Out received message decodes to: {}\".format(string))\n print(\"Received: {}\".format(dataRx))\n statusByte = dataRx[9]\n print(\"status byte: {}\".format(dataRx[9]))\n# self.radio.print_status(self.radio.get_status())\n# ackPayload = [0x46 , 0x75 , 0x63 , 0x6B , 0x59 , 0x65 , 0x73]\n# self.radio.writeAckPayload(receivePipeNum[0] , ackPayload , 7)\n# self.radio.print_status(self.radio.get_status())\n\n \"\"\"\n add new bot: dataRx[0] & 0x1\n target found: dataRx[0] & 0x2\n requesting data: dataRx[0] & 0x4\n \"\"\"\n # Adding bot (addbot = 1)\n if statusByte & 0x01:\n # TODO(add exploregrid logic)\n # TODO: add in adding bot logic\n # self.addBot(grid, bot)\n print(\"adding bot {}\".format(receivePipeNum[0]))\n commandInQueue[receivePipeNum[0]] = 0\n\n # move request from a bot\n elif statusByte & 0x04:\n if commandInQueue[receivePipeNum[0]] == 0:\n #ack[0] = (0x80 + previousMoves[receivePipeNum[0]])\n 
#self.radio.writeAckPayload(receivePipeNum[0] , ack , 1)\n #previousMoves[receivePipeNum[0]] = previousMoves[receivePipeNum[0]] + 1\n command = []\n command.append(receivePipeNum[0])\n command.append(self.getBotCommand())\n q.put(command)\n #if not self.isTXQueueFull():\n # self.radio.writeAckPayload(receivePipeNum[0] , command , 1)\n\n\n commandInQueue[receivePipeNum[0]] = 1\n else:\n print(\"command for pipe {} already given\".format(receivePipeNum[0]))\n\n # Not requesting data (req = 0) update sensors\n\n elif statusByte & 0x08:\n # TODO: Add logic to check to see if the move failed\n self.sensors = [dataRx[0], dataRx[1],\n dataRx[2], dataRx[3],\n dataRx[4], dataRx[5],\n dataRx[6], dataRx[7],\n dataRx[8], receivePipeNum[0] + 1,\n statusByte]\n \"\"\"self.sensors = [ord(dataRx[0]), ord(dataRx[1]),\n ord(dataRx[2]), ord(dataRx[3]),\n ord(dataRx[4]), ord(dataRx[5]),\n ord(dataRx[6]), ord(dataRx[7]),\n ord(dataRx[8])]\n \"\"\"\n # Sent a data command, so the previous payload must have been received\n commandInQueue[receivePipeNum[0]] = 0\n print(self.sensors)\n\n # target found bit\n if statusByte & 0x02:\n self.targetFound = True\n print(\"TARGET FOUND\")\n\n if not self.isTXQueueFull():\n #q.empty():\n if not q.empty():\n print(commandInQueue)\n print(\"adding to queue\")\n ack = q.get()\n print(\"ACK {}\".format(ack))\n self.radio.writeAckPayload(ack[0] , ack[1:] , 1)\n time.sleep(.1)\n else:\n print(\"queue\")\n # TODO(add direction computation logic)\n else:\n print(\"fifo full\")\n print(\"\")\n\n\n \"\"\"\n unsure if necessary... clears pipes... add if necessary\n self.radio.stopListening()\n \"\"\"\n\n \"\"\"\n unsure if necessary... additional computation may be enough of\n delay... add if neccessary\n \"\"\"\n time.sleep(.1)", "def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)", "async def collect_pool_rewards_loop(self):\n\n while True:\n try:\n if not self.blockchain_state[\"sync\"][\"synced\"]:\n await asyncio.sleep(60)\n continue\n\n self.scan_p2_singleton_puzzle_hashes = await self.store.get_pay_to_singleton_phs()\n\n scan_phs: List[bytes32] = list(self.scan_p2_singleton_puzzle_hashes)\n peak_height = self.blockchain_state[\"peak\"].height\n\n # Only get puzzle hashes with a certain number of confirmations or more, to avoid reorg issues\n coin_records: List[CoinRecord] = await self.node_rpc_client.get_coin_records_by_puzzle_hashes(\n scan_phs,\n include_spent_coins=False,\n start_height=self.scan_start_height,\n )\n self.log.info(\n f\"Scanning for block rewards from {self.scan_start_height} to {peak_height}. 
\"\n f\"Found: {len(coin_records)}\"\n )\n ph_to_amounts: Dict[bytes32, int] = {}\n ph_to_coins: Dict[bytes32, List[CoinRecord]] = {}\n not_buried_amounts = 0\n for cr in coin_records:\n self.log.info(f\"coin_record: {cr}\")\n if cr.confirmed_block_index > peak_height - self.confirmation_security_threshold:\n not_buried_amounts += cr.coin.amount\n continue\n if cr.coin.puzzle_hash not in ph_to_amounts:\n ph_to_amounts[cr.coin.puzzle_hash] = 0\n ph_to_coins[cr.coin.puzzle_hash] = []\n ph_to_amounts[cr.coin.puzzle_hash] += cr.coin.amount\n ph_to_coins[cr.coin.puzzle_hash].append(cr)\n\n # For each p2sph, get the FarmerRecords\n farmer_records = await self.store.get_farmer_records_for_p2_singleton_phs(\n set([ph for ph in ph_to_amounts.keys()])\n )\n\n # For each singleton, create, submit, and save a claim transaction\n claimable_amounts = 0\n not_claimable_amounts = 0\n for rec in farmer_records:\n if rec.is_pool_member:\n claimable_amounts += ph_to_amounts[rec.p2_singleton_puzzle_hash]\n else:\n not_claimable_amounts += ph_to_amounts[rec.p2_singleton_puzzle_hash]\n\n if len(coin_records) > 0:\n self.log.info(f\"Claimable amount: {claimable_amounts / (10**12)}\")\n self.log.info(f\"Not claimable amount: {not_claimable_amounts / (10**12)}\")\n self.log.info(f\"Not buried amounts: {not_buried_amounts / (10**12)}\")\n\n for rec in farmer_records:\n if rec.is_pool_member:\n singleton_tip: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(\n rec.singleton_tip\n )\n if singleton_tip is None:\n continue\n\n singleton_coin_record: Optional[\n CoinRecord\n ] = await self.node_rpc_client.get_coin_record_by_name(singleton_tip.name())\n if singleton_coin_record is None:\n continue\n if singleton_coin_record.spent:\n self.log.warning(\n f\"Singleton coin {singleton_coin_record.coin.name()} is spent, will not \"\n f\"claim rewards\"\n )\n continue\n\n spend_bundle = await create_absorb_transaction(\n self.node_rpc_client,\n rec,\n self.blockchain_state[\"peak\"].height,\n ph_to_coins[rec.p2_singleton_puzzle_hash],\n self.constants.GENESIS_CHALLENGE,\n )\n\n if spend_bundle is None:\n self.log.info(f\"spend_bundle is None. {spend_bundle}\")\n continue\n\n push_tx_response: Dict = await self.node_rpc_client.push_tx(spend_bundle)\n if push_tx_response[\"status\"] == \"SUCCESS\":\n block_index: List[bytes32] = []\n # TODO(pool): save transaction in records\n for cr in ph_to_coins[rec.p2_singleton_puzzle_hash]:\n if cr.confirmed_block_index not in block_index:\n block_index.append(cr.confirmed_block_index)\n reward = RewardRecord(\n rec.launcher_id,\n cr.coin.amount,\n cr.confirmed_block_index,\n cr.coin.puzzle_hash,\n cr.timestamp\n )\n self.log.info(f\"add reward record: {reward}\")\n await self.store.add_reward_record(reward)\n self.log.info(f\"Submitted transaction successfully: {spend_bundle.name().hex()}\")\n else:\n self.log.error(f\"Error submitting transaction: {push_tx_response}\")\n await asyncio.sleep(self.collect_pool_rewards_interval)\n except asyncio.CancelledError:\n self.log.info(\"Cancelled collect_pool_rewards_loop, closing\")\n return\n except Exception as e:\n error_stack = traceback.format_exc()\n self.log.error(f\"Unexpected error in collect_pool_rewards_loop: {e} {error_stack}\")\n await asyncio.sleep(self.collect_pool_rewards_interval)", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()" ]
[ "0.6068715", "0.5985844", "0.5882993", "0.5708964", "0.5642679", "0.561505", "0.56025106", "0.5601041", "0.5527031", "0.5418403", "0.5372319", "0.5371151", "0.53369904", "0.52827096", "0.52697396", "0.52394825", "0.5237868", "0.5236513", "0.52331185", "0.5231739", "0.5179411", "0.51764894", "0.51729536", "0.5165849", "0.5161252", "0.51528454", "0.51499534", "0.5118359", "0.5108115", "0.5107324" ]
0.7618425
0
Takes in an "normal" input queue, and converts it to RPN. The RPN output is pushed to the output queue in the right order.
def shunting_yard(self, input_queue):
    operator_stack = Stack()
    for item in input_queue:
        if isinstance(item, numbers.Number):
            self.output_queue.push(item)

        elif isinstance(item, Function):
            operator_stack.push(item)

        elif item == '(':
            operator_stack.push(item)

        elif item == ')':
            while not operator_stack.peek() == '(':
                self.output_queue.push(operator_stack.pop())
            operator_stack.pop()

        elif isinstance(item, Operator):
            while not (operator_stack.is_empty() or operator_stack.peek() == '(' or
                       operator_stack.peek().strength < item.strength):
                self.output_queue.push(operator_stack.pop())
            operator_stack.push(item)

    while not operator_stack.is_empty():
        self.output_queue.push(operator_stack.pop())
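The retrieved function is a shunting-yard pass that feeds an RPN evaluator like the top negative below. A standalone sketch of the same idea follows, using plain Python lists in place of the record's Stack/Queue helpers and a precedence table in place of Operator.strength; every name in the sketch is illustrative rather than taken from the record.

PRECEDENCE = {'+': 1, '-': 1, '*': 2, '/': 2}

def to_rpn(tokens):
    output, op_stack = [], []
    for tok in tokens:
        if isinstance(tok, (int, float)):
            output.append(tok)                    # operands pass straight through
        elif tok == '(':
            op_stack.append(tok)
        elif tok == ')':
            while op_stack[-1] != '(':
                output.append(op_stack.pop())     # flush until the matching '('
            op_stack.pop()                        # discard the '(' itself
        else:                                     # an operator token
            while (op_stack and op_stack[-1] != '('
                   and PRECEDENCE[op_stack[-1]] >= PRECEDENCE[tok]):
                output.append(op_stack.pop())
            op_stack.append(tok)
    while op_stack:
        output.append(op_stack.pop())
    return output

print(to_rpn(['(', 3, '+', 4, ')', '*', 2]))      # -> [3, 4, '+', 2, '*']

The resulting list can then be evaluated with a single operand stack, which is exactly what the paired RPN method in the negatives does.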
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RPN(self):\n stack = Stack()\n while not self.output_queue.is_empty():\n item = self.output_queue.pop()\n\n if isinstance(item, numbers.Number):\n stack.push(item)\n\n elif isinstance(item, Function):\n stack.push(item.execute(stack.pop()))\n\n elif isinstance(item, Operator):\n num2 = stack.pop()\n num1 = stack.pop()\n stack.push(item.execute(num1, num2))\n\n return stack.pop()", "def test_push_pop():\n\n q = PriorityQueue()\n\n # input list (obj, priority) should be reversed\n # in the priority_queue\n input_list = [((1), 9), ((2), 8), ((3), 7),\n ((4), 6), ((5), 5), ((6), 4),\n ((7), 3), ((8), 2), ((9), 1)]\n\n # insert the items in the queue\n for obj, p in input_list:\n q.push(obj, p)\n\n # pop the items into another list\n output = []\n while q._queue:\n output.append(q.pop())\n\n # make sure it lines up with expected result\n eq_(output, range(1, 10)[::-1])", "def queue_to_stack(queue):\n stack = Stack()\n check_list = []\n\n while len(queue) != 0:\n check_list.append(queue.dequeue())\n\n check_list.reverse()\n\n while check_list != []:\n stack.push(check_list[0])\n check_list.remove(check_list[0])", "def array_to_queue(queue, source):\r\n temp = None\r\n \r\n while source != []:\r\n temp = source.pop(0)\r\n queue.insert(temp)\r\n\r\n return", "def encode_queue(self, queue):\n raise NotImplementedError()", "def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))", "def stack_to_queue(stack):\n temp_stack = ArrayStack()\n result_queue = ArrayQueue()\n while not stack.isEmpty():\n elem = stack.pop()\n result_queue.add(elem)\n temp_stack.push(elem)\n while not temp_stack.isEmpty():\n stack.push(temp_stack.pop())\n return result_queue", "def calculateQueue(queue):\r\n temp = int(front(queue))\r\n dequeue(queue)\r\n while not emptyQueue(queue):\r\n operation = front(queue)\r\n dequeue(queue)\r\n operandTwo = int(front(queue))\r\n dequeue(queue)\r\n temp = calculate(temp, operandTwo, operation)\r\n return temp", "def test_queue(self):\n queue = Queue(1, 1)\n out = queue(\n Variable(torch.FloatTensor([[1]])),\n Variable(torch.FloatTensor([[0]])),\n Variable(torch.FloatTensor([[.8]])),\n )\n # queue.log()\n assert_approx_equal(out.item(), .8)\n out = queue(\n Variable(torch.FloatTensor([[2]])),\n Variable(torch.FloatTensor([[.1]])),\n Variable(torch.FloatTensor([[.5]])),\n )\n # queue.log()\n assert_approx_equal(out.item(), 1.3)\n out = queue(\n Variable(torch.FloatTensor([[3]])),\n Variable(torch.FloatTensor([[.9]])),\n Variable(torch.FloatTensor([[.9]])),\n )\n # queue.log()\n assert_approx_equal(out.item(), 2.7)", "def insert_roots_into_queue(rt: np.ndarray):\n q = []\n for j in range(rt.shape[0]):\n if j == 0:\n q = rt[j, :]\n else:\n q = np.concatenate((q, rt[j, :]))\n q = np.reshape(q, (int(q.shape[0] / rt.shape[1]), rt.shape[1]))\n return q", "def NNRunner(input_queue, output_queue):\n nn = NeuralNet('tiny_res_slow')\n # nn.export_weights()\n while True:\n pic = input_queue.get(True).resize((p.IMAGE_SIZE, p.IMAGE_SIZE))\n boxes = nn.run_images([np.asarray(pic)], cutoff=0.2)\n output_queue.put(boxes)\n input_queue.task_done()", "def encode_queue(self, queue):\n return b\"\".join(queue)", "def radioLoop(self, grid):\n commandInQueue = [0 , 0 , 0 , 0 , 0 , 0]\n q = Queue.Queue()\n while(1):\n # temp\n receivePipeNum = [-1]\n dataRx = []\n if self.radio.available(receivePipeNum):\n self.radio.read(dataRx, self.radio.getDynamicPayloadSize())\n string = \"\"\n print(\"Pipe Number: {}\".format(receivePipeNum))\n # 
for n in dataRx:\n # # Decode into standard unicode set\n # if (n >= 32 and n <= 126):\n # string += chr(n)\n # print(\"Out received message decodes to: {}\".format(string))\n print(\"Received: {}\".format(dataRx))\n statusByte = dataRx[9]\n print(\"status byte: {}\".format(dataRx[9]))\n# self.radio.print_status(self.radio.get_status())\n# ackPayload = [0x46 , 0x75 , 0x63 , 0x6B , 0x59 , 0x65 , 0x73]\n# self.radio.writeAckPayload(receivePipeNum[0] , ackPayload , 7)\n# self.radio.print_status(self.radio.get_status())\n\n \"\"\"\n add new bot: dataRx[0] & 0x1\n target found: dataRx[0] & 0x2\n requesting data: dataRx[0] & 0x4\n \"\"\"\n # Adding bot (addbot = 1)\n if statusByte & 0x01:\n # TODO(add exploregrid logic)\n # TODO: add in adding bot logic\n # self.addBot(grid, bot)\n print(\"adding bot {}\".format(receivePipeNum[0]))\n commandInQueue[receivePipeNum[0]] = 0\n\n # move request from a bot\n elif statusByte & 0x04:\n if commandInQueue[receivePipeNum[0]] == 0:\n #ack[0] = (0x80 + previousMoves[receivePipeNum[0]])\n #self.radio.writeAckPayload(receivePipeNum[0] , ack , 1)\n #previousMoves[receivePipeNum[0]] = previousMoves[receivePipeNum[0]] + 1\n command = []\n command.append(receivePipeNum[0])\n command.append(self.getBotCommand())\n q.put(command)\n #if not self.isTXQueueFull():\n # self.radio.writeAckPayload(receivePipeNum[0] , command , 1)\n\n\n commandInQueue[receivePipeNum[0]] = 1\n else:\n print(\"command for pipe {} already given\".format(receivePipeNum[0]))\n\n # Not requesting data (req = 0) update sensors\n\n elif statusByte & 0x08:\n # TODO: Add logic to check to see if the move failed\n self.sensors = [dataRx[0], dataRx[1],\n dataRx[2], dataRx[3],\n dataRx[4], dataRx[5],\n dataRx[6], dataRx[7],\n dataRx[8], receivePipeNum[0] + 1,\n statusByte]\n \"\"\"self.sensors = [ord(dataRx[0]), ord(dataRx[1]),\n ord(dataRx[2]), ord(dataRx[3]),\n ord(dataRx[4]), ord(dataRx[5]),\n ord(dataRx[6]), ord(dataRx[7]),\n ord(dataRx[8])]\n \"\"\"\n # Sent a data command, so the previous payload must have been received\n commandInQueue[receivePipeNum[0]] = 0\n print(self.sensors)\n\n # target found bit\n if statusByte & 0x02:\n self.targetFound = True\n print(\"TARGET FOUND\")\n\n if not self.isTXQueueFull():\n #q.empty():\n if not q.empty():\n print(commandInQueue)\n print(\"adding to queue\")\n ack = q.get()\n print(\"ACK {}\".format(ack))\n self.radio.writeAckPayload(ack[0] , ack[1:] , 1)\n time.sleep(.1)\n else:\n print(\"queue\")\n # TODO(add direction computation logic)\n else:\n print(\"fifo full\")\n print(\"\")\n\n\n \"\"\"\n unsure if necessary... clears pipes... add if necessary\n self.radio.stopListening()\n \"\"\"\n\n \"\"\"\n unsure if necessary... additional computation may be enough of\n delay... 
add if neccessary\n \"\"\"\n time.sleep(.1)", "def rotate_queue(queue):\n\n next_value = queue[0]\n queue.rotate(-1)\n\n return next_value", "def process_queue_fast(self):\n while self.queue:\n self.queue.popleft()()", "def MoveItem(src_queue, trg_queue, order_func):\n score, item = heapq.heappop(src_queue)\n score = - float(order_func(- score, item))\n heapq.heappush(trg_queue, (score, item))\n return item", "def extract_string_from_queue(queue: Queue) -> str:\n ret = StringBuilder(\"\")\n while not queue.empty():\n ret += queue.peek()\n queue.pop()\n return str(ret)[::-1]", "def test_merge():\n\n q1 = PriorityQueue()\n q2 = PriorityQueue()\n\n # input list (obj, priority) should be reversed\n # in the priority_queue\n input_list = [((1), 9), ((2), 8), ((3), 7),\n ((4), 6), ((5), 5), ((6), 4),\n ((7), 3), ((8), 2), ((9), 1)]\n\n # insert the items into the queues\n for idx, (obj, p) in enumerate(input_list):\n if idx < 4:\n q1.push(obj, p)\n else:\n q2.push(obj, p)\n\n # merge the queues\n q1.merge(q2)\n\n # dump the queue into a list\n output = []\n while q1._queue:\n output.append(q1.pop())\n\n # validate the expected behavior\n eq_(output, range(1, 10)[::-1])", "def segmentationVisionQueue(q, output_dir, save_intermediate=False, overwrite=False):\n\n while not q.empty():\n try:\n input_img = q.get()\n print(\"input img: \", input_img)\n # this is hard coded to run on my registered rasters only\n img_info = input_img.split('/')\n img_id = img_info[-1]\n img_id = img_id[:-4]\n output_root = output_dir + '/' + img_id\n applySegmentationSteps(input_img, 'fp', output_root, save_intermediate=save_intermediate, overwrite=overwrite)\n except ValueError as val_error:\n print(val_error)\n except Exception as error:\n print(error)", "def populatereadyqueue():\n readyQueue.put(Process(\"P1\", time(0, 0, 1), time(0, 0, 4)))\n readyQueue.put(Process(\"P2\", time(0, 0, 2), time(0, 0, 6)))\n readyQueue.put(Process(\"P3\", time(0, 0, 3), time(0, 0, 2)))", "def _push_queue(self):\n\n self.add_cons_vars(self._var_queue, sloppy=self.sloppy)\n self.add_cons_vars(self._cons_queue, sloppy = self.sloppy)\n\n if len(self._var_queue) > 0:\n self.regenerate_variables()\n if len(self._cons_queue) > 0:\n self.regenerate_constraints()\n\n self._var_queue = list()\n self._cons_queue = list()", "def process_node(self, priority_q, query, debug=False):\n popped = priority_q.pop(0)\n node = list(popped.keys())[0]\n node_label = node.split('.') \n if node[0] == \"C\": \n if debug:\n logging.info(f\"L{len(node_label) - 2} found bucket {node}\") \n return priority_q, node\n \n if debug:\n logging.info(f\"Popped {node}\")\n\n predictions = self.collect_probs_for_node(node_label, query)\n priority_q = self.add_to_priority_queue(priority_q, predictions, len(node_label) - 1, [node])\n priority_q = sorted(priority_q, key=(lambda i: list(i.values())), reverse=True)\n if debug:\n logging.info(f\"L{len(node_label) - 1} added - PQ (Top 5): {priority_q[:5]}\\n\") \n \n return priority_q, node", "def encode_queue(self, queue):\n return \"[\" + \",\".join(queue) + \"]\"", "def reader(handle, input_queue):\n input_queue.put(handle.read())", "def queue_input(self, value):\n self.input_queue.append(value)", "def process_queue(self, queue):\n\n while queue:\n deferred, data = queue.popleft()\n deferred.callback(data)", "def queue_to_array(queue, target):\r\n temp = None\r\n \r\n while queue.is_empty() == False:\r\n temp = queue.remove()\r\n target.append(temp)\r\n\r\n return", "def anagram_queue(self):\n\n for i in 
utility_obj.get_anagram_prime():\n queue.enqueue(i)\n\n for i in range(0, queue.size()):\n print(queue.dequeue())", "def processIncoming(self):\n try:\n msg = self.guiqueue.get(False)\n (PK, XERRO, tt) = getDataFromString(msg)\n self.PK0.append(PK[0])\n self.PK1.append(PK[1])\n self.PK2.append(PK[2])\n self.PK3.append(PK[3])\n self.PK4.append(PK[4])\n self.PK5.append(PK[5])\n self.PK6.append(PK[6])\n self.PK7.append(PK[7])\n self.XERRO0.append(XERRO[0])\n self.XERRO1.append(XERRO[1])\n self.XERRO2.append(XERRO[2])\n self.XERRO3.append(XERRO[3])\n self.XERRO4.append(XERRO[4])\n self.XERRO5.append(XERRO[5])\n self.XERRO6.append(XERRO[6])\n self.XERRO7.append(XERRO[7])\n self.t.append(tt)\n \n \n #replota a cada dado novo\n self.updatePlot()\n \n except queue.Empty:\n #print(\"Fila Vazia\")\n pass\n\n self.root.after(25, self.processIncoming)", "def queue_to_list(queue):\n result = []\n while queue.qsize() != 0:\n result.append(queue.get())\n return result" ]
[ "0.69277567", "0.5724403", "0.5680305", "0.5612169", "0.55673456", "0.54916817", "0.54428995", "0.5430331", "0.5354963", "0.5272707", "0.525648", "0.52467066", "0.517748", "0.5164673", "0.51554155", "0.5152124", "0.51476604", "0.51356125", "0.51281035", "0.5121775", "0.5111035", "0.5095541", "0.5094065", "0.5083522", "0.5076493", "0.5062737", "0.5057695", "0.50382125", "0.50234663", "0.5017649" ]
0.6383998
1
Get a PDU buffer of the given size cast to the correct type
def get_clns_buffer (self, size, pdu_type):
    if sys.version_info >= (3, 0):
        buf = bytearray(size)
        hdr = pdu.PDU_PDU_TYPES[pdu_type].from_buffer(buf)
    else:
        buf = create_string_buffer(size)
        hdr = util.cast_as(buf, pdu.PDU_PDU_TYPES[pdu_type])
    hdr.llc_ssap = 0xfe
    hdr.llc_dsap = 0xfe
    hdr.llc_control = 0x03
    hdr.clns_idrp = clns.CLNS_IDRP_ISIS
    hdr.clns_len = pdu.PDU_HEADER_LEN[pdu_type]
    hdr.clns_version = clns.CLNS_VERSION
    hdr.clns_sysid_len = 6
    hdr.clns_reserved1 = 0
    hdr.clns_pdu_type = pdu_type
    hdr.clns_version2 = clns.CLNS_VERSION2
    hdr.clns_reserved2 = 0
    hdr.clns_max_area = 3
    tlvview = memoryview(buf)[sizeof(hdr):]
    return hdr, buf, tlvview
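The function above relies on the ctypes pattern of overlaying a header structure onto a flat buffer and keeping a memoryview of the remainder for TLVs. A minimal Python 3 sketch of that pattern follows; the DemoHeader layout and field values are invented for illustration and are not the real CLNS/IS-IS header defined by the record's pdu module.

import ctypes

class DemoHeader(ctypes.BigEndianStructure):
    _pack_ = 1
    _fields_ = [
        ("dsap", ctypes.c_uint8),
        ("ssap", ctypes.c_uint8),
        ("control", ctypes.c_uint8),
        ("pdu_type", ctypes.c_uint8),
        ("length", ctypes.c_uint16),
    ]

def get_demo_buffer(size, pdu_type):
    buf = bytearray(size)
    hdr = DemoHeader.from_buffer(buf)              # writes through hdr land in buf
    hdr.dsap = 0xFE
    hdr.ssap = 0xFE
    hdr.control = 0x03
    hdr.pdu_type = pdu_type
    hdr.length = ctypes.sizeof(DemoHeader)
    tlvview = memoryview(buf)[ctypes.sizeof(hdr):]  # remaining space for TLVs
    return hdr, buf, tlvview

hdr, buf, tlvs = get_demo_buffer(64, pdu_type=17)
print(bytes(buf[:6]).hex(), len(tlvs))             # packed header bytes, then TLV space left

Because from_buffer() returns a view rather than a copy, assigning to the header fields fills in the first bytes of the same bytearray that is later handed to the packet writer.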
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_buffer(self) -> bytearray:\n packet = bytearray()\n packet.extend(\n struct.pack(\n \"!ccccHH\",\n \"D\".encode(\"ascii\"),\n \"L\".encode(\"ascii\"),\n \"E\".encode(\"ascii\"),\n \"P\".encode(\"ascii\"),\n int(self.type),\n self.len,\n )\n )\n return packet", "def get_buffer(for_data, datatype):\n global data_offset\n global buffer\n buffer_size = len(for_data.flatten()) * dtype_size(datatype)\n (buffer_size, for_data) = align(buffer_size, for_data, 64)\n buffer.append(for_data)\n\n data_offset += buffer_size\n return data_offset - buffer_size, len(buffer)", "def read_bytes(self, size):\n return self.read('bytes:'+str(size))", "def read(self, size: int) -> np.ndarray:\n if size > self._buffer_size:\n raise ValueError('Cannot read more samples than the size of the buffer.')\n elif size <= 0:\n raise ValueError('Size must be positive.')\n\n start_index = self._buffer_size - size\n return np.copy(self._buffer[start_index:])", "def get_buf(self, data_type = \"void\"):\n if self.buf is not None:\n return ffi.cast(data_type + \"*\", self.buf)\n else:\n raise RuntimeError(\"Buffer not created.\")", "def __read_block(self, size):\n buf = b\"\"\n if len(self.__read_buffer):\n limit = (\n size if size <= len(self.__read_buffer) else\n len(self.__read_buffer)\n )\n buf = self.__read_buffer[:limit]\n self.__read_buffer = self.__read_buffer[limit:]\n size -= limit\n if not size:\n return buf\n try:\n buf += self.sock.recv(size)\n except (socket.timeout, ssl.SSLError):\n raise Error(\"Failed to read %d bytes from the server\" % size)\n self.__dprint(buf)\n return buf", "def read(self, size=-1):\n if not self._buf:\n self._buf.append(next(self._iter, b''))\n if len(self._buf[0]) < size or size < 0:\n return self._buf.pop(0)\n block = self._buf.pop(0)\n self._buf.insert(0, block[size:])\n return block[:size]", "def _unpack(self, _format: str, size: int) -> Any:\n return unpack(_format, self.read(size))[0]", "def _pack_length(buffer: bytes) -> bytes:\n length = len(buffer)\n length_bytes = struct.pack('!Q', length)\n return length_bytes", "def peek(self, size, timeout=_UNSET):\n with self._recv_lock:\n if len(self.rbuf) >= size:\n return self.rbuf[:size]\n data = self.recv_size(size, timeout=timeout)\n self.rbuf = data + self.rbuf\n return data", "def readBytes(self, size=1):\n return self.bytes", "def get_udp_packet(self, sock, size=0):\n\n pkt = ''\n while True:\n buf = ''\n try:\n buf = sock.recvfrom(64)[0]\n except socket.timeout:\n break\n if size and len(pkt) >= size:\n break\n if not buf:\n break\n pkt += buf\n return pkt", "def read(self, size=-1):\n _complain_ifclosed(self._closed)\n buf = self._buf\n while size < 0 or len(buf) < size:\n try:\n buf = buf + next(self._generator)\n except StopIteration:\n break\n\n returned = b\"\"\n if size >= 1:\n self._buf = buf[size:]\n returned = buf[:size]\n else:\n self._buf = b\"\"\n returned = buf\n\n self._position = self._position + len(returned)\n return returned", "def Read_Bytes(self, size = 0):\r\n if size == 0: size = self.Port.inWaiting()\r\n data = self.Port.read(size)\r\n return data", "def read_memory(self, address, size):\n return self.read(0, address, size, mem_device=True)", "def read_data(self, size, attempts = 1):\n data = Array('B')\n # do we have all of the data in the read buffer?\n if size <= len(self.rdbuf) - self.rdofs:\n data = self.rdbuf[self.rdofs : self.rdofs + size]\n self.rdofs += size\n return data\n # do we have some of the data in the read buffer?\n if len(self.rdbuf) - self.rdofs > 0:\n data = 
self.rdbuf[self.rdofs:]\n # do a usb read to get the rest...\n # read from the usb device\n try:\n bytes_to_rd = size - len(data)\n while bytes_to_rd > 0:\n # read from the usb device\n while True:\n self.rdbuf = self._read()\n self.rdofs = 0\n if len(self.rdbuf) > 0:\n break\n else:\n # no data received\n attempts -= 1\n if attempts > 0:\n # try again\n continue\n # return what we have\n return data\n # copy the read buffer into the returned data\n n = len(self.rdbuf)\n if n >= bytes_to_rd:\n # copy a partial read buffer\n data += self.rdbuf[:bytes_to_rd]\n self.rdofs = bytes_to_rd\n return data\n else:\n # copy all of the read buffer\n data += self.rdbuf\n bytes_to_rd -= n\n # read more data...\n except usb.core.USBError as e:\n raise usbdev_error(str(e))\n # never reached\n raise usbdev_error(\"internal error\")", "def _make_memoryview(size):\n return memoryview(bytearray(size))", "def read_buffer(serial):\r\n resp = serial.read_all()\r\n return resp.decode()", "def size_to_bytes(size):\n # little-endian representation of 32-bit (4-byte)\n # int size\n return size.to_bytes(4, \"little\")", "def _get_data(self, read_size):\n return self._character_device.read(read_size)", "def read(self, size: int=-1) -> bytes:\n ...", "def read(self, size: int=-1) -> bytes:\n ...", "def read_raw(self, offset, size, return_raw = False):\n raw_data = self.reader(offset, size)\n if raw_data is None:\n return None\n if return_raw:\n return raw_data\n else:\n if size == 1:\n data = struct.unpack(\"%dB\" %size, raw_data)[0]\n else:\n data = struct.unpack(\"%dB\" %size, raw_data)\n return data", "def GetBytes(byte, size):\n if sys.version_info[0] >= 3:\n data = bytes([byte]) * size\n else:\n data = chr(byte) * size\n return data", "def read(self, size=-1):\n\n if size < 0:\n raise NotImplementedError(\"Don't be greedy, that could be massive!\")\n elif size == 0:\n if self._text:\n return \"\"\n else:\n return b\"\"\n elif self._within_block_offset + size <= len(self._buffer):\n # This may leave us right at the end of a block\n # (lazy loading, don't load the next block unless we have too)\n data = self._buffer[self._within_block_offset:self._within_block_offset + size]\n self._within_block_offset += size\n assert data # Must be at least 1 byte\n return data\n else:\n # if read data overflows to next block\n # pull in rest of data in current block\n data = self._buffer[self._within_block_offset:]\n\n # decrement size so that we only pull the rest of the data\n # from next block\n size -= len(data)\n self._load_block() # will reset offsets\n\n if not self._buffer:\n return data # EOF\n\n # if there is still more to read\n elif size:\n # pull rest of data from next block\n return data + self.read(size)\n else:\n # Only needed the end of the last block\n return data", "def _read_raw_bytes_direct(self, size):\n with(self.visa_handle.ignore_warning(pyvisa.constants.VI_SUCCESS_MAX_CNT)):\n data, statuscode = self.visa_handle.visalib.read(\n self.visa_handle.session, size)\n\n return data", "def extract_packet(_buffer):\n if len(_buffer)>=5:\n mtype=_buffer[0]\n msglen=struct.unpack('!L',_buffer[1:5])[0]\n if len(_buffer)>=msglen+1:\n return _buffer[5:msglen+1],mtype,_buffer[msglen+1:]\n return None,None,_buffer", "def decode(self, size=1):\n while len(self.__buf) < size:\n self.__buf += self.__lib.decompress(self.__ref.read(1))\n buf, self.__buf = self.__buf[:size], self.__buf[size:]\n return buf", "def recv(self, size, flags=0, timeout=_UNSET):\n with self._recv_lock:\n if timeout is _UNSET:\n timeout = 
self.timeout\n if flags:\n raise ValueError(\"non-zero flags not supported: %r\" % flags)\n if len(self.rbuf) >= size:\n data, self.rbuf = self.rbuf[:size], self.rbuf[size:]\n return data\n if self.rbuf:\n ret, self.rbuf = self.rbuf, b''\n return ret\n self.sock.settimeout(timeout)\n try:\n data = self.sock.recv(self._recvsize)\n except socket.timeout:\n raise Timeout(timeout) # check the rbuf attr for more\n if len(data) > size:\n data, self.rbuf = data[:size], data[size:]\n return data", "def _serial_read(self, size):\n self.write([self.SERIAL_IO])\n resp = self.read(size)\n data = self.decode(resp)\n return data" ]
[ "0.6299509", "0.62020695", "0.6165722", "0.585175", "0.5818612", "0.5739499", "0.5722811", "0.56874335", "0.5664632", "0.5662061", "0.56257224", "0.56121916", "0.5598169", "0.55793726", "0.55635995", "0.55509305", "0.5521163", "0.54932046", "0.5467055", "0.5446987", "0.54348004", "0.54348004", "0.5433181", "0.5428494", "0.54188484", "0.54007584", "0.5399821", "0.5378718", "0.537624", "0.53622127" ]
0.6536022
0
This method is called by AdjDB if DIS election information has changed
def dis_election_info_changed(self, lindex):
    lxlink = self.lxlink[lindex]
    if lxlink:
        lxlink.dis_election_info_changed()
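A small, entirely hypothetical wiring sketch of how such a notification is typically forwarded: the adjacency database calls the interface-level method, which hands the event to the per-level link object that actually re-runs DIS election. Class names and behavior here are invented for illustration and do not come from the record's codebase.

class _Link:
    def dis_election_info_changed(self):
        print("re-evaluating DIS election for this level")

class _Interface:
    def __init__(self):
        self.lxlink = [_Link(), None]              # one entry per IS-IS level

    def dis_election_info_changed(self, lindex):
        lxlink = self.lxlink[lindex]
        if lxlink:
            lxlink.dis_election_info_changed()

intf = _Interface()
intf.dis_election_info_changed(0)                  # level with a link: event is forwarded
intf.dis_election_info_changed(1)                  # level without a link: silently ignored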
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self.haveDistrict = len(self.districts()) > 0", "def district_check(self):\n\t\t\n\t\tplaces_2_fetch=list(ons_week.stored_names.values())+ons_week.extra_places\n\t\tself.edition=None\n\t\tfor place in places_2_fetch:\n\t\t\t_filters=self.district_filter(place)\n\t\t\tif _filters:\n\t\t\t\tself.api.filters=_filters\n\t\t\telse:\n\t\t\t\tlog.info(f'Not fetching {place} - not in PHE API')\n\t\t\t\tcontinue\n\t\t\ttries=0\n\t\t\twhile tries < 5:\n\t\t\t\ttry:\n\t\t\t\t\tlog.debug(f'Fetching {place}')\n\t\t\t\t\tself.data=self.api.get_json() # Returns a dictionary\n\t\t\t\t\tnew_data=self.data.get('data')\n\t\t\t\t\t#log.info(self.latest_update)\n\t\t\t\t\tif not self.edition:\n\t\t\t\t\t\tself.edition=self.latest_update\n\t\t\t\t\tbreak\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tlog.error(e)\n\t\t\t\t\tlog.error('Retrying after 8 secs')\n\t\t\t\t\ttime.sleep(8)\n\t\t\t\t\ttries +=1\n\t\t\t\t\tnew_data=[]\n\t\t\tif not new_data:\n\t\t\t\tlog.error(f'No data found for {place}')\n\t\t\telse:\n\t\t\t\tself.data_all +=new_data\n\t\t\ttime.sleep(0.1)", "def _handle_coordinator_update(self) -> None:\n self._attr_is_on = self.relay.active\n self.async_write_ha_state()", "def _update(self):\n pass", "def check_for_new_data(self):\n return", "def _clear_discharged(self):\n resurrected = self._inmate.discharge_date_earliest is not None\n if resurrected:\n self._inmate.discharge_date_earliest = None\n self._inmate.discharge_date_latest = None\n self._inmate.in_jail = self._inmate.housing_history.latest().housing_location.in_jail\n return resurrected", "def changed(self):\n\t\tpass", "def data_changed(self):\n return", "def refresh(self):\n self.lease = self.blazar.lease.get(self.id)", "def update_isolation(self, time: int):", "def test_client_nationlity_update(self):\n pass", "def test_client_nationlity_partial_update(self):\n pass", "def retain(self):\n self.r4.setCase(self.case, self.predictionCase) \n self.DB.open_connection()\n self.r4.retain()\n self.DB.close_connection()", "def agent_updated(self, context, payload):\n if payload['admin_state_up'] != self.admin_state_up:\n self.admin_state_up = payload['admin_state_up']\n if self.admin_state_up:\n self.needs_resync = True\n else:\n for pool_id in self.cache.get_pool_ids():\n self.destroy_device(pool_id)\n LOG.info(_(\"agent_updated by server side %s!\"), payload)", "def __init__(self, datacenter_id, server):\n logging.debug('initialized')\n self.datacenter_id = datacenter_id\n # self.datacenters = CONFIG['datacenters']\n # update the datacenters, so that the id and port are all int\n # self.datacenters = dict([(x, y) for x, y in self.datacenters.items()])\n self.total_ticket = CONFIG['total_ticket']\n\n # get current_term, voted_for, log from the state.p\n filename = 'state'+datacenter_id+'.pkl'\n pkl_file = open(filename, 'rb')\n data = pickle.load(pkl_file)\n\n self.current_term = data['current_term']\n self.voted_for = data['voted_for']\n\n self.role = 'follower'\n\n # keep a list of log entries\n # put a dummy entry in front\n #self.log = [LogEntry(0, 0)]\n self.log = data['log']\n\n\n # record the index of the latest comitted entry\n # 0 means the dummy entry is already comitted\n self.commit_idx = -1\n\n # store the server object to be used for making requests\n self.server = server\n self.leader_id = None\n\n self.election_timeout = random.uniform(CONFIG['T'], 2*CONFIG['T'])\n\n # become candidate after timeout\n if self.datacenter_id in self.getAllCenterID():\n self.election_timer = Timer(self.election_timeout, 
self.startElection)\n self.election_timer.daemon = True\n self.election_timer.start()\n else:\n self.election_timer = None\n logging.debug('started election countdown')\n\n # used by leader only\n self.heartbeat_timeout = CONFIG['heartbeat_timeout']\n self.heartbeat_timer = None", "def onUpdated(self):", "def recover(self):\n if self.get_info_from_db():\n logger.info(\"Recover by reading previous results\")\n self.check_items(self.get_user_results_from_db())\n else:\n self.create_info_in_db() # create record in axdb", "def update_from_latest_data(self) -> None:\n self._attr_is_on = (\n self._ambient.stations[self._mac_address][ATTR_LAST_DATA][\n self.entity_description.key\n ]\n == self.entity_description.on_state\n )", "def refresh(self):\n self.fetch(False)", "def reset_election(self, *args):\n with self.voter_lock:\n self.results = {}\n self.voters = {}\n self.election_expiration = None\n\n return 'Election reset.'", "def kill_candidate(self, confid):\n for dct in self.c.select(gaid=confid):\n self.c.update(dct.id, extinct=1)", "def old_collected_data_status(self, old_collected_data_status):\n\n self._old_collected_data_status = old_collected_data_status", "def update_from_latest_data(self) -> None:\n self._attr_is_on = self.coordinator.data[self.entity_description.data_key]", "def update_from_latest_data(self) -> None:\n data = self.coordinator.data[self.entity_description.uid]\n\n self._attr_is_on = bool(data[\"state\"])\n\n attrs = {\n ATTR_CURRENT_CYCLE: data[\"cycle\"],\n ATTR_ID: data[\"uid\"],\n ATTR_NO_CYCLES: data[\"noOfCycles\"],\n ATTR_RESTRICTIONS: data[\"restriction\"],\n ATTR_SLOPE: SLOPE_TYPE_MAP.get(data[\"slope\"], 99),\n ATTR_SOIL_TYPE: SOIL_TYPE_MAP.get(data[\"soil\"], 99),\n ATTR_SPRINKLER_TYPE: SPRINKLER_TYPE_MAP.get(data[\"group_id\"], 99),\n ATTR_STATUS: RUN_STATE_MAP[data[\"state\"]],\n ATTR_SUN_EXPOSURE: SUN_EXPOSURE_MAP.get(data.get(\"sun\")),\n ATTR_VEGETATION_TYPE: VEGETATION_MAP.get(data[\"type\"], 99),\n }\n\n if \"waterSense\" in data:\n if \"area\" in data[\"waterSense\"]:\n attrs[ATTR_AREA] = round(data[\"waterSense\"][\"area\"], 2)\n if \"fieldCapacity\" in data[\"waterSense\"]:\n attrs[ATTR_FIELD_CAPACITY] = round(\n data[\"waterSense\"][\"fieldCapacity\"], 2\n )\n if \"precipitationRate\" in data[\"waterSense\"]:\n attrs[ATTR_PRECIP_RATE] = round(\n data[\"waterSense\"][\"precipitationRate\"], 2\n )\n\n if self._entry.options[CONF_USE_APP_RUN_TIMES]:\n provision_data = self._data.coordinators[DATA_PROVISION_SETTINGS].data\n if zone_durations := provision_data.get(\"system\", {}).get(\"zoneDuration\"):\n attrs[ATTR_ZONE_RUN_TIME] = zone_durations[\n list(self.coordinator.data).index(self.entity_description.uid)\n ]\n\n self._attr_extra_state_attributes.update(attrs)", "def lost(self):\r\n return None", "def _updateHeartbeat (self) :\r\n for pw, conn in self.clients :\r\n if conn : # we do have at least one client, enable heartbeat if needed\r\n self.have_clients = True\r\n return\r\n \r\n self.have_clients = False", "def _got_new_lease(self):\n self._new_lease_event.set()", "def _update_info(self):\n data = self._get_data()\n if not data:\n return False\n\n active_clients = [client for client in data if client.state]\n self.last_results = active_clients\n\n _LOGGER.debug(\n \"%s Active clients: %s\",\n len(active_clients),\n \",\".join(f\"{client.mac} {client.name}\" for client in active_clients),\n )\n return True", "def _update_leader(self):", "def handle_actual_updated(self):\n self._actual_updated()" ]
[ "0.55303323", "0.542889", "0.5330749", "0.53204805", "0.5200429", "0.51930946", "0.5169944", "0.516099", "0.5150847", "0.51451606", "0.5138021", "0.5088016", "0.5068397", "0.504758", "0.50452393", "0.5004574", "0.50022286", "0.49970877", "0.4994714", "0.4983401", "0.4978172", "0.49661365", "0.4963252", "0.49544084", "0.49522102", "0.49470952", "0.49432853", "0.4940825", "0.49402058", "0.4930487" ]
0.6338383
0
Fill an SNP packet with SNP entries
def fill_snp_packet (self, ssnflags, tlvview): snpstruct = tlv.SNPEntryStruct sz = snpstruct.size availb = len(tlvview) - 2 avail = availb // sz while avail > 0 and ssnflags: tavailb = min(255, availb) tlvview[0] = tlvwrb(tlv.TLV_SNP_ENTRIES) tlvp = tlvview[2:2 + tavailb] origtlvp = tlvp while len(tlvp) >= sz and ssnflags: # XXX don't we need to lock the DB here while we look at this data? lspseg = ssnflags.pop() lsphdr = lspseg.lsphdr self.clear_flag_impl(SSN, lspseg) tlvp[0:sz] = snpstruct.pack(lsphdr.lifetime, stringify3(lsphdr.lspid), lsphdr.seqno, lsphdr.checksum) tlvp = tlvp[sz:] tlen = len(origtlvp) - len(tlvp) tlvview[1] = tlvwrb(tlen) tlvview = tlvview[tlen + 2:] availb = len(tlvview) - 2 avail = availb // sz return ssnflags, tlvview
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _capture_snp(self):\n # Get the forward position\n self._forward_position = self._sequence.find('[')\n # Get the reverse position\n self._reverse_position = len(self._sequence) - self._sequence.find(']')\n # Get the SNP\n self._snp = self._sequence[self._forward_position:self._sequence.find(']') + 1]", "def __udp_initialize_packet(self, seq):\n packet_payload, packet_size = self.__get_file_chunk()\n self.packets_status.update(\n {seq: {\"status\": 1, \"payload\": packet_payload, \"size\": packet_size}})", "def setupPacket(self):\n return None", "def __init__(self, station_definition=None, number_of_packets_in_record=None, packet_number_in_record=None):", "def __init__(self, station_definition=None, number_of_packets_in_record=None, packet_number_in_record=None):", "def _get_snp_data(self):\n\n vcf_counter = 0\n for filename in self.vcffilenames:\n vcf_reader = vcf.Reader(open(filename), \"rb\")\n samplename = vcf_reader.samples[0]\n for record in vcf_reader:\n chromosome, position, ref, alt = (\n record.CHROM,\n record.POS,\n record.REF,\n record.ALT,\n )\n position = str(position)\n\n ## code to build all snps position\n self._record_all_snp_positions(chromosome, position)\n\n self._record_all_snps(filename, chromosome, position, ref, alt)\n # self.snp_positions.update({str(vcf_counter) + \"_\" + chromosome + \"_\" + str(position):{\"ref\": str(ref), \"alt\":str(alt).replace(\"[\",\"\").replace(\"]\", \"\")}})\n self.snpsites[chromosome][str(position)][vcf_counter] = True\n\n vcf_counter += 1", "def __init__(self, sequence_number=None, packet_id=0, data_length=None,\n p1=0, p2=0, p3=0, p4=0, p5=0, data=b''):\n if sequence_number is None:\n sequence_number = StenoPacket.sequence_number\n StenoPacket._increment_sequence_number()\n if data is not None:\n # Data is padded to 8 bytes\n remainder = len(data) % 8\n if remainder:\n data += b'\\x00' * (8 - remainder)\n if data_length is None:\n data_length = len(data)\n self.sequence_number = sequence_number\n self.packet_id = packet_id\n self.data_length = data_length\n self.p1 = p1\n self.p2 = p2\n self.p3 = p3\n self.p4 = p4\n self.p5 = p5\n self.data = data", "def snp(rsid, pair):\n if rsid[0] == 'I' or rsid[0] == 'i':\n return { 'error': 'Cannot find indicators, must use rs #s'}\n formatPair = '(' + pair[0].upper() + ';' + pair[1].upper() + ')'\n soup = BeautifulSoup(urllib.urlopen('http://snpedia.com/index.php/Special:Browse/' + rsid + formatPair).read())\n trows = soup('table')[1].find_all('tr')\n if len(trows) < 2:\n return { 'error': 'That base pair does not have a trait associated with it.' 
}\n locations = getLocations(soup)\n genotypeData = getData(locations, soup)\n genotypeData['rsid'] = rsid\n genotypeData['genotype'] = pair\n return genotypeData", "def _set_packed_record(self, i, s):\n\n raise NotImplementedError()", "def post_process(self, packet: 'dict[str, Any]') -> 'Schema':\n if self.flags['type'] == Enum_SeedID.IPV6_SOURCE_ADDRESS:\n self.seed = packet.get('src', NoValue)\n return self", "def replace_nucl_with_SNP(\n FASTA, ENTRY,\n SNP_arr, INDEL_arr\n ):\n\n # read in fasta file\n FAdict = {}\n format = \"fasta\"\n handle = open(FASTA)\n FAdict = SeqIO.to_dict(SeqIO.parse(handle, format))\n\n # intialize ID and seq holder\n ID = ''\n Seq = ''\n SNPSeq = ''\n\n # intialize holders for coordinate (COORD), old nucleotide (OLD), and new/snp nucleotide (NEW)\n COORD = ''\n OLD = ''\n NEW = ''\n\n # populate ID and Seq\n ID = FAdict[ENTRY].id\n Seq = FAdict[ENTRY].seq\n OGSeq = FAdict[ENTRY].seq\n \n # if length of SNP_arr is greater than 0 then replace snps\n if len(SNP_arr) > 0:\n # loop through SNP mutations and replace coordinate with sequence\n for snp in SNP_arr:\n\n # assign coordinate (COORD), old nucleotide (OLD), and new/snp nucleotide (NEW)\n COORD = snp[1]\n OLD = snp[2]\n NEW = snp[3]\n\n # check if coordinate matches the old nucleotide\n COORD=int(COORD)-1\n if Seq[COORD] == OLD:\n # create string with new sequence\n SNPSeq=Seq[:COORD] + NEW + Seq[COORD + 1:]\n # reset Seq to have the SNPs\n Seq=SNPSeq\n else:\n print(\"Position\", COORD+1, \"is a\", Seq[COORD], \"and not a\", OLD)\n print(\"Check that the correct position has been specified\")\n sys.exit()\n\n # pass to indel_of_nucl\n indel_of_nucl(\n SNPSeq, ID,\n INDEL_arr, OGSeq\n )\n\n # if SNP_arr is of length 0, then move to next function\n elif len(SNP_arr) == 0:\n SNPSeq = OGSeq\n # pass to indel_of_nucl\n indel_of_nucl(\n SNPSeq, ID,\n INDEL_arr, OGSeq\n )", "def dump(self, packet):\n # TODO\n packet['type'] = \"table\"\n src = packet['src']\n packet['src'] = packet['dst']\n packet['dst'] = src\n\n table_list = []\n\n # TODO fill out table string with routing table\n table_string = \"\"\n # TODO asking for int indexes instead of string for route?\n for ip in self.updates.keys():\n # TODO have to fill ip address of peer\n\n entry = {'network' : self.updates[ip][MESG][NTWK], 'netmask' : self.updates[ip][MESG][NMSK], 'peer' : ip}\n table_list.append(entry)\n packet[MESG] = table_list\n msg = json.dumps(packet)\n #print(json.dumps(packet, sort_keys=True, indent=4))\n\n sock = self.sockets[src]\n sock.sendall(msg.encode())\n return True", "def _record_all_snps(self, filename, chromosome, position, ref, alt):\n\n if filename in self.snp_positions.keys():\n if chromosome in self.snp_positions[filename].keys():\n self.snp_positions[filename][chromosome].update(\n {\n str(position): {\n \"ref\": ref,\n \"alt\": str(alt)\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\" \", \"\"),\n }\n }\n )\n else:\n self.snp_positions[filename].update(\n {\n chromosome: {\n str(position): {\n \"ref\": ref,\n \"alt\": str(alt)\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\" \", \"\"),\n }\n }\n }\n )\n else:\n self.snp_positions.update(\n {\n filename: {\n chromosome: {\n str(position): {\n \"ref\": ref,\n \"alt\": str(alt)\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\" \", \"\"),\n }\n }\n }\n }\n )", "def insert_SnpCnv_(cur, tSnp='SNP', tRegionCnv='RegionCnv', tname='SnpCnv', chiplike='%'):\n insert = '''insert into %s\n values (?, ?, ?, ?, ?, ?)''' % tname\n\n def 
_select_snp_rows():\n select = \"\"\"select Chr, Loc from %s\n order by Chr, Loc\n \"\"\" % tSnp\n cur.execute(select)\n return [(row[0].encode(), row[1]) for row in cur]\n\n def _select_region_vals(chip):\n select = \"\"\"select Chr, Start, End, State, Cn, Im\n from %s\n where chip=%r\n \"\"\" % (tRegionCnv, chip)\n print select\n cur.execute(select)\n return [(e[0].encode(), e[1], e[2], [e[3].encode(), e[4], e[5]])\n for e in cur]\n\n chips = get_chips(cur, chiplike, tname=tRegionCnv)\n snp_rows = _select_snp_rows()\n cnv = Cnv(snp_rows)\n for chip in chips:\n region_vals = _select_region_vals(chip)\n snp_vals = cnv.snpValuesFromRegionValues(region_vals,\n default=['FM', 2, 0])\n for chr, block in groupby(snp_rows, itemgetter(0)):\n curr_vals = snp_vals[chr]\n for (chr, loc), (state, cn, im) in izip(block, curr_vals):\n cur.execute(insert, (chip, chr, loc, state, cn, im))", "def update_table(self, packet):\n # add packet to list of updates (cache for later)\n self.updates.append(packet)\n packetMessage = packet[MESG]\n # if we don't need to coalesce, add entry to forwarding table\n if not self.coalesce(packet):\n # add a new entry into forwarding table\n # storing network, netmask, peer, localPref,\n # selfOrigin, AS Path, and Origin information\n self.forwarding_table.append({\n # SRCE\n SRCE: packet[SRCE],\n # DEST\n DEST: packet[DEST],\n # NTWK\n NTWK: packetMessage[NTWK],\n # NMSK\n NMSK: packetMessage[NMSK],\n # PEER IP\n PEER: packet[SRCE],\n # Local Pref\n LPRF: packetMessage[LPRF],\n # Self Origin\n SORG: packetMessage[SORG],\n # AS Path\n APTH: packetMessage[APTH],\n # Origin\n ORIG: packetMessage[ORIG],\n # CIDR Prefix Length\n \"CIDR\": self.get_prefix(packetMessage)\n })", "def serialize(self, buf, offset):\n super(GPRSActionPushUDPIP,self).serialize(buf, offset)\n ofproto_parser.msg_pack_into(\"!Hxx4s4sHH\", buf, offset+8, \n self.subtype, self.da, self.sa, self.dp, self.sp)", "def __init__(self, bytes = None):\n hrd = pcs.Field(\"hrd\", 16, default = 1)\n pro = pcs.Field(\"pro\", 16, default = 0x800)\n hln = pcs.Field(\"hln\", 8, default = 6)\n pln = pcs.Field(\"pln\", 8, default = 4)\n op = pcs.Field(\"op\", 16)\n sha = pcs.StringField(\"sha\", 48)\n spa = pcs.Field(\"spa\", 32)\n tha = pcs.StringField(\"tha\", 48)\n tpa = pcs.Field(\"tpa\", 32)\n \n pcs.Packet.__init__(self, [hrd, pro, hln, pln, op,\n sha, spa, tha, tpa], bytes = bytes)\n self.description = \"ARP\"\n self.data = None", "def send_sus_list(key):\n while True:\n if not receive_sus():\n signature = key.create_signature(json.dumps(SUS) + '2')\n\n pack_send = Ether(dst='98:98:98:22:22:22') / \\\n IP(dst='172.16.104.16') / \\\n UDP(dport=2223, sport=2223) / \\\n DB(len_sign=len(signature), cmd=2,\n send_num=5, param=signature + json.dumps(SUS).encode())\n\n conf.iface = 'eth0'\n sendp(pack_send)", "def __init__(self, bytes = None):\n version = pcs.Field(\"version\", 4, default = 4)\n hlen = pcs.Field(\"hlen\", 4)\n tos = pcs.Field(\"tos\", 8)\n length = pcs.Field(\"length\", 16)\n id = pcs.Field(\"id\", 16)\n flags = pcs.Field(\"flags\", 3)\n offset = pcs.Field(\"offset\", 13)\n ttl = pcs.Field(\"ttl\", 8, default = 64)\n protocol = pcs.Field(\"protocol\", 8)\n checksum = pcs.Field(\"checksum\", 16)\n src = pcs.Field(\"src\", 32)\n dst = pcs.Field(\"dst\", 32)\n pcs.Packet.__init__(self,\n [version, hlen, tos, length, id, flags, offset,\n ttl, protocol, checksum, src, dst],\n bytes = bytes)\n # Description MUST be set after the PCS layer init\n self.description = \"IPv4\"\n\n\n if (bytes != None):\n offset = 
self.hlen << 2\n self.data = self.next(bytes[offset:len(bytes)])\n else:\n self.data = None", "def define_snps(genome, num):\n for n in range(num):\n snp_pos = get_snp_pos(genome)\n var = Variant(\"snp\", snp_pos, snp_pos, 0)\n genome.add_variant(var)\n genome.unavail_pos.append(snp_pos)", "def __init__(self, lines):\n self.next_addr = 16\n self.buff = lines\n table = {\n '@SP' : 0,\n '@LCL' : 1,\n '@ARG' : 2,\n '@THIS' : 3,\n '@THAT' : 4,\n '@SCREEN' : 16384,\n '@KBD' : 24576}\n R = {'@R'+str(i): i for i in xrange(0, 16)}\n table.update(R)\n self.table = table", "def __parse(self, packet: bytes) -> TSPacket.TSPacket:\n p = TSPacket.TSPacket()\n try:\n b1, b23, b4 = struct.unpack('>BHB', packet[0:4])\n # 4-byte Transport Stream Header\n p.tsh_sync = b1\n p.tsh_tei = (b23 & 32768) >> 15\n p.tsh_pusi = (b23 & 16384) >> 14\n p.tsh_tp = (b23 & 8192) >> 13\n p.tsh_pid = b23 & 8191\n p.tsh_tsc = (b4 & 192) >> 6\n p.tsh_afc = (b4 & 48) >> 4\n p.tsh_cc = b4 & 15\n # Adaptation Field\n if p.tsh_afc == 2 or p.tsh_afc == 3:\n p.af_length = packet[4] # b1\n if p.af_length != 0:\n b2 = packet[5]\n p.af_disc = (b2 & 128) >> 7\n p.af_random = (b2 & 64) >> 6\n p.af_espi = (b2 & 32) >> 5\n p.af_pcrf = (b2 & 16) >> 4\n p.af_opcrf = (b2 & 8) >> 3\n p.af_spf = (b2 & 4) >> 2\n p.af_tpdf = (b2 & 2) >> 1\n p.af_afef = b2 & 1\n pos = 6\n if p.af_pcrf:\n # p.af_pcr = packet[6:12]\n b14, b56 = struct.unpack('>LH', packet[6:12])\n p.af_pcr = ((b14 << 1) + (b56 >> 15)) * 300 + (b56 & 511)\n pos += 6\n if p.af_opcrf:\n # p.af_opcr = packet[pos:(pos+6)]\n b14, b56 = struct.unpack('>LH', packet[6:12])\n p.af_opcr = ((b14 << 1) + (b56 >> 15)) * 300 + (b56 & 511)\n pos += 6\n if p.af_spf:\n p.af_sc = packet[pos]\n pos += 1\n if p.af_tpdf:\n l = packet[pos]\n pos += 1\n p.af_tpd = packet[pos:(pos+l)]\n pos += l\n if p.af_afef:\n l = packet[pos]\n pos += 1\n p.af_ae = packet[pos:(pos+l)]\n # Calculate payload start byte\n if p.tsh_afc == 1:\n p.payload = 4\n elif p.tsh_afc == 3:\n p.payload = 5 + p.af_length\n return p\n except Exception as err:\n logging.warning('TS packet parsing error:' + str(err))\n return None", "def artnet_to_sacn(self):\n while True:\n try:\n input_packet, input_ip = self.socket.artnet_socket.recvfrom(1143)\n except (timeout, BlockingIOError): # Ignore timeouts and blocked socket errors\n continue\n self.artnet_input.new_packet(input_packet)\n self.artnet_input.identify_packet()", "def __udp_preprocess_packet(self, seq):\n return b'06' + seq.to_bytes(4, 'big') \\\n + self.packets_status[seq][\"size\"].to_bytes(2, 'big') \\\n + self.packets_status[seq][\"payload\"]", "def fill_data(self, data):\n self._data = data\n\n self._data_length = data[1:3]\n self._frame_id = data[4]\n self._address = XbeeAddress(data[5:9], data[9:13], data[13:15])\n self._at_command = data[15:17]\n self._command_status = data[17]\n try:\n self._command_data = data[18:21]\n self._checksum = data[22]\n except IndexError:\n self._command_data = None\n self._checksum = data[18]", "def _record_all_snp_positions(self, chromosome, position):\n if chromosome in self.snpsites.keys():\n if str(position) in self.snpsites[chromosome].keys():\n return\n else:\n self.snpsites[chromosome][str(position)] = [False] * len(\n self.vcffilenames\n )\n else:\n self.snpsites.update(\n {chromosome: {str(position): [False] * len(self.vcffilenames)}}\n )", "def write_data(self, packet):\n # TODO: check frequency instead of just doing one and two - could only be 2\n if not packet.satellites:\n return # No satellites\n t = dt.datetime(1980, 1, 6) + \\\n 
dt.timedelta(days=7*packet.week, seconds=int(packet.rcvTow),\n microseconds=(packet.rcvTow - int(packet.rcvTow))*10**6) - \\\n dt.timedelta(seconds=packet.leapS)\n epoch = f'> {t.year:4d} {t.month:02d} {t.day:02d} {t.hour:2d} {t.minute:2d} ' \\\n f'{t.second + t.microsecond*10**-6:11.7f} 0{len(packet.satellites):>3d}{\" \":<44}\\n'\n line = ''\n for s in packet.satellites:\n snr0 = s[0].cno\n if s[0].key == '':\n pass\n elif s[0].sigId in [0, 1]:\n line = line + f'{s[0].key:3s}{s[0].prMeas:>14.3f} {snr0:>1d}{s[0].cpMeas:>14.3f} {snr0:>1d}' \\\n f'{s[0].doMeas:>14.3f} {snr0:>1d}{s[0].cno:>14.3f} {snr0:>1d}'\n if len(s) == 2: # If L1 and L2 Frequencies\n snr1 = s[1].cno\n line = line + f'{s[1].prMeas:>14.3f} {snr1:>1d}{s[1].cpMeas:>14.3f} {snr1:>1d}{s[1].doMeas:>14.3f} ' \\\n f'{snr1:>1d}{s[1].cno:>14.3f} {snr1:>1d}\\n'\n\n else:\n line = line + f'{0.0:>14.3f} {0.0:>14.3f} {0.0:>14.3f} {0.0:>14.3f} \\n'\n elif len(s) < 2:\n line = line + f'{0.0:>14.3f} {0.0:>14.3f} {0.0:>14.3f} {0.0:>14.3f} ' \\\n f'{s[0].key:3s}{s[0].prMeas:>14.3f} {snr0:>1d}{s[0].cpMeas:>14.3f} {snr0:>1d}' \\\n f'{s[0].doMeas:>14.3f} {snr0:>1d}{s[0].cno:>14.3f} {snr0:>1d}\\n'\n elif len(s) == 2:\n line = line + f'{s[1].prMeas:>14.3f} {snr1:>1d}{s[1].cpMeas:>14.3f} {snr1:>1d}{s[1].doMeas:>14.3f} ' \\\n f'{snr1:>1d}{s[1].cno:>14.3f} {snr1:>1d}' \\\n f'{s[0].key:3s}{s[0].prMeas:>14.3f} {snr0:>1d}{s[0].cpMeas:>14.3f} {snr0:>1d}' \\\n f'{s[0].doMeas:>14.3f} {snr0:>1d}{s[0].cno:>14.3f} {snr0:>1d}\\n'\n \n try:\n with open(self.fname, 'a') as f:\n f.write(epoch + line)\n except FileNotFoundError:\n print('Bad data directory. Try again.')\n sys.exit(0)", "def SendPacketsElements(self) -> _n_0_t_7[SendPacketsElement]:", "def __init__(self, packet: Dict[str, Any]) -> None:\n self.source_address = packet['ip_header']['source_address']\n self.source_port = packet['tcp_header']['source_port']\n self.destination_address = packet['ip_header']['destination_address']\n self.destination_port = packet['tcp_header']['destination_port']\n\n self.packets = [(TCPStream.INBOUND, packet)]", "def set_packet(self, packet):\n if not isinstance(packet, list):\n self._packets.append(packet)\n elif len(packet) != 0:\n for i in packet:\n self._packets.append(i)" ]
[ "0.5446104", "0.53793764", "0.53116316", "0.5188341", "0.5188341", "0.5178828", "0.51245576", "0.51135856", "0.506556", "0.49394774", "0.49370044", "0.49324453", "0.49284285", "0.48855937", "0.48824796", "0.4859781", "0.48385096", "0.482571", "0.48249093", "0.48228484", "0.48194945", "0.48165947", "0.48150805", "0.48101893", "0.47964206", "0.47922108", "0.4784616", "0.47837672", "0.47727183", "0.47580937" ]
0.7410464
0
Fetch chunks and yield in order. Chunks are downloaded with concurrency as configured in `async_queue`
def for_each_chunk(blob: Blob, chunk_size: int=default_chunk_size, async_queue: Optional[AsyncQueue]=None): reader = Reader(blob, chunk_size=chunk_size) if async_queue is not None: for chunk_number in reader._unfetched_chunks: async_queue.put(reader._fetch_chunk, chunk_number) for chunk in async_queue.consume(): yield chunk else: for chunk_number in reader._unfetched_chunks: yield reader._fetch_chunk(chunk_number)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def for_each_chunk_async(blob: Blob, async_set: AsyncSet, chunk_size: int=default_chunk_size):\n reader = Reader(blob, chunk_size)\n\n def fetch_chunk(chunk_number):\n data = reader._fetch_chunk(chunk_number)\n return chunk_number, data\n\n for chunk_number in range(reader.number_of_chunks):\n for cn, d in async_set.consume_finished():\n yield cn, d\n # Breaking after the first yield allows us to add more downloads to the pot without\n # waiting for the client to complete potentially time-consuming operations.\n break\n async_set.put(fetch_chunk, chunk_number)\n for cn, d in async_set.consume():\n yield cn, d", "async def async_readchunks(self, size: int):\n while True:\n data = await self.read(size)\n if data:\n await yield_(data)\n else:\n return", "def iter_chunks(self) -> ChunkTupleAsyncStreamIterator:\n ...", "async def aiterator(self, chunk_size=2000):\n if chunk_size <= 0:\n raise ValueError(\"Chunk size must be strictly positive.\")\n use_chunked_fetch = not connections[self.db].settings_dict.get(\n \"DISABLE_SERVER_SIDE_CURSORS\"\n )\n iterable = self._iterable_class(\n self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size\n )\n if self._prefetch_related_lookups:\n results = []\n\n async for item in iterable:\n results.append(item)\n if len(results) >= chunk_size:\n await aprefetch_related_objects(\n results, *self._prefetch_related_lookups\n )\n for result in results:\n yield result\n results.clear()\n\n if results:\n await aprefetch_related_objects(\n results, *self._prefetch_related_lookups\n )\n for result in results:\n yield result\n else:\n async for item in iterable:\n yield item", "def iter_asynchronously(gen_func):\n q = Queue()\n p = Process(target=_async_queue_manager, args=(gen_func, q))\n p.start()\n while True:\n item = q.get()\n if item is PoisonPill:\n break\n else:\n yield item", "def async_fetch(self):\n args = (async_get_pipe, self.zargs, self.connections)\n mapped = yield ait.async_map(*args)\n return_value(multiplex(mapped))", "async def get_iter(self) -> AsyncIterator[Data]:\n async with self.read_mutex:\n if self.get_in_progress:\n # This should be guarded against with the read_mutex,\n # exception is only here as a failsafe\n raise ServerError(\n \"Called get_iter on Websocket frame assembler \"\n \"while asynchronous get is already in progress.\"\n )\n self.get_in_progress = True\n\n chunks = self.chunks\n self.chunks = []\n self.chunks_queue = asyncio.Queue()\n\n # Sending None in chunk_queue supersedes setting message_complete\n # when switching to \"streaming\". 
If message is already complete\n # when the switch happens, put() didn't send None, so we have to.\n if self.message_complete.is_set():\n await self.chunks_queue.put(None)\n\n # Locking with get_in_progress ensures only one task can get here\n for c in chunks:\n yield c\n while True:\n chunk = await self.chunks_queue.get()\n if chunk is None:\n break\n yield chunk\n\n # Unpause the transport, if its paused\n if self.paused:\n self.protocol.resume_frames()\n self.paused = False\n if not self.get_in_progress: # no cov\n # This should be guarded against with the read_mutex,\n # exception is here as a failsafe\n raise ServerError(\n \"State of Websocket frame assembler was modified while an \"\n \"asynchronous get was in progress.\"\n )\n self.get_in_progress = False\n if not self.message_complete.is_set(): # no cov\n # This should be guarded against with the read_mutex,\n # exception is here as a failsafe\n raise ServerError(\n \"Websocket frame assembler chunks queue ended before \"\n \"message was complete.\"\n )\n self.message_complete.clear()\n if self.message_fetched.is_set(): # no cov\n # This should be guarded against with the read_mutex,\n # and get_in_progress check, this exception is\n # here as a failsafe\n raise ServerError(\n \"Websocket get_iter() found a message when state was \"\n \"already fetched.\"\n )\n\n self.message_fetched.set()\n # this should already be empty, but set it here for safety\n self.chunks = []\n self.chunks_queue = None", "async def fetch_all(self, urls):\n async with ClientSession() as session:\n tasks = []\n for url in urls:\n task = asyncio.create_task(self.fetch(session, url))\n tasks.append(task)\n results = await asyncio.gather(*tasks)\n return results", "async def fetch_next_block(self):\n\n results = []\n for _ in range(self._page_size):\n try:\n results.append(await self.__anext__())\n except StopAsyncIteration:\n # no more results\n break\n return results", "async def run():\n sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)\n tasks = []\n\n async with ClientSession() as session:\n for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:\n task = asyncio.ensure_future(bound_fetch(sem, u, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses", "async def run(self):\n pool_tasks = []\n async with aiomultiprocess.Pool(\n processes=4, maxtasksperchild=64, childconcurrency=8, queuecount=2\n ) as pool:\n for call in self.calls_list:\n pool_tasks.append(pool.apply(self._get_call, args=[call]))\n for download in tqdm(asyncio.as_completed(pool_tasks), total=len(pool_tasks)):\n await download", "def __iter__(self):\n\n # collector will fetch chunksize array for each 'get' call\n collector = FIFOArray(self.chunksize, self.axis)\n\n # make tmp array to hold generated subarrs\n tmp = []\n tmp_size = 0\n for subarr in self.data(**self.kwargs):\n\n tmp.append(subarr)\n tmp_size += subarr.shape[self.axis]\n\n # if tmp exceeds chunksize put in collector\n if tmp_size >= self.chunksize:\n arr = np.concatenate(tmp, axis=self.axis)\n collector.put(arr)\n\n # fetch chunksize till not full\n while collector.full():\n yield collector.get()\n\n # place leftover back into tmp and empty collector\n tmp = [collector.queue]\n tmp_size = collector.qsize()\n collector.queue = np.array([])\n\n else:\n\n # append to tmp again\n continue\n\n # else runs after normal loop exit -- required here\n else: #pylint: disable=useless-else-on-loop\n\n # yield whatever is left in tmp (its below chunksize)\n remaining = np.concatenate(tmp, 
axis=self.axis)\n if remaining.size > 0:\n yield remaining", "async def queued_coroutine(self) -> None:\n try:\n while True:\n url, max_redirect = await self.queue.get()\n\n assert url in self.crawled_urls\n\n await self.fetch(url, max_redirect)\n\n self.queue.task_done()\n\n except asyncio.CancelledError:\n pass", "def chunk_reader(chunk_filenames, chunk_filename_queue):\n chunks = []\n done = chunk_filenames\n\n while True:\n if not chunks:\n chunks, done = done, chunks\n random.shuffle(chunks)\n if not chunks:\n print(\"chunk_reader didn't find any chunks.\")\n return None\n while len(chunks):\n filename = chunks.pop()\n done.append(filename)\n chunk_filename_queue.put(filename)\n print(\"chunk_reader exiting.\")\n return None", "async def worker(\n self, queue: asyncio.Queue, session: aiohttp.ClientSession\n ) -> None:\n while True:\n url = await queue.get()\n await self.fetch(url, session)\n queue.task_done()", "def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]:\n ...", "def completed_prefetch(self, blocking_wait=False, max_yield=999):\n\n for worker, obj_ref in self.completed(blocking_wait=blocking_wait):\n self._fetching.append((worker, obj_ref))\n\n for _ in range(max_yield):\n if not self._fetching:\n break\n\n yield self._fetching.popleft()", "async def loop_fetch_proxies(self):\n LOG.debug('Try to get proxies from %s' % self.domain)\n self.produce_url_task = asyncio.ensure_future(self.gen_urls(self.url))\n while True:\n try:\n while len(self.consume_tasks) <= self.max_conn:\n url = await self.url_pool.get()\n task = asyncio.ensure_future(self.fetch_on_page(url))\n self.consume_tasks.append(task)\n self.consume_tasks = list(filter(lambda t: not t.done(), self.consume_tasks))\n if self.pool.full() or self.consume_tasks:\n await asyncio.sleep(1)\n except concurrent.futures.CancelledError as e:\n LOG.debug(\"%s canceled from working.\" % (self.__class__.__name__))\n break;\n except (Exception) as e:\n LOG.error(\"Loop for %s error with %s.%s\" % (self.__class__.__name__, e, type(e)))\n break;\n # return [self.fetch_on_page(url) for url in self.url2urls(self.url)]", "async def fetch_all(urls: List[str]) -> None:\n tasks = []\n async with ClientSession() as session:\n for url in urls:\n task = asyncio.ensure_future(fetch(url, session))\n tasks.append(task) # create list of tasks\n done = await asyncio.gather(*tasks)\n dp = pathlib.Path(\"data\")\n for url, res in done:\n fp = dp.joinpath(url[url.find(\"json\") + 5 :])\n with fp.open(\"w\") as out:\n out.write(res.decode(\"utf-8\"))", "def iterate(self, *args, **kwargs):\n # use thread pool to parallel process\n q = Queue()\n\n max_workers = self.num_workers\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n # Set up workers\n futures = []\n for i in range(max_workers):\n futures.append(executor.submit(self._worker, q))\n\n # Submit worker jobs\n # Wrap the main task in a try block so that the queue completes regardless of success/failure of main job\n try:\n for f in self.ftp_downloader.iterate(*args, **kwargs):\n q.put(f)\n yield f\n finally:\n # Stop processing\n # Not doing a queue to join, because if all workers fail this will hang with items still left in q...\n # q.join()\n\n # poison pill\n for i in range(max_workers):\n q.put(None)\n for future in futures:\n future.result()", "def data_generator():\n msg = Message(Message.ADD, queue.uuid, queue)\n PROVIDER_MQ.put(msg)\n keep_running = True\n while keep_running:\n try:\n chunk = queue.get()\n yield chunk\n except Empty:\n 
app.logger.info('Queue empty. Ending stream')\n keep_running = False", "def task(self, chunk_filename_queue, writer):\n self.init_structs()\n while True:\n filename = chunk_filename_queue.get()\n for item in self.single_file_gen(filename):\n writer.send_bytes(item)", "def chunk_generator( callback, request, c ):", "async def main(self, loop: asyncio.get_event_loop) -> None:\n queue = asyncio.Queue()\n\n for url in self.url_list:\n queue.put_nowait(url)\n\n async with aiohttp.ClientSession(loop=loop) as session:\n workers = [\n asyncio.create_task(self.worker(queue, session))\n for _ in range(self.max_treads)\n ]\n await queue.join()\n\n for worker in workers:\n worker.cancel()\n\n await asyncio.gather(*workers, return_exceptions=True)", "def asynchronous(urls, batch_size, delay=0, verbose=False):\n try:\n count=1\n threads=[]\n print(urls.strip(' ').split(\",\"))\n for url in urls.strip(\" '\").split(\",\"):\n print('On batch {}'.format(count))\n threads.append(gevent.spawn(fetch, url, verbose))\n responses = gevent.joinall(threads)\n time.sleep(delay)\n return responses\n except Exception as e:\n print(e)\n return None", "def jobs(self, time_frame):\n for name, content in self.connection.job_get(time_frame):\n task = self.task(name, content, self.connection)\n yield task", "def generate():\n with open(remote_path, \"rb\") as f:\n for chunk in iter(lambda: f.read(buffer_size), b''):\n yield chunk", "def request_chunk(self, x, z):\n\n if (x, z) in self.chunk_cache:\n returnValue(self.chunk_cache[x, z])\n elif (x, z) in self.dirty_chunk_cache:\n returnValue(self.dirty_chunk_cache[x, z])\n elif (x, z) in self._pending_chunks:\n # Rig up another Deferred and wrap it up in a to-go box.\n retval = yield self._pending_chunks[x, z].deferred()\n returnValue(retval)\n\n chunk = Chunk(x, z)\n yield maybeDeferred(self.serializer.load_chunk, chunk)\n\n if chunk.populated:\n self.chunk_cache[x, z] = chunk\n self.postprocess_chunk(chunk)\n #self.factory.scan_chunk(chunk)\n returnValue(chunk)\n\n if self.async:\n from ampoule import deferToAMPProcess\n from bravo.remote import MakeChunk\n\n d = deferToAMPProcess(MakeChunk,\n x=x,\n z=z,\n seed=self.seed,\n generators=configuration.getlist(self.config_name, \"generators\")\n )\n\n # Get chunk data into our chunk object.\n def fill_chunk(kwargs):\n chunk.blocks = fromstring(kwargs[\"blocks\"],\n dtype=uint8).reshape(chunk.blocks.shape)\n chunk.heightmap = fromstring(kwargs[\"heightmap\"],\n dtype=uint8).reshape(chunk.heightmap.shape)\n chunk.metadata = fromstring(kwargs[\"metadata\"],\n dtype=uint8).reshape(chunk.metadata.shape)\n chunk.skylight = fromstring(kwargs[\"skylight\"],\n dtype=uint8).reshape(chunk.skylight.shape)\n chunk.blocklight = fromstring(kwargs[\"blocklight\"],\n dtype=uint8).reshape(chunk.blocklight.shape)\n\n return chunk\n d.addCallback(fill_chunk)\n else:\n # Populate the chunk the slow way. :c\n for stage in self.pipeline:\n stage.populate(chunk, self.seed)\n\n chunk.regenerate()\n d = succeed(chunk)\n\n # Set up our event and generate our return-value Deferred. 
It has to\n # be done early becaues PendingEvents only fire exactly once and it\n # might fire immediately in certain cases.\n pe = PendingEvent()\n # This one is for our return value.\n retval = pe.deferred()\n # This one is for scanning the chunk for automatons.\n #pe.deferred().addCallback(self.factory.scan_chunk)\n self._pending_chunks[x, z] = pe\n\n def pp(chunk):\n chunk.populated = True\n chunk.dirty = True\n\n self.postprocess_chunk(chunk)\n\n self.dirty_chunk_cache[x, z] = chunk\n del self._pending_chunks[x, z]\n\n return chunk\n\n # Set up callbacks.\n d.addCallback(pp)\n d.chainDeferred(pe)\n\n # Because multiple people might be attached to this callback, we're\n # going to do something magical here. We will yield a forked version\n # of our Deferred. This means that we will wait right here, for a\n # long, long time, before actually returning with the chunk, *but*,\n # when we actually finish, we'll be ready to return the chunk\n # immediately. Our caller cannot possibly care because they only see a\n # Deferred either way.\n retval = yield retval\n returnValue(retval)", "async def run_requests(self):\n loop = asyncio.get_event_loop()\n tasks = []\n async with aiohttp.ClientSession(connector=self.connector) as session:\n\n for index, id in enumerate(self.ids):\n if id not in self.processed_ids:\n url = self.base_url + id\n auth_token = base64.b64encode(id.encode('ascii'))\n header = {\"Authorization\": auth_token.decode('UTF-8')}\n tasks.append(asyncio.ensure_future(self._request_one(url=url, header=header, id=id, index = index, session = session)))\n\n _ = await asyncio.gather(*tasks)", "def iter_chunks(sequence, chunk_size) :\n res = []\n for item in sequence :\n res.append(item)\n if len(res) >= chunk_size :\n yield res\n res = []\n if res : yield res" ]
[ "0.7039756", "0.65459436", "0.62428933", "0.6091742", "0.60693383", "0.6043477", "0.60399", "0.603614", "0.6003629", "0.599732", "0.59915036", "0.5966428", "0.5956915", "0.5922741", "0.5853379", "0.58391833", "0.5839072", "0.58271337", "0.5776846", "0.5762124", "0.57584995", "0.5739917", "0.5735254", "0.5643034", "0.55696714", "0.55535686", "0.5538712", "0.5508454", "0.5491897", "0.5486602" ]
0.7003836
1
Fetch chunks with concurrency as configured in `async_set`, yielding results as soon as available. Results may be returned in any order.
def for_each_chunk_async(blob: Blob, async_set: AsyncSet, chunk_size: int=default_chunk_size): reader = Reader(blob, chunk_size) def fetch_chunk(chunk_number): data = reader._fetch_chunk(chunk_number) return chunk_number, data for chunk_number in range(reader.number_of_chunks): for cn, d in async_set.consume_finished(): yield cn, d # Breaking after the first yield allows us to add more downloads to the pot without # waiting for the client to complete potentially time-consuming operations. break async_set.put(fetch_chunk, chunk_number) for cn, d in async_set.consume(): yield cn, d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def fetch_all(self, urls):\n async with ClientSession() as session:\n tasks = []\n for url in urls:\n task = asyncio.create_task(self.fetch(session, url))\n tasks.append(task)\n results = await asyncio.gather(*tasks)\n return results", "async def fetch_next_block(self):\n\n results = []\n for _ in range(self._page_size):\n try:\n results.append(await self.__anext__())\n except StopAsyncIteration:\n # no more results\n break\n return results", "def for_each_chunk(blob: Blob, chunk_size: int=default_chunk_size, async_queue: Optional[AsyncQueue]=None):\n reader = Reader(blob, chunk_size=chunk_size)\n if async_queue is not None:\n for chunk_number in reader._unfetched_chunks:\n async_queue.put(reader._fetch_chunk, chunk_number)\n for chunk in async_queue.consume():\n yield chunk\n else:\n for chunk_number in reader._unfetched_chunks:\n yield reader._fetch_chunk(chunk_number)", "async def aiterator(self, chunk_size=2000):\n if chunk_size <= 0:\n raise ValueError(\"Chunk size must be strictly positive.\")\n use_chunked_fetch = not connections[self.db].settings_dict.get(\n \"DISABLE_SERVER_SIDE_CURSORS\"\n )\n iterable = self._iterable_class(\n self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size\n )\n if self._prefetch_related_lookups:\n results = []\n\n async for item in iterable:\n results.append(item)\n if len(results) >= chunk_size:\n await aprefetch_related_objects(\n results, *self._prefetch_related_lookups\n )\n for result in results:\n yield result\n results.clear()\n\n if results:\n await aprefetch_related_objects(\n results, *self._prefetch_related_lookups\n )\n for result in results:\n yield result\n else:\n async for item in iterable:\n yield item", "async def run():\n sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)\n tasks = []\n\n async with ClientSession() as session:\n for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:\n task = asyncio.ensure_future(bound_fetch(sem, u, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses", "def async_fetch(self):\n args = (async_get_pipe, self.zargs, self.connections)\n mapped = yield ait.async_map(*args)\n return_value(multiplex(mapped))", "def chunk(self, count):\n page = 1\n results = self.for_page(page, count).get()\n\n while len(results) > 0:\n yield results\n\n page += 1\n\n results = self.for_page(page, count).get()", "def iter_chunks(self) -> ChunkTupleAsyncStreamIterator:\n ...", "async def async_readchunks(self, size: int):\n while True:\n data = await self.read(size)\n if data:\n await yield_(data)\n else:\n return", "def queryset_in_batches(queryset):\n start_pk = 0\n\n while True:\n qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE]\n pks = list(qs.values_list(\"pk\", flat=True))\n\n if not pks:\n break\n\n yield pks\n\n start_pk = pks[-1]", "def chunks(l):\n for i in range(0, len(l), concurrent):\n yield l[i:i + concurrent]", "def iterfetch(cursor, batchsize=1000):\n\t# type: (Cursor, int) -> Iterator[Any]\n\n\twhile True:\n\t\tresults = cursor.fetchmany(batchsize)\n\t\tif not results:\n\t\t\tbreak\n\t\tfor result in results:\n\t\t\tyield result", "def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:", "async def loop_fetch_proxies(self):\n LOG.debug('Try to get proxies from %s' % self.domain)\n self.produce_url_task = asyncio.ensure_future(self.gen_urls(self.url))\n while True:\n try:\n while len(self.consume_tasks) <= self.max_conn:\n url = await self.url_pool.get()\n task = asyncio.ensure_future(self.fetch_on_page(url))\n 
self.consume_tasks.append(task)\n self.consume_tasks = list(filter(lambda t: not t.done(), self.consume_tasks))\n if self.pool.full() or self.consume_tasks:\n await asyncio.sleep(1)\n except concurrent.futures.CancelledError as e:\n LOG.debug(\"%s canceled from working.\" % (self.__class__.__name__))\n break;\n except (Exception) as e:\n LOG.error(\"Loop for %s error with %s.%s\" % (self.__class__.__name__, e, type(e)))\n break;\n # return [self.fetch_on_page(url) for url in self.url2urls(self.url)]", "async def fetch_all(urls: List[str]) -> None:\n tasks = []\n async with ClientSession() as session:\n for url in urls:\n task = asyncio.ensure_future(fetch(url, session))\n tasks.append(task) # create list of tasks\n done = await asyncio.gather(*tasks)\n dp = pathlib.Path(\"data\")\n for url, res in done:\n fp = dp.joinpath(url[url.find(\"json\") + 5 :])\n with fp.open(\"w\") as out:\n out.write(res.decode(\"utf-8\"))", "async def _get_data(self):\n coros = []\n results = []\n for series_ids in self.series_ids:\n response = self._post(data={\"series_id\": series_ids})\n coros.append(response)\n if len(coros) == 5: # throttle at 5\n _ = await asyncio.gather(*coros)\n results.extend(_)\n coros = [] # Reset accumulator\n if coros:\n results.extend(await asyncio.gather(*coros))\n\n return filter(None, results)", "async def run(self):\n pool_tasks = []\n async with aiomultiprocess.Pool(\n processes=4, maxtasksperchild=64, childconcurrency=8, queuecount=2\n ) as pool:\n for call in self.calls_list:\n pool_tasks.append(pool.apply(self._get_call, args=[call]))\n for download in tqdm(asyncio.as_completed(pool_tasks), total=len(pool_tasks)):\n await download", "def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]:\n ...", "def batch_query(url, headers=None, timeout=299):\n\n offset = 0\n count = 0\n\n proxies = {\n 'http': ARGS.proxy_string,\n 'https': ARGS.proxy_string\n }\n\n options = {\n \"headers\": headers,\n \"verify\": False,\n \"timeout\": timeout,\n \"proxies\": proxies,\n \"params\": {}\n }\n\n while True: # do - while offset < count\n options[\"params\"][\"offset\"] = offset\n req = requests.get(url, **options)\n\n if not req.status_code == 200:\n errmsg = \"status_code: {0.status_code}: {0.content}\"\n raise UnknownResult(errmsg.format(req))\n\n res = req.json()\n data = res[\"data\"]\n count = res.get(\"count\", 0)\n\n yield from data\n\n offset += len(data)\n\n if offset >= count:\n break", "def chunks(self, chunk_size=None):\n if not chunk_size:\n chunk_size = self.DEFAULT_CHUNK_SIZE\n\n if hasattr(self, 'seek'):\n self.seek(0)\n\n while True:\n data = self.read(chunk_size)\n if not data:\n break\n yield data", "def chunks(self, chunk_size=None):\n if not chunk_size:\n chunk_size = self.DEFAULT_CHUNK_SIZE\n\n if hasattr(self, 'seek'):\n self.seek(0)\n\n while True:\n data = self.read(chunk_size)\n if not data:\n break\n yield data", "def fetchsome(cursor, arraySize=5000):\n while True:\n results = cursor.fetchmany(arraySize)\n if not results:\n break\n for result in results:\n yield result", "async def fetchmany(self, query, size=None, connection=None):\n async with self.connection(connection) as conn:\n r = await conn.execute(query)\n return await r.fetchmany(size)", "def asynchronous(urls, batch_size, delay=0, verbose=False):\n try:\n count=1\n threads=[]\n print(urls.strip(' ').split(\",\"))\n for url in urls.strip(\" '\").split(\",\"):\n print('On batch {}'.format(count))\n threads.append(gevent.spawn(fetch, url, verbose))\n responses = gevent.joinall(threads)\n 
time.sleep(delay)\n return responses\n except Exception as e:\n print(e)\n return None", "def iter_call(self, service, method,\r\n chunk=100, limit=None, offset=0, *args, **kwargs):\r\n if chunk <= 0:\r\n raise AttributeError(\"Chunk size should be greater than zero.\")\r\n\r\n if limit:\r\n chunk = min(chunk, limit)\r\n\r\n result_count = 0\r\n kwargs['iter'] = False\r\n while True:\r\n if limit:\r\n # We've reached the end of the results\r\n if result_count >= limit:\r\n break\r\n\r\n # Don't over-fetch past the given limit\r\n if chunk + result_count > limit:\r\n chunk = limit - result_count\r\n results = self.call(service, method,\r\n offset=offset, limit=chunk, *args, **kwargs)\r\n\r\n # It looks like we ran out results\r\n if not results:\r\n break\r\n\r\n # Apparently this method doesn't return a list.\r\n # Why are you even iterating over this?\r\n if not isinstance(results, list):\r\n yield results\r\n break\r\n\r\n for item in results:\r\n yield item\r\n result_count += 1\r\n\r\n offset += chunk\r\n\r\n if len(results) < chunk:\r\n break", "def get_json_objects_async(urls):\n pool = ThreadPool(processes=4)\n url_json_dicts = pool.map(request_url_json_dict_from_url, urls)\n pool.close()\n pool.join()\n return url_json_dicts", "def iterator(self, chunk_size=None):\n if chunk_size is None:\n if self._prefetch_related_lookups:\n raise ValueError(\n \"chunk_size must be provided when using QuerySet.iterator() after \"\n \"prefetch_related().\"\n )\n elif chunk_size <= 0:\n raise ValueError(\"Chunk size must be strictly positive.\")\n use_chunked_fetch = not connections[self.db].settings_dict.get(\n \"DISABLE_SERVER_SIDE_CURSORS\"\n )\n return self._iterator(use_chunked_fetch, chunk_size)", "def completed_prefetch(self, blocking_wait=False, max_yield=999):\n\n for worker, obj_ref in self.completed(blocking_wait=blocking_wait):\n self._fetching.append((worker, obj_ref))\n\n for _ in range(max_yield):\n if not self._fetching:\n break\n\n yield self._fetching.popleft()", "def _fetch_in_bulk(self, func_name, page_range, **func_args):\n all_results = []\n prog_bar = None\n\n if 'page_num' in func_args:\n func_args = func_args.pop('page_num')\n\n if self.profile.use_prog_bar:\n try:\n max_val = (max(page_range) + 1)\n except ValueError:\n max_val = 1\n\n prog_bar = progressbar.ProgressBar(max_value=max_val)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.profile.num_thread_workers) as executor:\n counter = 1\n future_to_page = {executor.submit(func_name, page_num=page, **func_args): page for page in page_range}\n\n for future in concurrent.futures.as_completed(future_to_page):\n try:\n data = future.result()\n except PageSizeError:\n raise\n except RequestFailed:\n continue\n\n if 'content' in data:\n items = data['content']\n for item in items:\n all_results.append(item)\n\n if self.profile.use_prog_bar:\n prog_bar.update(counter)\n time.sleep(0.1)\n counter += 1\n\n if self.profile.use_prog_bar:\n prog_bar.finish()\n\n return all_results", "def fetch_things2(query, chunk_size = 100, batch_fn = None):\r\n orig_rules = deepcopy(query._rules)\r\n query._limit = chunk_size\r\n items = list(query)\r\n done = False\r\n while items and not done:\r\n #don't need to query again at the bottom if we didn't get enough\r\n if len(items) < chunk_size:\r\n done = True\r\n\r\n if batch_fn:\r\n items = batch_fn(items)\r\n\r\n for i in items:\r\n yield i\r\n\r\n if not done:\r\n query._rules = deepcopy(orig_rules)\r\n query._after(i)\r\n items = list(query)" ]
[ "0.66831255", "0.64402056", "0.63998175", "0.6395605", "0.61951405", "0.6106072", "0.5962315", "0.5895014", "0.5870512", "0.58378106", "0.5837633", "0.57841134", "0.57645226", "0.5702948", "0.56887645", "0.56626827", "0.56352234", "0.56112266", "0.5609669", "0.55819654", "0.55819654", "0.55697185", "0.55545324", "0.554552", "0.5541505", "0.5514866", "0.5489556", "0.5487321", "0.5446273", "0.5437011" ]
0.75537604
0
Syntactic sugar for timethis with default logger at DEBUG level
def debugtime(message = None, level = logging.DEBUG, store = lambda _:_): return timethis(message, lambda *args: logging.log(level, *args), store)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug_logger(name='test'):\n return LogAdapter(DebugLogger(), name)", "def logger(self):\n pass", "def debug ( self , message , *args , **kwargs ) :\n return self.logger.debug ( message , *args , **kwargs )", "def logger_initiate():\n logger.setLevel(logging.DEBUG)\n return logging.basicConfig(\n format=(\n '%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s %(message)s'),\n datefmt='%Y-%m-%d %H:%M:%S')", "def timethis(message = None, logit = lambda *args: logging.info(*args), store = lambda _:_):\n def wrap(f, *args, **kw):\n @wraps(f)\n def wrapped(*args, **kw):\n t0 = datetime.now()\n r = f(*args, **kw)\n delta = datetime.now()-t0\n logit('%s: %s', message if message is not None else f.__name__, delta)\n store(delta)\n return r\n return wrapped\n return wrap", "def log(self, extra=None):\n\t\tself.stop()\n\t\treturn self._log(self.time(), extra)", "def _log_some_info(self):\n logging.info('info')", "def debug(self, *args, **kwargs):\n self.msg(logging.DEBUG, *args, **kwargs)", "def set_log_time(enable_time=False):\n THE_LOGGER.set_formatter(enable_time)", "def __call__(self, this):\n if self.logging is True:\n self.trace += '{Now} {Host} {Proc} {Event}\\n'.format(\n Now=time.strftime('%c', time.localtime()),\n Host=node(),\n Proc=self.tag,\n Event=this,\n )", "def logger(self):\n return self.debug.ThreadAwareLogger", "def logger(self, value):\n pass", "def _log(self, runtime, extra):\n\t\tif extra is None:\n\t\t\tdebug(\"Timer - %s took %d ms\" % (self._item, 1000 * runtime))\n\t\telse:\n\t\t\tdebug(\"Timer - %s [%s] took %d ms\" % (self._item, str(extra), 1000 * runtime))\n\t\treturn self", "def demo_log(self):\n self.logger.debug('This is a debug')\n self.logger.debug(self.name)\n self.logger.debug(self.doc)", "def _get_logger(self):", "def debug_log(self, msg, *args, **kwargs):\n if self.debug:\n self.log.debug(msg, *args, **kwargs)", "def ThreadAwareLogger(self):\n currentThread=threading.current_thread()\n loggerName=\"%s\"%(currentThread.name)\n if hasattr(self,loggerName): \n return eval(\"self.%s\"%(loggerName))\n if hasattr(self,\"debug\") and hasattr(self.debug,loggerName): # 026 hack - tries to find logger in e.g. NodeProxy. While actually is in debug.\n return eval(\"self.debug.%s\"%(loggerName))\n else:\n if hasattr(self,\"cloneMainLogger\"): # 026 hack (delegated logger tries to find cloneMainLogger in e.g. 
NodeProxy.\n return self.cloneMainLogger(loggerName)\n else: return self.debug.cloneMainLogger(loggerName)", "def simple():\r\n if LogOptions._SIMPLE is None:\r\n LogOptions._SIMPLE = app.get_options().twitter_common_log_simple\r\n return LogOptions._SIMPLE", "def logger(self):\n return logging", "def debug(self, msg, *args):\n if self.lvl<=logging.DEBUG: return self._log(msg, *args)", "def setup_logging(debug=False):\n today = date.today()\n logfile = \"{:04}-{:02}-{:02}-classperf.log\".format(\n today.year, today.month, today.day\n )\n\n teelogger = getLogger(\"opengever-time-layers\")\n formatter = Formatter(\"\")\n\n filehandler = FileHandler(logfile, mode=\"w\")\n filehandler.setFormatter(formatter)\n\n stdouthandler = StreamHandler(stream=stdout)\n stdouthandler.setFormatter(formatter)\n\n if debug:\n teelogger.setLevel(DEBUG)\n else:\n teelogger.setLevel(INFO)\n\n teelogger.addHandler(filehandler)\n teelogger.addHandler(stdouthandler)\n\n return teelogger", "def main(ctx, debug):\n if debug:\n logger.setLevel(logging.DEBUG)", "def logme(logger, *args, **kwargs):\n if logger is not None:\n logger.debug(generate_log(*args, **kwargs))", "def logging(self):\r\n return None", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.log = logging.getLogger(logger_name(__name__))", "def __init__(self):\n self.logger = logger()", "def _get_logger(self):\n return Logger(\"SLOTH\")", "def SetTimestampLogging(new_timestamp=True):\n global _log_time\n _log_time = new_timestamp", "def log_debug(task_request, message):\n _log(logger.debug, task_request, message)", "def log_time(name):\n if DEBUG:\n now = time.time()\n logging.debug('emcc step \"%s\" took %.2f seconds', name, now - TimeLogger.last)\n TimeLogger.update()" ]
[ "0.64855564", "0.6156266", "0.6101502", "0.6100334", "0.6082833", "0.60534525", "0.60445464", "0.5964673", "0.593482", "0.5923773", "0.58918554", "0.58901954", "0.5874944", "0.58692116", "0.5829989", "0.5771339", "0.5764214", "0.5760625", "0.57563484", "0.57418877", "0.57377017", "0.5724187", "0.57170326", "0.5696478", "0.5666746", "0.56309515", "0.56226736", "0.5619759", "0.56125706", "0.5610765" ]
0.75985086
0
Returns only the alerts that have a closure label
def get_closure_alerts(alert_list): out = [] for alert in alert_list: if alert[2] == "Park Closure": out.append(alert) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_all_alert():\n warning = []\n \n all_alerts = db.get_table_content(\"Alert\")\n for alert in all_alerts:\n ticker = Ticker.Ticker(alert[0], True)\n \n if ticker.is_valid and ticker.last_price > 0:\n if alert[1] == \"up\":\n if ticker.last_price > alert[2]:\n tmp_alert = Alert(ticker, alert[1], alert[2], True)\n warning.append(tmp_alert)\n elif alert[1] == 'down':\n if ticker.last_price < alert[2]:\n tmp_alert = Alert(ticker, alert[1], alert[2], True)\n warning.append(tmp_alert)\n \n return warning", "def check_alerts(self):\n status = self._mcp9600.get('STATUS')\n return status.alert_1, status.alert_2, status.alert_3, status.alert_4", "def event_name_choices(self): \n query = \"\"\"\n SELECT DISTINCT event_name\n FROM tubidw.sampled_analytics_thousandth\n WHERE date >= dateadd('day',-2,GETDATE())\n \"\"\"\n df = tdr.query_redshift(query).to_df()\n return ['no event filter'] + pd.Series(df['event_name']).sort_values().tolist()", "def alerts(self):\n return c.Alerts(self)", "def extractTriggeredStocks(self):\n userIDs = []\n messages = []\n for (a, b, c, d, e, f) in self.db.select_all_stocks_triggered():\n userIDs.append(a)\n messages.append(\n \"<b>ALERT!</b>\\n\\nThreshold for <b>{}:{} - {}</b> has been exceeded!\\n\\n3/15MA threshold set: <i>{}</i>\\nLatest 3/15MA calculated: <i>{}</i>\".format(c, b, d, e, f))\n return (userIDs, messages)", "def alert(self):\n return self._alert", "def alerts_county_zone(self: SimpleNWS) -> List[Dict[str, Any]]:\n return self._alerts_county_zone", "def valid_alerts(arch, **kwargs):\n xpath = '//*[contains(concat(\" \", @class), \" alert-\")'\n xpath += ' or contains(concat(\" \", @t-att-class), \" alert-\")'\n xpath += ' or contains(concat(\" \", @t-attf-class), \" alert-\")]'\n xpath += '[not(contains(@class, \"alert-link\") or contains(@t-att-class, \"alert-link\")'\n xpath += ' or contains(@t-attf-class, \"alert-link\"))]'\n xpath += '[not(@role=\"alert\")]'\n xpath += '[not(@role=\"alertdialog\")]'\n xpath += '[not(@role=\"status\")]'\n if arch.xpath(xpath):\n return \"Warning\"\n return True", "def get_sellability_report(melons):", "def warnings_active(self) -> List[Error]:", "def get_new_global_alerts():\n m = int(minutes_a_record_remains_new())\n new_date = make_date_time(seconds_to_subtract=m * 60)\n f = []\n ip_details = my_ip_details()\n\n data = get_feeds().find({\"time.observation\": {\"$gt\": new_date}, \"$or\": [\n {\"source.ip\": {\"$exists\": True}, \"source.network\": {\"$exists\": True}}],\n \"source.network\": {\"$ne\": ip_details['network']}}).sort([(\"time_observation\", DESCENDING)])\n if data and data.count() > 0:\n data = normalize_dict(data)\n for x, value in data.items():\n value['id'] = x\n if '_id' in value:\n del value['_id']\n\n if 'raw' in value:\n del value['raw']\n\n f.append(value)\n\n create_log(None, None, \"{0} Global attacks detected : \".format(len(f)), LOG_SYSTEM_TYPE)\n return f", "def group_by_alert_details(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"group_by_alert_details\")", "def get_calls_in_function( ea ):\r\n\tcallist = []\r\n\tflowgraph = create_flowgraph_from( ea )\r\n\tfor x in flowgraph.nodes.items():\r\n\t\tname = x[0]\r\n\t\tblock = get_basic_block( string.atol( name, 16 ))\r\n\t\tfor instruction in block:\r\n\t\t\tif instruction[ 1 ] == \"call\":\r\n\t\t\t\tcallist.append( instruction )\r\n\treturn callist", "def test_v1_alert_list_get(self):\n pass", "def selection_alert(self):\n self._probe.swj_sequence(136, 0x19bc0ea2e3ddafe986852d956209f392ff)", "def 
get_alerts(self):\n url = 'http://www.p2000-online.net/p2000.php?%s&nofilterform=1'\n url = url % '&'.join(['%s=1' % x for x in self.regions])\n if self.verbose:\n print time.ctime(), url\n try:\n data = urllib.urlopen(url).read()\n except IOError:\n if self.verbose:\n import traceback\n traceback.print_exc()\n return []\n\n doc = soup(data)\n alerts = []\n table = doc.body('table', {'style': 'align:center'})[0]\n for tr in table('tr'):\n if tr.td.get('class', None) == 'DT':\n alerts.append(Alert(*[x.text for x in tr('td')]))\n else:\n recipient = tr('td')[-1].text\n if recipient != '&nbsp;':\n alerts[-1].recipients.append(recipient)\n return alerts", "def alert_messages(messages):\n return {\n 'messages': messages\n }", "def filter_none(alert: AlertInterface) -> bool: # noqa: unused\n return True", "def add_alerts(self):", "def get_matching_alerts(alert_filename):\n reprog = re.compile(r\"\"\"^.*\\[MATCHING\\sALERT\\]\\s.*\n Caption:\\s(.+)\\s\n Transcript:\\s(.+).*$\"\"\", re.I | re.X)\n alerts = []\n fr = open(alert_filename, \"r\")\n for alertline in fr:\n m = reprog.match(alertline)\n if m:\n g = m.groups()\n if (len(g) == 2):\n alert = MatchingAlert(g[0], g[1])\n alerts.append(alert)\n else:\n print(\"Error: wrong format of alert file\")\n break\n else:\n print(\"Error: wrong format of alert file\")\n break\n fr.close()\n return alerts", "def alert_message(self):\r\n alerts = self.q(css=\"div.open-ended-alert\").text\r\n\r\n if len(alerts) < 1:\r\n return \"\"\r\n else:\r\n return alerts[0]", "def check_event(self, event):\r\n for tab in self.tabs:\r\n tab.check_event(event)", "def alerted(self) -> bool:\n\t\treturn self._raw_result['data']['alerted']", "def list_warnings(self):\n lwarn = []\n r = (220,0,0) # Red\n w = (244,234,244) # White\n g = (144,238,144) # Green\n w = (255,255,255) # White\n c = cf.gs.game.character\n ci = c.inventory\n f = ci.sorted_items['food'].amount\n if f > 0 and f < 10:\n lwarn.append(\n {'item':None,'value':'Low food!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif f <= 0:\n lwarn.append(\n {'item':None,'value':'0 food: HP -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n lwarn.append(\n {'item':None,'value':'0 food: Sanity -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n h = cf.gs.game.character.selected_house\n if h == 'Staying with Friends':\n lwarn.append(\n {'item':None,'value':'No house: Sanity -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if c.health == 1:\n lwarn.append(\n {'item':None,'value':'Low health!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif c.health <= 0:\n lwarn.append(\n {'item':None,'value':'0 health!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if len(cf.gs.game.events.inactive_events) == 5:\n lwarn.append(\n {'item':None,'value':'5 events: Activating!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if c.sanity > 0 and c.sanity < 10:\n lwarn.append(\n {'item':None,'value':'Low sanity!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif c.sanity <= 0:\n lwarn.append(\n {'item':None,'value':'0 sanity!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n cash = ci.sorted_items['cash'].amount\n if cash > 0 and cash < 4000:\n lwarn.append(\n {'item':None,'value':'Low cash!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif cash <= 0:\n lwarn.append(\n {'item':None,'value':'0 cash: Sanity-=1!',\n 
'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if len(lwarn) == 0:\n lwarn.append(\n {'item':None,'value':'Green means go!',\n 'selected_bgcolor':g,'bgcolor':g,'font_size':20})\n return lwarn", "def data_alerts(self):\n return self._data_alerts", "def get_sellability_report(melons):\n\n # Fill in the rest \n sellable = None\n for melon in melons:\n if melon.is_sellable():\n sellable = 'CAN BE SOLD'\n else:\n sellable = 'NOT SELLABLE'\n print(f'Harvested by {melon.harvester} from Field {melon.field_number} {sellable}')", "def alerts_all_zones(self: SimpleNWS) -> List[Dict[str, Any]]:\n return self._alerts_all_zones", "def show_additional_alerts():\n\n\n user_id = session['user_id']\n user = User.query.get(user_id)\n city = user.location.city\n data = get_alerts(city)\n\n if len(data[\"alerts\"]) > 0:\n\n alerts = {\n \"description\": data[\"alerts\"][0][\"description\"],\n \"date\": data[\"alerts\"][0][\"date\"],\n \"expires\": data[\"alerts\"][0][\"expires\"],\n \"message\": data[\"alerts\"][0][\"message\"]\n }\n else:\n alerts = {\n \"message\": \"No active Alert\"\n }\n\n\n return jsonify(alerts)", "def test_list_alerts(self):\n pass", "def get_sellability_report(melons):\n\n # Fill in the rest \n for melon in melons:\n\n if melon.is_sellable():\n sellability = \"CAN BE SOLD\"\n else:\n sellability = \"NOT SELLABLE\"\n\n print(\"Harvested by {} from Field {} ({})\".format(melon.harvester,melon.field,sellability))" ]
[ "0.5146252", "0.49078953", "0.4843155", "0.48385358", "0.4751838", "0.47516876", "0.47288427", "0.4712131", "0.4680923", "0.46440727", "0.46357977", "0.46296796", "0.46294236", "0.46020463", "0.45657477", "0.4553938", "0.4550098", "0.4546892", "0.45239007", "0.45100582", "0.45068833", "0.4500247", "0.44893008", "0.44868454", "0.44704714", "0.4466422", "0.44478297", "0.44430283", "0.44403243", "0.44343448" ]
0.673873
0
Creates the dictionary of required info for the map marker given a park id. If there is no closure alert for the park, it assumes the park is open.
def get_waypoint_info(park_id, DB, max_alerts_displayed=3): info = {} # (code, name, lon, lat, url) park_info = DB.get_park_info(park_id) # [(code, title, alert_type, description), (...), ...] alert_info = DB.get_alert_info(park_id) if park_info is None: logging.error("Park ID: %s is not in DB", park_id) return None if park_info[2] == '' or park_info[3] == '': logging.warning("Park has no long lat, skipping") return None info["name"] = park_info[1].replace("`","") info["lon"] = park_info[2] info["lat"] = park_info[3] info["url"] = park_info[4].replace("`","") # If no alert for part, assume open if alert_info is None: info["status"] = "open" info["desc"] = "No closure updates from this park" return info else: # Multiple possible alerts for given park closure_alerts = get_closure_alerts(alert_info) # Only use a certain number of alerts most recent # print(len(closure_alerts)) closure_alerts = closure_alerts[:-min(max_alerts_displayed+1, len(closure_alerts)):-1] # print(closure_alerts) # If only want to use the latest alerts for estimate closure_alerts_desc = [closure_alerts[0]] if closure_alerts else [] closed_words = {"closure", "closed", "close", "closing"} open_words = {"open", "reopen", "increasing"} includes_open, includes_closed = False, False for alert in closure_alerts_desc: description = (alert[1] + " " + alert[3]).replace(",", " ").replace(".", " ").replace(":", " ").replace(";", " ") # If any of the open words are in the description, then the park # could be open. if any(word in open_words for word in description.lower().split()): includes_open = True # print(alert[0], "Open") # If any of the closed words are in description, but open words weren't # then status is closed. if any(word in closed_words for word in description.lower().split()): includes_closed = True # print("Includes closed:", alert) info["desc"] = "<br/>".join([("<i>"+alert[1]+"</i><br/>" + alert[3]).replace("`", "") for alert in closure_alerts]) if includes_open and not includes_closed: # If only open, then open info["status"] = "open" elif not includes_open and includes_closed: # If only closed, then closed info["status"] = "closed" # info["desc"] = "<br/>".join([("<i>"+alert[1]+"</i><br/>" + alert[3]).replace("`", "") for alert in closure_alerts]) else: # If both open and closed, or neither, then other info["status"] = "other" # info["desc"] = "<br/>".join([("<i>"+alert[1]+"</i><br/>" + alert[3]).replace("`", "") for alert in closure_alerts]) return info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_marker_param(self, marker):\n params = {}\n if marker:\n params[self.MARKER] = marker\n return params", "def _get_goal_info(self, last_info):\n start_ID = 4\n end_ID = start_ID + self.num_parts\n places = {}\n for ID in range(start_ID, end_ID):\n assert ID in last_info, f'something went wrong with ID={ID}'\n position, _, _ = last_info[ID]\n places[ID] = (position, (0, 0, 0, 1.))\n return places", "def get_params_info(cls):\n return dict(\n config='laygo configuration dictionary.',\n threshold='transistor threshold flavor.',\n draw_boundaries='True to draw boundaries.',\n num_blk='number of driver segments.',\n show_pins='True to draw pin geometries.',\n )", "def _park_from_data(data):\n\n _description = data[\"description\"]\n _id = data[\"id\"]\n _img = \"\"\n if data[\"images\"]:\n _img = data[\"images\"][0][\"url\"]\n _name = data[\"fullName\"]\n _url = data[\"url\"]\n _lat_long = data[\"latLong\"].split(\",\")\n _latitude = _lat_long[0].split(\":\")[1]\n _longitude = _lat_long[1].split(\":\")[1]\n _weather = get_forecast_by_coordinates(_latitude, _longitude)\n\n park = {\n \"id\": _id,\n \"name\": _name,\n \"description\": _description,\n \"url\": _url,\n \"img\": _img,\n \"weather\": _weather,\n }\n\n return park", "def generateMarkers(self, *args, **kwargs): \n return 'var PloneMapMarkers = [' + \\\n ''.join([\"{'type': '%s','options': { 'position': new google.maps.LatLng( %s, %s ), 'title' : '%s', 'title_' : '%s' }},\" \n % (object.markerIcon, object.latitude, object.longitude, object.Title(), object.getId()) \n for object in self.context.objectValues() \n if hasattr(object, 'latitude') and len(object.latitude) > 0 ])[:-1] \\\n + '];'", "def create_parking_lot(data):\n size = int(data['size'])\n PARKING_LOT[0] = size\n for i in range(1, size + 1):\n PARKING_LOT.append(SlotVehicleDriverMapping())\n return 'Created parking of {} slots'.format(size)", "def Dictionary_create(nMarkers, markerSize):\n pass", "def drawMarker(dictionary, id, sidePixels, img=None, borderBits=None):\n pass", "def init_params():\r\n\r\n p = OrderedDict()\r\n p['startYear'] = 1855\r\n p['num5YearAgeClasses'] = 25\r\n p['numCareLevels'] = 5\r\n p['pixelsInPopPyramid'] = 2000\r\n p['pixelsPerTown'] = 16 # 56\r\n p['mapGridXDimension'] = 20\r\n p['mapGridYDimension'] = 25\r\n p['careLevelColour'] = ['deepskyblue','green','yellow','orange','red']\r\n p['careDemandInHours'] = [ 0.0, 12.0, 24.0, 48.0, 96.0 ]\r\n p['unmetNeedColor'] = ['deepskyblue','green','yellow','orange','red', 'mediumorchid']\r\n p['houseSizeColour'] = ['deepskyblue','green','yellow','orange','red', 'mediumorchid']\r\n p['mainFont'] = 'Helvetica 18'\r\n p['fontColour'] = 'white'\r\n p['dateX'] = 70\r\n p['dateY'] = 20\r\n p['popX'] = 70\r\n p['popY'] = 50\r\n p['delayTime'] = 0.0\r\n p['maxTextUpdateList'] = 12\r\n \r\n return p", "def new_police_report(self):\n\n d = {'category':'',\n 'original_text':'',\n 'line_num':0,\n 'address':'',\n 'map_scale':mapscale.UNKNOWN,\n 'date_month':0,\n 'date_day':0,\n 'date_year':0,\n 'lat':'',\n 'long':''}\n\n return d", "def custom_dictionary(nMarkers, markerSize):\n pass", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def _generate_voter_in_dict(id: bytes, timestamp: int, prep: 'Prep') -> dict:\n voter_in_dict = {\n \"id\": '0x' + bytes.hex(id),\n \"timestamp\": timestamp,\n \"address\": str(prep.address),\n \"name\": prep.name,\n \"amount\": prep.delegated\n }\n 
return voter_in_dict", "def get_map_task_params(self):\n return {}", "def _create_tiling_entry(tiling_json: dict) -> Dict[str, Any]:\n return {\n History._TILING_TIME: get_current_time_string(),\n History._TILING: tiling_json,\n }", "def _generate_common_proposal_info_in_dict(proposal_info: 'ProposalInfo') -> dict:\n proposal_info_in_dict = {\n \"id\": '0x' + bytes.hex(proposal_info.id),\n \"proposer\": str(proposal_info.proposer),\n \"proposerName\": proposal_info.proposer_name,\n \"status\": hex(proposal_info.status),\n \"startBlockHeight\": hex(proposal_info.start_block_height),\n \"endBlockHeight\": hex(proposal_info.end_block_height),\n \"contents\": {\n \"title\": proposal_info.title,\n \"description\": proposal_info.description,\n \"type\": hex(proposal_info.type),\n \"value\": proposal_info.value\n }\n }\n return proposal_info_in_dict", "def get_kegg_info(kegg_info_file):\n kegg_info_fh = open(kegg_info_file, 'r')\n kegg_info_dict = {}\n\n title_line = kegg_info_fh.next()\n kegg_info_dict['title'] = title_line.strip().split(' ')[1]\n\n release_line = kegg_info_fh.next()\n release_info = release_line.strip().split(' ')[1]\n if release_info.startswith('Release'):\n kegg_info_dict['release'] = release_info\n else:\n kegg_info_dict['release'] = None\n\n kegg_info_dict['lab_info'] = kegg_info_fh.next().strip()\n\n for line in kegg_info_fh:\n toks = line.strip().split()\n kegg_info_dict[toks[0]] = ' '.join(toks[1:])\n\n kegg_info_fh.close()\n\n return kegg_info_dict", "def get_infos(self):\n infos = dict()\n infos['dataset'] = self._dataset_name()\n infos['task'] = self.task\n if self.task == 'sep_clean':\n data_license = [librispeech_license]\n else:\n data_license = [librispeech_license, wham_noise_license]\n infos['licenses'] = data_license\n return infos", "def getInfo(self, ID, name, nowForget=False):\n def getCallDict():\n if hasattr(self, 'callDict'):\n result = self.callDict\n if nowForget:\n del self.callDict\n else:\n result = None\n return result\n \n if hasattr(self, 'pastInfo'):\n if ID is None and name == 'callDict':\n return getCallDict()\n if ID in self.pastInfo:\n x = self.pastInfo[ID]\n if nowForget:\n del self.pastInfo[ID]\n return x.get(name, None)\n return None\n if name == 'callDict':\n return getCallDict()\n return None", "def info() -> Dict[str, Any]:", "def get_infos(self):\n infos = dict()\n infos[\"dataset\"] = self.dataset_name\n infos[\"task\"] = \"separate_noisy\"\n infos[\"licenses\"] = [librispeech_license, tac_license]\n return infos", "def map_key_info_to_dict(mki: MapKeyInfo) -> Dict[str, Any]:\n properties = serialize_init_args(object=mki)\n properties[\"__type\"] = mki.__class__.__name__\n return properties", "def get_kml_dict(self,name,filename):\n\n lon1,lon2,lat1,lat2=self.get_bounds()\n d={'lat1':lat1,'lat2':lat2,'lon1':lon1,'lon2':lon2, \\\n 'name':name,'filename':filename,'time':self.get_time()}\n return d", "def get_info(self) -> Optional[Dict[str, Any]]:", "def init_info_pane(self):\n self.single_acq = QtWidgets.QPushButton(\"Single Acquisition\")\n self.start_acq = QtWidgets.QPushButton(\"Start Acquisition\")\n self.stop_acq = QtWidgets.QPushButton(\"Stop Acquisition\")\n\n self.exposure = QtWidgets.QDoubleSpinBox()\n self.exposure.setSuffix(\" ms\")\n self.get_exposure_params()\n\n self.maj_radius = QtWidgets.QLabel()\n self.min_radius = QtWidgets.QLabel()\n self.avg_radius = QtWidgets.QLabel()\n self.ellipticity = QtWidgets.QLabel()\n self.x_radius = QtWidgets.QLabel()\n self.y_radius = QtWidgets.QLabel()\n self.x_centroid = 
QtWidgets.QLabel()\n self.y_centroid = QtWidgets.QLabel()\n\n # Mark current beam position\n self.mark = QtWidgets.QPushButton(\"Mark\")\n self.unmark = QtWidgets.QPushButton(\"Unmark\")\n\n # Mark location\n self.mark_x = QtWidgets.QLabel()\n self.mark_y = QtWidgets.QLabel()\n\n # Beam distance from marked location\n self.x_delta = QtWidgets.QLabel()\n self.y_delta = QtWidgets.QLabel()\n\n # Keep a list of mark sub-widgets so we can hide/show them\n # Obviously we don't want to hide the mark buttons themselves\n self.mark_widgets.extend([\n self.mark_x, self.mark_y,\n # self.x_delta, self.y_delta,\n ])\n\n self.fps = QtWidgets.QLabel()\n self.message = QtWidgets.QLabel()\n self.status = QtWidgets.QLabel(\"Stopped\")", "def marker(self):\r\n return _marker_of(self.api.paramstyle)", "def detectMarkers(image, dictionary, corners=None, ids=None, parameters=None, rejectedImgPoints=None):\n pass", "def get_parkey_map(self):\n parkey_map = self.selector.get_parkey_map()\n tpn_values = self.tpn_valid_values\n for key in self.get_extra_parkeys():\n if key in parkey_map and \"CORR\" not in key:\n continue\n parkey_map[key] = tpn_values.get(key, [])\n if key.endswith(\"CORR\"): # and parkey_map[key] == []:\n parkey_map[key] = [\"PERFORM\", \"OMIT\", \"NONE\", \"COMPLETE\", \"UNDEFINED\"]\n return parkey_map", "def get_info(self):\n return {}", "def init_map(json_map):\n source = json_map['connections']['source']\n target = json_map['connections']['target']\n price = json_map['connections']['price']\n critical = json_map['locations']['critical']\n n = len(source)\n\n peking_map = Graph()\n\n # Populate graph.\n for i in range(n):\n peking_map.add_edge(source[i], target[i], price[i])\n\n for i in critical:\n peking_map.update_critical(i)\n\n return peking_map" ]
[ "0.5516939", "0.5484209", "0.5084606", "0.5053326", "0.5027071", "0.4971269", "0.4958931", "0.4953794", "0.49500203", "0.49257556", "0.48262146", "0.4823674", "0.47392023", "0.47389412", "0.4737305", "0.47296727", "0.47292727", "0.4706636", "0.46876857", "0.4654955", "0.46485433", "0.46411428", "0.4632986", "0.4627397", "0.46200225", "0.46047983", "0.45850435", "0.45694402", "0.45655394", "0.45633754" ]
0.6238994
0
returns True if the caps are RAW
def is_raw(caps): rep = caps.to_string() valid = ["video/x-raw", "audio/x-raw", "text/plain", "text/x-pango-markup"] for val in valid: if rep.startswith(val): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_raw(self):\n return not self.has_structure", "def IsRegular(info):\n return (info.external_attr >> 28) == 010", "def is_worthless(self):\n self.normalize()\n return self.all_details['normalized'] in WORTHLESS_UA_TYPES", "def is_raw(self) -> bool:\n return len(self.segments) == 0", "def sniff( self, filename ):\n handle = open(filename)\n line = handle.readline()\n handle.close()\n first = line.split()\n\n if ( len(first) != 6 ):\n return False\n\n if ( first[5] != \"1\" and first[5] != \"0\" ):\n return False\n\n if ( first[2].isalnum() or first[3].isalnum() or first[4].isalnum() ):\n return False\n\n return True", "def is_raw(self):\n return not self._isReduced", "def test_rawdata(data):\n base = list(data)[0]\n if base in [\"tv\",\"leftovers\",\"tv short\",\"movie\",\"OVA / ONA / Special\"]:\n return True\n return False", "def readable(self):\n return 'r' in self._mode or '+' in self._mode", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def regular(self):\n return all(numpy.allclose(w, w[0]) for w in self.binwidths)", "def get_format(self):\n return \"RAW\"", "def read_cap(stream: Stream):\n cap_bin = stream.read_uchar()\n if cap_bin == 0:\n return 'butt'\n elif cap_bin == 1:\n return 'round'\n elif cap_bin == 2:\n return 'square'\n else:\n raise UnreadableSymbolException('unknown cap style {}'.format(cap_bin))", "def inbinary(self):\n from telnetlib3.telopt import BINARY\n return self.force_binary or self.stream.remote_option.enabled(BINARY)", "def usb_mode() -> str:", "def readable(self):\n return self._cc[13] == 0", "def CheckRaw(self):\n try:\n self.raw['elem'][0, 0]\n self.raw['enum'][0]\n except:\n return 1\n\n return 0", "def sstbf_enabled():\n return common.SSTBF_CAP in SYSTEM_CAPS", "def is_binary(self):\n return self._vtype is self.BINARY", "def is_armed_custom_bypass(self):\n return self == ArmingState.ARMED_CUSTOM_BYPASS", "def is_raw_cell(cell):\n return cell[\"cell_type\"] == \"raw\"", "def is_coding(self):\n return self.wt.is_coding()", "def extended_capability(self, capability):\n res = self._dll.JLINKARM_EMU_HasCapEx(capability)\n return (res == 1)", "def is_raw_read(command): \n if command.startswith('<READ') and command.endswith('>') and \\\n is_valid_raw(command):\n return True\n else: \n return False\n # end if", "def DataIsBinaryData(self):\n return self.data_type == definitions.REG_BINARY", "def __get_verify_mode(self):\n ...", "def sstcp_enabled():\n return common.POWER_CAP in SYSTEM_CAPS", "def test_is_AKs_preHandSimple_correct(self):\n self.assertEqual(self.hand.getPreHandSimple(), 'AKs')", "def antenny_is_safemode(self):\n return self.safe_mode", "def _detect(self):\n return True", "def is_strobe(self):\n if self._driver is None and not self._strobers:\n raise ValueError(\n 'internal %s is not driven by anything' % self._name)\n return bool(self._strobers)" ]
[ "0.5681612", "0.56376565", "0.56115663", "0.55799925", "0.55633974", "0.5548804", "0.5468991", "0.5462032", "0.5429142", "0.5349694", "0.5348325", "0.53240716", "0.53076625", "0.53020465", "0.52818954", "0.5262257", "0.5241407", "0.5236117", "0.5233745", "0.5208863", "0.51933616", "0.5172327", "0.5161722", "0.5154577", "0.513495", "0.513256", "0.51208717", "0.51071364", "0.5104723", "0.5092861" ]
0.7767293
0
Returns the list of demuxers, decoders and parsers available, sorted by rank
def _getSortedFactoryList(self): def myfilter(fact): if fact.get_rank() < 64 : return False klass = fact.get_klass() if not ("Demuxer" in klass or "Decoder" in klass or "Parse" in klass): return False return True reg = gst.registry_get_default() res = [x for x in reg.get_feature_list(gst.ElementFactory) if myfilter(x)] res.sort(lambda a, b: int(b.get_rank() - a.get_rank())) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPUsers(self):\n model = self.tvPUsers.get_model()\n result = []\n model.foreach(lambda model, path, iter, data:\n result.append(model.get(iter, 0)[0]), None)\n result.sort()\n return result", "def prioritized_viewers():\n\n viewers = [ep.load() for ep in iter_entry_points(entry_point_group['viewers'])]\n return sorted(viewers, key=operator.attrgetter('priority'), reverse=True)", "def get_decoders_names(self):\n if self.replay_source is None:\n return [\"P\" + str(self.parameters_common_index) + \".\" + str(self.parameters_fs_index) + \"_E\" \\\n + str(self.get_encoder_number())]\n\n if self.helper_decoders_one_class:\n decoders_names = [\"P\" + str(self.parameters_common_index) + \".\" +\n str(self.parameters_fs_index) + \".\" +\n str(self.parameters_helper_index) + \".\" +\n str(self.parameters_incremental_index) +\n \"_T\" + str(self.test_index) + \"_S\" + str(i) + \"_\" +\n self.replay_source + \"_1\" for i in range(len(self.test_structure))]\n else:\n decoders_names = [\"P\" + str(self.parameters_common_index) + \".\" +\n str(self.parameters_fs_index) + \".\" +\n str(self.parameters_helper_index) + \".\" +\n str(self.parameters_incremental_index) +\n \"_T\" + str(self.test_index) + \"_S\" + str(i) + \"_\" +\n self.replay_source for i in range(len(self.test_structure))]\n\n decoders_names[0] = \"P\" + str(self.parameters_common_index) + \".\" + str(self.parameters_fs_index) + \"_E\" \\\n + str(self.get_encoder_number())\n\n return decoders_names", "def modules(self):\n return sorted([module for module in self._registry.values()],\n key=lambda scomp: (scomp.order, scomp.label))", "def lexers():\n result = [(lexer[0], lexer[1][0]) for lexer in get_all_lexers()]\n result.sort()\n return result", "def list_engines_by_priority(engines=None):\n if engines is None:\n engines = ENGINES\n\n return sorted(engines, key=operator.methodcaller(\"priority\"))", "def list_parsers(self, *args):\n print('==== Available parsing modules: ====\\n')\n for parser in sorted(self.parse_modules):\n print(self.parse_modules[parser].name.ljust(16) + \\\n ': ' + self.parse_modules[parser].desc)\n sys.exit(0)", "def rank(self):\n ranks = []\n for logger in self._loggers:\n ranks.append(logger._rank)\n return ranks", "def get_sorted_results(self):\n results = self.results.values()\n return sorted(results, key=lambda r: r.rank(), reverse=True)", "def get_all_rankings(session: CondorSession) -> List[sc.Ranking]:\n return [sc.Ranking(matrix) for matrix in RankingMatrix.list(session)]", "def encoders(self):\n return self.rpc.call(MsfRpcMethod.ModuleEncoders)['modules']", "def get_players_by_rank(self):\n return sorted(self.participants, key=lambda p: p.tournament_score, reverse=True)", "def list_available_cameras():\n graph = FilterGraph()\n device_names = graph.get_input_devices()\n return device_names", "def getEncoders ():\n return _registeredEncoders", "def autoguessports(phonemodule):\n\n res=[]\n\n ports=[(islikelyportscore(port, phonemodule), port) for port in comscan.comscan()+usbscan.usbscan()+bitflingscan.flinger.scan() if port['available']]\n\n ports.sort()\n\n return [ (port['name'], port) for score,port in ports if score>=0]", "def get_top_k_ports(self, k):\n port_list = self.extract_list(k)\n return port_list", "def to_list(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n export_list = self.output_div('list')\n else:\n export_list = self.output('list')\n return export_list", "def enumerateDevices():\r\n \r\n return tuple((dev,dev) for 
dev in pygame.camera.list_cameras())", "def argparse_encoder_list() -> None:\n print(\"Available encoders:\\n\")\n print(\"{:<12}| {}\".format(\"NAME\", \"DESCRIPTION\"))\n print(\"{}|{}\".format(\"-\" * 12, \"-\" * 12))\n for encoder in AVAILABLE_ENCODERS:\n print(\"{:<12}| {}\".format(encoder, AVAILABLE_ENCODERS[encoder]))", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def list():\n return [Drive.ENCODER_L,\n Drive.ENCODER_R]", "def list_systems():\n return sorted(systems.keys())", "def available_versions(self):\n return list(sorted(self.onxs))", "def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)", "def api_read_foundations(self):\n return [str(found.get_topmost_card()) for found in self.board.foundations]", "def comports(include_links=False):\n return list(iterate_comports())", "def getAllStreams(name):\n global index\n module = index.get_module(name)\n if not module:\n return None\n streams = set()\n for s in module.get_all_streams():\n streams.add(s.get_stream_name())\n return list(streams)", "def list_known_phylogenetic_metrics():\r\n result = []\r\n for name in dir(qiime.beta_metrics):\r\n if name.startswith('dist_'):\r\n result.append(name[5:])\r\n result.sort()\r\n return result", "def GetDecodersBlock(n, separators, decoders, name_fcn):\n num_blocks = GetNumberCodeBlocks(separators)\n assert n > 0 and n <= num_blocks\n return [decoder for decoder in decoders\n if ((n == 1\n or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and\n (n == num_blocks or\n not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))]", "def order(self):\n pairs = [(w['source'][0], w['target'][0]) for w in self.wires]\n return processing_order(len(self.modules), pairs)" ]
[ "0.5803798", "0.56278354", "0.5614147", "0.553916", "0.54727554", "0.54135334", "0.52898943", "0.5285935", "0.5066761", "0.5046462", "0.50107205", "0.4980161", "0.49614632", "0.49613678", "0.4952624", "0.49493778", "0.49484605", "0.49353278", "0.49073923", "0.49002057", "0.48972997", "0.48955774", "0.4892161", "0.48326758", "0.48249146", "0.4824875", "0.47973746", "0.47888863", "0.47700372", "0.47686142" ]
0.57236826
1
Returns a list of factories (sorted by rank) which can take caps as input. Returns empty list if none are compatible
def _findCompatibleFactory(self, caps): self.debug("caps:%s" % caps.to_string()) res = [] for factory in self._factories: for template in factory.get_static_pad_templates(): if template.direction == gst.PAD_SINK: intersect = caps.intersect(template.static_caps.get()) if not intersect.is_empty(): res.append(factory) break self.debug("returning %r" % res) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getSortedFactoryList(self):\n def myfilter(fact):\n if fact.get_rank() < 64 :\n return False\n klass = fact.get_klass()\n if not (\"Demuxer\" in klass or \"Decoder\" in klass or \"Parse\" in klass):\n return False\n return True\n reg = gst.registry_get_default()\n res = [x for x in reg.get_feature_list(gst.ElementFactory) if myfilter(x)]\n res.sort(lambda a, b: int(b.get_rank() - a.get_rank()))\n return res", "def getFactorys(self) -> List[ghidra.app.util.viewer.field.FieldFactory]:\n ...", "def getUnusedFactories(self) -> List[ghidra.app.util.viewer.field.FieldFactory]:\n ...", "def _get_factories(self):\n return self._factories", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=True,\n ))\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def get_usable_guards(client_as, fp_to_as, pfi):\r\n\r\n # dict mapping all guard fps to bool usability\r\n guard_to_usability = make_guard_usability_dict(client_as, \r\n fp_to_as, \r\n pfi)\r\n\r\n\r\n # filter\r\n guard_to_usability = {fp:guard_to_usability[fp] for fp in fp_to_as}\r\n\r\n safe_guard_fps = list(filter(lambda x: guard_to_usability[x],\r\n guard_to_usability.keys()))\r\n\r\n return safe_guard_fps", "def algorithms_factory():\n all_algorithms = []\n for algorithm_module in ALGORITHMS:\n module_name = \"{}.{}\".format(PREFIX, algorithm_module)\n module = importlib.import_module(module_name)\n for item in dir(module):\n item = getattr(module, item)\n try:\n if issubclass(item, base.Algorithm):\n item.is_implemented()\n else:\n continue\n except (exceptions.AlgorithmsNotImplemented, TypeError):\n continue\n\n all_algorithms.append(item)\n\n return all_algorithms", "def strategy_lists(\n draw,\n strategies=axl.short_run_time_strategies,\n min_size=1,\n max_size=len(axl.short_run_time_strategies),\n):\n strategies = draw(\n lists(sampled_from(strategies), min_size=min_size, max_size=max_size)\n )\n return strategies", "def factory_names(self):\n return list(self._class_name_class_dict.keys())", "def choose_facilities(facilities, clients, algorithm='randomized'):\n primal, dual = solve_lp(facilities, clients)\n facilities = init_facilities(facilities, primal)\n clients = init_clients(clients, primal, dual)\n\n if algorithm == 'randomized':\n client_chooser = lambda client: client.lowest_pair_cost + client.get_expected_cost()\n facility_chooser = get_probably_good_neighbor\n elif algorithm == 'deterministic':\n client_chooser = lambda client: client.lowest_pair_cost\n facility_chooser = get_cheapest_neighbor\n else:\n return None\n\n return rounding_algorithm(facilities, clients, client_chooser, facility_chooser)", "def getFactorys(self, row: int) -> List[ghidra.app.util.viewer.field.FieldFactory]:\n ...", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add 
testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def get_deck():\n deck = []\n for suit in Suit:\n for rank in Rank:\n deck.append(Card(suit, rank))\n return deck", "def match_factory(variable, factories):\n if not isinstance(factories, tuple):\n factories = (factories,)\n\n for factory in factories:\n if (\n variable.rank == factory.rank\n and variable.name == factory.name\n and variable.units == factory.units\n ):\n return True\n return False", "def get_close(self):\n pool = set()\n\n for f in SUCC_FINGERS:\n pool.update(self.best_finger_succ[f])\n for f in PRED_FINGERS:\n pool.update(self.best_finger_pred[f])\n\n return list(pool)", "def main():\n\t\n\tDeck = []\n\tfor suite in range(suites):\n for typecard in range(1, typecard+1):\n cards.append(typecard)", "def card_factory(rank,suit):\n pass", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(\n GeometryV4Strategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def findfamily(p):\n ans = []\n for i in range(10):\n candidate = p.replace(\"*\", str(i))\n if is_prime(int(candidate)):\n ans.append(candidate)\n return sorted(ans)", "def get_specs(mag : str, spec : str) -> list:\n if spec == \"F\":\n #if mag == \"20x\": max = 6\n #if mag == \"40x\": max = 8\n #if mag == \"60x\": max = 12\n max = 12\n specs = [\"F\"+str(i).zfill(3) for i in range(1,13)]\n if spec == \"Z\":\n specs = [\"Z\"+str(i).zfill(2) for i in range(1,8)]\n if spec == \"A\":\n specs = [\"A\"+str(i).zfill(2) for i in range(1,5)]\n return specs", "def ranked_species(self, number=10, types=None):\n if types is None:\n types = {}\n\n # sort the species by their sighting counts\n raw_list = self.species\n sorted_species = sorted(raw_list, key=lambda x: x.count, reverse=True)\n\n # the percent chance that a certain thing will be shown\n sum_scores = 
sum(types.values())\n type_scores = {\n k: float(score) / sum_scores\n for k, score in types.iteritems()\n }\n\n # Make a generator to spit out species\n type_ranked = (\n i\n for i in sorted_species\n if random.random() < type_scores[i.le_type]\n )\n\n # Return only <number> of our list of species\n return list(itertools.islice(type_ranked, number))", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n\n strategies.extend([\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False)[0]])\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def _get_surfaces(idf):\n surface_types = [\n 'BUILDINGSURFACE:DETAILED',\n 'FENESTRATIONSURFACE:DETAILED',\n ]\n surfaces = []\n for surface_type in surface_types:\n surfaces.extend(idf.idfobjects[surface_type])\n\n return surfaces", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def build_observation_randomizers(cls, constants) -> List[ObservationRandomizer]:\n return []", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n 
generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations" ]
[ "0.71993726", "0.5707209", "0.540283", "0.5378691", "0.5331204", "0.5251546", "0.52479005", "0.51990193", "0.5153327", "0.51474243", "0.5144368", "0.5135094", "0.51296175", "0.51014024", "0.5099869", "0.5071028", "0.50625104", "0.5040018", "0.5008479", "0.49882582", "0.49881667", "0.49763867", "0.49658152", "0.49624392", "0.49584264", "0.4952094", "0.49459448", "0.49459448", "0.49175453", "0.49105456" ]
0.68841034
1
Tries to link one of the factories' element to the given pad. Returns the element that was successfully linked to the pad.
def _tryToLink1(self, source, pad, factories): self.debug("source:%s, pad:%s , factories:%r" % (source.get_name(), pad.get_name(), factories)) result = None for factory in factories: element = factory.create() if not element: self.warning("weren't able to create element from %r" % factory) continue sinkpad = element.get_pad("sink") if not sinkpad: continue self.add(element) element.set_state(gst.STATE_READY) try: pad.link(sinkpad) except: element.set_state(gst.STATE_NULL) self.remove(element) continue self._closeLink(element) element.set_state(gst.STATE_PAUSED) result = element break return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _closePadLink(self, element, pad, caps):\n self.debug(\"element:%s, pad:%s, caps:%s\" % (element.get_name(),\n pad.get_name(),\n caps.to_string()))\n if caps.is_empty():\n self.log(\"unknown type\")\n return\n if caps.is_any():\n self.log(\"type is not know yet, waiting\")\n return\n if caps.intersect(self.caps):\n # This is the desired caps\n if not self._srcpad:\n self._wrapUp(element, pad)\n elif is_raw(caps):\n self.log(\"We hit a raw caps which isn't the wanted one\")\n # FIXME : recursively remove everything until demux/typefind\n\n else:\n # Find something\n if len(caps) > 1:\n self.log(\"many possible types, delaying\")\n return\n facts = self._findCompatibleFactory(caps)\n if not facts:\n self.log(\"unknown type\")\n return\n self._tryToLink1(element, pad, facts)", "def _wrapUp(self, element, pad):\n\n if self._srcpad:\n return\n self._markValidElements(element)\n self._removeUnusedElements(self.typefind)\n self.log(\"ghosting pad %s\" % pad.get_name())\n self._srcpad = gst.GhostPad(\"src\", pad)\n self._srcpad.set_active(True)\n self.add_pad(self._srcpad)\n self.post_message(gst.message_new_state_dirty(self))", "def _closeLink(self, element):\n to_connect = []\n dynamic = False\n templates = element.get_pad_template_list()\n for template in templates:\n if not template.direction == gst.PAD_SRC:\n continue\n if template.presence == gst.PAD_ALWAYS:\n pad = element.get_pad(template.name_template)\n to_connect.append(pad)\n elif template.presence == gst.PAD_SOMETIMES:\n pad = element.get_pad(template.name_template)\n if pad:\n to_connect.append(pad)\n else:\n dynamic = True\n else:\n self.log(\"Template %s is a request pad, ignoring\" % pad.name_template)\n\n if dynamic:\n self.debug(\"%s is a dynamic element\" % element.get_name())\n self._controlDynamicElement(element)\n\n for pad in to_connect:\n self._closePadLink(element, pad, pad.get_caps())", "def selectLinkedElement():\n\n collector = FilteredElementCollector(doc).ToElementIds()\n wrongAngle = []\n for id in collector:\n \n element= doc.GetElement(id)\n\n if element.get_Parameter(BuiltInParameter.FABRICATION_PART_ANGLE) is not None:\n try:\n chord = element.CenterlineLength\n angle = element.get_Parameter(BuiltInParameter.FABRICATION_PART_ANGLE).AsDouble()\n angle = degrees(angle)\n diameter = element.get_Parameter(BuiltInParameter.FABRICATION_PART_DIAMETER_IN).AsDouble()\n radius = ((360/angle)*chord )/(pi*2)\n \n if round(radius,4) == round(diameter,4):\n wrongAngle.append(id)\n\n except Exception as ex:\n print(ex, str(id))\n pass\n\n wrongAngle = List[ElementId](wrongAngle)\n uidoc.Selection.SetElementIds(wrongAngle)", "def padid(self):\r\n return self.word2idx.get(PAD, 0)", "def convert_pad(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mxnet_pad_width = convert_string_to_list(attrs.get(\"pad_width\"))\n onnx_pad_width = transform_padding(mxnet_pad_width)\n\n pad_mode = attrs.get(\"mode\")\n\n if pad_mode == \"constant\":\n pad_value = float(attrs.get(\"constant_value\")) \\\n if \"constant_value\" in attrs else 0.0\n node = onnx.helper.make_node(\n 'Pad',\n inputs=input_nodes,\n outputs=[name],\n mode='constant',\n value=pad_value,\n pads=onnx_pad_width,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'Pad',\n inputs=input_nodes,\n outputs=[name],\n mode=pad_mode,\n pads=onnx_pad_width,\n name=name\n )\n\n return [node]", "def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid):\r\n if (src_dpid, dst_dpid) in link_to_port:\r\n return link_to_port[(src_dpid, 
dst_dpid)]\r\n else:\r\n self.logger.info(\"Link from dpid:%s to dpid:%s is not in links\" %\r\n (src_dpid, dst_dpid))\r\n return None", "def find_element_by_selector(self, selector):\n return UiObject(selector, self.android_device_driver) if UiObject(\n selector, self.android_device_driver).verify_exist() else None", "def build_device():\n\n devices = [evdev.InputDevice(fn) for fn in evdev.list_devices()]\n for device in devices:\n if device.name == \"Microsoft X-Box pad\":\n device = evdev.InputDevice(device.fn)\n\n try:\n device\n except NameError:\n print(\"No X-Box controller found\")\n pass\n else:\n print(str(device.name + \" found\"))\n return device", "def find_element(self, attrib_key, attrib_value, match_option=None):\n selector = UiSelector()\n selector.attributes(attrib_key, attrib_value, match_option)\n return UiObject(selector, self.android_device_driver) if UiObject(\n selector, self.android_device_driver).verify_exist() else None", "def find_element(self, element: WebElement) -> WebElement:\n return element", "def get_maker(self, name):\n try:\n assert name in self.list_makers()\n return self.devices[name]\n except KeyError:\n raise UnknownDevice(name)", "def pad_to_match(feature, target_length, rank, constant_values):\n padding_list = []\n target_length = tf.maximum(target_length, tf.shape(feature)[1])\n for r in range(rank):\n if r == 1:\n padding_list.append([0, target_length - tf.shape(feature)[1]])\n else:\n padding_list.append([0, 0])\n return tf.pad(feature, padding_list, constant_values=constant_values,\n name=\"pad_to_match\")", "def get_device(link):\n device = Device(\"\",0,0,0,0,0)\n device.link = link\n return device.identify()", "def try_find_element(web_driver: WebDriver, by: FindElementBy, unique_val, retry_count, ignore_if_not_found=False) \\\n -> WebElement:\n element = None\n retried = 0\n while True:\n try:\n if by == FindElementBy.CLASS:\n element = web_driver.find_element_by_class_name(unique_val)\n elif by == FindElementBy.NAME:\n element = web_driver.find_element_by_name(unique_val)\n elif by == FindElementBy.AUTOMATION_ID:\n element = web_driver.find_element_by_accessibility_id(unique_val)\n except NoSuchElementException:\n retried = retried + 1\n if retried > retry_count:\n if ignore_if_not_found:\n return None\n raise NoSuchElementException\n else:\n sleep(1)\n continue\n break\n return element", "def get_pads(m_data: List[Dict[str, Any]], nets: List[Net]) -> List[FpPad]:\n layer = get_dict_by_key(m_data, 'layer')['layer']\n pads: List[FpPad] = list()\n used_pads = [\"\"]\n for pad in get_all_dicts_by_key(m_data, 'pad'):\n fp_pad = pad['pad']\n pad_id = fp_pad[0].replace('\"', \"\")\n if pad_id in used_pads:\n count = 1\n while pad_id+str(count) in used_pads:\n count += 1\n pad_id = pad_id+str(count)\n used_pads.append(pad_id)\n smd = (fp_pad[1] == 'smd')\n drill = 0 if smd else get_dict_by_key(fp_pad, \"drill\")['drill']\n if fp_pad[2] == 'rect':\n pad_type = PadType.rect\n elif fp_pad[2] == 'circle':\n pad_type = PadType.circle\n elif fp_pad[2] == 'oval':\n pad_type = PadType.oval\n else:\n pad_type = PadType.custom\n pos_data = get_dict_by_key(fp_pad, 'at')['at']\n pos = FpPos(pos=[pos_data[0], -1.0*float(pos_data[1])], rot=(pos_data[2]) if len(pos_data) == 3 else 0)\n if 'B.' 
in layer:\n pos.pos[1] = -1*pos.pos[1]\n size_data = get_dict_by_key(fp_pad, 'size')\n size = [size_data['size'][0], size_data['size'][1]] if size_data else [0, 0]\n pad_layers: List[Layer] = convert_to_layers(get_dict_by_key(fp_pad, 'layers')['layers'])\n net_data = get_dict_by_key(fp_pad, 'net')\n net_id = get_dict_by_key(fp_pad, 'net')['net'][0] if net_data else \"\"\n net_name = get_dict_by_key(fp_pad, 'net')['net'][1] if net_data else \"\"\n new_pad = FpPad(pad_id=pad_id, smd=smd, drill=drill, pad_type=pad_type, center=pos, size=size,\n layers=pad_layers, net_id=net_id, net_name=net_name, extra_points=list())\n if pad_type == PadType.custom:\n pad_data = get_dict_by_key(fp_pad, 'primitives')['primitives']\n for extra_pad in pad_data:\n if isinstance(extra_pad, dict):\n print(extra_pad)\n if 'gr_poly' in extra_pad.keys():\n points = get_dict_by_key(extra_pad['gr_poly'], 'pts')['pts']\n elif 'pts' in extra_pad.keys():\n points = extra_pad['pts']\n else:\n continue\n for point in points:\n new_pad.extra_points.append([point['xy'][0], str(-1*float(point['xy'][1]))])\n print(new_pad.extra_points)\n pads.append(new_pad)\n return pads", "def find_element_view(\n self,\n *,\n element: Optional[Element] = None,\n ) -> Optional[ElementView]:\n return next(\n (view for view in self.element_views if view.element.id == element.id), None\n )", "def associate(self, house):\n self.house = house\n try:\n if self._room != 'House':\n self.device = self.house.devices[self._room][self._device]\n else:\n self.device = self.house\n except IndexError:\n print \"Error: Rule could not find an implementer\", self._room, self._device\n self.device = None", "def _element_from_rep_and_mod(self, rep, mod):\n if mod != self.T.rep.rep:\n raise UnificationFailed('Element does not appear to be in the same field.')\n return self.element_from_poly(Poly(rep, self.T.gen))", "def spawn_pads(self):\n self.pad_sprites.empty()\n while len(self.pad_sprites) < NUMBER_OF_PADS:\n tall = random.choice([True, False])\n pad = Pad(random.randrange(79, WIDTH - 79), random.randrange(HEIGHT - 200, HEIGHT), tall=tall)\n pad_collision = pygame.sprite.spritecollide(pad, self.pad_sprites, False)\n if not pad_collision:\n self.pad_sprites.add(pad)", "def dpae_join(self, pkt, datapath, in_port):\n _payload = str(pkt.protocols[-1])\n self.logger.info(\"Phase 2 DPAE discovery packet received from dpid=%s \"\n \"port=%s payload=%s\",\n datapath.id, in_port, _payload)\n #*** Try decode of payload as JSON:\n try:\n dpae_discover = json.loads(_payload)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n self.logger.error(\"Phase 2 DPAE API Create exception while \"\n \"decoding JSON body=%s Exception %s, %s, %s\",\n _payload, exc_type, exc_value, exc_traceback)\n return 0\n #*** Check to see if JSON has a uuid_controller key:\n if 'uuid_controller' in dpae_discover:\n uuid_controller = dpae_discover['uuid_controller']\n else:\n self.logger.debug(\"No uuid_controller field in discovery \"\n \"packet so ignoring...\")\n return 0\n #*** Check to see if JSON has a hostname_dpae key:\n if 'hostname_dpae' in dpae_discover:\n hostname_dpae = dpae_discover['hostname_dpae']\n else:\n self.logger.debug(\"No hostname_dpae field in discovery \"\n \"packet so ignoring...\")\n return 0\n #*** Check to see if JSON has a if_name key:\n if 'if_name' in dpae_discover:\n if_name = dpae_discover['if_name']\n else:\n self.logger.debug(\"No if_name field in discovery \"\n \"packet so ignoring...\")\n return 0\n #*** Check to see if JSON has a uuid_dpae 
key:\n if 'uuid_dpae' in dpae_discover:\n uuid_dpae = dpae_discover['uuid_dpae']\n else:\n self.logger.debug(\"No uuid_dpae field in discovery \"\n \"packet so ignoring...\")\n return 0\n #*** Look the key up in the database:\n db_result = self.dbdpae.find_one({'_id': str(uuid_controller)})\n if db_result:\n #*** Check all fields match:\n if not hostname_dpae == str(db_result[u'hostname_dpae']):\n self.logger.error(\"Phase 2 hostname_dpae mismatch\")\n return 0\n if not if_name == str(db_result[u'if_name']):\n self.logger.error(\"Phase 2 if_name mismatch\")\n return 0\n if not uuid_dpae == str(db_result[u'uuid_dpae']):\n self.logger.error(\"Phase 2 uuid_dpae mismatch\")\n return 0\n self.logger.debug(\"Phase 2 updating DPAE record\")\n db_result = self.dbdpae.update_one(\n {'_id': str(uuid_controller)},\n {\n '$set': {\n 'dpid': datapath.id,\n 'switch_port': in_port\n },\n }\n )\n self.logger.debug(\"Phase 2 updated %s database record(s)\",\n db_result.modified_count)\n else:\n #*** Ignore as no uuid_controller key:\n self.logger.debug(\"Phase 2 discovery packet uuid_controller field \"\n \"not found in database, so ignoring...\")\n return 0", "def create_element(self, identifier: str, element_name: str) -> Device:\n return self.create_device(identifier, element_name)", "def _find_element(locator, timeout=1, type = By.XPATH):\n elements = _find_elements(locator, timeout, type)\n if elements:\n if len(elements) > 1:\n logger.warning(f\"There is more than one element matching the locator {locator}.\"\n \"Try a more specific locator, or use _find_elements if this is expected.\")\n return None\n return elements[0]\n else:\n logger.warning(\"Could not find element with the locator [%s]\"%(locator))\n return None", "def get_conflicting_element(self, new_element):\n\n for element in self._list:\n \n if (element.no == new_element.no) and (element.grp == new_element.grp):\n \n return element\n \n return None", "def _match_device(self):\n for device in self.manager.all_devices:\n if (device.get_char_device_path() ==\n self._character_device_path):\n self.device = device\n device.leds.append(self)\n break", "def pad(input, pad, mode='constant', value=0):\n ndim = input.ndimension()\n pads_begin, pads_end = [0] * ndim, [0] * ndim\n for i in range(len(pad) // 2):\n pads_begin[ndim - 1 - i] = pad[i * 2]\n pads_end[ndim - 1 - i] = pad[i * 2 + 1]\n mode = {'constant': 'CONSTANT', 'reflect': 'REFLECT',\n 'replicate': 'EDGE', 'circular': 'EDGE'}[mode]\n return FunctionLib.apply(\n 'Pad', input.device, [input], mode=mode, value=float(value),\n ndim=ndim, pads=pads_begin + pads_end)", "def make_elm_or_print_err(factoryname, name, printedname, detail=\"\"):\n print(\"Creating\", printedname)\n elm = Gst.ElementFactory.make(factoryname, name)\n if not elm:\n sys.stderr.write(\"Unable to create \" + printedname + \" \\n\")\n if detail:\n sys.stderr.write(detail)\n return elm", "def linkTo( self, node2, port1=None, port2=None ):\n node1 = self\n if port1 is None:\n port1 = node1.newPort()\n if port2 is None:\n port2 = node2.newPort()\n intf1 = node1.intfName( port1 )\n intf2 = node2.intfName( port2 )\n makeIntfPair( intf1, intf2 )\n node1.addIntf( intf1, port1 )\n node2.addIntf( intf2, port2 )\n node1.registerIntf( intf1, node2, intf2 )\n node2.registerIntf( intf2, node1, intf1 )\n return intf1, intf2", "def CreateElementAt(self, arg0: 'unsigned long long') -> \"itkQuadEdgeMeshPointF2GQEULLULLBBT &\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_CreateElementAt(self, arg0)", "def 
scapy_layers_dot11_Dot11_find_elt_by_id(self, id):\n\tfor elt in self.elts():\n\t\tif elt.ID == id:\n\t\t\treturn elt\n\treturn None" ]
[ "0.503194", "0.49497634", "0.47379994", "0.46080324", "0.4555549", "0.45181164", "0.44388047", "0.44343936", "0.4425721", "0.4418763", "0.4364893", "0.43579903", "0.43553662", "0.4336896", "0.43215272", "0.43126917", "0.42910552", "0.42624408", "0.42379418", "0.42214766", "0.42036813", "0.41972405", "0.41967738", "0.41839936", "0.41713133", "0.41681293", "0.4164485", "0.41605002", "0.41594702", "0.41261616" ]
0.67851293
0
Ghost the given pad of element. Remove nonused elements.
def _wrapUp(self, element, pad): if self._srcpad: return self._markValidElements(element) self._removeUnusedElements(self.typefind) self.log("ghosting pad %s" % pad.get_name()) self._srcpad = gst.GhostPad("src", pad) self._srcpad.set_active(True) self.add_pad(self._srcpad) self.post_message(gst.message_new_state_dirty(self))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_padding(im, pad):\n\n return im[pad:-pad, pad:-pad]", "def _removeUnusedElements(self, element):\n self.log(\"element:%r\" % element)\n for pad in element.src_pads():\n if pad.is_linked():\n peer = pad.get_peer().get_parent()\n self._removeUnusedElements(peer)\n if not peer in self._validelements:\n self.log(\"removing %s\" % peer.get_name())\n pad.unlink(pad.get_peer())\n peer.set_state(gst.STATE_NULL)\n self.remove(peer)", "def remove_filler(dgm, val=np.inf):\r\n inds = (dgm[:,0] != val)\r\n return dgm[inds,:]", "def _UnPad(self, padded):\n pad = bytearray(padded)[-1]\n return padded[:-pad]", "def remove_padding_from_bb(boxes, x_padding):\n boxes[boxes[:, 0] < x_padding] = x_padding\n boxes[:, 0] -= x_padding\n boxes[:, 2] -= x_padding\n return boxes", "def clear(self):\n self.vram = [[BLACK] * self.width for j in range(self.height)]", "def unpad(I, pad):\n\tif pad[3] == 0 and pad[1] > 0:\n\t\treturn I[..., pad[2]:, pad[0]:-pad[1]]\n\telif pad[3] > 0 and pad[1] == 0:\n\t\treturn I[..., pad[2]:-pad[3], pad[0]:]\n\telif pad[3] == 0 and pad[1] == 0:\n\t\treturn I[..., pad[2]:, pad[0]:]\n\telse:\n\t\treturn I[..., pad[2]:-pad[3], pad[0]:-pad[1]]", "def _unpad(self, a, axis, out):\n\n if a.shape[axis] == self.Nin:\n return a\n elif a.shape[axis] != self.N:\n raise ValueError(\"array much be of size N or len(x)\")\n\n Npad = self.N - self.Nin\n if out:\n _Npad, Npad_ = Npad - Npad//2, Npad//2\n else:\n _Npad, Npad_ = Npad//2, Npad - Npad//2\n\n return np.take(a, range(_Npad, self.N - Npad_), axis=axis)", "def drop_unattached(self):\n for x in range(self.size):\n for y in range(self.size):\n coords = (x, y)\n if self.is_cell_unattached(coords):\n self.drop([coords])", "def clearPlayground(self):\n\n for cell in self.cells:\n cell.delete()\n self.cells = []\n self.generation = 0", "def __UnPad(self, padded):\n pad = ord(padded[-1])\n return padded[:-pad]", "def remove_pad(x, pad_remover, mode):\n # Concatenate all tokens (without padding)\n x = flatten_all_but_last(x)\n\n # Remove padding for training and eval\n if mode != ModeKeys.PREDICT:\n # This is a hack to allows inference when the <go> token\n # is detected as padding and removed. This works for now because there is\n # no padding at inference.\n x = pad_remover.remove(x)\n\n x = tf.expand_dims(x, axis=0) # Now batch_size=1\n return x", "def remove_padding(paddedMsg, block_size): \n try:\n if not valid_padding(paddedMsg, block_size):\n raise ValueError\n except ValueError:\n print(f\"{ paddedMsg } has invalid PKCS#7 padding.\")\n return\n \n last_byte = paddedMsg[-1]\n unpadded = paddedMsg[:-last_byte]\n print(f\"Padding removed successfully...\")\n print(f\"Before padding removal: { paddedMsg }\")\n print(f\"After padding removal: { unpadded }\")", "def _deshard_and_remove_padding(all_inferences, all_indices):\n # PyTree[batch_count * B, H, ...] 
-> PyTree[batch_count * B * H, ...]\n # batch_count * B * H is the total number of examples including padding\n # examples at the end if they exist.\n all_inferences = jax.tree_map(lambda x: x.reshape((-1,) + x.shape[2:]),\n all_inferences)\n\n # [batch_count * B, H] -> [batch_count * B * H]\n all_indices = all_indices.reshape(-1)\n\n # Remove padding.\n non_pad_idxs = np.where(all_indices >= 0)\n all_indices = all_indices[non_pad_idxs]\n all_inferences = jax.tree_map(lambda x: x[non_pad_idxs], all_inferences)\n return all_inferences, all_indices", "def quickWipe(strip):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, Color(0,0,0))\n strip.show()", "def remove(self, xcord, ycord, g_board):\n for i in range(xcord, xcord + 2):\n for j in range(ycord, ycord + self.size):\n g_board[i][j] = ' '", "def add_periodic_padding(X, pad_size):\n\tpad_size = np.array(pad_size)\n\tn_duplicates = tuple([int(x) for x in np.ceil(pad_size/np.array(X.shape))*2 + 1])\n\tX_out = np.tile(X, n_duplicates)\n\tn_dlt = [int(x) for x in (np.array(X.shape) - np.mod(pad_size, np.array(X.shape)))]\n\tX_out = X_out[:-n_dlt[0], :]\n\tX_out = X_out[:, :-n_dlt[1]]\n\tX_out = X_out[n_dlt[0]:, :]\n\tX_out = X_out[:, n_dlt[1]:]\n\treturn X_out", "def discard(self, element: int) -> None:\n self._used.discard(element)\n if element < self.search_pos:\n self.search_pos = element", "def _set_ghost_mask(self, rdd):\n tau, N, dom_mins, dom_maxs = self.tau, self.N, self.dom_mins, self.dom_maxs\n container_mins, container_maxs = self.container_mins, self.container_maxs\n buff_mins, buff_maxs = self.buff_mins, self.buff_maxs\n\n def ghost_map_wrapper(iterator): \n for arr in iterator: \n ghost_mask(arr, tau, N, container_mins, container_maxs, \n buff_mins, buff_maxs, dom_mins, dom_maxs)\n yield arr\n\n return rdd.mapPartitions(ghost_map_wrapper, preservesPartitioning=True)", "def erase_items(self):\n for row in range(self._grid.height):\n for col in range(self._grid.width):\n empty_label = widgets.ItemLabel(front_utils.path_to_image(' '))\n self._graphic_grid.addWidget(empty_label, 2 + row, 2 + col)", "def remove_numbers(self):\n for i in range(len(self.board.board[0])):\n while self.board.board[i].count(0) < 6:\n random_val = random.randint(0, 8)\n self.board.update_board((i, random_val), 0)", "def clear(self, fill = 0x00):\n self._buffer = [ fill ] * ( self.width * self.height )", "def clear(self):\n self._buffer = [0]*(self.width*self._pages)", "def remove(self, x):\n with tf.name_scope(\"pad_reduce/remove\"):\n x_shape = x.get_shape().as_list()\n x = tf.gather_nd(\n x,\n indices=self.nonpad_ids,\n )\n # This is a hack but for some reason, gather_nd return a tensor of\n # undefined shape, so the shape is set up manually\n x.set_shape([None] + x_shape[1:])\n return x", "def clear(self):\n self._cells = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]", "def reset(self):\n width = len(self.cell)\n height = len(self.cell[0])\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def reset(self):\n self.elements = [0] * len(self)", "def unset_padding(self):\n if self.metadata.Signal.has_item('pad_tuple'):\n Npy, Npx = self.metadata.Signal.pad_tuple\n else:\n # If no padding was done, return the same signal\n return self\n Nx, Ny = self.axes_manager.signal_shape\n s=self.deepcopy()\n del s.metadata.Signal.pad_tuple\n if self.axes_manager.navigation_dimension == 0:\n s.data = s.data[Npy[0]:(Ny-Npy[1]), Npx[0]:(Nx-Npx[1])]\n s.get_dimensions_from_data()\n elif 
self.axes_manager.navigation_dimension > 0:\n s.data = s.data[..., Npy[0]:(Ny-Npy[1]), Npx[0]:(Nx-Npx[1])]\n s.get_dimensions_from_data()\n # copy in case of non-linear defoci\n s.axes_manager.navigation_axes[0].axis = self.axes_manager.navigation_axes[0].axis.copy()\n return s", "def unpad(data, *args, **kwargs): # pragma: no cover\n raise NotImplementedError()", "def clear_elements(self):\n\n pass" ]
[ "0.60958475", "0.5913197", "0.5780126", "0.57168174", "0.5607434", "0.5409166", "0.5402955", "0.53921175", "0.5367817", "0.5362773", "0.5360787", "0.53020066", "0.52960426", "0.5293717", "0.5272031", "0.5266434", "0.52592355", "0.52355856", "0.5184225", "0.51815903", "0.5179895", "0.5172429", "0.51628506", "0.5150378", "0.51482385", "0.51375055", "0.51210535", "0.51137376", "0.5096362", "0.50919795" ]
0.6121266
0
Mark this element and its upstream elements as valid
def _markValidElements(self, element):
    self.log("element:%s" % element.get_name())
    if element == self.typefind:
        return
    self._validelements.append(element)
    # find upstream element
    pad = list(element.sink_pads())[0]
    parent = pad.get_peer().get_parent()
    self._markValidElements(parent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setValid(self):\n self.valid = True", "def valid(self):\n pass", "def SetValid(self, valid):\r\n\r\n self._valid = valid", "def SetValid(self, valid):\r\n\r\n self._valid = valid", "def valid(self, target):", "def valid(self) -> bool:\n pass", "def valid(self) -> bool:\n return True", "def invalidate(self):\n self.valid = False", "def is_invalid(self):\n self._is_valid = False", "def invalidate(self):\n self._valid = False", "def valid_from(self, valid_from):\n\n self._valid_from = valid_from", "def is_valid(self):\n raise NotImplementedError", "def is_valid(self, is_valid):\n\n self._is_valid = is_valid", "def is_valid(self, is_valid):\n\n self._is_valid = is_valid", "def is_valid(self):\r\n raise NotImplementedError", "def validity(self, validity):\n\n self._validity = validity", "def validity(self, validity):\n\n self._validity = validity", "def validity(self, validity):\n\n self._validity = validity", "def is_valid(self): # -> bool:\n ...", "def gatherValids(self, valids, doUnify):\n\n if not self.fromAnchor or not self.toAnchor:\n return\n if doUnify:\n for unique in valids:\n if (self.fromAnchor == unique.fromAnchor and\n self.toAnchor == unique.toAnchor):\n return\n valids.append(self)", "def is_valid(self):\n\n return True", "def _is_valid(self):\n self._is_allows_valid()\n self._is_denies_valid()", "def set_validated(self):\n self.__validationerrors=[]\n self.set_property('image',None)", "def _check_validity(self):\n pass", "def invalid(self):\n pass", "def check_validity(self):", "def validate(self) -> None:\n validate_instance(self)", "def __bool__(self):\r\n return self.valid", "def clean(self):\n if not self.is_input and not self.is_output:\n raise ValidationError(\"TransformationXput with pk={} is neither an input nor an output\".format(self.pk))\n if self.has_structure:\n self.structure.clean()", "def _validate(self):\n pass" ]
[ "0.66019636", "0.62431264", "0.6062435", "0.6062435", "0.5959112", "0.5954582", "0.59216887", "0.5829217", "0.5820905", "0.5796385", "0.57952785", "0.5787865", "0.5753835", "0.5753835", "0.5690036", "0.5673884", "0.5673884", "0.5673884", "0.5671159", "0.5644206", "0.56414044", "0.5614816", "0.5609864", "0.5589279", "0.55479944", "0.55278915", "0.55242264", "0.55131835", "0.54976463", "0.54876757" ]
0.67863214
0
Remove unused elements connected to the srcpad(s) of the element
def _removeUnusedElements(self, element):
    self.log("element:%r" % element)
    for pad in element.src_pads():
        if pad.is_linked():
            peer = pad.get_peer().get_parent()
            self._removeUnusedElements(peer)
            if not peer in self._validelements:
                self.log("removing %s" % peer.get_name())
                pad.unlink(pad.get_peer())
                peer.set_state(gst.STATE_NULL)
                self.remove(peer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_discarded(self):\n while self.shrink_target.has_discards:\n discarded = []\n\n for ex in self.shrink_target.examples:\n if ex.discarded and (not discarded or ex.start >= discarded[-1][-1]):\n discarded.append((ex.start, ex.end))\n\n assert discarded\n\n attempt = bytearray(self.shrink_target.buffer)\n for u, v in reversed(discarded):\n del attempt[u:v]\n\n if not self.incorporate_new_buffer(attempt):\n break", "def dedupe(self):\n elems = []\n for x in self.elems:\n if x not in elems:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)", "def clean_duplicate(self):\r\n self.elements = list(set(self.elements))\r\n self.elements = [e for e in self.elements if e != '']", "def remove_empty_sources(self):\n for source in [\"dxf\", \"edilizia\", \"easyroom\", \"merged\"]:\n if source in self and not self[source]:\n del self[source]", "def clear_elements(self):\n\n pass", "def remove_dup2(linkedlist):", "def remove_unused_tags(the_config, name, action, protocol, src_ip, dst_ip,\n sport_operator, src_port, dport_operator, dst_port,\n count, log, dscp):\n if not dscp or dscp == '':\n remove_from_xml_tree(the_config, 'dscp')\n# handle count and log tags\n if not count:\n remove_from_xml_tree(the_config, 'count')\n\n if not log:\n remove_from_xml_tree(the_config, 'log')\n# handle ip tags\n if src_ip == 'any':\n remove_from_xml_tree(the_config, 'src-mask')\n\n if dst_ip == 'any':\n remove_from_xml_tree(the_config, 'dst-mask')\n\n# handle protocol tags\n if protocol == 'tcp':\n remove_from_xml_tree(the_config, 'udp')\n elif protocol == 'udp':\n remove_from_xml_tree(the_config, 'tcp')\n\n if ((src_port == '') & (dst_port == '')):\n remove_from_xml_tree(the_config, 'port')\n elif (src_port == ''):\n remove_from_xml_tree(the_config, 'sport')\n elif (dst_port == ''):\n remove_from_xml_tree(the_config, 'dport')\n\n if (sport_operator == 'range'):\n remove_from_xml_tree(the_config, 'sport-number-eq-neq')\n elif (sport_operator == 'eq'):\n remove_from_xml_tree(the_config, 'sport-number-range')\n else:\n remove_from_xml_tree(the_config, 'sport')\n\n if (dport_operator == 'range'):\n remove_from_xml_tree(the_config, 'dport-number-eq-neq')\n elif (dport_operator == 'eq'):\n remove_from_xml_tree(the_config, 'dport-number-range')\n else:\n remove_from_xml_tree(the_config, 'dport')", "def remove_undesirable_elements(self):\n for undesirable in self.undesirables:\n need_to_remove = any(item in self.tree.iter()\n for item in self.tree.xpath(undesirable))\n if need_to_remove:\n self.debug_print('\\nAny {} elements present?'.\n format(undesirable), need_to_remove\n )\n for element in self.tree.xpath(undesirable):\n self.debug_print(' * acting now on {}'.\n format(element))\n element.getparent().remove(element)\n self.debug_print('All {} elements now removed?'.\n format(undesirable),\n all(item not in self.tree.iter()\n for item in self.tree.xpath(undesirable))\n )", "def trim_dag(G):\n SINKS = G.graph['SINKS']\n SOURCES = G.graph['SOURCES']\n\n for node in G.nodes():\n node_attr = G.node[node]\n children = G.successors(node)\n\n if check_key(node_attr, 'is_const', True):\n if len(children) == 0:\n print \"!!!!! 
\", node\n\n SOURCES.remove(node)\n if node in SINKS:\n SINKS.remove(node)\n for c in children:\n if 'immediates' in G.node[c].keys():\n G.node[c]['immediates'].append(node_attr['value'])\n\n if check_key(node_attr, 'op', \"mv\") or check_key(node_attr, 'is_const', True) :\n parents = G.predecessors(node)\n \n for p in parents:\n for c in children:\n G.add_edge(p, c)\n #print G.predecessors(node), \" -> \", node, \" -> \", G.successors(node)\n G.remove_node(node)\n\n return", "def clean(self):\n return _coconut_tail_call((self.__class__), *filter(_coconut.functools.partial(_coconut.operator.ne, self.identity), self.elems))", "def removeInputCopies(self):\n for p in self.assoc.parlist:\n if int(p['group']) == 1:\n _img = p['image'].datafile\n shutil.move(p['orig_filename'],_img)", "def disconnect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n # `discard` ignores non-existing elements (unlike `remove`)\n app.edges[src_id].discard(trg_id)\n self.mark_as_unsaved()\n self.update()", "def remove_gap_traces(st):\n # Check 1:\n gaps = st.get_gaps()\n for i in range(len(gaps)):\n for tr in st.select(network=gaps[i][0], station=gaps[i][1], location=gaps[i][2], channel=gaps[i][3]):\n st.remove(tr)\n # Check 2:\n masked = [np.ma.is_masked(tr.data) for tr in st]\n for i, tr in enumerate(st):\n if masked[i]:\n st.remove(tr)\n return st", "def _remove_ignored_empty_subelems(subelems1, subelems2, parent_path, ignore_namespaces, ignored_empty_tags, debug_stream):\r\n ds = debug_stream\r\n if ds: print >>ds, \"parent_path: %s\" % parent_path\r\n removed = []\r\n for i, subelem1 in enumerate(subelems1):\r\n if len(subelem1.getchildren()) > 0:\r\n continue\r\n \r\n # See if the tag should be ignored if it doesn't exist on\r\n # the other side\r\n is_ignored = False\r\n for ignored_tag in ignored_empty_tags:\r\n if ds: print >>ds, \"ignored_tag = %s, tag = %s\" % (ignored_tag, parent_path + \"/\" + _get_tag(subelem1, ignore_namespaces))\r\n if ignored_tag == parent_path + \"/\" + _get_tag(subelem1, ignore_namespaces):\r\n is_ignored = True\r\n break\r\n if not is_ignored:\r\n continue\r\n \r\n # See if the tag exists on the other side\r\n found = False\r\n for subelem2 in subelems2:\r\n if _get_tag(subelem1, ignore_namespaces) == _get_tag(subelem2, ignore_namespaces):\r\n found = True\r\n break\r\n if not found:\r\n removed.append(i)\r\n \r\n # Sort and reverse the removed list so that deleting starts from the\r\n # end and the indices are correct throughout the operation\r\n removed.sort()\r\n removed = removed[::-1]\r\n if len(removed) >= 2:\r\n if removed[0] < removed[-1]:\r\n raise RuntimeError(\"Internal error: list in wrong order: %s\" % removed)\r\n \r\n for i in removed:\r\n del subelems1[i]", "def drop_unattached(self):\n for x in range(self.size):\n for y in range(self.size):\n coords = (x, y)\n if self.is_cell_unattached(coords):\n self.drop([coords])", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def _deshard_and_remove_padding(all_inferences, all_indices):\n # PyTree[batch_count * B, H, ...] 
-> PyTree[batch_count * B * H, ...]\n # batch_count * B * H is the total number of examples including padding\n # examples at the end if they exist.\n all_inferences = jax.tree_map(lambda x: x.reshape((-1,) + x.shape[2:]),\n all_inferences)\n\n # [batch_count * B, H] -> [batch_count * B * H]\n all_indices = all_indices.reshape(-1)\n\n # Remove padding.\n non_pad_idxs = np.where(all_indices >= 0)\n all_indices = all_indices[non_pad_idxs]\n all_inferences = jax.tree_map(lambda x: x[non_pad_idxs], all_inferences)\n return all_inferences, all_indices", "def _prune(self, idx):\n idx = list(idx)\n neurons = []\n for nold in self.neurons:\n k = nold[1] # number of neurons\n ix1 = [i for i in idx if i < k] # index for current neuron type\n idx = [i-k for i in idx if i >= k]\n func = nold[0]\n number = len(ix1)\n W = nold[2][:, ix1]\n bias = nold[3][ix1]\n neurons.append((func, number, W, bias))\n self.neurons = neurons", "def cleanup(self):\n for element in self.root.iter():\n element.tag = element.tag.partition('}')[-1]", "def _remove_additional_elements(self):\n # Produces a list of keys in sample sorted by seed\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n\n # Removes the keys with largest seed values (beyond the first k keys)\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]", "def clear_invalid_node_references(doc, ids_to_remove):\n start = time.time()\n for elem in doc.iter(\"nd\"):\n ref = elem.attrib[\"ref\"]\n\n if ref in ids_to_remove:\n tmp = elem\n elem = elem.getnext()\n tmp.getparent().remove(tmp)\n\n print 'Time Taken :: ', round(time.time() - start, 3), \" seconds\"\n return doc", "def _wrapUp(self, element, pad):\n\n if self._srcpad:\n return\n self._markValidElements(element)\n self._removeUnusedElements(self.typefind)\n self.log(\"ghosting pad %s\" % pad.get_name())\n self._srcpad = gst.GhostPad(\"src\", pad)\n self._srcpad.set_active(True)\n self.add_pad(self._srcpad)\n self.post_message(gst.message_new_state_dirty(self))", "def _remove_additional_elements(self):\n # Produces a list of keys in sample sorted by seed\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n\n # Removes the keys with largest seed values (beyond the\n # first k keys)\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]", "def cleaned_list():\n ws_oc = catalog.srcs.copy() # write-safe read copy for\n # the GLEAM object catalog\n cat = catalog.srcs.copy()\n # we loop in reverse, to avoid concurrent mod. exceptions\n for i in range(len(ws_oc) - 1, 0, -1):\n # classic. 
The easiest way to check if a value is NaN:\n # it won't equal itself\n if ws_oc[i].alpha != ws_oc[i].alpha:\n cat = np.delete(cat, i)\n return cat", "def _prune_unreached(self):\n swcdict = {}\n for n in self._data: # Hash all the swc nodes\n swcdict[n[0]] = Node(n[0])\n\n # Try to join all the unconnected branches at first\n for i, n in enumerate(self._data):\n if n[6] not in swcdict:\n # Try to match it\n matched, midx = self.match(n[2:5], n[5])\n if matched:\n self._data[i, 6] = self._data[midx, 0]\n\n # Add mutual links for all nodes\n for n in self._data:\n id = n[0]\n pid = n[6]\n if pid >= 0:\n swcdict[id].add_link(swcdict[pid])\n\n groups = connected_components(set(swcdict.values()))\n lenlist = [len(g) for g in groups]\n maxidx = lenlist.index(max(lenlist))\n set2keep = groups[maxidx]\n id2keep = [n.id for n in set2keep]\n self._data = self._data[np.in1d(self._data[:, 0], np.asarray(id2keep)), :]", "def remove_blanks_list(src):\n return [el for el in src if el]", "def noiseRemoval(array, minSize, classes):\n img=array.astype('int')\n for i in range(classes):\n B=(img!=i) # return a bool array\n B = morphology.remove_small_objects(B, min_size=minSize, connectivity=1) \n img[B==False]=i\n \n return img", "def delete_sources(image_sources):\n index = np.where(image_sources[:, 3] == 0.0)\n active_sources = np.delete(image_sources, index, 0)\n return(active_sources)", "def prune_unlinked(self):\n linked_ids = set()\n for (link_from, link_to, link_style, link_tail) in self.links:\n linked_ids.add(link_from)\n linked_ids.add(link_to)\n nodes_to_delete = []\n for name, node in self.nodes.items():\n if node.node_id not in linked_ids:\n nodes_to_delete.append(name)\n for name in nodes_to_delete:\n del self.nodes[name]", "def remove_hydrogens(self) -> None:\n for cid, c in self:\n for rid, r in c:\n for aid, a in r:\n if a.element == 'H':\n print('removing H at %s' % aid)\n r.remove_atom(a)" ]
[ "0.6208477", "0.60841775", "0.5987278", "0.5718551", "0.5528435", "0.5527522", "0.5503612", "0.54967445", "0.54903656", "0.5440708", "0.5410119", "0.54018587", "0.53677535", "0.5367388", "0.53665525", "0.5362788", "0.5347237", "0.5332786", "0.53268355", "0.5318066", "0.531519", "0.52970976", "0.52795136", "0.52777743", "0.52775687", "0.52664256", "0.52576816", "0.5257184", "0.5234699", "0.5221503" ]
0.75238466
0
Test correct formatting of the footer string
def test_format_emperor_html_footer_string(self):
    self.maxDiff = 5000

    # footer for a jackknifed pcoa plot without biplots
    out_string = format_emperor_html_footer_string(False, True)
    self.assertItemsEqual(out_string.split('\n'),
                          EXPECTED_FOOTER_A.split('\n'))
    self.assertEqual(out_string, EXPECTED_FOOTER_A)

    # footer for biplots without jackknifing
    out_string = format_emperor_html_footer_string(True, False)
    self.assertItemsEqual(out_string.split('\n'),
                          EXPECTED_FOOTER_B.split('\n'))
    self.assertEqual(out_string, EXPECTED_FOOTER_B)

    # no biplots nor jackknifing
    out_string = format_emperor_html_footer_string(False, False)
    self.assertItemsEqual(out_string.split('\n'),
                          EXPECTED_FOOTER_C.split('\n'))
    self.assertEqual(out_string, EXPECTED_FOOTER_C)

    # no biplots no jackknifing but with vectors
    out_string = format_emperor_html_footer_string(False, False, True)
    self.assertItemsEqual(out_string.split('\n'),
                          EXPECTED_FOOTER_D.split('\n'))
    self.assertEqual(out_string, EXPECTED_FOOTER_D)

    # comparison plot
    out_string = format_emperor_html_footer_string(False, False, False, True)
    self.assertItemsEqual(out_string.split('\n'),
                          EXPECTED_FOOTER_E.split('\n'))
    self.assertEqual(out_string, EXPECTED_FOOTER_E)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_report_footer(self):", "def WriteFooter(self):\n self.WriteText('}')", "def email_footer():\n footer = \"\"\n\n return footer", "def footer():\n\treturn \"\"\"<footer><table width=\"100%\"><th>Weather Icons by <a href=\"https://github.com/erikflowers/weather-icons\">Erik Flowers</a></th>\n\t<th><a href=\"http://forecast.io/\">Powered by Forecast</a></th></table></footer></div>\n\t</body></html>\"\"\"", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def WriteFooter(self):\n return", "def _get_footer(self, footer):\n if footer is None:\n html = self.footer()\n else:\n html = footer\n return html", "def print_footer():\n print('</text>')", "def parse_footer(self):\n lines=self.lines\n bodyfinish=re.compile(r\"</body>\", re.IGNORECASE).search(lines).span()[0]\n self.footer=lines[bodyfinish:]", "def writeFooter(self):\n pass", "def _get_footer_text(self):\n return _(THEME_FOOTER_TEXT)", "def getFooter():\n return _FOOTER", "def getFooter(HTMLstring):\n footer = open(os.path.dirname(os.path.realpath(__file__))+\"/html/footer.html\", \"r\")\n HTMLstring += footer.read()\n footer.close()\n return HTMLstring", "def parse_footer(self): # -> tuple[list[Unknown], Literal['']]:\n ...", "def testWriteFooter(self):\n file_writer = writers.VS2008ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteFooter()\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = b'</VisualStudioProject>\\r\\n'\n self.assertEqual(output_data, expected_output_data)", "def footer(self, **args):\n return self.pageConfig['footer'] % self.pageConfig", "def testWriteFooter(self):\n file_writer = writers.VS2010ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteFooter()\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n self.assertTrue(output_data.endswith(b'</Project>'))", "def foot(cls):\n return ''", "def test_create_report():\n text_string = mailroom.create_report()\n reference_text = \"Paul Allen $ 708.42 3 \\\n$ 236.14\"\n assert text_string.split(\"\\n\")[-2] == reference_text", "def footer():\n return u'</form></body></html>\\n'", "def __footer(self):\n result = \"\"\"This API and the related documentation has been created with <a href=\"https://github.com/opencitations/ramose\" target=\"_blank\">RAMOSE</a>, the *Restful API Manager Over SPARQL Endpoints*, developed by <a href=\"http://orcid.org/0000-0003-0530-4305\" target=\"_blank\">Silvio Peroni</a> and <a href=\"https://marilenadaquino.github.io\">Marilena Daquino</a>.\"\"\"\n return markdown(result)", "def test_write_page_margins_footer(self):\n\n self.worksheet.set_footer(margin=0.5)\n self.worksheet._write_page_margins()\n\n exp = \"\"\"<pageMargins left=\"0.7\" right=\"0.7\" top=\"0.75\" bottom=\"0.75\" header=\"0.3\" footer=\"0.5\"/>\"\"\"\n got = self.fh.getvalue()\n\n self.assertEqual(got, exp)", "def get_footer_text(self):\n return self._get_footer_text()", "def get_footer() -> html:\n footer = dbc.Container([\n html.Hr(),\n dbc.Row([\n dbc.Col([\n 'Made with ❤ in Frankfurt from ',\n dcc.Link(children='STATWORX',\n href='https://www.statworx.com/',\n style={\"color\": COLOR_STATWORX}),\n ]),\n dbc.Col(dcc.Link(\n children='Try Again!', href='/', style={\"color\": COLOR_STATWORX}),\n className=\"text-right\")\n ])\n ],\n className='mb-4')\n\n return footer", "def footer_html():\n note_div = html.Div(\n [\n dcc.Markdown(\n 
\"This website uses natural language processing (NLP) to power search on a set of research papers related to COVID-19.\"\n \" It was created by the team behind [Matscholar](https://www.matscholar.com), a research effort led by the [HackingMaterials](https://hackingmaterials.lbl.gov), \"\n \" [Persson](https://perssongroup.lbl.gov), and [Ceder](https://ceder.berkeley.edu) research\"\n \" groups at Lawrence Berkeley National Lab.\"\n \" The virus icon in our logo was made by Freepik from www.flaticon.com\",\n className=\"column is-half is-size-6\"\n )\n ],\n className=\"columns is-centered\"\n\n )\n\n common_footer_style = \"has-text-weight-bold\"\n\n about_matscholar = html.A(\n \"About Matscholar\",\n href=\"https://github.com/materialsintelligence/matscholar-web\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n privacy_policy = html.A(\n \"Privacy Policy\",\n href=\"https://www.iubenda.com/privacy-policy/55585319\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n submit_feedback = html.A(\n \"Matscholar Forum\",\n href=\"https://discuss.matsci.org/c/matscholar\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n footer_link_tree = html.Div(\n [\n about_matscholar,\n html.Span(\" | \"),\n privacy_policy,\n html.Span(\" | \"),\n submit_feedback,\n ]\n )\n\n footer_copyright = html.Div(\n html.Span(\"Copyright © 2019 - Materials Intelligence\")\n )\n\n footer = html.Div(\n [note_div, footer_link_tree, footer_copyright],\n id=\"footer_container\",\n className=\"content has-text-centered\",\n )\n\n footer_container = html.Div(footer)\n return footer_container", "def get_footer(self):\n self.footer = '</div>' \\\n '</div>' \\\n '</div>' \\\n '<div class=\"footer\">' \\\n '<div class=\"container\">' \\\n '<p class=\"text-muted\">Copyright Harm Brugge 2014.</p>' \\\n '</div>' \\\n '</div>' \\\n '</body>' \\\n '</html>'\n return self.footer", "def footer(node):\n\n current_time = datetime.datetime.now()\n return '''\n </div>\n <div id=\"edit\">\n Last edit: ''' + time.strftime(\"%m/%d/%Y %I:%M:%S %p\", node.page.last_edit) + '''\n </div>\n </div>\n <footer>\n &copy; ''' + str(current_time.year) + ' ' + AUTHOR + ''' | Generated with <a href=\"http://www.minimalblue.com/software/minimalsite.html\">minimalsite</a> \n </footer>\n </div>\n </body>\n</html>'''", "def test_text_multiline(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(text=\"abc\\ndef\")), \":warning: **abc**\\ndef\")", "def footer(self):\n pass" ]
[ "0.6916973", "0.6706198", "0.65470004", "0.6467227", "0.6396218", "0.6396218", "0.6322183", "0.6317668", "0.630486", "0.62373716", "0.6228748", "0.6082782", "0.6076292", "0.6027462", "0.5991507", "0.5988088", "0.5987023", "0.5986638", "0.5892697", "0.5833384", "0.58297634", "0.58236045", "0.5818769", "0.58097684", "0.57631475", "0.5736514", "0.568864", "0.5686053", "0.5680112", "0.5669494" ]
0.75619566
0
Sent by a client when the user enters a new message. The _message event is sent to all people in the room.
def message(message):
    room = session.get('room')
    print('%s : message : %s' % (session, message['message']))
    emit('_message',
         {'user_name': session.get('name'), 'message': message['message']},
         room=room, include_self=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)", "def new_message(self, room, mess):\n pass", "async def new_message(self, message):\n user = self.scope['user']\n response_data = {\n 'message': message,\n 'username': user.get_full_name()\n }\n await self.create_chat_message(user, message)\n await self.channel_layer.group_send(\n self.conversation_name,\n {\n 'type': 'chat_message',\n 'response_data': json.dumps(response_data)\n }\n )", "def on_message(self, message):\n self.write_message(u\"%s\" % message)", "def new_message(self, message):\n self.message_counter += 1\n self.message_buffer.append(str(message))\n self.event_loop()", "def message_handler(msg):\n logging.info(\"Message Text: %s\" % msg['msg'])\n\n message_entry = Message(request.sid, msg['room'], msg['msg'], msg['time'])\n if msg['msg'] != \"User has connected!\":\n logging.info(\"About to add to DB\")\n db.session.add(message_entry)\n db.session.commit()\n logging.info(\"Added to DB\")\n send(msg['msg'], room=msg['room'])", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)\n self.write_message(\"Conn!\")", "async def chat_message(self, event):\n if self.user and not self.user.is_authenticated:\n return\n\n user_id = event['user_id']\n message = event['message']\n created_at = event['created_at']\n publisher_full_name = event['publisher_full_name']\n\n await self.send(text_data=json.dumps({\n 'user_id': user_id,\n 'created_at': created_at,\n 'message': \"{}\".format(message),\n 'publisher_full_name': publisher_full_name,\n }))", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "def _handle_message(self, msg):\n self.event('message', msg)", "def on_message(self, _, message):\n with self.message_lock:\n self.messages.append(Message.deserialize(message))\n self.new_message_available.set()\n super().on_message(_, message)", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def client_message_handler(self, message, client):\n LOG.debug(f\"Разбираем сообщение: {message}\")\n if (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_PRESENCE\n and s.KEY_TIME in message\n and s.KEY_USER in message\n ):\n if message[s.KEY_USER][s.KEY_ACCOUNT_NAME] not in self.names.keys():\n self.names[message[s.KEY_USER][s.KEY_ACCOUNT_NAME]] = client\n MSG.send(client, s.RESPONSE_200)\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Имя пользователя уже занято.\"\n MSG.send(client, response)\n self.clients.remove(client)\n client.close()\n return\n # Если это сообщение, то добавляем его в очередь сообщений.\n # Ответ не требуется.\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_MESSAGE\n and s.KEY_TIME in message\n and s.KEY_TO in message\n and s.KEY_FROM in message\n and s.KEY_MESSAGE in message\n ):\n self.messages.append(message)\n return\n # Если клиент выходит\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_EXIT\n and s.KEY_ACCOUNT_NAME in message\n ):\n self.clients.remove(self.names[message[s.KEY_ACCOUNT_NAME]])\n self.names[message[s.KEY_ACCOUNT_NAME]].close()\n del 
self.names[message[s.KEY_ACCOUNT_NAME]]\n return\n # Иначе отдаём Bad request\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Запрос не корректен\"\n MSG.send(client, response)\n return", "def on_message(\n self, client: mqtt.Client, userdata: typing.Any, msg: mqtt.MQTTMessage\n ) -> None:\n self.msgs.append(msg)", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "def onMessage(self, message):\n raise NotImplementedError", "def message_callback(self, message):\n pass", "def write_message(self, message):\r\n logging.debug(\"Sending message {mes} to {usr}\".format(mes=message, usr=self.id))\r\n self.handler.write_message(message)", "def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "def websock_message(self, user, client, message):\n service = client.service\n self.websock_handlers[service]['new_message'](user, client, message)\n return", "def text(message):\n global list_messages\n room = session.get('room')\n msg = session.get('name') + ':' + message['msg']\n list_messages.append(msg)\n addNewMsg(message,session)\n print ('size of list_messages ' + str(len(list_messages)) + ', session ' + str(session))\n emit('message', {'msg': msg}, room=room)", "def message_new(\n self,\n event: Dict[str, Any]\n ) -> NoReturn:\n event = event[\"object\"][\"message\"]\n msg = event[\"text\"].lstrip(\"/\")\n peer_id = event[\"peer_id\"]\n from_id = event[\"from_id\"]\n msg_id = event[\"conversation_message_id\"]\n\n if peer_id in self.messages_to_delete:\n peer = CHAT_ID_OFFSET + config.USERBOT_CHATS[peer_id]\n new_messages_to_delete = []\n ids = []\n\n for item in self.messages_to_delete[peer_id]:\n if item['date'] > datetime.now():\n new_messages_to_delete.append(item)\n else:\n ids.append(item['id'])\n\n if new_messages_to_delete:\n self.messages_to_delete[peer_id] = new_messages_to_delete\n else:\n self.messages_to_delete.pop(peer_id)\n\n if ids:\n self.userbot.delete_messages(ids, peer)\n\n user = self.data.get_user(from_id, self) if from_id > 0 else None\n\n messages = self.get_messages(event)\n selected_message = messages[0] if len(messages) == 1 else None\n selected_user = (\n self.data.get_user(selected_message['from_id'], self)\n if selected_message and selected_message['from_id'] > 0 else None)\n\n try:\n self.commands.process(\n msg, peer_id, from_id, messages, msg_id,\n user, selected_user)\n except Exception as e:\n print(e)", "def client(self,message):\n self.message = message\n self.run()", "def on_message(data):\n pass", "def joined(message):\n\tglobal GLOBAL_NUM_USERS\n\tGLOBAL_NUM_USERS = GLOBAL_NUM_USERS + 1\n\tprint(message)\n\tsession['name'] = message['name']\n\tsession['room'] = message['room']\n\troom = session.get('room')\n\tjoin_room(room)\n\tprint('%s : joined' % 
session)\n\temit('_joined', {'user_name': session.get('name'), 'num_users' : GLOBAL_NUM_USERS}, room=room)", "def message(self, message):\n if '\\n' not in message:\n cmd = '{}serverMessage \"{}\"'.format(self.console, Commands.aquote(message))\n self.write_command(cmd)\n else:\n self.multiple_messages(message.split('\\n'))" ]
[ "0.75515765", "0.7389769", "0.73630506", "0.7308458", "0.7274646", "0.7151676", "0.71495336", "0.71244293", "0.70860195", "0.70378906", "0.6993945", "0.68838733", "0.68355983", "0.68355983", "0.68355983", "0.68079144", "0.6807457", "0.6795922", "0.67576134", "0.6731406", "0.67183095", "0.6692338", "0.66804117", "0.6665366", "0.665281", "0.6632184", "0.6630715", "0.662552", "0.66214937", "0.6614083" ]
0.76300406
0
Returns true or false based on whether or not a show ID is valid
def validate_id(show_id: int,
                database_connection: mysql.connector.connect) -> bool:
    try:
        show_id = int(show_id)
    except ValueError:
        return False

    try:
        cursor = database_connection.cursor()
        query = "SELECT showid from ww_shows where showid = %s;"
        cursor.execute(query, (show_id,))
        result = cursor.fetchone()
        cursor.close()

        return bool(result)
    except ProgrammingError as err:
        raise ProgrammingError("Unable to query the database") from err
    except DatabaseError as err:
        raise DatabaseError("Unexpected database error") from err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def id_exists(show_id: int,\n database_connection: mysql.connector.connect) -> bool:\n return validate_id(show_id, database_connection)", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def id_is_valid(gal_id, query_id, data):\n return not ((data.cam_idx[query_id] == data.cam_idx[gal_id]) and (data.labels[query_id] == data.labels[gal_id]))", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False", "def is_valid_entity(self):\n return is_correct_cve_id(self.cve_id)", "def check_id(self, id):", "def validateId(shortId):\n return shortId in [DockerUtil.getShortId(container) for container in DOCKER_CLIENT.containers.list()]", "def identify_id(id: str) -> bool:\n return validate_handle(id)", "def has_valid_id(self):\n try:\n project_id = self.track.project.id\n except (OSError, AttributeError):\n return False\n pointer, name = self._get_pointer_and_name()\n return bool(RPR.ValidatePtr2(project_id, pointer, name))", "def _is_validation(video_id):\n hasher = md5()\n hasher.update(bytes(video_id, 'utf-8'))\n first = hasher.hexdigest()[0]\n return first in ['0', '1']", "def date_exists(show_year: int,\n show_month: int,\n show_day: int,\n database_connection: mysql.connector.connect) -> bool:\n show_date = None\n try:\n show_date = datetime.datetime(show_year, show_month, show_day)\n except ValueError as err:\n raise ValueError(\"Invalid year, month and/or day value\") from err\n\n try:\n show_date_str = show_date.isoformat()\n cursor = database_connection.cursor()\n query = \"SELECT showid from ww_shows WHERE showdate = %s;\"\n cursor.execute(query, (show_date_str,))\n result = cursor.fetchone()\n cursor.close()\n\n return bool(result)\n except ProgrammingError as err:\n raise ProgrammingError(\"Unable to query the database\") from err\n except DatabaseError as err:\n raise DatabaseError(\"Unexpected database error\") from err", "def _has_valid_visa(self):\n if self.visa is not None:\n return self.visa_is_valid()\n else:\n return False", "def is_valid_instance_id(version):\n return bool(INSTANCE_ID_RE.match(version))", "def isValid(t_id):\n\tstr_id=str(t_id).strip()\n\treturn str_id.isdigit()", "def checkValid(self):\n if (self.noteName is not None) and (self.accidental is not None) and (self.octave is not None):\n return True\n else:\n return False", "def can_show(self):\n return self.can_show", "def is_valid_option(cls, id_):\n return id_ in cls.CHOICES", "def test_details_nonnum_id(self):\n self.check_response(\n '/attributes/xyz',\n ('Please enter an integer value for Attribute 
ID',))", "def checkValidId(self, id, prep_id = False):\n new_id = unquote(id)\n if prep_id: new_id = self.prepId(id)\n try:\n globalCheckValidId(self, new_id)\n return True\n except Exception:\n return str(sys.exc_info()[1])", "def _is_conveyance_id_valid(self, conveyance_id):\n sql = \"SELECT id FROM {} WHERE id=?\".format(self.conveyance_type)\n\n try:\n query_result = self.cursor.execute(sql, (str(conveyance_id),))\n if query_result.fetchall():\n return True\n return False\n except Exception as e:\n raise Exception(\n \"An error occurred while fetching a %s in the database: query: %s - message: %s\"\n % (self.conveyance_type, sql, e)\n )", "def validate(self, record, records):\n if not record or self.field not in record.props:\n return False\n handle_id = record.props[self.field].val\n # Make sure the format of handle id is equivalent to all other handles\n # e.g. '0x123' will become '0x0123'.\n handle_id = '0x{:04X}'.format(int(handle_id, 16))\n if handle_id not in records:\n return False\n if records[handle_id].type_id != self.type_id:\n return False\n return True", "def valid_object(self,object_data):\n if not object_data.get('planId'):\n logging.error(\"Couldn't find planId, required field\")\n self.append_response(\"missing planId\")\n return (False)\n if not object_data.get(\"title\"):\n logging.warning(\"No title set for task\")\n return True", "def test_details_id_neg(self):\n self.check_response(\n '/attributes/-1',\n ('Please enter a number that is 1 or greater for Attribute ID',))", "def valid_app_id(self, app_id):\n return self.app_id == app_id", "def is_valid(self):\n if len(self) <= 64 and re.match(RE_VALID_UID, self):\n return True\n\n return False", "def test_details_id_ok(self):\n self.check_response('/attributes/1',\n ('Attribute ID#1 not found',))", "def checkValidId(self, id, prep_id = False):\n # RRD docs say that limit on vnames is 255 characters and that\n # A-Za-z0-9_ are the valid characters. Zenoss reserves - for it's own\n # use. Limiting to 200 instead just to leave room for whatever.\n # http://oss.oetiker.ch/rrdtool/doc/rrdgraph_data.en.html\n if len(id) > 200:\n return 'GraphPoint names can not be longer than 200 characters.'\n allowed = set(string.ascii_letters + string.digits + '_')\n attempted = set(id)\n if not attempted.issubset(allowed):\n return 'Only letters, digits and underscores are allowed' + \\\n ' in GraphPoint names.'\n return ZenModelRM.checkValidId(self, id, prep_id)", "def is_exp_set(self):\n if self.exp_id is None:\n return False\n if self.working_dir is None:\n return False\n if self.id != str(self.Id_widget.text()).strip():\n return False\n return True", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def has_id(self):\n return not self.id is None" ]
[ "0.6806671", "0.6270524", "0.62053543", "0.6015456", "0.5900047", "0.5893857", "0.5874957", "0.5871361", "0.5863451", "0.5851223", "0.57663053", "0.5741871", "0.5730465", "0.5723474", "0.5673921", "0.56678677", "0.5666891", "0.5639787", "0.56023496", "0.5581077", "0.5566015", "0.5565124", "0.55521524", "0.5533449", "0.5487808", "0.5485915", "0.5485158", "0.5478815", "0.543937", "0.5430395" ]
0.76452476
0
Returns a show's ID based on the show's year, month and day
def convert_date_to_id(show_year: int,
                       show_month: int,
                       show_day: int,
                       database_connection: mysql.connector.connect) -> int:
    show_date = None
    try:
        show_date = datetime.datetime(year=show_year,
                                      month=show_month,
                                      day=show_day)
    except ValueError as err:
        raise ValueError("Invalid year, month and/or day value") from err

    try:
        show_date_str = show_date.isoformat()
        cursor = database_connection.cursor()
        query = "SELECT showid from ww_shows WHERE showdate = %s;"
        cursor.execute(query, (show_date_str,))
        result = cursor.fetchone()
        cursor.close()

        if result:
            return result[0]

        return None
    except ProgrammingError as err:
        raise ProgrammingError("Unable to query the database") from err
    except DatabaseError as err:
        raise DatabaseError("Unexpected database error") from err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getShowId(show, conn):\n cur = conn.cursor()\n cur.execute(\"SELECT id_show FROM show WHERE name=?\", (show,))\n id_show = cur.fetchone()[0]\n return id_show", "def identifier(self):\n return self.slug + str(self.year())", "def search_show_id(self, series, year=None):\n # make the search\n logger.info('Searching show id for %r', series)\n r = self.session.post(self.server_url + 'search.php', data={'q': series}, timeout=10)\n r.raise_for_status()\n\n # get the series out of the suggestions\n soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])\n show_id = None\n for suggestion in soup.select('div.left li div a[href^=\"/tvshow-\"]'):\n match = link_re.match(suggestion.text)\n if not match:\n logger.error('Failed to match %s', suggestion.text)\n continue\n\n if sanitize(match.group('series')).lower() == series.lower():\n if year is not None and int(match.group('first_year')) != year:\n logger.debug('Year does not match')\n continue\n show_id = int(suggestion['href'][8:-5])\n logger.debug('Found show id %d', show_id)\n break\n\n soup.decompose()\n soup = None\n\n return show_id", "def get_model_year_id(make, model, year):\n api_url = 'https://api.edmunds.com/api/vehicle/v2/{}/{}/{}?fmt=json&api_key={}'\\\n .format(make, model, year, API_KEY)\n r = requests.get(api_url).json()\n return r['id']", "def convert_id_to_date(show_id: int,\n database_connection: mysql.connector.connect\n ) -> datetime.datetime:\n try:\n cursor = database_connection.cursor()\n query = \"SELECT showdate FROM ww_shows WHERE showid = %s;\"\n cursor.execute(query, (show_id,))\n result = cursor.fetchone()\n cursor.close()\n\n if result:\n return result[0].isoformat()\n\n return None\n except ProgrammingError as err:\n raise ProgrammingError(\"Unable to query the database\") from err\n except DatabaseError as err:\n raise DatabaseError(\"Unexpected database error\") from err", "def from_parts(cls, year: int, month: int, inc: int) -> 'Identifier':\n prefix = f'{str(year)[-2:]}{str(month).zfill(2)}'\n return cls(f'{prefix}.{str(inc).zfill(5)}')", "def date_exists(show_year: int,\n show_month: int,\n show_day: int,\n database_connection: mysql.connector.connect) -> bool:\n show_date = None\n try:\n show_date = datetime.datetime(show_year, show_month, show_day)\n except ValueError as err:\n raise ValueError(\"Invalid year, month and/or day value\") from err\n\n try:\n show_date_str = show_date.isoformat()\n cursor = database_connection.cursor()\n query = \"SELECT showid from ww_shows WHERE showdate = %s;\"\n cursor.execute(query, (show_date_str,))\n result = cursor.fetchone()\n cursor.close()\n\n return bool(result)\n except ProgrammingError as err:\n raise ProgrammingError(\"Unable to query the database\") from err\n except DatabaseError as err:\n raise DatabaseError(\"Unexpected database error\") from err", "def get_invNo(self, obj):\n return str(obj.invDate.year) + str(obj.id)", "def getID():", "def _get_unique_id(self):\n now = datetime.now()\n\n u_id = now.second + 60*(now.minute + 60*(now.hour + 24*(now.day + 31*(now.month + 366*(now.year)))))\n return \"instance\" + str(u_id)", "def model_id(self):\n date_str = dt.now().strftime(\"%Y-%m-%d_%H:%M\")\n return \"%s_%s_%s_%s\" % (date_str, self.embedding_type, self.class_label, self.drop_irrelevant)", "def load_show(year, season, brand):\n\n print \"Show\"\n\n brand_id = db.session.query(Brand).filter_by(brand_name=brand).one().brand_id\n\n year = 2017\n show = Show(season=season,\n year=year,\n brand_id=brand_id)\n\n # We need to add to the session or it 
won't ever be stored\n db.session.add(show)\n\n # Once we're done, we should commit our work\n db.session.commit()\n return show", "def create_show(token, show_id):\n url = 'https://api.thetvdb.com/series/' + str(show_id)\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text).get('data')\n network = json_data.get('network')\n title = json_data.get('seriesName')\n time = json_data.get('airsTime')\n day = json_data.get('airsDayOfWeek')\n overview = json_data.get('overview')\n s = Show.Show(show_id, network, title, time, day, overview)\n return s", "def get_show_with_slug(self, slug: str) -> int:\n shows = self.show_source.show_names\n show_slugs = \\\n {\n self.sluggify(name): show\n for name, show in shows.items()\n }\n try:\n return show_slugs[slug]\n except KeyError as e:\n raise NoSuchShowError from e", "def season_episode_str_from_show(show):\n return 'S{:02d}E{:02d}'.format(show._next.season, show._next.episode)", "def _next_yymm_id(self, identifier: Identifier) -> Optional[Identifier]:\n next_yymm_id = None\n if identifier.year is not None and \\\n identifier.month is not None:\n new_year = identifier.year\n new_month = identifier.month + 1\n new_num = 1\n if new_month > 12:\n new_month = 1\n new_year = new_year + 1\n if identifier.is_old_id:\n next_yymm_id = '{}/{:02d}{:02d}{:03d}'.format(\n identifier.archive, new_year % 100, new_month, new_num)\n elif new_year >= 2015:\n next_yymm_id = '{:02d}{:02d}.{:05d}'.format(\n new_year % 100, new_month, new_num)\n else:\n next_yymm_id = '{:02d}{:02d}.{:04d}'.format(\n new_year % 100, new_month, new_num)\n\n try:\n return Identifier(arxiv_id=next_yymm_id)\n except IdentifierException:\n return None\n else:\n return None", "def test_get_id():\n vc = vtec.parse(EX1)\n assert vc[0].get_id(2005) == \"2005-KJAN-TO-W-0130\"", "def parse_show(self, res, date, movie_id, theater_id):\n times = res.css(SELECTORS['SHOW_TIMES']).re(r'[0-9]{1,2}[:][0-9]{2}')\n obj = {\n 'movie_id': movie_id,\n 'theater_id': theater_id,\n 'date': DateHelper.strtodatetime(date),\n 'start': DateHelper.strtoseconds(times[0]),\n 'end': DateHelper.strtoseconds(times[1]),\n 'type': SelectHelper.get_array(res, SELECTORS['SHOW_TYPE']),\n 'url': BASE_URL + SelectHelper.get(res, SELECTORS['SHOW_URL']),\n }\n return Show(obj)", "def slug(self):\n return self.date.strftime('%Y-%m')", "def id_by_title(self, title):\n logging.debug('id_by_title(%s)', title)\n if not self.list_loaded_:\n self.load_shows()\n\n for show_id in self.shows_data:\n next_show = self.shows_data[show_id]\n logging.debug('id_by_title(%s) = %s', next_show['title'], show_id)\n if next_show['title'] == title:\n logging.debug('Found id_by_title(%s) = %s', title, show_id)\n return show_id\n\n print('Unknown title - {0}'.format(title))\n sys.exit(1)", "def show_cal(request, year=None, month=None):\n if year == None:\n # get the current comic as a starting point\n lToday = Comic.objects.filter(published=True).order_by('-date')[0].date\n year = lToday.year\n month = lToday.month\n\n return calendar(request, year, month)", "def get_product_id(self):\n pid = \"%s-%s-%s-%s\" % (self.valid.strftime(\"%Y%m%d%H%M\"),\n self.source, self.wmo, self.afos)\n return pid.strip()", "def id(self):\n return \"{model:s}--{serial:08x}\".format(model=self.model.replace('-',''), serial=self.serial_number).lower()", "def _parse_id(line):\n ablt_pat = re.compile('(?<=2014_)[0-9]{12}(?=.jpg)')\n orig_pat = re.compile('(?<=[0-9]{16}_)[0-9]+')\n mat = 
ablt_pat.search(line)\n if mat is None: #original image\n mat = orig_pat.search(line)\n assert not mat is None, (\"this line does not contain a COCO image id: {}\" % line )\n return line[mat.start(): mat.end()], 'orig'\n else: #ablated image\n num = line[mat.start(): mat.end()]\n return str(int(num)), 'ablt'", "def get_datecode():\n now = datetime.utcnow()\n return now.strftime(\"%Y%m%d\")", "def id(self):\n # Might also be a first 12-characters shortcut.\n return self._id", "def get_movie_id(self) -> str:\n return self.movie.id", "def test_get_id(self):\n\n self.metadata.create_or_update(data=self.create)\n\n # First pick up by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n # Then fetch by ID\n res = self.metadata.get_by_id(entity=Dashboard, entity_id=res_name.id)\n\n self.assertEqual(res_name.id, res.id)", "def movie_identifier(self):\n return 'bluray_id'", "def _next_id(self, identifier: Identifier) -> Optional['Identifier']:\n next_id = None\n if identifier.year is not None and \\\n identifier.month is not None and \\\n identifier.num is not None:\n new_year = identifier.year\n new_month = identifier.month\n new_num = identifier.num + 1\n if (identifier.is_old_id and new_num > 999) \\\n or (not identifier.is_old_id\n and identifier.year < 2015\n and new_num > 9999) \\\n or (not identifier.is_old_id\n and identifier.year >= 2015 and new_num > 99999):\n new_num = 1\n new_month = new_month + 1\n if new_month > 12:\n new_month = 1\n new_year = new_year + 1\n\n if identifier.is_old_id:\n next_id = '{}/{:02d}{:02d}{:03d}'.format(\n identifier.archive, new_year % 100, new_month, new_num)\n else:\n if new_year >= 2015:\n next_id = '{:02d}{:02d}.{:05d}'.format(\n new_year % 100, new_month, new_num)\n else:\n next_id = '{:02d}{:02d}.{:04d}'.format(\n new_year % 100, new_month, new_num)\n try:\n return Identifier(arxiv_id=next_id)\n except IdentifierException:\n return None\n else:\n return None" ]
[ "0.63307565", "0.62844926", "0.59220475", "0.5455561", "0.54395676", "0.539838", "0.5384833", "0.53265357", "0.52978224", "0.5228968", "0.52228785", "0.5200436", "0.5122689", "0.5111875", "0.5071342", "0.50661474", "0.5059235", "0.5027292", "0.49998707", "0.497275", "0.49435627", "0.48852533", "0.48759276", "0.48754707", "0.48750684", "0.48566663", "0.4835123", "0.48180288", "0.48073715", "0.48072144" ]
0.74980223
0
Returns a show's date based on the show's ID
def convert_id_to_date(show_id: int,
                       database_connection: mysql.connector.connect
                       ) -> datetime.datetime:
    try:
        cursor = database_connection.cursor()
        query = "SELECT showdate FROM ww_shows WHERE showid = %s;"
        cursor.execute(query, (show_id,))
        result = cursor.fetchone()
        cursor.close()

        if result:
            return result[0].isoformat()

        return None
    except ProgrammingError as err:
        raise ProgrammingError("Unable to query the database") from err
    except DatabaseError as err:
        raise DatabaseError("Unexpected database error") from err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_date_from_id(date_id):\n return Date.query.filter_by(id=date_id).first()", "def get(self, show_id):\r\n show = Shows.query.filter_by(ShowID=show_id).first_or_404()\r\n content = jsonify({\r\n \"shows\": [{\r\n \"date\": get_iso_format(show.ShowDate),\r\n \"countryCode\": show.CountryCode,\r\n \"country\": show.Country,\r\n \"city\": show.City,\r\n \"venue\": show.Venue,\r\n \"setlist\": self.get_setlist(show.ShowID),\r\n \"otherBands\": self.get_other_bands(show.ShowID),\r\n \"people\": self.get_show_people(show.ShowID),\r\n }]\r\n })\r\n\r\n return make_response(content, 200)", "def convert_date_to_id(show_year: int,\n show_month: int,\n show_day: int,\n database_connection: mysql.connector.connect) -> int:\n show_date = None\n try:\n show_date = datetime.datetime(year=show_year,\n month=show_month,\n day=show_day)\n except ValueError as err:\n raise ValueError(\"Invalid year, month and/or day value\") from err\n\n try:\n show_date_str = show_date.isoformat()\n cursor = database_connection.cursor()\n query = \"SELECT showid from ww_shows WHERE showdate = %s;\"\n cursor.execute(query, (show_date_str,))\n result = cursor.fetchone()\n cursor.close()\n\n if result:\n return result[0]\n\n return None\n except ProgrammingError as err:\n raise ProgrammingError(\"Unable to query the database\") from err\n except DatabaseError as err:\n raise DatabaseError(\"Unexpected database error\") from err", "def parse_show(self, res, date, movie_id, theater_id):\n times = res.css(SELECTORS['SHOW_TIMES']).re(r'[0-9]{1,2}[:][0-9]{2}')\n obj = {\n 'movie_id': movie_id,\n 'theater_id': theater_id,\n 'date': DateHelper.strtodatetime(date),\n 'start': DateHelper.strtoseconds(times[0]),\n 'end': DateHelper.strtoseconds(times[1]),\n 'type': SelectHelper.get_array(res, SELECTORS['SHOW_TYPE']),\n 'url': BASE_URL + SelectHelper.get(res, SELECTORS['SHOW_URL']),\n }\n return Show(obj)", "def show_last_watched_by_date(self, alias):\n date_to = datetime.date.today()\n if alias == 'day':\n date_from = date_to + datetime.timedelta(days=-1)\n elif alias == 'week':\n date_from = date_to + datetime.timedelta(days=-7)\n elif alias == 'month':\n prev_month = date_to.replace(day=1) + datetime.timedelta(days=-1)\n date_from = date_to + datetime.timedelta(days=-prev_month.day)\n else:\n print('Unknown alias - {0}'.format(alias))\n sys.exit(1)\n\n self.load_shows()\n print()\n print('Watched from {0} to {1}'.format(\n date_from.strftime('%Y-%m-%d'),\n date_to.strftime('%Y-%m-%d')\n ))\n print()\n re_c = re.compile(r'(\\d{1,2})\\.(\\d{1,2})\\.(\\d{4})')\n count = 0\n for show_id in self.shows_data:\n next_show = self.shows_data[show_id]\n if next_show['watchedEpisodes'] <= 0:\n continue\n watched = self.load_watched(next_show['showId'])\n epis = None\n last_map = {}\n for epi_id in watched:\n next_episode = watched[epi_id]\n re_m = re_c.match(next_episode['watchDate'])\n if not re_m:\n print('Warning: unknown date format - {0}'.format(\n next_episode['watchDate']))\n continue\n dtv = [int(s) for s in re_m.group(3, 2, 1)]\n epi_date = datetime.date(dtv[0], dtv[1], dtv[2])\n if date_from <= epi_date <= date_to:\n if not epis:\n epis = self.load_episodes(show_id)\n count += 1\n if epi_id not in epis['episodes']:\n print('Episode not found: {0}'.format(epi_id))\n logging.debug('Episodes:')\n logging.debug(epis)\n continue\n\n episode = epis['episodes'][epi_id]\n date_key = epi_date.toordinal() * 1000\\\n + episode['seasonNumber'] * 10\\\n + episode['episodeNumber']\n last_map[date_key] = episode\n\n for date_key in 
sorted(last_map.keys()):\n episode = last_map[date_key]\n print('{0} s{1:02d}e{2:02d} \"{3}\" at {4}'.format(\n tr_out(epis['title']),\n episode['seasonNumber'], episode['episodeNumber'],\n tr_out(episode['title']),\n watched[str(episode['id'])]['watchDate']\n ))\n print()\n print('Total count: {0}'.format(count))\n print()", "def get(self, show_id, session):\n try:\n show = db.show_by_id(show_id, session=session)\n except NoResultFound:\n raise NotFoundError('Show with ID %s not found' % show_id)\n\n args = series_list_parser.parse_args()\n begin = args.get('begin')\n latest = args.get('latest')\n\n return jsonify(series_details(show, begin, latest))", "def _get_date(self, relative_idx):\r\n return self.dl.dates[self._identified_date_id + relative_idx]", "def getShowId(show, conn):\n cur = conn.cursor()\n cur.execute(\"SELECT id_show FROM show WHERE name=?\", (show,))\n id_show = cur.fetchone()[0]\n return id_show", "def create_show(token, show_id):\n url = 'https://api.thetvdb.com/series/' + str(show_id)\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text).get('data')\n network = json_data.get('network')\n title = json_data.get('seriesName')\n time = json_data.get('airsTime')\n day = json_data.get('airsDayOfWeek')\n overview = json_data.get('overview')\n s = Show.Show(show_id, network, title, time, day, overview)\n return s", "def __repr__(self):\n return f'<Show {self.id} {str(self.start_time)}>'", "def for_date(filter_id, date):\n try:\n return ProjectSummary.objects.get(filter_id=filter_id, created_on=date)\n except ProjectSummary.DoesNotExist:\n return None", "def generate_show_details_label(shows, show_id):\n print shows[show_id]\n return \"{starttime:<4}: {name: >40}\\n{details}\".format(**shows[show_id])", "def showSelectedDate(self):\n pass", "def get_date(self):\n return self.date", "def get_date(self):\n return self.date", "def get_date(self):\n return self.date", "def get_date(self):\n return self.date", "def getSelectedShowtime(self):\n\n cur = self.current()\n if cur < 0:\n return None\n else:\n return self.theater.showtimes(self.showtimeIds[cur])", "def get_date_from_display(self) -> str:\n return _date(\n self.date_from,\n \"DATETIME_FORMAT\" if self.settings.show_times else \"DATE_FORMAT\"\n )", "def get(self, show_id, season_id, session):\n try:\n db.show_by_id(show_id, session=session)\n except NoResultFound:\n raise NotFoundError('show with ID %s not found' % show_id)\n try:\n season = db.season_by_id(season_id, session)\n except NoResultFound:\n raise NotFoundError('season with ID %s not found' % season_id)\n if not db.season_in_show(show_id, season_id):\n raise BadRequest(f'season with id {season_id} does not belong to show {show_id}')\n\n rsp = jsonify(season.to_dict())\n\n # Add Series-ID header\n rsp.headers.extend({'Series-ID': show_id})\n return rsp", "def show_venue(venue_id):\n # shows the venue page with the given venue_id\n result = db.session.query(Venue).filter(Venue.id == venue_id)\n result = result[0]\n\n past_shows_count = 0\n upcoming_shows_count = 0\n\n past_shows = []\n upcoming_shows = []\n\n all_shows = Shows.query.all()\n\n print(all_shows)\n\n for show in all_shows:\n if show.venue_id == result.id:\n show_time = datetime.strptime(show.start_time, '%Y-%m-%d %H:%M:%S')\n if show_time > datetime.now() :\n upcoming_shows.append(show)\n else: \n past_shows.append(show)\n \n past_shows_count = len(past_shows)\n upcoming_shows_count = len(upcoming_shows)\n \n\n # 
TODO: replace with real venue data from the venues table, using venue_id (DONE)\n resdata = {\n \"id\": result.id,\n \"name\": result.name,\n \"genres\": json.loads(result.genres),\n \"address\": result.address,\n \"city\": result.city,\n \"state\": result.state,\n \"phone\": result.phone,\n \"website\": result.website,\n \"facebook_link\": result.facebook_link,\n \"seeking_talent\": result.seeking_talent,\n \"seeking_description\": result.seeking_description,\n \"image_link\": result.image_link,\n \"past_shows\": past_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows_count\": past_shows_count,\n \"upcoming_shows_count\": upcoming_shows_count,\n }\n \n data = list(filter(lambda d: d[\"id\"] == venue_id, [resdata]))[0]\n return render_template(\"pages/show_venue.html\", venue=data)", "def __str__(self):\n return self._date", "def fetch_show_information (self, id, type):\n # check if we have a show or a movie, the request made depends on this\n if type == 'show':\n paths = [\n ['videos', id, ['requestId', 'regularSynopsis', 'evidence']],\n ['videos', id, 'seasonList', 'current', 'summary']\n ]\n else:\n paths = [['videos', id, ['requestId', 'regularSynopsis', 'evidence']]]\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='Show information')", "def get(self, show_id, ep_id, session):\n try:\n db.show_by_id(show_id, session=session)\n except NoResultFound:\n raise NotFoundError('show with ID %s not found' % show_id)\n try:\n episode = db.episode_by_id(ep_id, session)\n except NoResultFound:\n raise NotFoundError('episode with ID %s not found' % ep_id)\n if not db.episode_in_show(show_id, ep_id):\n raise BadRequest(f'episode with id {ep_id} does not belong to show {show_id}')\n\n rsp = jsonify(episode.to_dict())\n\n # Add Series-ID header\n rsp.headers.extend({'Series-ID': show_id})\n return rsp", "def retrieve_scoring_info_by_show_id(show_id: int,\n database_connection: mysql.connector.connect\n ) -> Dict:\n\n info = OrderedDict()\n cursor = database_connection.cursor(dictionary=True)\n query = (\"SELECT s.showdate, pm.panelistlrndstart, \"\n \"pm.panelistlrndcorrect, pm.panelistscore \"\n \"FROM ww_shows s \"\n \"JOIN ww_showpnlmap pm ON pm.showid = s.showid \"\n \"WHERE s.showid = %s \"\n \"LIMIT 1;\")\n cursor.execute(query, (show_id, ))\n result = cursor.fetchone()\n cursor.close()\n\n if not result:\n return None\n\n info[\"id\"] = show_id\n info[\"date\"] = result[\"showdate\"].isoformat()\n info[\"start\"] = result[\"panelistlrndstart\"]\n info[\"correct\"] = result[\"panelistlrndcorrect\"]\n info[\"score\"] = result[\"panelistscore\"]\n\n return info", "def get_data(self):#id in db\n\t\tarr = self.startTime.split(\"\\/\")\n\t\treturn new Date(arr[0],arr[1]-1,arr[2].split(\" \")[0])", "def get_show_info(self, id, **kwargs):\n kwargs['id'] = id\n return self.get('info/show.json', **kwargs)", "def __str__(self):\n return f'{self.id} {self.posted_date}'", "def load_show(year, season, brand):\n\n print \"Show\"\n\n brand_id = db.session.query(Brand).filter_by(brand_name=brand).one().brand_id\n\n year = 2017\n show = Show(season=season,\n year=year,\n brand_id=brand_id)\n\n # We need to add to the session or it won't ever be stored\n db.session.add(show)\n\n # Once we're done, we should commit our work\n db.session.commit()\n return show", "def get_show_with_slug(self, slug: str) -> int:\n shows = self.show_source.show_names\n show_slugs = \\\n {\n self.sluggify(name): show\n for name, show in shows.items()\n }\n try:\n 
return show_slugs[slug]\n except KeyError as e:\n raise NoSuchShowError from e" ]
[ "0.61641234", "0.603501", "0.5778164", "0.5685854", "0.5669825", "0.5654186", "0.5623275", "0.5566595", "0.553927", "0.55132943", "0.5504347", "0.5438018", "0.54188645", "0.54028845", "0.54028845", "0.54028845", "0.54028845", "0.53803724", "0.52745485", "0.52659804", "0.5258812", "0.52457726", "0.52271044", "0.52132964", "0.5209676", "0.5186815", "0.51607585", "0.5160716", "0.5142123", "0.51377684" ]
0.65703845
0
Returns true or false based on whether or not a show ID exists
def id_exists(show_id: int, database_connection: mysql.connector.connect) -> bool: return validate_id(show_id, database_connection)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_id(show_id: int,\n database_connection: mysql.connector.connect) -> bool:\n try:\n show_id = int(show_id)\n except ValueError:\n return False\n\n try:\n cursor = database_connection.cursor()\n query = \"SELECT showid from ww_shows where showid = %s;\"\n cursor.execute(query, (show_id,))\n result = cursor.fetchone()\n cursor.close()\n\n return bool(result)\n except ProgrammingError as err:\n raise ProgrammingError(\"Unable to query the database\") from err\n except DatabaseError as err:\n raise DatabaseError(\"Unexpected database error\") from err", "def _id_exists(self):\n return self.app_id in self.engine.app_list", "def has_id(self):\n return not self.id is None", "def can_show(self):\n return self.can_show", "def _exists(isamAppliance, id):\n exists = False\n ret_obj = get_all(isamAppliance)\n\n for snmp in ret_obj['data']:\n if snmp['id'] == id:\n exists = True\n break\n\n return exists", "def exists(self):\n\n return self.ids[-1] is not None", "def date_exists(show_year: int,\n show_month: int,\n show_day: int,\n database_connection: mysql.connector.connect) -> bool:\n show_date = None\n try:\n show_date = datetime.datetime(show_year, show_month, show_day)\n except ValueError as err:\n raise ValueError(\"Invalid year, month and/or day value\") from err\n\n try:\n show_date_str = show_date.isoformat()\n cursor = database_connection.cursor()\n query = \"SELECT showid from ww_shows WHERE showdate = %s;\"\n cursor.execute(query, (show_date_str,))\n result = cursor.fetchone()\n cursor.close()\n\n return bool(result)\n except ProgrammingError as err:\n raise ProgrammingError(\"Unable to query the database\") from err\n except DatabaseError as err:\n raise DatabaseError(\"Unexpected database error\") from err", "def has_id(self, data):\n # (Dict[str, Any]) -> bool\n return self.id_column.name in data", "def ShowObject(object_id):\n return ShowObjects(object_id)==1", "def has_stockrecords(self):\n try:\n a=self.stockrecords.pk\n return True\n except:\n return False", "def has(self, key):\n return self.collection.find_one({'_id': key}) is not None", "def presentation_exists(self, presentation):\r\n result = QtSql.QSqlQuery('''SELECT * FROM presentations''')\r\n while result.next():\r\n if (unicode(presentation.title) == unicode(result.value(1).toString())\r\n and unicode(presentation.speaker) == unicode(result.value(2).toString())):\r\n return True\r\n return False", "def if_already_present(video_id: str) -> bool:\n return Video.objects.filter(video_id=video_id).exists()", "def has_valid_id(self):\n try:\n project_id = self.track.project.id\n except (OSError, AttributeError):\n return False\n pointer, name = self._get_pointer_and_name()\n return bool(RPR.ValidatePtr2(project_id, pointer, name))", "def mustShow(table, kwargs):\n\n return G(kwargs, N.showEid) if G(kwargs, N.showTable) == table else None", "def has_id_field(class_or_instance: Any) -> bool:\n return hasattr(class_or_instance, _ID_FIELD_NAME)", "def _exists (self):\n cursor = self._exec (self.select)\n return bool (cursor.fetchall ())", "def should_show():", "def checkExistence(idMeta):\n # XXX add for diaries\n sqlq = \"\"\"\n SELECT dt\n FROM Electricity_10min\n WHERE `Meta_idMeta` = {}\n LIMIT 1;\n \"\"\".format(idMeta)\n try:\n result = mdb.getSQL(sqlq)[0]\n if (result):\n return True\n except:\n return False", "def isExist(data):\n return True/False", "def has_id(self):\n return self.is_root() or hasattr(self, self.id_field)", "def getShowId(show, conn):\n cur = conn.cursor()\n cur.execute(\"SELECT id_show FROM 
show WHERE name=?\", (show,))\n id_show = cur.fetchone()[0]\n return id_show", "def check_id(self, id):", "def exists( identifier ):\n return note.exists(identifier)", "def check_db_for_vid(self):\n with db.cursor() as cursor:\n if self.videoId in db.\n pass", "def user_exists(self,unique_ID):\n\t\ttry:\n\t\t\tself.data[unique_ID]\n\t\texcept KeyError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def has_display(self) -> bool:\r\n return KebaService.DISPLAY in self.services", "def has_key(cls, id):\n return super().has_key(id)", "def exists(self):\n return True", "def exists(self):\n return True" ]
[ "0.67525923", "0.62630546", "0.62593603", "0.61877805", "0.61274064", "0.6115206", "0.6102666", "0.5976109", "0.5963691", "0.595145", "0.59385914", "0.59335345", "0.5849504", "0.58397585", "0.5820755", "0.58057904", "0.57720315", "0.57560587", "0.5727696", "0.57051325", "0.56993914", "0.5652791", "0.5629395", "0.5615831", "0.5598636", "0.55975807", "0.5576882", "0.5576489", "0.55714726", "0.55714726" ]
0.7385383
0
Returns true or false based on whether or not a show exists for the requested year, month and day
def date_exists(show_year: int, show_month: int, show_day: int, database_connection: mysql.connector.connect) -> bool: show_date = None try: show_date = datetime.datetime(show_year, show_month, show_day) except ValueError as err: raise ValueError("Invalid year, month and/or day value") from err try: show_date_str = show_date.isoformat() cursor = database_connection.cursor() query = "SELECT showid from ww_shows WHERE showdate = %s;" cursor.execute(query, (show_date_str,)) result = cursor.fetchone() cursor.close() return bool(result) except ProgrammingError as err: raise ProgrammingError("Unable to query the database") from err except DatabaseError as err: raise DatabaseError("Unexpected database error") from err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record_exists(self, date):\n for record in self.records:\n if self.date_str == record[\"date\"]:\n return True\n return False", "def is_in_advent() -> bool:\n # Run the code from the 1st to the 24th\n return datetime.now(EST).day in range(1, 25) and datetime.now(EST).month == 12", "def check(self):\n validity_year = int(self.date[0:4])\n validity_month = int(self.date[5:7])\n validity_day = int(self.date[8:10])\n if datetime.today().year > validity_year:\n self.flag = False\n elif datetime.today().year == validity_year:\n if datetime.today().month > validity_month:\n self.flag = False\n elif datetime.today().month == validity_month:\n if datetime.today().day > validity_day:\n self.flag = False\n else:\n self.flag = True\n else:\n self.flag = True\n else:\n self.flag = True", "def test_contains_month_true(self):\n ary = self.ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')[2009]\n self.assertTrue(11 in ary)", "def check_emonth(edate, st_dict):\n \n # if data exists in a year before edate.year....True\n years = [int(y) for y in st_dict['years']] \n if any(y < edate.year for y in years):\n return True \n \n \n # if sdate.year is in years..check sdate.month \n if edate.year in years:\n months = [int(m) for m in st_dict[str(edate.year)]['months']]\n if any(m <= edate.month for m in months):\n return True\n return False", "def check_season_bounds(next_episode, show_details):\n pass", "def presentation_exists(self, presentation):\r\n result = QtSql.QSqlQuery('''SELECT * FROM presentations''')\r\n while result.next():\r\n if (unicode(presentation.title) == unicode(result.value(1).toString())\r\n and unicode(presentation.speaker) == unicode(result.value(2).toString())):\r\n return True\r\n return False", "def valid_visa(visa):\n if dates_difference(visa[\"date\"]) < 730:\n return True\n return False", "def checkExistence(idMeta):\n # XXX add for diaries\n sqlq = \"\"\"\n SELECT dt\n FROM Electricity_10min\n WHERE `Meta_idMeta` = {}\n LIMIT 1;\n \"\"\".format(idMeta)\n try:\n result = mdb.getSQL(sqlq)[0]\n if (result):\n return True\n except:\n return False", "def __nonzero__(self):\n return not (self.year is None and\n self.month is None and\n self.day is None)", "def _valid_day(self, date_find):\n try:\n datetime.strptime(date_find, settings.TIME_FORMAT)\n valid = True\n except ValueError:\n valid = False\n return valid", "def test_contains_year_true(self):\n ary = self.ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n self.assertTrue(2008 in ary)", "def needs_refreshing(filepath):\n today = datetime.date.today()\n year = today.year - 2000 # Obviously does not work prior to 2000\n if today.month <= 6:\n current_season = str(year - 1) + str(year)\n else:\n current_season = str(year) + str(year + 1)\n return (current_season in filepath and\n last_modified_date(filepath) != today)", "def test_query_events_by_first_date(self):\n events = list(query_events_by_first_date(Event.objects.all(), timezone.now()))\n self.assertTrue(self.event_show2 in events)\n self.assertFalse(self.event_show1 in events)", "def equals(self, d2):\n if self.year == d2.year and self.month == d2.month and self.day == d2.day:\n return True\n else:\n return False", "def date_is_valid(year, month, day):\r\n \r\n if (datetime.date(year, month <= 12, day <= 31)):\r\n return True\r\n\r\n else:\r\n return False", "def today(self) -> bool:\n return self._algorithm.can_study_now(self._stat)", "def is_valid(article, date):\n is_in_range = (date > start) and (date < end)\n has_headline = 
(type(article['headline']) == dict) and ('main' in artricle['headline'].keys())\n return is_in_range and has_headline", "def test_valid_for(self):\n july = datetime.date(1983, 7, 3)\n result = {(s.name, s.valid_for(july))\n for s in seasons.southern_meteo.seasons}\n expected = {('spring', False),\n ('summer', False),\n ('autumn', False),\n ('winter', True)\n }\n assert result == expected", "def check_release_exists(self, **kwargs):\n\n # List all available releases for logging and debugging purposes\n # These values are not used to actually check if the release is available\n logging.info(f\"Listing available releases since start date ({self.start_date}):\")\n for dt in pendulum.period(pendulum.instance(self.start_date), pendulum.today(\"UTC\")).range(\"years\"):\n response = requests.get(f\"https://api.crossref.org/snapshots/monthly/{dt.year}\")\n soup = BeautifulSoup(response.text)\n hrefs = soup.find_all(\"a\", href=True)\n for href in hrefs:\n logging.info(href[\"href\"])\n\n # Construct the release for the execution date and check if it exists.\n # The release for a given execution_date is added on the 5th day of the following month.\n # E.g. the 2020-05 release is added to the website on 2020-06-05.\n data_interval_start = kwargs[\"data_interval_start\"]\n exists = check_release_exists(data_interval_start, self.api_key)\n assert (\n exists\n ), f\"check_release_exists: release doesn't exist for month {data_interval_start.year}-{data_interval_start.month}, something is wrong and needs investigating.\"\n\n return True", "def knowledge_date_valid(record):\n today = datetime.now(timezone.utc).date().strftime(\"%Y-%m-%d\")\n gen_date = record['knowledge_date'].strftime(\"%Y-%m-%d\")\n assert gen_date == today", "def check_release_exists(month: pendulum.DateTime, api_key: str) -> bool:\n\n url = make_snapshot_url(month)\n logging.info(f\"Checking if available release exists for {month.year}-{month.month}\")\n\n # Get API key: it is required to check the head now\n response = retry_session().head(url, headers={\"Crossref-Plus-API-Token\": f\"Bearer {api_key}\"})\n if response.status_code == 302:\n logging.info(f\"Snapshot exists at url: {url}, response code: {response.status_code}\")\n return True\n else:\n logging.info(\n f\"Snapshot does not exist at url: {url}, response code: {response.status_code}, \"\n f\"reason: {response.reason}\"\n )\n return False", "def __call__(self, date):\n for game in self._games:\n if game.datetime.year == date.year and \\\n game.datetime.month == date.month and \\\n game.datetime.day == date.day:\n return game\n raise ValueError('No games found for requested date')", "def is_bissextile(today):\n if (today.year % 4 == 0 and today.year % 100 != 0) or (today.year % 400 == 0):\n return True\n return False", "def id_exists(show_id: int,\n database_connection: mysql.connector.connect) -> bool:\n return validate_id(show_id, database_connection)", "def valid_args(args):\n is_valid = True\n\n # valid date format?\n try:\n datetime.datetime(year=args.year, month=args.month, day=args.day)\n except Exception:\n traceback.print_exc()\n is_valid = False\n\n print(f\"Arguments: {args}\")\n return is_valid", "def from_new_banner_api(self):\n return self.semester > \"2022A\"", "def should_show():", "def matches(self: object, filename: str) -> bool:\n # Filename is equal to episode string representation\n if str(self) == filename:\n return True\n\n # Check leading episode number => download marked as special episode manually\n filename_match: Match[str] = 
re.search(r\"(^[0-9]{4} )\", filename)\n if filename_match:\n filename_id: int = int(filename_match.group(1))\n return self.episode_id == filename_id\n\n # Check for download of dailymotion\n filename_match: Match[str] = re.search(r\"(_E([0-9]{3,4})_)\", filename)\n if filename_match:\n filename_id: int = int(filename_match.group(2))\n return self.episode_id == filename_id\n\n # Check episode prefix with number => alredy handled by TaRen\n filename_match: Match[str] = re.search(r\"^(Tatort - ([0-9]{4}) )\", filename)\n if filename_match:\n filename_id: int = int(filename_match.group(2))\n return self.episode_id == filename_id\n\n # Last check => Is episode name part of filename\n return self.episode_name.lower() in filename.lower()", "def is_include(self, day, now):\n if day in self._workdays:\n return True\n if \"holiday\" in self._workdays and now in self._obj_holidays:\n return True\n\n return False" ]
[ "0.60448545", "0.5855372", "0.5646979", "0.56412184", "0.5618332", "0.5570403", "0.55611014", "0.5494139", "0.53984886", "0.53625304", "0.5299045", "0.52949613", "0.5285001", "0.523645", "0.5209728", "0.51797956", "0.51647764", "0.5139528", "0.5135483", "0.51350844", "0.51290864", "0.5121782", "0.51167595", "0.5111064", "0.5100022", "0.50861657", "0.5031077", "0.50248474", "0.5022173", "0.5018705" ]
0.7940867
0
outputs automaton to a file
def output(self, out): res = "# File: " + out + "\n# NFA\n# Q_ - the set of states\n" for q in self.states: res += q + ' ' res = res[0:-1] res += "\n# Sigma_ ­ the alphabet\n" for a in self.alphabet: res += a + ' ' res = res[0:-1] res += '\n# q_0_ ­ the start state\n' + self.q_0 + "\n# F_ ­ the set of accept states\n" for f in self.final: res += f + ' ' res = res[0:-1] res += "\n# delta_ ­ the transition function\n" for x in self.transition: splitted = list(str(x).split(',')) res += splitted[0] + " " + splitted[1] for i in self.transition[x]: res += " " + i res += '\n' f = open(out, 'w') f.write(res) f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_sequence(self):\n\n staves = self.get_sequence()\n\n with open(self.output_file, 'w') as out_file:\n\n print()\n out_file.write('')\n for num, staff in enumerate(staves):\n #out_file.write(('Sequence staff # ' + str(num) + '\\n' + staff + '\\n'))\n out_file.write((staff + '\\n'))\n print('Sequence staff #', num)\n print(staff,'\\n')\n out_file.write('')\n print()\n\n out_file.close()", "def _amber_write_input_file(self):\n logger.debug(\"Writing {}\".format(self.input))\n with open(os.path.join(self.path, self.input), \"w\") as f:\n f.write(\"{}\\n\".format(self.title))\n f.write(\" &cntrl\\n\")\n self._write_dict_to_mdin(f, self.cntrl)\n\n if self.ewald is not None:\n f.write(\" &ewald\\n\")\n self._write_dict_to_mdin(f, self.ewald)\n\n if self.cntrl[\"nmropt\"] == 1:\n if self.wt is not None:\n for line in self.wt:\n f.write(\" \"+line+\"\\n\")\n f.write(\" &wt type = 'END', /\\n\")\n if self.restraint_file is not None:\n f.write(\"DISANG = {}\\n\".format(self.restraint_file))\n f.write(\"LISTOUT = POUT\\n\\n\")\n if self.group is not None:\n f.write(\"{:s}\".format(self.group))", "def write_coord_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n lis = []\n with open(filepath, 'r') as file:\n for line in file:\n if line[:4] == 'ATOM':\n line_split = line.split()\n lis.append(line_split[3:4])\n choice1 = input('Enter name for the output file: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as myfile:\n for i in lis:\n myfile.writelines(i)\n print('Done!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def writeFastaFile(filename,sequences):\n fhw=open(filename,\"w\")\n for id in sequences:\n fhw.write(\">\"+id+\"\\n\"+sequences[id]+\"\\n\")\n fhw.close()", "def write(self, file):\n\n # Initialize output buffer\n out = ''\n\n # Print specification\n for key, value in self.specification.items():\n out += f'{key} : {value}\\n'\n\n # Print the tour\n if self.tour:\n out += 'TOUR_SECTION\\n'\n for s in self.tour:\n out += str(s) + '\\n'\n out += '-1\\n'\n\n # Append EOF\n out += 'EOF\\n'\n\n # Write to file\n with open(file, 'w') as f:\n f.write(out)", "def _write_outfile(A):\n def __remove_symmetry_A(A):\n A_triu = defaultdict(int)\n for (i, j, k), w in list(A.items()):\n if j > i:\n A_triu[(i, j, k)] = w\n return A_triu\n def __write_nodes(outfile):\n outfile += \"*Vertices %d\" % Nn\n for nid, label in enumerate(nodes):\n outfile += '\\n%d \"%s\" 1.0' % (nid + index_from, str(label))\n return outfile\n def __write_intra_edges(outfile):\n outfile += \"\\n*Intra\\n# layer node node [weight]\"\n for (i, j, k), w in 
list(__remove_symmetry_A(A).items()):\n outfile += '\\n%d %d %d %f' % (\n k + index_from, # layer\n nodemap[i] + index_from, # node\n nodemap[j] + index_from, # node\n w # weight\n )\n return outfile\n\n outfile = \"\"\n outfile = __write_nodes(outfile)\n outfile = __write_intra_edges(outfile)\n\n return outfile", "def main():\n in_file_name = \"input.txt\"\n out_file_name = \"output.txt\"\n\n in_file = open(in_file_name, 'r')\n\n out_file = open(out_file_name, 'w')\n out_file.write('IlgizZamaleev' + '\\n')\n\n problems = int(in_file.readline())\n\n for i in range(problems):\n automato = Parser().parse_automata(in_file)\n\n tests = int(in_file.readline())\n\n out_file.write(str(i + 1) + '\\n')\n\n for j in range(tests):\n word = in_file.readline()[:-1]\n out_file.write(automato.check_word(word) + '\\n')\n\n in_file.close()\n out_file.close()", "def _write_outfile(A):\n def __remove_symmetry_A(A):\n A_triu = defaultdict(int)\n for (i, j, k), w in list(A.items()):\n if j > i:\n A_triu[(i, j, k)] = w\n return A_triu\n def __write_nodes(outfile):\n outfile += \"*Vertices %d\" % Nn\n for nid, label in enumerate(nodes):\n outfile += '\\n%d \"%s\" 1.0' % (nid + index_from, labelmap[label])\n return outfile\n def __write_edges(outfile):\n outfile += \"\\n*Intra\\n# layer node node [weight]\"\n sorted_A_sparse = sorted(list(__remove_symmetry_A(A).items()), key=lambda ind__: ind__[0][2])\n for (i, j, k), w in sorted_A_sparse:\n outfile += '\\n%d %d %d %f' % (\n k + index_from, # layer\n nodemap[i] + index_from, # node\n nodemap[j] + index_from, # node\n w # weight\n )\n return outfile\n \n outfile = \"\"\n outfile = __write_nodes(outfile)\n outfile = __write_edges(outfile)\n \n return outfile", "def write_output(self):", "def to_file(self, fn):\n store.store_dict(fn, 'trainalgorithm', self.to_dict())", "def write_to_file(self):\n\t\tfile = open(\"states.txt\", \"w\")\n\t\t\n\t\tpointer = self.head\n\t\twhile pointer != None:\n\t\t\tfile.write(pointer.state + \"\\t\" + pointer.info)\t\n\t\t\tpointer = pointer.next\n\n\t\tfile.close()", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def writetif(self,outputname,):\n pass", "def writeseq(afile, seq):\n print(seq, file=afile)", "def write(self, outputFile):\n \n try: \n f = open(outputFile + '.py', 'w')\n for trail in self.trails: \n f.write(\"[\")\n for index in trail:\n f.write(\"({0}, {1}), \".format(*index)) \n f.write(\"]\\n\")\n \n except IOError, e:\n msg = \"Exception encountered when attempting \" + \\\n \"to write data to file: {0}.\" + \\\n \"\\n\\t -- Exception was: {1}\" + \\\n \"\\n\\t For help use --help\".format(outputFile, e)\n raise Usage(e)", "def main():\r\n\timport sys\r\n\r\n\tlistofSequences = FastAreader(sys.stdin).readFasta() \r\n\tPAMSequences = PAMfinder(listofSequences).classController() # Calls on controller class to return desired models.\r\n\tf = open('Guide Sequences.txt','w') \r\n\tfor i in range(len(PAMSequences[0])):\r\n\t\tf.write(PAMSequences[0][i]) # Prints the header sequence into the file.\r\n\t\tf.write('\\n') \r\n\t\tprint(PAMSequences[0][i]) \r\n\t\tfor j in range(len(PAMSequences[1][i])): \r\n\t\t\tif j == 0: \r\n\t\t\t\tf.write(\"Forward Strand PAM Sites:\") \r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Forward Strand PAM Sites:\") \r\n\t\t\tprint(PAMSequences[1][i][j]) # Prints the forward sequences\r\n\t\t\ty = str(PAMSequences[1][i][j]) # Changes from int to string characters.\r\n\t\t\tx = ''.join(y) # Joining all the string values so 
we can print to file.\r\n\t\t\tf.write(x) # Write the joined forward sequences to the file.\r\n\t\t\tf.write('\\n')\r\n\t\tfor k in range(len(PAMSequences[2][i])): # For reverse sequences, and follows same logic as forward. \r\n\t\t\tif k == 0:\r\n\t\t\t\tf.write(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\tprint(PAMSequences[2][i][k]) # Prints the reverse sequences with the corresponding positions. \r\n\t\t\ta = str(PAMSequences[2][i][k]) # Changes the integer to string characters, allowing for the values to join.\r\n\t\t\tb = ''.join(a)\r\n\t\t\tf.write(b) # Write all of the reverse sequences onto the text file with their positions. \r\n\t\t\tf.write('\\n')\r\n\tf.close() # Close the file.\r", "def write_output(self, output_path, output_filename):\n self.output_file = output_path + '/' + output_filename\n if os.path.isfile(self.output_file + '.txt'): # Creación del archivo txt de salida.\n os.remove(self.output_file + '.txt')\n file = open(self.output_file + '.txt', \"x\")\n\n self.parse_html() # Obtiene los html de entrada.\n file.write(\"############################\\n\")\n file.write(\"# ISAMI VERSION: v11.1.0 #\\n\")\n file.write(\"# INITIATION LUG #\\n\")\n file.write(\"# ISAMI_LUG VERSION: v1.0 #\\n\")\n file.write(\"############################\\n\")\n for id in self.parsed_html_dic: # Escribe la salida en el txt con el nombre del caso y kt correspondiente.\n file.writelines('-----------------------------------\\n')\n header = id + \"\\n\"\n file.writelines(header)\n file.writelines('-----------------------------------\\n')\n tables = self.read_tables(self.parsed_html_dic[id])\n info = tables[0]\n for i in info:\n file.writelines(i + \" = \" + str(info[i]) + \"\\n\")\n kt = self.find_kt(self.parsed_html_dic[id])\n file.writelines(\" Kt = \" + str(kt) + \"\\n\")\n file.close()", "def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()", "def outputToFile(self, file, fileSorted):\r\n output = open(file, 'w')\r\n for i in self.list:\r\n output.write(i.showRule() + \"\\n\")\r\n output.close()\r\n outputSorted = open(fileSorted, 'w')\r\n for i in self.sortedList:\r\n outputSorted.write(i[0].showRule() + \"\\n\")\r\n outputSorted.close()", "def write_output(word_dict):\n # create an empty output.txt file\n output = open('output.txt', 'w')\n\n for i in words_dict: \n output.write(i + \" : \" + str(words_dict[i]) + \"\\n\")", "def write_txt_file(title, abstract, f_out):\n\n print(\n '*' * 40,\n '\\n',\n '[Title] {}'.format(title),\n '\\n',\n '[Abstract] {}'.format(abstract),\n file=f_out\n )", "def write_output():\n f = open(OUTPUT_FILE, 'w')\n for case_index, words in get_output():\n f.write('Case #%d: %s\\n' % (case_index, ' '.join(words)))\n f.close()", "def write(filename):\n print(uc.write(filename))", "def write_SEQRES_fasta():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n choice1 = input('Enter name of the 
outfile: ') \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n print('Sequences successfully written!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')", "def export(self, fname):\n f = open(fname, 'w')\n for ue in self.ue_list:\n line_components = list()\n line_components.append(ue.expression)\n line_components.append(ue.meaning)\n print >>f, '\\t'.join(line_components).encode('utf-8')", "def writeOutputToFile(self, expanded_acronyms, file_path):\n output_file = open(file_path, \"w\")\n if expanded_acronyms:\n for acronym in sorted(expanded_acronyms.keys()):\n output_file.write(\n acronym + \",\" + str(self._getExpansion(expanded_acronyms[acronym])) + \"\\n\")\n else:\n output_file.close(string_error_no_results_to_show)\n output_file.close()", "def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()", "def to_file(self, f: str) -> None:\n with open(f, \"w\") as open_file:\n open_file.write(\"\\n\".join(self.itos) + \"\\n\")", "def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()" ]
[ "0.6571705", "0.63586414", "0.6302915", "0.62937003", "0.6239766", "0.62379503", "0.62351996", "0.6158247", "0.61540806", "0.61510086", "0.61367595", "0.61271656", "0.6108666", "0.6108193", "0.60962945", "0.6069241", "0.6062584", "0.6049", "0.6020087", "0.6017663", "0.600498", "0.6003971", "0.60009056", "0.59635097", "0.59616965", "0.5951354", "0.5940233", "0.59380823", "0.5916561", "0.59091806" ]
0.6979519
0
private function, adds prefix to each state of automaton
def _add_state(self, prefix): for i in range(len(self.states)): self.states[i] = prefix + self.states[i] self.q_0 = prefix + self.q_0 for i in range(len(self.final)): self.final[i] = prefix + self.final[i] keys = list(self.transition.keys()) for key in keys: new_key = prefix + key self.transition[new_key] = [] for i in range(len(self.transition[key])): self.transition[new_key].append(prefix + self.transition[key][i]) del self.transition[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_prefix(prefix = \"Peptides\"):\n var_list = gen_cell_lines_states_replicates()\n prefix = prefix\n res_list = []\n for i in var_list:\n unit_str = prefix + \" \"\n unit_str += i\n res_list.append(unit_str)\n return res_list", "def add_prefix(self, state_dict, prefix):\n print('add prefix \\'{}\\''.format(prefix))\n f = lambda x: x + prefix # 去除带有prefix的名字\n return {f(key): value for key, value in state_dict.items()}", "def _build_prefix(self):\r\n pattern = self.string2\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p", "def _addPrefixes(data):\n prevTags = None\n newData = []\n\n for n, (token, tags) in enumerate(data):\n\n newTags = []\n\n for t in tags:\n p = \"B\" if ((prevTags is None) or (t not in prevTags)) else \"I\"\n newTags.append(\"%s-%s\" % (p, t))\n\n newData.append((token, newTags))\n prevTags = tags\n\n return newData", "def genPrefixAntString(self,estimatedVar,prefix=\"_\"):\n self.prefixAntString = self.antString\n for name in estimatedVar:\n self.prefixAntString = replaceVariable(self.prefixAntString,\n name,prefix+name)", "def build_prefix(self):\r\n pattern = self.pattern\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p", "def prefix(self, prefix, *args):\n new_prefix = '%s%s' % (self.prefixes[-1], prefix % args)\n self.prefixes.append(new_prefix)\n try:\n yield\n finally:\n assert self.prefixes.pop() == new_prefix", "def prefix(self, xform):\n tail = self\n while tail.prev != None:\n tail = tail.prev\n tail.prev = xform", "def prefix(prefix_list):\n def add_attribute(func):\n if not hasattr(func, \"prefix\"):\n func.prefix = []\n func.prefix.append(prefix_list)\n return func\n return add_attribute", "def test_ipam_prefixes_update(self):\n pass", "def prefix_nodes(graph, prefix):\n mapping = {node: f\"{prefix}{node}\" for node in graph.nodes}\n return networkx.relabel_nodes(graph, mapping)", "def transform(self, prefix):\n nfa_transformed = copy.deepcopy(self)\n nfa_transformed._add_state(prefix)\n return nfa_transformed", "def add_prefix(inputs, prefix):\n\n outputs = dict()\n for name, value in inputs.items():\n outputs[f\"{prefix}.{name}\"] = value\n\n return outputs", "def prefix_all(value, LL):\n return [[value] + L for L in LL]", "def prefixer(prefix: str):\n def prefixed(\n node,\n dumps=lambda node: codenode.dumps(node),\n ):\n for line_content in dumps(node).splitlines():\n yield codenode.line(f'{prefix}{line_content}')\n return prefixed", "def apply_prefix(string):\n for short, long in PREFIXES.items():\n if string.startswith(long):\n return short + ':' + string[len(long):]\n return string", "def prefix_replace(original, old, new):\n ...", "def add_prefix_with_treatments(prefix = \"Reporter intensity corrected\", treatments = 10):\n var_list = gen_cell_lines_states_replicates()\n treatments = [i for i in range(treatments)]\n prefix = prefix\n res_list = []\n for i in var_list:\n for j in treatments:\n unit_str = prefix + \" \"\n unit_str += str(j) + \" \"\n unit_str += i\n res_list.append(unit_str)\n return res_list", "def prefixes(s):\n output = ''\n for i in range(len(s) + 1):\n add = s[0:i]\n output += add\n return output", "def test_ipam_prefixes_partial_update(self):\n pass", "def 
pause_review(self, prefix: Nibbles) -> None:\n self._active_prefixes.add(prefix)", "def prefixes(s):\n if s:\n yield from prefixes(s[:-1])\n yield s", "def insert(self, prefix: str):\n leaf = self.root\n for level in range(len(prefix)):\n letter = prefix[level]\n\n # if current character is not present\n if letter not in leaf.children:\n leaf.children[letter] = self.get_node()\n leaf = leaf.children[letter]\n\n # mark last node as leaf\n leaf.word_count += 1", "def gen_states(basenm,sites,is_prefix):\n n=len(sites)\n\n # calculate max \n max=1\n for i in range(len(sites)):\n max=max*len(sites[i])\n\n vec_of_states=[]\n names=[]\n for i in range(max):\n j=i\n a=[]\n divisor=max/len(sites[0])\n for k in range(len(sites)):\n a.append(int(j/divisor))\n j=j%divisor\n try: \n divisor=divisor/len(sites[k+1])\n except IndexError: # on the last trip through the loop\n divisor=divisor/1 # yes I know, but this is formally what I'm doing \n vec_of_states.append(a)\n nm=basenm\n for i in range(n):\n if is_prefix[i]:\n nm=\"%s_%s\" % (sites[i][a[i]],nm) # prepend mUb0_ or whatever - note a[i] is not printed directly but used as a lookup \n else:\n nm=\"%s_%s\" % (nm, sites[i][a[i]]) # append _mUb0 or whatever - note a[i] is not printed directly but used as a lookup \n names.append(nm) \n return vec_of_states, names", "def _add_prefix(self, table, prefix):\n\t\tif prefix == \"\":\n\t\t\treturn\n\t\tfor colname in table.colnames:\n\t\t\tif colname not in [\"VECTOR_ASSOC\", \"VECTOR_ASSOC_1\", \"VECTOR_ASSOC_2\"]:\n\t\t\t\ttable.rename_column(colname, prefix + colname)", "def prefix_lines(prefix, lines):\n assert lines[-1] == \"\\n\"\n return prefix + lines[:-1].replace(\"\\n\", \"\\n\"+prefix) + \"\\n\"", "def prefixer_iter(prefix: str):\n def prefixed(\n node,\n dump_iter=lambda node: codenode.default_writer_type(node).dump_iter()\n ):\n for line_content in yield_lines(dump_iter(node)):\n yield codenode.line(f'{prefix}{line_content}')\n return prefixed", "def append_state_label(symbol):\n\t\tif symbol == \"c\":\n\t\t\tself.state_label = self.state_label.replace(\"o\", \"\")\n\t\tif symbol == \"d\":\n\t\t\tself.state_label = self.state_label.replace(\"k\", \"\")\n\t\telse:\n\t\t\tself.state_label += symbol", "def prefix(self, prefix):\n\n self._prefix = prefix", "def prefix(self, prefix):\n\n self._prefix = prefix" ]
[ "0.7056793", "0.69126916", "0.62347484", "0.62095976", "0.61847854", "0.6168341", "0.61563754", "0.6150717", "0.6126691", "0.60915726", "0.6062005", "0.6011941", "0.59447503", "0.5837729", "0.5836263", "0.5826232", "0.5809354", "0.5775668", "0.57756335", "0.57562876", "0.5732091", "0.5688757", "0.5664163", "0.56505924", "0.56391096", "0.56184316", "0.5613522", "0.5603307", "0.55930996", "0.55930996" ]
0.7660045
0
adds epsilon transitions from new state and from final states to start state
def add_epsilon_transitions(self, state): self.states.append(state) self.transition[state + ', .'] = [self.q_0] for s in self.final: self.transition[s + ', .'] = [self.q_0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeEpsilonTransition(self, currentStates):\n nextStates = self.makeTransition(currentStates, '$', True)\n #if epsilon transition did not occur or it started an infitine loop\n if not nextStates or nextStates == currentStates:\n return currentStates #end recursion\n else:\n return nextStates.union(self.makeEpsilonTransition(nextStates))", "def makeEpsilonTransition(self, currentState):\n nextState = self.makeTransition(currentState, \"$\", True)\n #if the epsilon transition cannot be made\n if nextState is None:\n #return the last state automaton was found in before the transition\n return currentState\n #return the current state if it is an acceptable state\n if nextState in self.acceptableStates:\n return nextState\n #otherwise try to make a new epsilon transition recursively\n return self.makeEpsilonTransition(nextState)", "def final(self, state):\n\n deltaReward = state.getScore() - self.pre_state.getScore()\n self.observeTransition(self.pre_state, self.action, state, deltaReward, True)\n self.stopEpisode()\n\n # Make sure we have this var\n if not 'episodeStartTime' in self.__dict__:\n self.episodeStartTime = time.time()\n if not 'lastWindowAccumRewards' in self.__dict__:\n self.lastWindowAccumRewards = 0.0\n self.lastWindowAccumRewards += state.getScore()\n\n NUM_EPS_UPDATE = 10000\n if self.episodesSoFar % NUM_EPS_UPDATE == 0:\n print('episode: ', self.episodesSoFar)\n print('Saving model...')\n self.model.save(MODEL_PATH)\n print('Reinforcement Learning Status:')\n windowAvg = self.lastWindowAccumRewards / float(NUM_EPS_UPDATE)\n if self.episodesSoFar <= self.numTraining:\n trainAvg = self.accumTrainRewards / float(self.episodesSoFar)\n print('\\tCompleted %d out of %d training episodes' % (self.episodesSoFar, self.numTraining))\n print('\\tAverage Rewards over all training: %.2f' % (trainAvg))\n\n else:\n testAvg = float(self.accumTestRewards) / (self.episodesSoFar - self.numTraining)\n print('\\tCompleted %d test episodes' % (self.episodesSoFar - self.numTraining))\n\n print('\\tAverage Rewards over testing: %.2f' % testAvg)\n\n print('\\tAverage Rewards for last %d episodes: %.2f' % (NUM_EPS_UPDATE, windowAvg))\n\n print('\\tEpisode took %.2f seconds' % (time.time() - self.episodeStartTime))\n\n self.lastWindowAccumRewards = 0.0\n self.episodeStartTime = time.time()\n\n if self.episodesSoFar == self.numTraining:\n msg = 'Training Done (turning off epsilon and alpha)'\n print('%s\\n%s' % (msg, '-' * len(msg)))", "def construct(self, nfa):\n states = set([self.start])\n self.start = dict()\n current = self.start\n \n while True:\n states = nfa.epsilon(states=states)\n transitions = set()\n for state in states:\n if state is nfa.end:\n current.accepting = True\n for transition in state:\n transitions .add(transition.next)\n \n states = transitions\n\n if None:\n brak", "def update_epsilon(self):\n\t\tif self.epsilon > self.epsilon_min:\n\t\t\tself.epsilon *= self.epsilon_decay", "def _step(self, epsilon, state: State) -> State:\n pot = self._potential\n q, p, v, q_grad, energy, logp = state\n\n dt = 0.5 * epsilon\n\n # Half momentum step\n p_new = p + dt * q_grad\n\n # Whole position step\n v_new = pot.velocity(p_new)\n q_new = (q + epsilon * v_new).astype(q.dtype)\n\n # Half momentum step\n logp, q_new_grad = self._logp_dlogp_func(q_new)\n p_new = p_new + dt * q_new_grad\n\n kinetic = pot.velocity_energy(p_new, v_new)\n energy = kinetic - logp\n\n return State(q_new, p_new, v_new, q_new_grad, energy, logp)", "def update_epsilon(self):\n self.epsilon = self.epsilon * 
self.decay", "def final(self, state):\n deltaReward = state.getScore() - self.lastState.getScore()\n self.observeTransition(self.lastState, self.lastAction, state, deltaReward)\n self.stopEpisode()\n\n # Make sure we have this var\n if not 'episodeStartTime' in self.__dict__:\n self.episodeStartTime = time.time()\n if not 'lastWindowAccumRewards' in self.__dict__:\n self.lastWindowAccumRewards = 0.0\n self.lastWindowAccumRewards += state.getScore()\n\n NUM_EPS_UPDATE = 100\n if self.episodesSoFar % NUM_EPS_UPDATE == 0:\n print 'Reinforcement Learning Status:'\n windowAvg = self.lastWindowAccumRewards / float(NUM_EPS_UPDATE)\n if self.episodesSoFar <= self.numTraining:\n trainAvg = self.accumTrainRewards / float(self.episodesSoFar)\n print '\\tCompleted %d out of %d training episodes' % (\n self.episodesSoFar,self.numTraining)\n print '\\tAverage Rewards over all training: %.2f' % (\n trainAvg)\n else:\n testAvg = float(self.accumTestRewards) / (self.episodesSoFar - self.numTraining)\n print '\\tCompleted %d test episodes' % (self.episodesSoFar - self.numTraining)\n print '\\tAverage Rewards over testing: %.2f' % testAvg\n print '\\tAverage Rewards for last %d episodes: %.2f' % (\n NUM_EPS_UPDATE,windowAvg)\n print '\\tEpisode took %.2f seconds' % (time.time() - self.episodeStartTime)\n self.lastWindowAccumRewards = 0.0\n self.episodeStartTime = time.time()\n\n if self.episodesSoFar == self.numTraining:\n msg = 'Training Done (turning off epsilon and alpha)'\n print '%s\\n%s' % (msg,'-' * len(msg))", "def _epsilon_successors_(self, fsm=None):\n if not hasattr(self, 'transitions'):\n raise ValueError('State %s does not belong to a '\n 'finite state machine.' % (self,))\n\n it = _FSMProcessIteratorEpsilon_(fsm, input_tape=[],\n initial_state=self)\n # TODO: optimize the following lines (use already calculated\n # epsilon successors)\n for _ in it:\n pass\n _epsilon_successors_dict_ = it.visited_states\n _epsilon_successors_dict_[self].remove([]) # delete starting state\n if not _epsilon_successors_dict_[self]:\n del _epsilon_successors_dict_[self]\n for s, outputs in _epsilon_successors_dict_.iteritems():\n _epsilon_successors_dict_[s] = [t for t, _ in\n itertools.groupby(sorted(outputs))]\n return _epsilon_successors_dict_", "def act(self, state, eps=0.):", "def after_move(self):\n self.epsilon -= self.epsilon_decay_rate\n self.epsilon = max(self.epsilon, self.epsilon_min)", "def determinisation(self):\n if any(len(t.word_in) > 1 for t in self.iter_transitions()):\n return self.split_transitions().determinisation()\n\n epsilon_successors = {}\n direct_epsilon_successors = {}\n for state in self.iter_states():\n direct_epsilon_successors[state] = set(\n t.to_state\n for t in self.iter_transitions(state)\n if not t.word_in)\n epsilon_successors[state] = set([state])\n\n old_count_epsilon_successors = 0\n count_epsilon_successors = len(epsilon_successors)\n\n while old_count_epsilon_successors < count_epsilon_successors:\n old_count_epsilon_successors = count_epsilon_successors\n count_epsilon_successors = 0\n for state in self.iter_states():\n for direct_successor in direct_epsilon_successors[state]:\n epsilon_successors[state] = epsilon_successors[state].union(epsilon_successors[direct_successor])\n count_epsilon_successors += len(epsilon_successors[state])\n\n def set_transition(states, letter):\n result = set()\n for state in states:\n for transition in self.iter_transitions(state):\n if transition.word_in == [letter]:\n result.add(transition.to_state)\n result = 
result.union(*(epsilon_successors[s] for s in result))\n return (frozenset(result), [])\n\n result = self.empty_copy()\n new_initial_states = [frozenset(set().union(\n *(epsilon_successors[s]\n for s in self.iter_initial_states()\n )))]\n result.add_from_transition_function(set_transition,\n initial_states=new_initial_states)\n\n for state in result.iter_states():\n state.is_final = any(s.is_final for s in state.label())\n if all(s.color is None for s in state.label()):\n state.color = None\n else:\n state.color = frozenset(s.color for s in state.label())\n\n return result", "def train(self, episodes, epsilon_initial, epsilon_min, epsilon_stop_episode,\n network_update_freq, gamma, memory_capacity, batch_size):\n\n memory = ReplayMemory(memory_capacity)\n\n tot_steps = 0\n running_loss = 0\n\n depsilon = (epsilon_initial-epsilon_min)/epsilon_stop_episode\n\n for episode in range(episodes):\n\n if epsilon_initial > epsilon_min:\n epsilon_initial -= depsilon\n\n if episode % network_update_freq == 0:\n # Update target network\n self.NN_target.load_state_dict(self.NN.state_dict())\n\n if (episode + 1) % 10 == 0:\n print(f'Episode {episode + 1}/{episodes} completed!')\n print(f'Average steps per episode: {tot_steps / 10}')\n writer.add_scalar('training loss', running_loss / tot_steps, episode)\n self.plotValue()\n tot_steps = 0\n running_loss = 0\n\n state, done = self.env.reset()\n\n\n while not done:\n tot_steps += 1\n\n action = self.chooseAction(epsilon_initial, state)\n\n reward, next_state, done= self.env.transitionState(state, action)\n\n #score += reward\n reward = torch.tensor([[reward]], device=device)\n done = torch.tensor([[done]], device=device)\n\n # Saves the transition\n memory.push(self.RBF[state], self.RBF[next_state], reward, done)\n\n # Perform one step of batch gradient descent\n running_loss += self.optimizeModel(memory, batch_size, gamma)\n\n state = next_state\n\n writer.close()", "def epsilon_successors(self, state):\n return self.state(state)._epsilon_successors_(self)", "def kleene_star(self):\n from copy import deepcopy\n result = deepcopy(self)\n for initial in result.iter_initial_states():\n for final in result.iter_final_states():\n result.add_transition(final, initial, [], final.final_word_out)\n\n for initial in result.iter_initial_states():\n initial.is_final = True\n\n return result", "def reset_for_new_run(\n self,\n state: State\n ):\n\n super().reset_for_new_run(state)\n\n self.epsilon = self.original_epsilon\n self.greedy_action = list(self.Q.keys())[0]", "def reset(self):\n self.epsilon = self.start", "def to_epsilon_nfa(self):\n self._initialize_enfa()\n s_initial = self._set_and_get_initial_state_in_enfa()\n s_final = self._set_and_get_final_state_in_enfa()\n self._process_to_enfa(s_initial, s_final)\n return self._enfa", "def astar(model, start, end):\n\n # Create start and end node\n start_node = Node(None,0.0, start)\n start_node.g = 0\n start_node.h = calculate_h(start_node,end)\n start_node.f = start_node.g + start_node.h \n \n end_node = Node(None,0.0, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n\n\n iterations = 0\n # Loop until you find the end\n while len(open_list) > 0:\n iterations += 1\n # print(iterations)\n print('openlist={},closed_list={}'.format(len(open_list),len(closed_list)))\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in 
enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n print('currentstate={}, parentaction={}, f={}'.format(current_node.state, current_node.parent_action, current_node.f))\n\n # run_sim(current_node.parent_action)\n\n # Found the goal\n if current_node == end_node: \n actions = []\t\n current = current_node\n while current is not None:\n actions.append(current.parent_action)\n current = current.parent_state\n # print(current.state)\n return actions[::-1] # Return reversed path\n\n # Generate children states\n children = []\n\n for action in discretized_action: # all possible actions\n\n # Get new state node (closest to discretized state space)\n \n # new_state = get_next_state_from_model(model,current_node,action)\n new_state = get_next_state_from_eqn(current_node,action)\n # new_state = run_sim(action)\n # print(new_state)\n\n # # Make sure within range\n # if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n # continue\n\n # # Make sure walkable terrain\n # if maze[node_position[0]][node_position[1]] != 0:\n # continue\n\n #############Make sure to wrap theta \n \n # Create new node\n new_node = Node(current_node, action, new_state)\n\n # Append\n children.append(new_node)\n\n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\t\n continue\n\n # Create the f, g, and h values\n child.g = calculate_g(child,current_node) #current_node.g + 1\n child.h = calculate_h(child,end_node)\n child.f = child.g + child.h\n \n # \tprint(child.state, child.parent_action, child.f) \n \n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def epsilon_delta(self):", "def makeTransition(self, currentStates, character, isEpsilon = False):\n\n nextStates = set()\n for state in currentStates:\n currentConfiguration = state + \",\" + character\n if currentConfiguration in self.transitions:\n newStates = self.transitions.get(currentConfiguration)\n nextStates = nextStates.union(newStates)\n self.writer.writeTransition(state, character, newStates, isEpsilon)\n\n nextStates.discard('#') #hashtas will be added later if needed\n\n return nextStates", "def reset(self):\n self.epsilon = self.epsilon_start", "def __init__(self, start_epsilon, end_epsilon, decay_steps, decay_schedule='lin'):\n self.epsilon_updater = common.ParameterUpdater(\n start_epsilon, end_epsilon, decay_steps, decay_schedule)", "def act(self, state_and_prev_recurrent, eps=0.):\n state_and_prev_recurrent = torch.from_numpy(state_and_prev_recurrent).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state_and_prev_recurrent)[:, :4]\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))", "def test_remove_initial(self):\n nfa = NondeterministicFiniteAutomaton()\n state0 = State(0)\n state1 = State(1)\n symb_a = Symbol(\"a\")\n nfa.add_transition(state0, symb_a, state1)\n nfa.add_start_state(state0)\n nfa.add_final_state(state1)\n 
self.assertTrue(nfa.is_deterministic())\n self.assertTrue(nfa.accepts([symb_a]))\n self.assertEqual(nfa.remove_start_state(state1), 0)\n self.assertTrue(nfa.accepts([symb_a]))\n self.assertEqual(nfa.remove_start_state(state0), 1)\n self.assertFalse(nfa.accepts([symb_a]))", "def update_epsilon(self, epsilon):\n self.weights = self.get_alternate_weights(epsilon)\n self.epsilon = epsilon\n self.ess = effective_sample_size(self.weights)", "def NFAtoDFA(self):\n q0 = self.NFA.FindEpsilonClosure(self.NFA.GetStartState().pop())\n Q = [q0,]\n WorkList = [q0,]\n SetCounter = 0\n WorkListIndex = [SetCounter,]\n\n self.TransitionMap = dict()\n self.StartStates = [0,]\n self.AcceptStates = list()\n\n while len(WorkList) > 0:\n\n q = WorkList.pop()\n idx = WorkListIndex.pop()\n\n for state in q:\n if state in self.NFA.GetAcceptState():\n self.AcceptStates.append(idx)\n break\n\n for char in rule.ForAllChar():\n # t <- e-closure(Delta(q, c))\n t = self.NFA.FindTransitionList(q, char)\n if len(t) == 0:\n continue\n t = self.NFA.FindEpsilonClosureList(t)\n\n # if t not in Q then\n # add t to Q and WorkList\n if t not in Q:\n SetCounter += 1\n Q.append(t)\n WorkList.append(t)\n WorkListIndex.append(SetCounter)\n \n # T[q, c] <- t\n key = str(idx) + '_' + char\n self.TransitionMap[key] = Q.index(t)\n \n self.NumStates = SetCounter\n \n # for key in self.TransitionMap.keys():\n # print key, '=>',\n # print self.TransitionMap[key]\n # print 'Accept =', self.AcceptStates", "def train_episode(self):\n state = self.env.reset()\n states = []\n actions = []\n rewards = []\n for _ in range(self.options.steps):\n probs = self.actor_baseline.predict([[state]])[0][0]\n action = np.random.choice(len(probs), p=probs)\n\n next_state, reward, done, _ = self.step(action)\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n\n if done:\n break\n\n # Compute and store returns in G\n G = np.zeros_like(rewards)\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n\n # One-hot encoding for actions\n actions_one_hot = np.zeros([len(actions), self.env.action_space.n])\n actions_one_hot[np.arange(len(actions)), actions] = 1\n\n # Compute one-hot encoded deltas\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n deltas = [[0]]\n\n # Update actor and state estimator\n self.actor_baseline.fit(x=[np.array(states)],\n y={'actor_output': deltas, 'baseline_output': returns},\n epochs=1, batch_size=self.options.batch_size, verbose=0)", "def update_Q_expsarsa(alpha, gamma, nA, eps, Q, state, action, reward, next_state=None):\n current = Q[state][action] # estimate in Q-table (for current state, action pair)\n policy_s = np.ones(nA) * eps / nA # current policy (for next state S')\n policy_s[np.argmax(Q[next_state])] = 1 - eps + (eps / nA) # greedy action\n Qsa_next = np.dot(Q[next_state], policy_s) # get value of state at next time step\n target = reward + (gamma * Qsa_next) # construct target\n new_value = current + (alpha * (target - current)) # get updated value \n return new_value", "def Q_learning_train(env,alpha,gamma,epsilon,episodes):\n %time\n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 
0\n done = False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n \n \n print(\"Training finished.\\n\")\n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened training reward per episode\", pad = 30, size = BIGGER_SIZE)\n plt.legend()\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major');\n plt.tick_params(axis='both', which='minor');\n #plt.xlim(0, 60000);\n #plt.ylim(0,50)\n #plt.xticks(np.arange(0, episodes+1, 5000));\n #plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));" ]
[ "0.68393135", "0.65938413", "0.64521855", "0.6450548", "0.63267624", "0.6233461", "0.62020105", "0.61552405", "0.60778964", "0.6039172", "0.600596", "0.5991411", "0.59820724", "0.597354", "0.5958634", "0.5930484", "0.592039", "0.5908626", "0.5803106", "0.5798349", "0.5791438", "0.5789217", "0.5720353", "0.568099", "0.5668905", "0.5665546", "0.5653831", "0.56071687", "0.5591098", "0.5585354" ]
0.8106117
0
concatenation of two top automatons
def concat(self):
    nfa2 = self.aut_stack.pop()
    nfa1 = self.aut_stack.pop()

    nfa1_star = nfa1.transform('X')
    nfa2_star = nfa2.transform('Y')

    nfa_concat = Automaton()
    nfa_concat.final = nfa2_star.final
    nfa_concat.q_0 = nfa1_star.q_0
    nfa_concat.states = list(set(nfa1_star.states).union(nfa2_star.states))
    nfa_concat.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))
    nfa_concat.transition = dict(nfa1_star.transition, **nfa2_star.transition)
    for a in nfa1_star.final:
        key = a + ', .'
        if nfa_concat.transition.get(key, 0) == 0:
            nfa_concat.transition[key] = [nfa2_star.q_0]
        else:
            nfa_concat.transition[key].append(nfa2_star.q_0)

    self.aut_stack.append(nfa_concat)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_two_calls(self) -> None:", "def union(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_union = Automaton()\n nfa_union.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_union.states.append('S')\n nfa_union.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_union.final = list(set(nfa1_star.final).union(nfa2_star.final))\n nfa_union.change_start_state('S')\n nfa_union.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n nfa_union.transition['S, .'] = [nfa1_star.q_0, nfa2_star.q_0]\n\n self.aut_stack.append(nfa_union)", "def autostop():", "def combine(cls, first: 'Output', second: 'Output') -> 'Output':\n return cls(\n first.output,\n second.target,\n second.input,\n second.params or first.params,\n first.delay + second.delay,\n times=first.times if second.times < 0\n else second.times if first.times < 0\n else min(first.times, second.times),\n inst_out=first.inst_out,\n inst_in=second.inst_in,\n comma_sep=first.comma_sep and second.comma_sep,\n )", "def MergeLogic(self) -> str:", "def merge(): #Status: WIP\r\n pass", "def top():\n print (\"\")\n double_line()\n print (\"Starting sp_controller...\")", "def test_BuildModel2(self):\n print(\"\\nTest 6: Building a Model with Concat\")\n builder = StaticBuilder(\"Concat\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3, num_islots=2)\n out1 = builder.addOutput()\n\n builder.addDirectedLink(in1, enc1, islot=0)\n builder.addDirectedLink(in2, enc1, islot=1)\n builder.addDirectedLink(enc1, out1)\n \n builder.build()", "def concat_obs_and_action(obs, action):\n return F.concat((obs, action), axis=-1)", "def concat_obs_and_action(obs, action):\n return F.concat((obs, action), axis=-1)", "def top(self):", "def test_alternate_orderings(self):\r\n t1 = self.task_xml1\r\n t2 = self.task_xml2\r\n xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]]\r\n for xml in xml_to_test:\r\n definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(self.rubric), 'task_xml': xml}\r\n descriptor = Mock(data=definition)\r\n combinedoe = CombinedOpenEndedV1Module(self.test_system,\r\n self.location,\r\n definition,\r\n descriptor,\r\n static_data=self.static_data,\r\n metadata=self.metadata,\r\n instance_state=self.static_data)\r\n\r\n changed = combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n combinedoe = CombinedOpenEndedV1Module(self.test_system,\r\n self.location,\r\n definition,\r\n descriptor,\r\n static_data=self.static_data,\r\n metadata=self.metadata,\r\n instance_state={'task_states': TEST_STATE_SA})\r\n\r\n combinedoe = CombinedOpenEndedV1Module(self.test_system,\r\n self.location,\r\n definition,\r\n descriptor,\r\n static_data=self.static_data,\r\n metadata=self.metadata,\r\n instance_state={'task_states': TEST_STATE_SA_IN})", "def mergenotes():", "def __concatenateB0(self, source1, source2, target):\n cmd = \"mrcat {} {} {} -axis 3 -nthreads {} -quiet\".format(source1, source2, target, self.getNTreadsMrtrix())\n self.launchCommand(cmd)\n return target", "def anchor():\n return 'concat'", "def top(self, body_output, targets):\n raise NotImplementedError(\"Abstract Method\")", "def prepost_hook_two(self) -> None:\n self.poutput(\"two\")", "def finalize_top(self, top):\n if not self.use_pr:\n return top\n\n # Expose AXI interface to the top level, and then wrap the entire\n # user design.\n 
top.add_port('axil_clk', 'axil_clk', parent_sig=False, dir='in')\n top.add_port('axil_rst_n', 'axil_clk', parent_sig=False, dir='in')\n top.add_port('M_AXI_araddr', 'M_AXI_araddr', width=32, parent_sig=False, dir='in')\n top.add_port('M_AXI_arready', 'M_AXI_arready', parent_sig=False, dir='out')\n top.add_port('M_AXI_arvalid', 'M_AXI_arvalid', parent_sig=False, dir='in')\n top.add_port('M_AXI_awaddr', 'M_AXI_awaddr', width=32, parent_sig=False, dir='in')\n top.add_port('M_AXI_awready', 'M_AXI_awready', parent_sig=False, dir='out')\n top.add_port('M_AXI_awvalid', 'M_AXI_awvalid', parent_sig=False, dir='in')\n top.add_port('M_AXI_bready', 'M_AXI_bready', parent_sig=False, dir='in')\n top.add_port('M_AXI_bresp', 'M_AXI_bresp', width=2, parent_sig=False, dir='out')\n top.add_port('M_AXI_bvalid', 'M_AXI_bvalid', parent_sig=False, dir='out')\n top.add_port('M_AXI_rdata', 'M_AXI_rdata', width=32, parent_sig=False, dir='out')\n top.add_port('M_AXI_rready', 'M_AXI_rready', parent_sig=False, dir='in')\n top.add_port('M_AXI_rresp', 'M_AXI_rresp', width=2, parent_sig=False, dir='out')\n top.add_port('M_AXI_rvalid', 'M_AXI_rvalid', parent_sig=False, dir='out')\n top.add_port('M_AXI_wdata', 'M_AXI_wdata', width=32, parent_sig=False, dir='in')\n top.add_port('M_AXI_wready', 'M_AXI_wready', parent_sig=False, dir='out')\n top.add_port('M_AXI_wstrb', 'M_AXI_wstrb', width=4, parent_sig=False, dir='in')\n top.add_port('M_AXI_wvalid', 'M_AXI_wvalid', parent_sig=False, dir='in')\n if self.enable_wishbone:\n top.add_port('wbm_cyc_o', 'wbm_cyc_o', parent_sig=False, dir='in')\n top.add_port('wbm_stb_o', 'wbm_stb_o', parent_sig=False, dir='in')\n top.add_port('wbm_we_o ', 'wbm_we_o ', parent_sig=False, dir='in')\n top.add_port('wbm_sel_o', 'wbm_sel_o', parent_sig=False, dir='in', width=4)\n top.add_port('wbm_adr_o', 'wbm_adr_o', parent_sig=False, dir='in', width=32)\n top.add_port('wbm_dat_o', 'wbm_dat_o', parent_sig=False, dir='in', width=32)\n top.add_port('wbm_dat_i', 'wbm_dat_i', parent_sig=False, dir='out', width=32)\n top.add_port('wbm_ack_i', 'wbm_ack_i', parent_sig=False, dir='out')\n top.add_port('wb_clk_i', 'wb_clk_i', parent_sig=False, dir='in')\n top.add_port('wb_rst_i', 'wb_rst_i', parent_sig=False, dir='in')\n top.instantiate_child_ports()\n # With PR, we're not going to be using this module as top. Instead, let's\n # rename is `user_top` which will be instantiated within a high-level static top-level.\n # The assumpion is that the static top-level is already routed and included in a project and need\n # not be generated here.\n top.name = 'user_top'\n return top", "def test5():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n exp1.pingAllTest() # **************** Parece que es necesario que se de un arranque al controlador\n # **************** para que aprenda las reglas antes del ataque.\n\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n #exp1.trafico.pingMeasure(filename='ping_ataque_test.log')\n \"\"\" 6. 
Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def task2(self):\n\n pass", "def concatenate(self, other: \"CFG\") -> \"CFG\":\n start_temp = Variable(\"#STARTCONC#\")\n temp_0 = Terminal(\"#0CONC#\")\n temp_1 = Terminal(\"#1CONC#\")\n production0 = Production(start_temp, [temp_0, temp_1])\n cfg_temp = CFG({start_temp},\n {temp_0, temp_1},\n start_temp,\n {production0})\n return cfg_temp.substitute({temp_0: self,\n temp_1: other})", "def concatenate_data():", "def test3():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n exp1.trafico.pingMeasure(filename='ensayo_ping.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test6():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_ataque_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_2(self):\r\n r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)\r\n o = MyOp.make_node(r1, r1)\r\n r2b = o.outputs[0]\r\n o2 = MyOp.make_node(r2b, r2b)\r\n all = io_toposort([r2b], o2.outputs)\r\n assert all == [o2]\r\n\r\n o2 = MyOp.make_node(r2b, r5)\r\n all = io_toposort([r2b], o2.outputs)\r\n assert all == [o2]", "def tidyup():\n global cola1\n cola1.put(0)", "def tidyup():\n global cola1\n cola1.put(0)", "def transact(self):", "def transact(self):", "def mergeWith(self, others):" ]
[ "0.6010168", "0.5590357", "0.53088385", "0.51978385", "0.51666653", "0.51333827", "0.5098099", "0.50811666", "0.5070868", "0.5070868", "0.5062796", "0.50482255", "0.50468844", "0.50343424", "0.50241405", "0.5002939", "0.50017685", "0.49519634", "0.49431038", "0.49408138", "0.49241933", "0.48761916", "0.4873055", "0.48537406", "0.4826652", "0.48246813", "0.48246813", "0.48195866", "0.48195866", "0.4790942" ]
0.5941984
1
union of two top automatons in the stack
def union(self):
    nfa2 = self.aut_stack.pop()
    nfa1 = self.aut_stack.pop()

    nfa1_star = nfa1.transform('X')
    nfa2_star = nfa2.transform('Y')

    nfa_union = Automaton()
    nfa_union.states = list(set(nfa1_star.states).union(nfa2_star.states))
    nfa_union.states.append('S')
    nfa_union.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))
    nfa_union.final = list(set(nfa1_star.final).union(nfa2_star.final))
    nfa_union.change_start_state('S')
    nfa_union.transition = dict(nfa1_star.transition, **nfa2_star.transition)
    nfa_union.transition['S, .'] = [nfa1_star.q_0, nfa2_star.q_0]

    self.aut_stack.append(nfa_union)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def union(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set(lhs | rhs)", "def union(self, *args):\n return self.phy2abs.union(*args)", "def union(first, second):\n # Put your code here.", "def union(self, *others):\r\n return self.r.sunion(self.r_key, *[o.r_key for o in others])", "def union(set1, set2):", "def headsofunion(h1, h2):\n res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)\n return {ctx.node() for ctx in res}", "def union(self, p, q):\n pass", "def union(p1: Iterator[Posting], p2: Iterator[Posting]) -> Iterator[Posting]:\n raise NotImplementedError(\"You need to implement this as part of the assignment.\")", "def union(self,x,y):\n assert x in self and y in self\n rx,ry = self.find(x),self.find(y)\n if rx!=ry:\n nx,ny = self.__rank[rx],self.__rank[ry]\n if nx<=ny:\n self.__parent[rx] = ry\n self.__size[ry] += self.__size[rx]\n if nx==ny: self.__rank[ry]+=1\n else:\n self.__parent[ry] = rx\n self.__size[rx] += self.__size[ry]", "def union(self, other: \"CFG\") -> \"CFG\":\n start_temp = Variable(\"#STARTUNION#\")\n temp_0 = Terminal(\"#0UNION#\")\n temp_1 = Terminal(\"#1UNION#\")\n production_0 = Production(start_temp, [temp_0])\n production_1 = Production(start_temp, [temp_1])\n cfg_temp = CFG({start_temp},\n {temp_0, temp_1},\n start_temp,\n {production_0, production_1})\n return cfg_temp.substitute({temp_0: self,\n temp_1: other})", "def make_union(self, *args, **kwargs): # real signature unknown\n pass", "def _union(\n lows: np.array, ups: np.array, new_lows: np.array, new_ups: np.array\n) -> Union[np.array, np.array]:\n out_lows = []\n out_ups = []\n\n for i in range(0, lows.shape[1]):\n low, up = _union_one_dim( # pylint:disable=invalid-name\n lows[:, i], ups[:, i], new_lows[:, i], new_ups[:, i]\n )\n out_lows.append(low.reshape(-1, 1))\n out_ups.append(up.reshape(-1, 1))\n\n out_lows_array, out_ups_array = np.hstack(out_lows), np.hstack(out_ups)\n\n return out_lows_array, out_ups_array", "def union(self, other):\n if isinstance(other, list):\n self.substrates = unary_union([self.substrates] + other)\n elif isinstance(other, Substrate):\n self.substrates = unary_union([self.substrates, other.substrates])\n else:\n self.substrates = unary_union([self.substrates, other])\n self.oriented = False", "def union(self, other, inplace=True):\n if self.target != other.target:\n raise ValueError('target mismatch (%s != %s)' % (\n self.target, other.target))\n\n union = self if inplace else copy.deepcopy(self)\n\n sections = [name for name in self.sections if name != 'target']\n for name in sections:\n mine = getattr(union, name)\n yours = getattr(other, name)\n setattr(union, name, mine | yours)\n\n return union", "def _control_union(self, entities_1: List[str], entities_2: List[str]):\n return list(set(entities_1).union(set(entities_2)))", "def union(one, other):\n left = min(one.left, other.left)\n right = max(one.right, other.right)\n top = min(one.top, other.top)\n bottom = max(one.bottom, other.bottom)\n return BBox([[left, top], [right, bottom]])", "def union(self,i,j):\n ii, jj = self.find(i), self.find(j)\n if ii != jj:\n\n if self.rank[ii] > self.rank[jj]: \n ii,jj = jj,ii\n\n if self.rank[ii] == self.rank[jj]: \n self.rank[jj]+=1\n\n self.size[jj] += self.size[ii]\n self.par[ii] = jj\n\n self.cMax = max(self.size[jj],self.cMax)\n self.nComp -=1\n\n del self.size[ii]", "def union(self, a, b):\n if (a in self.node_id) and (b in self.node_id) and (self.node_id[a] != self.node_id[b]):\n 
self.merge(a, b)\n elif (a in self.node_id) or (b in self.node_id):\n self.add(a,b)\n else:\n self.create_new_group(a,b)", "def union(self, other):\n self.find_set()._link(other.find_set())", "def union2(s, t):\n if empty(s):\n return t\n elif empty(t):\n return s\n else:\n e1, e2 = s.first, t.first\n if e1 == e2:\n return Link(e1, union2(s.rest, t.rest))\n elif e1 < e2:\n return Link(e1, union2(s.rest, t))\n elif e2 < e1:\n return Link(e2, union2(s, t.rest))", "def union(self, piDD2):\r\n #For the two piDDs if the self has a top node (x1, y1) and piDD has the form (x2, y2) then x1>=x2 and y1>=y2\r\n top_node = piDD2.top_node\r\n self_top_node = self.top_node\r\n self.piDD.update(piDD2.piDD)\r\n self.top_node = self.union_helper(self_top_node, top_node)\r\n if self.piDD[self.top_node] == None:\r\n self.dim = 0\r\n else:\r\n self.dim = self.piDD[self.top_node][0][0]\r\n self.run_clean_up()", "def union(s1, s2):\n \"*** YOUR CODE HERE ***\"\n s = set()\n for member in s1:\n s.add(member)\n for member in s2:\n s.add(member)\n return s", "def union(self, other):\n return PermClass([S_1 + S_2 for S_1, S_2 in zip(self, other)])", "def upset(self,\n _sortkey=operator.attrgetter('index'),\n _next_concepts=operator.attrgetter('upper_neighbors')):\n return algorithms.iterunion([self], _sortkey, _next_concepts)", "def union(A,B):\n set_A = A\n set_B = B\n sorted_union = []\n for elements in set_A:\n if elements not in sorted_union:\n sorted_union.append(elements)\n for elements in set_B:\n if elements not in sorted_union:\n sorted_union.append(elements)\n return sorted_union", "def union(A, B, *C):\n return setutils(\"union\", A, B, *C)", "def _union(cls, s1, s2):\n return s1.union(s2)", "def union(self, *lists):\n if self.is_a(set):\n return _(self._.union(*lists))\n return _(_union(self._, *lists))", "def concat(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_concat = Automaton()\n nfa_concat.final = nfa2_star.final\n nfa_concat.q_0 = nfa1_star.q_0\n nfa_concat.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_concat.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_concat.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n for a in nfa1_star.final:\n key = a + ', .'\n if nfa_concat.transition.get(key, 0) == 0:\n nfa_concat.transition[key] = [nfa2_star.q_0]\n else:\n nfa_concat.transition[key].append(nfa2_star.q_0)\n\n self.aut_stack.append(nfa_concat)", "def union(a, b):\r\n return list(set(a) | set(b))" ]
[ "0.6618299", "0.646862", "0.6269154", "0.6163646", "0.61239934", "0.6003781", "0.5924754", "0.58439076", "0.58394533", "0.5780357", "0.5768155", "0.5723707", "0.5701208", "0.5686139", "0.56796765", "0.5636822", "0.56243503", "0.56036437", "0.5600259", "0.5587218", "0.5572553", "0.55295587", "0.5520405", "0.54845244", "0.54714185", "0.5458127", "0.5436236", "0.54265964", "0.5424214", "0.54236805" ]
0.73748773
0
converts stack to nfa
def stack2nfa(stack):
    for op in stack.operations:
        if op == '=push':
            stack.push()
        if op == '=star':
            stack.star()
        if op == '=concat':
            stack.concat()
        if op == '=union':
            stack.union()
        if op == '=print':
            return stack.print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_nfa_from_postfix(regex: str):\n\n nfa_stack = []\n\n for char in regex:\n if char == '.':\n # to concat two nfas, add an epsilon arrow from every accepting state\n # of the first to the start state of the second and turn all accepting states\n # of the first into non accepting states\n\n if len(nfa_stack) < 2:\n raise InvalidRegexException()\n\n nfa2 = nfa_stack.pop()\n nfa1 = nfa_stack.pop()\n\n if nfa2.is_one_character_nfa:\n nfa2_matched_character, nfa2_accept_state = nfa2.start_state.transitions[0]\n for accept_state in nfa1.accept_states:\n accept_state.add_transition(nfa2_matched_character, nfa2_accept_state)\n accept_state.is_accepting = False\n\n else:\n for accept_state in nfa1.accept_states:\n accept_state.add_transition('eps', nfa2.start_state)\n accept_state.is_accepting = False\n\n\n nfa1.accept_states = nfa2.accept_states\n nfa1.is_one_character_nfa = False\n nfa_stack.append(nfa1)\n\n # for garbage collection\n nfa2.start_state = None\n nfa2.accept_states = None\n elif char == '*':\n # to apply a kleene star to an nfa, add a new start state, which is also an accept state,\n # to the nfa with an epsilon arrow going into the original start state.\n # add epsilon arrows from every accept state to the original start state\n\n if len(nfa_stack) < 1:\n raise InvalidRegexException()\n\n nfa = nfa_stack.pop()\n new_start_state = State([('eps', nfa.start_state)], True)\n for accept_state in nfa.accept_states:\n accept_state.add_transition('eps', nfa.start_state)\n\n nfa.accept_states.append(new_start_state)\n nfa.start_state = new_start_state\n nfa.is_one_character_nfa = False\n nfa_stack.append(nfa)\n\n elif char == '+':\n # TODO try this out on paper\n # we add epsilon arrows from every accept state to the start state\n\n if len(nfa_stack) < 1:\n raise InvalidRegexException()\n\n nfa = nfa_stack.pop()\n for accept_state in nfa.accept_states:\n accept_state.add_transition('eps', nfa.start_state)\n\n nfa.is_one_character_nfa = False\n nfa_stack.append(nfa)\n elif char == '|':\n # we apply the union operation by adding a new non accepting start state with\n # epsilon arrows going into the start state of each operand nfa\n\n if len(nfa_stack) < 2:\n raise InvalidRegexException()\n\n nfa2 = nfa_stack.pop()\n nfa1 = nfa_stack.pop()\n\n new_start_state = State([('eps', nfa1.start_state), ('eps', nfa2.start_state)], False)\n\n nfa1.start_state = new_start_state\n nfa1.accept_states.extend(nfa2.accept_states)\n nfa1.is_one_character_nfa = False\n nfa_stack.append(nfa1)\n\n # for garbage collection\n nfa2.start_state = None\n nfa2.accept_states = None\n else:\n # character from the alphabet\n accept_state = State([], True)\n start_state = State([(char, accept_state)], False)\n nfa_stack.append(NFA(start_state, [accept_state], True))\n\n if len(nfa_stack) != 1:\n raise InvalidRegexException()\n\n return nfa_stack[0]", "def reverse(stack):\n\n result = create_stack()\n while (stack.length() > 0):\n result.push(stack.pop())\n\n return result", "def as_nfa(self) -> NFA:\n nfa = NFA()\n prev_node = nfa.add_node()\n next_node = prev_node\n nfa.start_nodes.add(prev_node)\n for item in self.sequence:\n next_node = nfa.add_node()\n nfa.add_transition(prev_node, next_node, item)\n prev_node = next_node\n nfa.accept_nodes.add(next_node)\n return nfa", "def convert_stack(g, op, blcok):\n\n x = op.input(\"X\")\n all_inputs = []\n for inp in x:\n all_inputs.append(g.get_node(inp))\n axis = op.attr(\"axis\")\n out = _op.stack(all_inputs, axis)\n g.add_node(op.output(\"Y\")[0], out)", "def 
fn(node):\n ans, stack = [], []\n while stack or node: \n if node: \n stack.append(node)\n node = node.left\n else: \n node = stack.pop()\n ans.append(node.val)\n node = node.right \n return ans", "def fn(i, n):\n if not (n <= len(s)-i <= 3*n): return \n if i == len(s): return ans.append(\".\".join(stack))\n k = i+1 if s[i] == \"0\" else i+3\n for j in range(i+1, min(k, len(s))+1): \n if j == i+3 and s[i:j] > \"255\": continue\n stack.append(s[i:j])\n fn(j, n-1)\n stack.pop()", "def anagram_stack(self):\n for i in self.utils.get_anagram_prime():\n stack.push(i)\n\n for i in range(0, stack.size()):\n print(stack.pop())", "def stack(*bpfs) -> core.Stack:\n if len(bpfs) == 1 and isinstance(bpfs[0], (list, tuple)):\n bpfs = bpfs[0]\n return core.Stack(bpfs)", "def queue_to_stack(queue):\n stack = Stack()\n check_list = []\n\n while len(queue) != 0:\n check_list.append(queue.dequeue())\n\n check_list.reverse()\n\n while check_list != []:\n stack.push(check_list[0])\n check_list.remove(check_list[0])", "def enfaceStack(stack):\n enface=np.swapaxes(stack,0,1)\n enface_downsize=np.empty((enface.shape[0],256,256))\n # writeText('\\n')\n for i, frame in enumerate(enface):\n enface_downsize[i] = transform.resize(frame,(256,256),order=3,mode='reflect')\n print('\\rResizing: {:.2f} % done'.format((100.0*((i+1)/enface.shape[0]))), end='', flush=True)\n print('\\n')\n mask=np.any(enface_downsize!=0,axis=(1,2))\n enface_cleaned = enface_downsize[mask]\n\n return enface_cleaned", "def make_stack(tb, stack=None):\n if stack is None:\n stack = []\n if tb is not None:\n make_stack(tb.tb_next, stack)\n stack.append(tb)\n return stack", "def stack_man(self):\r\n c = self.eat_char()\r\n if c == 's':\r\n #Push n onto the stack.\r\n self.log += ' Push '\r\n n = self.read_number()\r\n self.push(n)\r\n elif c == 't':\r\n c = self.eat_char()\r\n if c == 's':\r\n #Duplicate the nth value from the top of the stack\r\n self.log += ' Duplicate '\r\n n = self.read_number()\r\n if not self.scan_only:\r\n if n>=len(self.stack) or n<0:\r\n raise Exception('Non existant stack address.')\r\n self.push(self.stack[n])\r\n elif c == 'n':\r\n #Discard the top n values below the top of the stack from the stack. 
(For n<0 or n>=stack.length, remove everything but the top value.)\r\n self.log += ' Discard '\r\n n = self.read_number()\r\n if not self.scan_only:\r\n if n < 0 or n >= (len(self.stack)-1):\r\n self.stack = [self.stack[0]]\r\n else:\r\n self.stack = [self.stack[0]] + self.stack[n+1:] #!!not sure\r\n else:\r\n raise Exception('Invalid command')\r\n elif c == 'n':\r\n c = self.eat_char()\r\n if c=='s':\r\n #Duplicate the top value on the stack.\r\n self.log += ' Duplicate top '\r\n if not self.scan_only:\r\n self.push(self.stack[0])\r\n elif c=='t':\r\n #Swap the top two value on the stack.\r\n self.log += ' Swap top '\r\n if not self.scan_only:\r\n self.stack = [self.stack[1]] + [self.stack[0]] + self.stack[2:] #!!not sure\r\n elif c=='n':\r\n #Discard the top value on the stack.\r\n self.log += ' Discard top '\r\n if not self.scan_only:\r\n if len(self.stack) == 0:\r\n raise Exception('Stack is already empty.')\r\n self.stack = self.stack[1:] #!!not sure\r", "def fn(node):\n if not node: return #null node\n stack.append(node.val)\n if node.left is node.right: ans.append(\"->\".join(map(str, stack))) #leaf node\n fn(node.left) or fn(node.right)\n stack.pop()", "def load_stack_top_into_d():\n return ['@SP', 'A=M', 'D=M']", "def stack_to_queue(stack):\n temp_stack = ArrayStack()\n result_queue = ArrayQueue()\n while not stack.isEmpty():\n elem = stack.pop()\n result_queue.add(elem)\n temp_stack.push(elem)\n while not temp_stack.isEmpty():\n stack.push(temp_stack.pop())\n return result_queue", "def tower_of_hanoi_stack(n, beg, aux, end):", "def push(num):\r\n i = len(stack) - 1\r\n while i:\r\n stack[i] = stack[i - 1]\r\n i -= 1\r\n stack[0] = float(num)", "def Stack():\n return []", "def stack_nonlinearity(self):\n self.flow = self.NONLINEARITY[0](self.flow)", "def array_to_stack(stack, source):\r\n \r\n while source != []:\r\n temp = source.pop()\r\n stack.push(temp)\r\n \r\n return", "def stack_to_array(stack, target):\r\n \r\n while stack.is_empty() == False:\r\n temp = stack.pop()\r\n target.insert(0, temp) #adds temp to the beginning, while append adds temp to the end\r\n return", "def input_string_to_nfa(string: str, nfa: NFA):\n\n # ? 
is it possible to get a loop of epsilon transitions\n\n # we store a list of all current active states in the nfa\n # as each character is read, we follow all transition(including all series of epsilon transitions) to get a new set of active states\n\n # begin with the start state as the only active state\n active_states = [nfa.start_state]\n\n # mark all states as active that can be reached by following epsilon arrows from the start state\n i = 0\n while i < len(active_states):\n for transition_char, transition_state in active_states[i].transitions:\n if transition_char == 'eps':\n active_states.append(transition_state)\n i += 1\n\n string_index = 0\n while string_index < len(string) and len(active_states) > 0:\n character = string[string_index]\n new_active_states = []\n for active_state in active_states:\n # make active all states that can be reached from this state by reading [character]\n next_states = [transition_state for transition_char, transition_state in active_state.transitions if transition_char == character]\n\n # now make active all states that can be reached by epsilon arrows from these states\n i = 0\n while i < len(next_states):\n for transition_char, transition_state in next_states[i].transitions:\n if transition_char == 'eps':\n next_states.append(transition_state)\n i += 1\n \n new_active_states.extend(next_states)\n\n active_states = new_active_states\n string_index += 1\n\n for active_state in active_states:\n if active_state.is_accepting:\n return True\n\n return False", "def stack():\n return currentframe().f_back.f_locals.setdefault(SN, [])", "def NFAtoDFA(self):\n q0 = self.NFA.FindEpsilonClosure(self.NFA.GetStartState().pop())\n Q = [q0,]\n WorkList = [q0,]\n SetCounter = 0\n WorkListIndex = [SetCounter,]\n\n self.TransitionMap = dict()\n self.StartStates = [0,]\n self.AcceptStates = list()\n\n while len(WorkList) > 0:\n\n q = WorkList.pop()\n idx = WorkListIndex.pop()\n\n for state in q:\n if state in self.NFA.GetAcceptState():\n self.AcceptStates.append(idx)\n break\n\n for char in rule.ForAllChar():\n # t <- e-closure(Delta(q, c))\n t = self.NFA.FindTransitionList(q, char)\n if len(t) == 0:\n continue\n t = self.NFA.FindEpsilonClosureList(t)\n\n # if t not in Q then\n # add t to Q and WorkList\n if t not in Q:\n SetCounter += 1\n Q.append(t)\n WorkList.append(t)\n WorkListIndex.append(SetCounter)\n \n # T[q, c] <- t\n key = str(idx) + '_' + char\n self.TransitionMap[key] = Q.index(t)\n \n self.NumStates = SetCounter\n \n # for key in self.TransitionMap.keys():\n # print key, '=>',\n # print self.TransitionMap[key]\n # print 'Accept =', self.AcceptStates", "def fn(i):\n if i == len(nums): return ans.append(stack.copy())\n if not stack or stack[-1] != nums[i]: fn(i+1)\n stack.append(nums[i])\n fn(i+1)\n stack.pop()", "def DFS(graph, s, n):\n # Stack implemented using list. 
list.append() and list.pop() inherently have LIFO structure.\n visited = [False] * n\n stack = []\n \n stack.append(s)\n visited[s] = True\n while(stack):\n v = stack.pop()\n print(v, end= \" \")\n \n for i in range(len(graph[v])):\n if (not visited[graph[v][i]]):\n stack.append( graph[v][i] )\n visited[graph[v][i]] = True", "def print(self):\n nfa = self.aut_stack.pop()\n return nfa", "def fn(n, i=1):\n if n == 0 and len(stack) == k: return ans.append(stack.copy())\n if n < 0 or len(stack) == k: return \n for nn in range(i, 10):\n stack.append(nn)\n fn(n-nn, nn+1)\n stack.pop()", "def fn(i):\n if len(nums) == i: return ans.append(stack.copy())\n fn(i+1)\n stack.append(nums[i])\n fn(i+1)\n stack.pop()", "def reverse(stack):\n list = []\n while not stack.is_empty():\n list.append(stack.pop())\n while not len(list) == 0:\n stack.push(list.pop(0))" ]
[ "0.57262903", "0.5415393", "0.5326861", "0.5321518", "0.5242631", "0.51943195", "0.5160745", "0.5048725", "0.5032095", "0.49959958", "0.49540788", "0.49480787", "0.4946257", "0.4938141", "0.49019092", "0.48895568", "0.48797578", "0.48735604", "0.48648134", "0.48560274", "0.4842256", "0.48155117", "0.48144114", "0.48119766", "0.48104697", "0.48086965", "0.47998118", "0.47869104", "0.47860977", "0.477115" ]
0.8418595
0
~30x faster than hankel_weights_ascii
def hankel_weights(order=0): if order == 0: return N.frombuffer(b'\x8fC\xa7\xfbr\xaa\xfa9{\xd9\x8cj+y\x0c\xbd\xfb\xad\x9eC\xf8\xfb)=\xa3Ng\x98\xb6\x82\x1f\xbd\xc2\x9a\x84Y\xff\xc4.=\x92"\xfc\x1f\x8f\xde\x1d\xbd)p\xe2\x83i\xf2/=\xdbF\xa8W\xedI\x18\xbd+2\xef\xb8Ij0=\x18\xb0\x10x\xbdi\x11\xbd/\x8a\xee\x14\xbf\x0c1=\xed|\x19%`%\x03\xbd\x16\xdc\xd6W\r\xfb1=\x9a\xd4A\xe0\x1cd\xc4\xbc\x90\xc9#a\xddF3=\xe3\xabJg\xf0M\x03=\x1d\xc7u\x04&\x035=\xa5\x17\x1c\xc2o\xa0\x15=\x9d\x02\xe4\xfc\xb0F7=\x0c\x1c\x12\x94\x9e\xd8!=4\x14\xad\xf1[-:=\xe3\x93\xd5\xf4\x03,*=\xd9\xb2\x91\x1dJ\xd9==e\x0f_\xb7\xc4\x0b2=\x03/\xc6\xd8$:A=\x9c\xe2\x0eY\x96\xfc7=\x01\xef\xda\xbb\xc2\x18D=`~\xca\xf9\xc2!?=A\xc7\x8a\x01\xcb\xa7G=\r\xb9\xc3\xa7\x96\xe0C=\xa8I\xdd\x02\x84\rL=+\xfa\xe0\xc9\x0c\x18I=\xe9WEt\\\xbcP=\x15WzPKkO=]zq\xedM\x11T=\xc0\xc3\x12\x13\xea\x8cS=T\x10\x86~\x89(X=\xe6P0q\x978X=\xdd\x17\x10\xec\xc3,]=\x1a\x04\x8a\x0e\x02\xe8]=\x8e:\xae\x85\x15\xa9a=M\xdd\x104zjb=\xa0\x0f\x8e5?le=\x92\xc8\xc30\x00\xa3f=N\xd0\x8fh\xcf\x06j=\xaf\xde\xa1\xe6\xa9\xc8k=M\xbc!nW\xa8o=\x03)\xecV\x0b\x08q=\xf4]5;yEs=\x9cd\xf3\\\xc5\xdct=\xb1\x1a\xc4d\xcdzw=\t\xe7\x9c\x95\xc1\x89y=\xa7\xfe\xf7\x9c\x81\x9f|= \xe5\xb7!\xee>\x7f=UJ\'\x86Dt\x81=\xd4,\xcc\x85j\x1b\x83=\x9c{A[HK\x85=,\xc5\x96\xd0z\\\x87= \x7fd\x07L\xfc\x89=\xd3=\xcaxQ\x8e\x8c=_\xba.X\x8e\xb7\x8f=\x04#\x17:!s\x91=\n\x8b\xd0s\xfe[\x93=l\x1e\xdd\x01\xb8R\x95=\xb7\xf8\xdc\xca\xc8\xa2\x97=\\\xb1W\xee\xb3\r\x9a=\x8b\xc2\x0b\x08\x1b\xdc\x9c=I\xf6p<\xaa\xd4\x9f=WUn\xc5\xd2\x9e\xa1=\x8a\xd2\xd8\xd6xq\xa3=\x9cf\xcd\xed}\x84\xa5=\xddUz\x16\x82\xc0\xa7=\xaf\'\x9a6\x1bG\xaa=\'\xce\x85\xb6\xb3\x03\xad=\x0b\xe8\xc7b\xca\x0b\xb0=\xdb\xf7\xea\x98\x8e\xb8\xb1=\x11e\x8f\x9e\xd4\x98\xb3=\xb6\xc92\x08c\xa5\xb5=\x1ex\xe6\xc8(\xef\xb7=}\xe7\xd8%\xaap\xba=Hz\x94cW;\xbd=\x92\xa7n9\xd2%\xc0=\x12\xee\xd0j\xe7\xd9\xc1=\x9d[\xed\xfc<\xb9\xc3=@\xa6ET\x85\xcd\xc5=\x07\x95\x95.R\x17\xc8=\x89fRK\x1f\xa1\xca=\x1a\x00\xed\xdd\xf0l\xcd=\x9f,!5&C\xd0=\xc7\x81S\xaes\xf8\xd1=\xd8\xeaDD\xcf\xdc\xd3=\x13D1*\x13\xf3\xd5=^\x95iD\x88B\xd8=_9\xc6C8\xcf\xda=\x19/^7\x7f\xa1\xdd=\x9d-\xe0\xb5h_\xe0=Vmaoq\x18\xe2=\xc8\x0b\x9e\x91m\xff\xe3=\xc5\x88{\xd8\x0c\x1a\xe6=\x8ag\xa9H\xe9l\xe8=\xdfcQ*\xbb\xfe\xea=\x1a\x17riY\xd5\xed=\x1a\xc9b[a|\xf0=[N\xe3\xee&8\xf2=Io&\xab\xc5"\xf4=~2\xe8\xe2\xd1@\xf6=\xe1H=\xa6\n\x98\xf8=\x98cQ[\x1f.\xfb=t\x91~\xed\xfd\t\xfe=\xfbp\xfe\x15W\x99\x00>|\xea\x03\x9cHX\x02>\x15,\xb4#)F\x04>\xdcw\x8f\x8d\x0ch\x06>\xf6\xed\xea\xfcG\xc3\x08>\xd2\xf5\x11\x8a\x05^\x0b>\xe2 b\xe4\xd1>\x0e>\xe0\x89\\\xc0\x95\xb6\x10>\xc6\x03\x8c\x86\x8dx\x12>\xa4\xd8\x00\x9d\xdfi\x14>\x02J\xfe*x\x8f\x16>\xce\t\x8b\xf8\xe4\xee\x18>u`\xdc\x13-\x8e\x1b>\xd5\xe6\x89D\x15t\x1e>x\xc0\xc0\xf4\xfe\xd3 >u\xbb\x1f\xee\x13\x99">`\xefy\x85\xcc\x8d$>\xd1z\x8c]1\xb7&>\x80\xce\x96\xc6\xc6\x1a)>\x88+e\x1b\xb1\xbe+>y\xaf\xca\xe7\xae\xa9.>F\x99]\x93\x9f\xf10>wB\xc7\r\xd0\xb92>\xe4X\xd2\x1a\xfc\xb14>5\xdb\xaf\'-\xdf6>\xa2\xf4\x16\x11\xf9F9>\x8eK_e\x87\xef;>?x\xbc\xf3\xa9\xdf>>\x04{\xe7\xddr\x0fA>\x13#\x05>\xc7\xdaB>\x97\xf2D\xfdi\xd6D>b\xae\x08\xb0p\x07G>H.\t\xd7wsI>\xb8\x90\x95\xef\xb4 L>[*\x97\xc7\x02\x16O>2\x80\xcdC{-Q>\x8a\xe9\x05\xe0\xf7\xfbR>\x92\xa9M\x92\x18\xfbT>\x05\xf7\x9f\x8b\xfa/W>\x8d\x85/zE\xa0Y>d\xfa9\x868R\\>]!\x82\xc8\xbbL_>R\x8d\xfcH\xb8Ka>\xc8n/,c\x1dc>\x00S\xa4~\x07 e>\x8bL\x80\xfd\xcbXg>d\xe6\xe0\xc3a\xcdi>\xadD\xac|\x13\x84l>\xfd\x8eR\xe9\xd4\x83o>\xf64\xb0\xa2*jq>T\xad\xbc3\t?s>\xbf\xfa\x19\x877Eu>\xcbc\x8a2\xe5\x81w>-\xe1\\\x8d\xcd\xfay>\x81w6 
F\xb6|>\xc1\xf8\x8d\x1dO\xbb\x7f>\t\xd8\xb4\x8a\xd2\x88\x81>c\x0b\x1b\x81\xea`\x83>\x96}E\xfc\xa8j\x85>\x1d\xf2\x1c\xcaF\xab\x87>\xff\xdf~B\x89(\x8a>\xd1\x13Y*\xd1\xe8\x8c>\x14p\xd4\xf1*\xf3\x8f>~\xe3\x02n\xb0\xa7\x91>\x91\xbb\\n\x07\x83\x93>\xff\xa8-_\\\x90\x95>\xc5\x19/6\xf1\xd4\x97>\xdd"\x08}\x95V\x9a>\xe9\xd1\x00*\xb5\x1b\x9d>y\x91)\x8f\xb4\x15\xa0>1\xfc\xa4\xa5\xc4\xc6\xa1>\xaf\xc4Fj`\xa5\xa3>j\x1b<\x1eR\xb6\xa5>\xbd\xf5o\xfc\xe4\xfe\xa7>\xf1Dd\xc5\xf2\x84\xaa>\xa9\xf4\xf0\xc0\xf2N\xad>\xbca\x91%\x052\xb0>7\x81\xad\x93\x0f\xe6\xb1>>\xcc2\xdc\xf5\xc7\xb3>tu\x8c\xb0\x8a\xdc\xb5>-\x02\xc2\x9b")\xb8>\xd9\xd7i\xac\xa1\xb3\xba>Q\x8a\xc0\x8a\x8a\x82\xbd>p\xd0G\x94\x87N\xc0>\r\xaej\x97\x91\x05\xc2>\x95\x02\x81/\xc8\xea\xc3>\r\xd8\xca\x8a\x06\x03\xc6>\xe3\xf0\x03\x97\xaaS\xc8>\xe1\x06\xd8\xc0\xa2\xe2\xca>\x04\xbb\x01\'}\xb6\xcd>oI\x952<k\xd0>\xa11,\x12K%\xd2>Yw\xe3\xce\xd7\r\xd4>t_\xac#\xc6)\xd6>fS\x96p}~\xd8>\x19\xf4\x7f\x93\xf6\x11\xdb>\x97\xce\xf54\xcb\xea\xdd>\x95\x03\xceX#\x88\xe0>\xdc$+e<E\xe2>\x16>\x14&%1\xe4>\xda\xd1\xd5\xf1\xc9P\xe6>\xef\xc9\xcd\xab\x9b\xa9\xe8>B\xda\x0f\xb5\x9dA\xeb>\xc3\xb1\x9bTu\x1f\xee>\x95\x8e\x12_=\xa5\xf0>\xaa\xdf\xc1\xf1ee\xf2>\xacHG\xa0\xb0T\xf4>Ll\x8dk\x12x\xf6>\x17\xd0\xca\xca\x05\xd5\xf8>\x8b\xad\xda\xb4\x98q\xfb>\xf4\xf8d#|T\xfe>\xde\x02\xe3\x9b\x8a\xc2\x00?\xe2\xc5\xb8\x16\xc8\x85\x02?\xe9\xdeE\xa5zx\x04?\x83\xb6\x17\x02\xa0\x9f\x06?\xf3\xbc\xecH\xbc\x00\t?"\xe6\xa8\x18\xe8\xa1\x0b?\xd3\xd9\xd41\xe0\x89\x0e?|\x11\xca\\\x0b\xe0\x10?Iy\xb1&c\xa6\x12?\t\x910\x8c\x83\x9c\x14?P\xc9#\x10s\xc7\x16?A\xbd\xcc\x82\xbf,\x19?\x0e\x97\x94<\x8c\xd2\x1b?\xb1#\xd6\xd7\xa1\xbf\x1e?2-\x07\xc9\xbf\xfd ?d6k@7\xc7"?\xafU\xfee\xcb\xc0$?L\xb1p\x91\x8b\xef&?\xed\x07\xb4T\x0fY)?\xa0o=\xcf\x84\x03,?\x7fv\xf5\x82\xc0\xf5.?3!\x80i\xa7\x1b1?p\x18\xc7\xacC\xe82?\xab\xfd\xbe"Q\xe54?D\r\xa1\xfb\xe7\x177?\xbfE\r\x8c\xa9\x859?\x8b\xc2\xec\xb7\xce4<?7=,\xe07,??a\xa4\xb9>\xbf9A?\x9b\xe6*I\x84\tC?\x119\xb6\x14\x0f\nE?\xdfk\xb1\x88\x80@G?\x87(p\x8c\x83\xb2I?E\xb0\xf3\x81[fL?v\xb1eG\xf4bO?`\xbb\x94\xef\xf9WQ?~\xb5\xb8\xfa\xe6*S?"B\xd6\xb1\xec.U?\x0c\xe4\xe9\xfd3iW? #J[p\xdfY?\x11\xfe%V\xee\x97\\?Y$\xe6p\xa3\x99_?\xff\xa6\x90\xe0\x1fva?\xc6\x9e\xc3\x98 Lc?&\xb7\x12p\x84Se?h\xd8\x92+y\x91g?\xab\xfc\xf7\xb1\xb6\x0bj?p\xa3t\xf3\x8c\xc8l?\x17&Ov\xf3\xceo?b\xd1p\xe8L\x93q?\xc4\x91\xcc\x1e\xfdks?\xaeA\x99e6vu?J\x9cx\xa8\x1e\xb7w?\x95\xb3Y\x92`4z?\xc4A\xe7M8\xf4|?&\x1d\xa20\x7f\xfd\x7f? 
w@\x12\xdd\xab\x81?\xfa\xbe\xca|\x92\x85\x83?\x14\xe6\x86\xf9`\x90\x85?.\x8d9\x191\xd1\x87?Q\xecT\x8dZM\x8a?\x9b\x12\x85Q\xa4\n\x8d?.\xf0\xc1u\xa5\x07\x90?\xe7\xc4_\xba\xfb\xb0\x91?\xaa\r\xf6\xe5\xe0\x84\x93?\x18f\xbah\n\x87\x95?\x163\x8b\xe8S\xbb\x97?\xdc4-\xea\x9e%\x9a?:x\xa6\x12\xc7\xc9\x9c?&\x89"\xaa^\xab\x9f?\xdc&\xda\xec\xc9f\xa1?\x94\x1e0\xdfU\x19\xa3?\x08\xc0\xe5\x80\xe7\xed\xa4?\xbe7\x92\xcc\x06\xe4\xa6?\xe3\xcf\x0c\x9f\x03\xfa\xa8?\x7fw\x91E\r,\xab?\xe2B~\xe8\xf1s\xad?|&\xd9R~\xc7\xaf?3\x08\xf8^\x1b\x0c\xb1?\xd6\x05AoC(\xb2?6\x82m|\xde)\xb3?T\x80o\x81\xb3\xfc\xb3?\x87\xe7:\x07B\x87\xb4?\xb4\xef_\x805\xa7\xb4?\xad$\xe3\xd8\xf23\xb4?\xa3m\x80j\xcb\xf9\xb2?\x9aGc\xde\xe5\xc1\xb0?\x92E\xca\xb03\x9a\xaa?1\x10U\x11s\xd1\xa0?\xe05)\x1aA\xb5~?\xcd\xf4\xf9\xc8\\p\x98\xbf`90\x0f@\xcf\xae\xbfw\xc2\x83AB"\xb9\xbf\xddP\xad\x1f\x13\x00\xc1\xbf\x9e\xe2:\xf74\xe6\xc3\xbf\xf3\x11\x01S\xdb\x04\xc4\xbf\xd7hg\x9cs\xd2\xbf\xbf\xf1\ni\xf0\xad\x17\xac\xbf\xbbn\x8d\x8c^\xfe\xa7?tV\xe1\xc9\xf5W\xc3?\xbe\x89\x82\xb9\x92 \xcb?\xc5n\xcc\x93\x9d\xb2\xc5?\xfb\xcd\xe3\xae\xaac\x8c?\xaa7\xc5\xd0|\xed\xc7\xbf\x8en7\x85uo\xcf\xbf\xf9\xae\xef0\xf7.\xab\xbf\xfd\xb0\x8b\xf1\xc3 \xd0?\xeb\'\xb1\xb1z1\xc9?\xc9\xd5\xd5\x82\x91\xc8\xc9\xbfjQ\x18\x07\xdaw\xcf\xbfm%\x12(\x8b\xf9\xd5?P\xf8A\xb98l\xa8\xbfjbGp;\xd6\xca\xbf\xfcI\xbd\x93\xe2\x0e\xd0?\x9ar\xc0\xddDu\xc5\xbf\xcd\xf8\x9d\xdb\r0\xb3?\xc5\x0b%\xe9\xb1[\x91\xbf\xeb\x1b\xbcu\xb0\x14\x82\xbf\xea\x99\xc6\xa0O\x8e\x90?\x11\xafI\x7f\x14\xe0\x8f\xbf\x00E\xcb\\y\xa0\x89?\xabA\xe3\xf6\x84\x0b\x83\xbf\xec\x91\x84\xfa\x94={?3A\xae*r\x1es\xbf|\xe6T%h\x9dj?6:4\xf8/zb\xbf\x98Hxr\xf3\xabY?\xb6,\r\x96\x8a\xe0Q\xbf~\xeen\x8a\xa2\xfcH?\xf6=,V\t\x8aA\xbf\xd1\x0b\x19\x0f\xee\xc08?\xee\xc1\n\x18\x89\x941\xbf?\x99\xa7OW+)?\x1c\xee\x19\x82\x122"\xbf\xfbr\x93\xd9@\xa2\x1a?"\n\xb7\xe3\x88\xca\x13\xbf\xa8g\x7f\xddn\xf5\r?\x08\xaf\x84%L+\x07\xbf\xef\xc8\xf1#\x01\\\x02?\xfbN\xa43`\xde\xfd\xbe<\t\x84\x93\x0b\xf5\xf8>\xecQPr\x81g\xf5\xbe,\xaa\x93\x01\x06\xce\xf2>F#"\x1e\x1d\xe0\xf0\xbe\xdc,\x9d\x13\x1f\xd6\xee>~[\x17\x8d\xab\x96\xec\xbe~\x13~*\x1d\xd0\xea>\x1a4\x15\x16\xa8_\xe9\xbe*\x06,\xcb\xf1,\xe8>\xcf\xb2o\xe9\xdc&\xe7\xbe\xberRiZA\xe6>P\xc3/2\xf2s\xe5\xbe"x1\x13\xb7\xb8\xe4> 4\xf1>|\x0b\xe4\xbe\x92\xe5O\x81Ei\xe3>d\'\xe3\xfe\xed\xcf\xe2\xbe\x1c\xccSb\xf0=\xe2>\xdc\x08\x05]9\xb2\xe1\xbe\x11W+\x95\xff+\xe1>\x12\x07\x0fK\xa9\xaa\xe0\xbeK\xa0\xd1]\xc1-\xe0>\x19\x83A\xea\xe2i\xdf\xbeT~\xf64\xf1\x7f\xde>\x7f\xef\x8f\xa1A\x9d\xdd\xbe\x7fd\xfa\xf4t\xc1\xdc>RpX\xd7:\xec\xdb\xbe\xaa\x97\x0c\xa4R\x1d\xdb>\xe2)\x9a\xa4\x84T\xda\xbe/\x85E\xb0\x9a\x91\xd9>F\x06\xbd\xdb_\xd4\xd8\xbe\xb1\x8c\xc8\x80\xa4\x1c\xd8>\x95D\x8a\xec>j\xd7\xbe\x1fV\x85m\x07\xbd\xd6>\xc2\x90\xa8b\xd5\x14\xd6\xbed\x15\xc0\xb3\x80q\xd5>P\xc9x\x83\xe4\xd2\xd4\xbe\xf6\x8d\x9a\xe4\xde8\xd4>$\xc8kSN\xa3\xd3\xbe\xb0\xf8\xe8\xb4\x10\x12\xd3> 
O\xaf\xdb\x04\x85\xd2\xbe\xb0\xf4\xdc\xeb\x0b\xfc\xd1>\x01\xb8.\xa4\x08w\xd1\xbe\x89\xd5j\xdb\xdd\xf5\xd0>\'\xa3\xb4`nx\xd0\xbe\xae\xc7\x8c<<\xfd\xcf>\x1a\xacdM\xa5\x10\xcf\xbe\x83Tv\xf4\xe4*\xce>\xd5z\xedq\xc8K\xcd\xbeD\x03D\xb3\x1ds\xcc>\x80!\x87\xc3\xb4\xa0\xcb\xbe\xb5\xc7\xb8\x01`\xd4\xca>\x04i\xe6\xfb\xf2\r\xca\xbe\xc1=\x1c\xb5AM\xc9>S\x08\x926!\x92\xc8\xbe\x8a\xe3\x8dYh\xdc\xc7>\xba\xa6#\x9b\xef+\xc7\xbe\xf9\x98\x1fW\x90\x80\xc6>\xf2\xf4\x87\x8a$\xda\xc5\xbe\xc4\xdasN\x878\xc5>\xec>74\x95\x9b\xc4\xbe\x14\x87\xab\xfd+\x03\xc4>\xc3\xd1\xc9\'*o\xc3\xbe\xb1\xb6\xf1\xe7n\xdf\xc2>$\xca\xa7\x80\xdaS\xc2\xbeO\x99\xb1^N\xcc\xc1>U\xf7\xb9\xd6\xacH\xc1\xbe%\x0e\\\xe7\xd8\xc8\xc0>\r\x11\x86K\xb6L\xc0\xbeu?\xc3XS\xa8\xbf>\x17f\xc7>1\xbe\xbe\xbe)\xdd\x9a\xe8\xd2\xda\xbd>\xd99\x042\x06\xfe\xbc\xbe=\x1f\xd6\\\x9a\'\xbc>+g\xfdA`W\xbb\xbe\x98\xca\xd45*\x8d\xba>\xdf7\xa5\xc5\xcb\xc8\xb9\xbe!\xff\x88\xa3\x19\n\xb9>\xf1{\xa0\xc6\xe9P\xb8\xbe\x94Vd|\x13\x9d\xb7>F\xab\rIo\xee\xb6\xbe\nNR\xbf\xd6D\xb6>\x99\x81\xc5|$\xa0\xb5\xbe\xd4\x94\x91=4\x00\xb5>#Z\xa9\xdc\xe2d\xb4\xbeC2\xb68\x0e\xce\xb3>\xcd\x9c\x80\x1d\x95;\xb3\xbe\xd2\x9b\xebEW\xad\xb2>\x02\xfc;e5#\xb2\xbe\xee4\xb6\x1f\x11\x9d\xb1>[qP\xf6\xcc\x1a\xb1\xbe\xf6\xb2\xb3:L\x9c\xb0>S}\xb0\x11s!\xb0\xbeKC\xdb\xeaLT\xaf>`Z\xc8W\x98l\xae\xbe\x9e\xf2~q\x95\x8b\xad>\xb9\xdf\x1b\xae\x12\xb1\xac\xbe$\xe2\xad\xf4\xdf\xdc\xab>\xef\xe0r\x99\xce\x0e\xab\xbe\xa0o\xd5K\xb1F\xaa>}\xba\x82\x03\\\x84\xa9\xbe\xc0K/\xf9\xa3\xc7\xa8>\xe7\x1b\x91\xa5_\x10\xa8\xbe\xbaz\xdd\xb9f^\xa7>\xecd\xc1\x10\x92\xb1\xa6\xbe\xb4\xbe\xc2\xa1\xbb\t\xa6>\xb5W0|\xbef\xa5\xbe\x95c\xb8\xc3v\xc8\xa4>\xdc\x9a\xdb\xa7\xc1.\xa4\xbed\xa4\x1eX}\x99\xa3>\x05+W\xfb\x88\x08\xa3\xbef\x07K\xab\xc4{\xa2>\x0f\xe6\xefo\x11\xf3\xa1\xbeV\xa9A7Qn\xa1>\xb5\xae3\xccf\xed\xa0\xbet0O\xd05p\xa0>\xab*\xf4nE\xed\x9f\xbe\rM|\x85%\x01\x9f>\xdf\x8a9\xf2\xd7\x1b\x9e\xbe\xd1\x052A*=\x9d>=\xb0\\r\xebd\x9c\xbe\xb5\xedU\xf1\xeb\x92\x9b>\xc8^$\x8b\xfd\xc6\x9a\xbe\xeb\xe1\x1fb\xf3\x00\x9a> \x0f\n\xe3\xa1@\x99\xbe\xe67\x9a\xbc\xde\x85\x98>E`\x84\xd7\x80\xd0\x97\xbe\x99\xad\x15M` \x97>,\xde-]Vu\x96\xbeV&we=\xcf\x95>\x90D\x07\xda\xf0-\x95\xbe\xea\xae\xef=M\x91\x94>\xab3\xf3\x1a0\xf9\x93\xbe3\xae<\xf9we\x93>\xf8\xd7\x18X\x04\xd6\x92\xbe@\x0fm\xa7\xb5J\x92>,$\xf7@m\xc3\x91\xbe\xe7\x94#a\r@\x91>\xd6\xe0) y\xc0\x90\xbe\x0b\x8b\xe2k\x94D\x90>\xb6O\xee\x03\x88\x98\x8f\xbeY\xc4\xc4\xd3\xda\xae\x8e>\x95\xf7\xa0\xdd\xed\xcb\x8d\xbe\xc5Y\xad3\x8f\xef\x8c>D\xf1JY\x8e\x19\x8c\xbe\xc1\xf3\xc58\xbcI\x8b>X\xdc\xd3\x18\xeb\x7f\x8a\xbe\xbcM%\x92\xee\xbb\x89>o\xed\x96\x85\x9b\xfd\x88\xbe\xa9\xb8\xfd\x12\xc8D\x88>Ac\x18\x90K\x91\x87\xbeEXr\x7f\xfe\xe2\x86>\xfe7\x80\x87\xba9\x86\xbe\x9ba<jZ\x95\x85>\xca\xad$\xfd\xb9\xf5\x84\xbeQ\xa2O!\xb6Z\x84>\xa2\xe7\x8f\xbb,\xc4\x83\xbe\xd8\xf0\xdb\xac\xfc1\x83>\x1fq\x16\xcb\x05\xa4\x82\xbe\x0f\x97\x15\xda(\x1a\x82>\x12\xa8\xc0\x84G\x94\x81\xbe\xe2)LVD\x12\x81>\x10$\xb7\xb3\x02\x94\x80\xbe\xd8\xbc\x8f\xd5f\x19\x80>\x7fT\xc7\x83\xabD\x7f\xbe\xce\xf6\x92\x8cj]~>\xf7V\x1e\xe4\xd7|}\xbe\xeeDB!\xc2\xa2|>D\x03LH\xf9\xce{\xbe\xf9\x92x\xc0N\x01{>\xd7\xe5\xa5I\x959z\xbef\xccO\xf2\xa0wy>\x7fG\xea\rG\xbbx\xbe$\x01\x8c+^\x04x>\xc6\xc9\xd1\x0c\xbeRw\xbe\x1d\x0c\xf9\x9c?\xa6v>\xca\x7f@\xe8\xbc\xfeu\xbep\xba\x94\x13\x11\\u>\xa2\xf0{T\x18\xbet\xbe\xc3A4\xe9\xaf$t>\x1a\xfe\x06\x11\xb6\x8fs\xbeP\xeb\xd9\x04\n\xffr>P \xfe\xef\x8brr\xbe\x9e\xa82\xe9\x1c\xeaq>\xc6\xd0\xd4\xeb\x9eeq\xbed\\A\xd1\xf4\xe4p>\xa6\x00kJ\x02hp\xbe\x0f\xb8F\xb3W\xddo>zf\x1f\x99\xad\xf1n\xbe\n\xd6\x8dl\xd2\x0cn>\x02\x9f6\xd3\x93.m\xbe\x93 
#\xe7\xc0Vl>\xb6o\x03,*\x85k\xbeU\xc7\xbb\x84\xa1\xb9j>\xdd\xd0;)\xfa\xf3i\xbe\xcb\xcd\xa2\x9c\x084i>\x06:\xb0\xa3\xa2yh\xbe0@{;\x9f\xc4g>|\xf9k\x90\xd6\x14g\xbe\xea\x18v\xf5!jf>\x1b8\x96\xdb[\xc4e\xbe\xa4\xf1\x8f\xc9_#e>Wj\xe8S\n\x87d\xbe\xe7`\x1a\x159\xefc>\xa8\xa4\x03\xa6\xca[c\xbe\xab\xe9\x8b\x96\x9e\xccb>1\xc3\x82f\x95Ab\xbe\t\x9b\xb1~\x90\xbaa>\xfe\xeb\x1f*r7a\xbem{\x89\x8f\x1d\xb8`>2\xd3\x05\xabv<`\xbe\x9a\xc8\xbd\x8f\xc4\x88_>\xe71)\xf5\x8b\x9f^\xbe\xfdk\x186\x10\xbd]>^\x1f\xd6}\x1f\xe1\\\xbe\x9a\x077h\x89\x0b\\>U\xe5\xf4\xf6\x1e<[\xbe^IW\x87\xb2rZ>6\x88(\xc8\x17\xafY\xbe\x05\x89\xf5\xaf#\xf1X>\x8a5\x96s\xac8X\xbe\x84\xad\xfb|\x89\x85W>\xc1\xf2Ab\x93\xd7V\xbeb\x9c\x03\xdd\xa3.V>u\x1b\xee\xc1\x95\x8aU\xbe\xfaz\x94\xf8D\xebT>\xeaV~s\x8ePT\xbe\xc3Gq(P\xbaS>"q\xf3\x08i(S\xbe\xcc\xb9\x05\xfb\xb8\x9aR>\xaa\xd7\x13\xd2 \x11R\xbe\xc9c\x18H\x82\x8bQ>\x83\x89\xf3\xf6\xbf\tQ\xbe\xb6\x16\xf3Q\xbd\x8bP>\x9e\x88\x8a\x9f^\x11P\xbe\xe5\x0bs\xe6\x115O>\xde\xa6>ODNN\xbeZ\x1ft\xb1!nM>\x0b&\xa1\xbcx\x94L\xbe\xa0d\x0b\x8d\x19\xc1K>\xdc(\'\xa1\xd5\xf3J\xbe|\xd5[\xcf\x7f,J>\xef\x02\x14<\xecjI\xbe~?\x17P\xf0\xaeH>\x839+\xafb\xf8G\xbe\n!\xfa.\x1bGG>\xeeB<\xce\xf2\x9aF\xbe\xd7\t#\xac\xc3\xf3E>\xc0\x8b\x03\x00iQE\xbe6\xbb>\x11\xbf\xb3D>Fce/\xa3\x1aD\xbe\xf6;\x96\xaa\xf3\x85C>C{\x14\xcc\x8f\xf5B\xbe\xe4A\x15\xcfWiB>\xfe?\xc2\xd9,\xe1A\xbep\x01p\xf6\xf0\\A>\x18l\x06\r\x87\xdc@\xbe.\x06\x9a\xdc\xd2_@>\xfd1i\xeaq\xcd?\xbe\xc2\x91\x97c=\xe2>>\x10\x9a\xc8d\xd4\xfd=\xbe\x9a\x07\xc1\xac\x04 =>\xa5M\xf2m\x9dH<\xbe\x9c\xc7\xbdCow;>2M\x07(L\xac:\xbe\xb6\xdf\x14i\x07\xe79>\x1c?\xb9\x9fu\'9\xbeS?\xc7\xa5lm8>\x83\xbd\xcb\x8c\xc3\xb87\xbe`&\x0c\x95R\t7>Y\x95\xc7$\xf3^6\xbe`\xa5\xb8\xbf\x7f\xb95>g\x14\xd6\xfe\xd3\x185\xbe\xd1dP\x88\xcc|4>\xf6\xb7\xca\x07G\xe53\xbe\x92.\xcd&"R3>Q(o\x85=\xc32\xbe\x01\xc47\xb3y82>\x94\x0c3(\xb8\xb11\xbe9M:>\xdb.1>G\x14n*\xc6\xaf0\xbe\x90v\xe0\xf6\\40>\x19Z\xdc\xf8\x08y/\xbe\xcca\x8a\xb9D\x90.>\xd1\x0b/\xf99\xae-\xbe\xbb\xdb\xf1\xfb\xb6\xd2,>i\x8f\xccu\x8b\xfd+\xbe\x9b\xca\xea\x7f\x88.+>\x16XX\x8e\x80e*\xbe0\xbc\xfbeG\xa2)>\xb5\xe5\xda\x12\xb2\xe4(\xbe\xf8\xd8\xa7\xde\x96,(>-?\x93G\xcdy\'\xbe\xf8\xd4b\xf7-\xcc&>\xf1\xc2\xc9\xba\x92#&\xbe\x8f\xf9\x00y\xd6\x7f%>\\\xb6\x9d+\xd5\xe0$\xbe/f\xa4\xd6kF$>\xc1%\xd6\x80x\xb0#\xbe\x0807,\xda\x1e#>H\x95\xcc\xcep\x91"\xbe\xba\xa1\x8fK\x1d\x08">If\x95k\xc1\x82!\xbe\x10\xe2h\xd7?\x01!>|V\x96\x10|\x83 \xbe\x81ZfkZ\t 
>\xd6\x96\x8e\x11\x80%\x1f\xbe\xaf\x88\xc7\xa0%?\x1e>\xb4\x9e\xc9\xd5r_\x1d\xbev\x1a\xbdx6\x86\x1c>g3\xcc\xbd@\xb3\x1b\xbe\xe8\xf5\x9e:c\xe6\x1a>{\xe8$\xdcp\x1f\x1a\xbeG8\xaa\xdc=^\x19>\x8f>6\xba\x9f\xa2\x18\xbeG@1-m\xec\x17>\x18YO\x1f~;\x17\xbe\xf9\x91\xbe\xa2\xab\x8f\x16>\xff2\x96\xe9\xcf\xe8\x15\xbe\xf7m\x85=\xc6F\x15>\x05\x8d\xbf\xf7j\xa9\x14\xbe\x80\xde#y\x9b\x10\x14>\xe5\xa3\x9f"6|\x13\xbepW\xc8M\x1a\xec\x12>"\xa7\xacE(`\x12\xbe\xf8\x90\xda?A\xd8\x11>\x1c\x17\x99UGT\x11\xbe\x8d\x0eT}\x1d\xd4\x10>\xdb\x958\x84\xa7W\x10\xbe\xe0\x97\x01\x10\x94\xbd\x0f>\x10\xd3\xda\xe1\xd4\xd2\x0e\xbe\xcf\xd3\xd7\xd7\xdd\xee\r>\x97\xf2\xcf\xc9|\x11\r\xbe5\xd2\x8d\x02\x81:\x0c>\x02\xe6\x175\xbbi\x0b\xbeB:Hr\xfd\x9e\n>\x824\xb1\x1e\x1b\xda\t\xbe\xc3\x12\xcd\xe8\xe8\x1a\t>\xb2\x00v\xbf<a\x08\xbe\xf9\xa9\xa4\xc8\xed\xac\x07>c@sX\xd4\xfd\x06\xbe\x89\xfbb\xe8\xc9S\x06>|&\xe2\x0e\xa9\xae\x05\xbe\xe6\xdd\x10wM\x0e\x05>\x14\xaf\xc2\xd8\x93r\x04\xbe\x8eU\xbb\xf0Y\xdb\x03>\x10\xe2$y~H\x03\xbeR\xa2="\xe1\xb9\x02>C-<\x8bb/\x02\xbe=\x03h;\xe4\xa8\x01>a=e\x9bH&\x01\xbe}\xd2\xb1\xeer\xa7\x00>\xc1\x02SMG,\x00\xbeL\x0cc;Ui\xff=lGF\x1d\x05\x81\xfe\xbd\xa6\xc3@#k\x9f\xfd=\xc0\x12J\xaaU\xc4\xfc\xbd3~t~\x94\xef\xfb=\x15\xffQ\xd0\xf8 \xfb\xbd\xc3\xa5\xa7*UX\xfa=_-mh}\x95\xf9\xbd\xa8\x87\x16\xabF\xd8\xf8=!>%Q\x87 \xf8\xbdY\x95\xff\xec\x16n\xf7=8n\x0b<\xce\xc0\xf6\xbd\x92\xf0\n\x1e\x87\x18\xf6=-\x18\xb9\x8c\x1cu\xf5\xbd\x03L\xa4\x93j\xd6\xf4=\xb24EHN<\xf4\xbd\xb7\x14P\xc2\xa5\xa6\xf3=\x8d\xf1>\x14P\x15\xf3\xbd\xb3\xe9\x12D-\x88\xf2=U\x1fKD\x1e\xff\xf1\xbd\x82\xab\x0f\xed\x04z\xf1=\xec\x18\x8f\xf5\xc3\xf8\xf0\xbd\xa8\xf0\x8c\xed>{\xf0=\xe4\xed\x1f7Z\x01\xf0\xbdf\xf0<\x01\xf6\x15\xef=o\x1cp}\x0e0\xee\xbd\x85\xa8tM\xcbP\xed=S\x08\x01R\xfbw\xec\xbd\xd8\t\x18\xd7n\xa5\xeb=p\xf3\x89\x89\xf7\xd8\xea\xbd\xabA\xc3lh\x12\xea=\x82\xc6\xe6\xd0\x95Q\xe9\xbd\xc4\xfc0IU\x96\xe8=\x1dr\xa2\xa2}\xe0\xe7\xbd\t:\xef\xda\xe6/\xe7=\x19k\xb1\x17j\x84\xe6\xbd\xba\xb6\xdc\x9d\xe1\xdd\xe5=\x1b:q\xc9(<\xe5\xbdW\xb4k\x05\x1c\x9f\xe4=B\\\xf1\xc3\x98\x06\xe4\xbdS\x9c\xb5v}r\xe3=\xf3\x08\x98\x87\xa9\xe2\xe2\xbd&\xf1xQ\xfdV\xe2=\x8d\xf5B\x19Z\xcf\xe1\xbd\xfc\x1d(\x07\xa2K\xe1=i\xf0\x10 \xb8\xcb\xe0\xbd\x96\x17<?\x80O\xe0=\x99d\x1a \xbe\xad\xdf\xbd\x9d\xdc\x12\x10t\xc3\xde=\xcb8\x01\xc2\xee\xdf\xdd\xbd\x01\xe9\xc2&\xfc\x02\xdd=M\x95o\xa1k,\xdc\xbd\t\xfd\xa6\xfc\r\\\xdb=^\xf0-`\xb5\x91\xda\xbd\r\x1c\xd8F5\xcd\xd9=\xe1n\xbctb\x0e\xd9\xbd/\xf2\xb1\xed\x12U\xd8=\xe0\xfe\x12\xec\x1d\xa1\xd7\xbdH\xca\xc4\xd7[\xf2\xd6=\x87Y\x81=\xa6H\xd6\xbd\x89\x03b\xc6\xd7\xa3\xd5=\x94\xd0\xa8/\xcc\x03\xd5\xbd\xb3M\xc7B`h\xd4=\x1a\xfd\xa1\xcdq\xd1\xd3\xbd\xd6\xcf\x10\x9b\xdf>\xd3=\x0f\xf2\x9fk\x89\xb0\xd2\xbdU\xc8\x9a\xeeO&\xd2=\xb75z\xbb\x14\xa0\xd1\xbd\xe7\x8d\xf2K\xba\x1d\xd1=_\x7f/\xf7#\x9f\xd0\xbd\xe8m\x8a\xef5$\xd0=\x0c\xa7\x82\x8b\xaaY\xcf\xbd]\xa6k\xf1\xcfq\xce=\x04\xff\x13D\xaa\x90\xcd\xbd[\x18\x18\xcb\n\xb6\xcc=@\xa2"\xf7\xc7\xe1\xcb\xbd7z\xdcE\xc2\x13\xcb=\x0c\xdd\xd1\xab\xefK\xca\xbd\xeb\xfc\\$v\x8a\xc9=\x87#\xc4o\xe9\xcf\xc8\xbdr\xef\x9e\x85\xda\x1d\xc8=\x97\xf7\\\xc8$x\xc7\xbd@M\x8c\xeb\xf3\xe7\xc6=\xb2#^\\\xc9\x82\xc6\xbd\x06\x95Y\xbc\xcaz\xc6=\xe8\',\x0b\xa1D\xc7\xbd\xc50\'um\xef\xc9=\xdb\x03\xf7\x07\x88x\xd0\xbd\xcd\xc4\x10\x87J\x00\xd9=') elif order == 1: return 
N.frombuffer(b'/Y\x98\x1f\xba$\xfe\xb9&\x1d\x87\x7f\xb0\x94O:{\xd7\x0e#^]>:\x9co\xdb\xc8\xaa\xd2P:\xee\x10\xb8\x074NP:QQ\x16B\x88\xcbU:ce\x85\xdc\xb8@Z:\xae\xcdG\x08\xa6\x89_:\x18\x0f\x8e\xd6\x0e\xcbc:\xeay,\x9f\xfatg:u\xf77D\xe4\x82m:\xb3d/Sj\x8bq:\x1fFR\x1cK\xeau:klMswUz:F\xdf\xca\xff\x1d;\x80:\x91F\x9c\x9bJ\xcb\x83:\xed%L}\xa4\x08\x88:\x0b\xb5\xdb\\\x0f\xb9\x8d:!M\n\x97\xa6\xd5\x91:\xbc \xbe\x7f\x11?\x96:\xb4(\xe5x\x03\x8f\x9a:\xcd\x9d\x81+0\x99\xa0:s?t\t\x99\xd4\xa3:\x89s\x9b\xb9\xec\xb5\xa8:_\xbc\xd8W\x0c\xac\xad:\xd8\x85\xb0OJ^\xb2:\xd7\x056Zx6\xb6:\x03\xdbP\xa4\x10P\xbb:\xff\xde\xb9g&\x9d\xc0:\xbc\xf8b\xa7\xfcT\xc4:\xc9\xbb\xd8\xf8u\xd1\xc8:\x13g)\xe5\xf1N\xce:\xa37\xa2h\xcb\x84\xd2:\x19\xf4\xd91\x01\x9b\xd6:\x1a\xeb\xd5\r\x04\xa0\xdb:NI\xea\xcc\x19\xdd\xe0:&\xe5\xc9\x08u\x9a\xe4:G~\x90\xd5\x06)\xe9:\x19\x90(a\xf6\xbb\xee:RQJ\xf0\xae\xc4\xf2:r\x80\x89\x8e\xa3\xec\xf6:\xd7kO}\xd8\xff\xfb:\x94B\xdc\xf2x\x19\x01;RN\x16X\x93\xe2\x04;\xec\xffDOs\x82\t;\xa1e\x93l\x1c(\x0f;\xc2I\xf5=+\x07\x13;+\x9a\xd40t=\x17;ev2J\xf3b\x1c;:\xa3\x9a\xb0\xc4U!;\x966\x80\x95\x7f,%;\x07)\x94\xf7e\xdc);\x12h\x82\xe4c\x96/;\x81\x91\xb2\x01DJ3;\xe1T\xb1~\xb4\x8f7;)L\xc2\xaa\x0f\xc7<;\x0f\xdf#R\x1b\x93A;}\xc8\xc0f2wE;\xfc&\xcd\x97\xdd7J;\xeb\x14g\xd0\xf0\x02P;\xdb\xea\xdauu\x8eS;\xbe\xb4t\x96\xe7\xe2W;\x1b\x06\xc8\xb0\xc1,];\xf0\x80\x9b\xd7.\xd1a;#\x10k\xfb\t\xc3e;\xde2\xc7\xab{\x94j;\x03\xb3#\x18\x82;p;\x1d\xb4`\xf6\x8b\xd3s;>\xbe\xc8^J7x;\x8b\xa5\xe6c\xd3\x93};\x8b\x9e\xb0\x91 \x10\x82;\xbey\xf3(\xeb\x0f\x86;\xd2f\xf0\x08c\xf2\x8a;M\xb4m7\xdat\x90;\xfbY9}\x97\x19\x94;\xdeD\x96c\xd6\x8c\x98;\xb0\xa6C\x18R\xfc\x9d;\x07\xef\x9d\x1d\xf0O\xa2;h\xd2\x15\xc1\xdc]\xa6;\x7f\xe7\x8f\xe8\x94Q\xab;\x00\xc3\xaf\xcb\xfd\xae\xb0;7\xd8/U\x99`\xb4;_2\xb5\x06\x92\xe3\xb8;\xa9\xfb\x1dM@f\xbe;XP\xef\x0f\xa2\x90\xc2;\xb0"{\x92\xe0\xac\xc6;\x80{\xedl\x18\xb2\xcb;\x1a9 \xf9\xed\xe9\xd0;\x12\xacv\xfa\x96\xa8\xd4;q\x96\x14\xf0~;\xd9;\xbd\x90\x8e\x1b\xa6\xd1\xde;\xe9\x91Y\xbb7\xd2\xe2;t\xebk\x8d\xfc\xfc\xe6;>C\xd3\xa6\xef\x13\xec;\xa1\x9dH$\xaf%\xf1;iG"\xff\x91\xf1\xf4;\'\x9c.\xa7\xa3\x94\xf9;XV\x90\xde\x85>\xff;TMg\x00\xb6\x14\x03<bM.l2N\x07<V\x14\x04\xec!w\x0c<\xa9I=\x8bBb\x11<s\x7f\xf1\xeb\x8f;\x15<t\xaf\xd6\xf1\x01\xef\x19<\x10\x0f5\xf2\xe7\xac\x1f<P\x8d- \x1eX#<\xcdlu\x82\x88\xa0\'<\x12N\xf7\xfb\xb0\xdb,<\xde&2\xfb\xac\x9f1<5\xb8\xf3\xf2\x91\x865<\x8b\x96\xdb\x1d\xa1J:<\xc0\x14B\xf8f\x0e@<h\x83U\xabu\x9cC<s\xe7\xa3\xdb\xff\xf3G<\x07-_U\xa5AM<\x0b0\xf6\x1b\xef\xddQ<\xfb=\xcb\x92\x9e\xd2U<\x90\x15\xdb\xf1\x81\xa7Z<\xd8`\xdb\xe5 G`<\x93\xd65\x0c\xbd\xe1c<\x7f\xb6\xef\x17\xa0Hh<\xbe1\xf1S\xff\xa8m<\x91:\xf4\xc7\x0e\x1dr<\xde\xcc\x9d\xda\xb5\x1fv<\x1fkOl\xad\x05{<p]\xf4\xa2\xa1\x80\x80<\xe2Lk-\xfb\'\x84<\xc5\xf8I\xc6h\x9e\x88<7\xdfE\x9d\xc9\x11\x8e<j*\xe9u\x0b]\x92<L\xa7\x0f\xfe\xdfm\x96<3\xde\xael"e\x9b<\xf3Bd\x82\xef\xba\xa0<\x93\xc4q\xf5.o\xa4<""!\xa9c\xf5\xa8<m\x05t$\x02|\xae<\xc5\xacu\xaf\xec\x9d\xb2<\xfe\xe4\x86"\x1b\xbd\xb6<.v\'\x9a\xec\xc5\xbb<\xb2\xc2g\xe0\x08\xf6\xc0<\xab\xba5ha\xb7\xc4<Y\x03%\xe6\x8dM\xc9<t\xda|\xdf\xb6\xe7\xce<c#q\x01\xb0\xdf\xd2<\xa8\x06\x9a\x19r\r\xd7<\xc7\xf6\xa2\xca\x07(\xdc<\x88$\xf4 \xf61\xe1<5\x8e7\x03\x8f\x00\xe5<r\xaf\xae\x83\xf4\xa6\xe9<\xc2\x91n\xee\xe1T\xef<\x8a\xa3\xe1\x89_"\xf3<dK\x01\x00\xe0^\xf7<\x10?\xe7\xb8\x83\x8b\xfc<\xcc\x0328\xb3n\x01=\xfbx\x0e\x03\xc4J\x05=\xc4\xff)\xd6\x90\x01\n=\x8e 
\xc4^\x96\xc3\x0f=\xe4/\x99\xce\xf5e\x13=bJ\x92\xads\xb1\x17=O\x80\xbamW\xf0\x1c=\'\xa7t\xb8K\xac!=\x1b\x9d\xa6\x16\xf9\x95%=\\_\x7f\xebt]*=Mt\x9a#\xe4\x190=Wp\x8d\xe7\x80\xaa3=\xc0\x83\xf1u#\x058=\x00?\x8e\xed\x98V==\xde\xbf\xc8\xca\xb7\xeaA=\xb54`s?\xe2E=\xc8\x07\xa4\x14\x94\xbaJ=\xd2q\x17I\xc9RP=r\xb9l\x96\xf6\xefS=L$\xd4h\x04ZX=O\xe5/\xb37\xbe]=n_v\xec\x07*b=\xf1\xcc\x89\xcb\x89/f=\xbb\x91\xf1&\x08\x19k=\'\xd7\xb9\xeco\x8cp=p\xbeO\x1ak6t=\x8a\xb96U\x05\xb0x=M~\xe9~S\'~=\xfdd\x90Q.j\x82=\x0f\xa1\xa4\x07\xf1}\x86=\x82\x19\xe5\x02\xbbx\x8b=\x89\x89\x04\x9b\xeb\xc6\x90=4\xack\xba\xcc}\x94=\xceN\xee\xedD\x07\x99=\x17\x86$\xf2\xcf\x91\x9e=\xa8D2\x17C\xab\xa2=\xceB\xf7v^\xcd\xa6=\xa9\x00F\x8e\xd2\xd9\xab=\x06\xfb@1*\x02\xb1=M\xefcA9\xc6\xb4=\x999+:\xa6_\xb9=[S\xec\xe7\xdb\xfd\xbe=\x0fl\x99\x1c/\xed\xc2=OS\x8f\xf5\xf6\x1d\xc7=\x80\x0eD\xe1)<\xcc=aE\xde\xb0H>\xd1=\xbe\xc3\nA\x93\x0f\xd5=\x956\x10\xe4V\xb9\xd9=\xe0\xe0\x96uHk\xdf=,N\xa1V\x160\xe3=\xc4;&"\x95o\xe7==\xf3\xdf\x9f\xf9\x9f\xec=\x95\x1c\xcdT){\xf1=\xc9\xe2\x11Y\x07Z\xf5=\x8a\x8c7\x85\'\x14\xfa=\xd1"\xdf\xef[\xda\xff=i1\\\x0c\xd3s\x03>\x12\xd5\xb2lp\xc2\x07>\x14,\x8a\xc4\x05\x05\r>&\x00>\xd2\xf7\xb8\x11>\xe07\xc4\xcce\xa5\x15>\t^\xfa\x0c]p\x1a>\xdf\xaf\x9b6e% >\xa6@(\x9d\x9b\xb8#>\xf5\xdev}L\x16(>QiT\x19\xa4k->\xdc\x9e\xb84\x84\xf71>r\xf80M\xf2\xf15>\x06}0G\xab\xcd:>H\x14|b\x7f^@>*\xcc8\x803\xfeC>\x9a\xb1\xb7\xa8}kH>\xff\xd1\x19vt\xd3M>C\xf5\x05\x0e\x117R>6ro~`?V>\xc6\xe3}P{,[>\r\xd1f\xda?\x98`>g\xf1\x90\xb3\xedDd>G\xb3\x14\xa9\xa3\xc1h>\xd8\x10\xe5\xea\xf9<n>\x8a\x80\xaa\xf0Qwr>hI\x00\xde\x17\x8ev>\xc3=\xed\xcfS\x8c{>\xe4|\x9dU\xf8\xd2\x80>I\tN\xe0i\x8c\x84>Ok\xe3\x86?\x19\x89>+\x86\xeby\x9b\xa7\x8e>k\xdc6\xb7\xac\xb8\x92>\xca\x9cy\xe9\x9e\xdd\x96>\xe0\xc4\xb0\x84\xd5\xed\x9b>\xdc>\x83HH\x0e\xa1>\xb7\xf1~\xce&\xd5\xa4>\xb6\xa6\xe7\xbd\xb7q\xa9>\xe5)1\xf8 \x14\xaf>\xab>\'7\xa7\xfa\xb2>\x97/\xe8\xef\x92.\xb7>K1\xc1\xbc=P\xbc>|\xeeqT\xabJ\xc1>T\xe65\x0e\x89\x1e\xc5>"\xacy*\xce\xcb\xc9>L\xb2o\x8b\x91\x81\xcf>v\xdc\x92\xe8\xd8=\xd3>NU\x89\xff+\x80\xd7>\x924\x8e\xfaw\xb4\xdc>\n\x11R\x01\x80\x87\xe1>g\x16\x1a_Fi\xe5>\x0f\xe2|1|&\xea>\xfc \x98\xad\x02\xf1\xef>\xcd\xa5\x12Dj\x81\xf3>\xcak\x1dn:\xd3\xf7>\xf0A\xac%\x1e\x19\xfd>\x05-\xde\xb7^\xc5\x01?VCv\xa10\xb4\x05?\xbaO\xfaf\x97\x82\n?\xbe\xf6?Z60\x10?y\xe7\x86\x9b\xe5\xc5\x13?L<\x15\xff\xf4%\x18?\xba\\\x81\xf0\xc6~\x1d?\x84\xe8\x0f\x95\xaa\x02"?\xbd\xb0~>s\xff%?R\x01\xe2\xe9 \xdd*?t\x8b?\xd5\xb5g0?\x8a 
p\x0en\x084?8\xa3\xc9\x08Qw8?\xf6\xcb\x1dX\xd2\xdf=?;\x12\x12\x04\xc0=B?\xdb!\x0c\\cEF?\xa9\xcc\xb5\xed\xb01K?F\xaa\xd8\xb5)\x99P?\xb2^\x11\xc2\x83CT?.\x93\xcbJ\x11\xbbX?\xa9=\xe0@\x13/^?U\x0e9B\x9aib?\xa1#{\x0f\x9dvf?\xb9\xf4s\x13Hdk?O\xb2\x19\xd6\xe3\xb2p?@\xd6\x94\x19\xcfXt?\xd1Z\n\x85\x0e\xc9x?\xe5\xa5\x16\xa8m*~?,\xc3\xa7V\x06Y\x82?\x93<\x10\xf7\xe6J\x86?\xa1\x06\x17\xfa\xe1\x0f\x8b?\x03\xc4\xb1<\xf1e\x90?mn:\xed7\xd8\x93?i\xcd\x85\xdd\x01\xf5\x97?\xf5~\xb6\x9c\x0b\xdb\x9c?\xbd.(k\xf5P\xa1?\xce\xb2\x83\xf6(\xb5\xa4?\x80\xdd87\xd7\xa1\xa8?\x0f\xa3\xa1\x18\xe9!\xad?U\xd9\xbel\xa3\x16\xb1?D\xd5@b\x03\xdd\xb3?;\x1f\x83\t\xd3\xcb\xb6?Ybi\xe4v\xc4\xb9?\xdeu\x8f\x8c\xc7\x84\xbc?sE\xb8\x12\xdd\xbc\xbe?\x063\xd4\xce\xdd\xe4\xbf?n\xdc\xf9Kuh\xbf?\xc7\xfa\xb7\x9b\xf1t\xbc?\xae\xca\x18\xbe\x99Z\xb6?\x9e\xf9\x98\x8eg\xd0\xa8?\r\xfb\xbd*\xe8Lt\xbf\nXpX\xdb\xc8\xb1\xbfT\x0b\x08\xaa\xf1\x16\xc1\xbf\x85=\x8c\xe4P\xfc\xc6\xbf\xb3\x93\x13\x99\xdcH\xc7\xbf\xb0\xa0[\x87\xdb\xf8\xbe\xbfC\x9c\x96h\xc0\xb8w?\xa7\xf7\x8b\\F\xa5\xc3?N\xf5i\x89\xb8\x1b\xce?\xb7\xdbS\xb0{d\xc3?=<\xc0\x1a\xc81\xb5\xbf\xc0\x08\xd6\x9b\x97Z\xd1\xbf\xa1\xb4\x82qV\n\xc1\xbf\xbf5X\xcd\xc6\xb2\xcc?2\x80rp\xb4~\xcd?\xd8\x1a\x94RLw\xd0\xbfp\x8c\x08\x95\x14\xe3\xc2\xbf\xb3t\x10\x90\xcb\x1d\xd7?\x1f\xd1\x88\xba9,\xcf\xbfi\x1a\xef\xfb\xbf\x83\xa3?\xc9\xf8}/8.\xb5??gt\x12\x0b\xf1\xbb\xbfJ\xe6\\\xc2\xdc\xf2\xb6?T\xcc\xd8f~8\xaf\xbf\xfb\x1b\xd1\xdc\xaeZ\xa3?UB\x13a&\xfa\x96\xbf,\x8b\x80\xc4\xad\xf1\x8a?\xa7\xf7O\xe0\x15\xe8\x7f\xbf\xf9\xc4\x0cx#gs?\xb4\xff\x9at-\x8ch\xbf\xde\x877@\xd5H`?\xa5\x12EYm\xb9V\xbf\x96{\xa0\x90R\xa4P?\x05T\xc9\xdf:pI\xbf\xbcf\xb6\x18\xac#D?\x16TK&\nd@\xbf\xdd\x95xed>;?\xbdA\xe4t\x87\xfe6\xbf\x8c\xc4\xfec\xab\xa03?t\x1d\x97\x8c\x16\xe40\xbf\xec\xa3g](>-?I&\xb8\x8b\xa4k)\xbf=J;\x94,*&?\x87\xa4\xda\xe56^#\xbfA\x0b\xfd\xa9\\\xf3 
?\xeb9\\s\x07\xb4\x1d\xbf.\xc6X9\xcb\x0b\x1a?|\xf0\xba\x0b:\xda\x16\xbf\xac,\xc9\xb9\x0c\x0f\x14?\xdb4\xcf:\xa8\x9c\x11\xbf0O;j\x10\xef\x0e?\x9a\xe7\xcc\xc9\xa8+\x0b\xbf\x1b\x8f\xf8>%\xde\x07?\x86\xd9b2\xd8\xf7\x04\xbf)\xce\x87\x84\xffk\x02?J\xdcl\xb9{/\x00\xbf^\x03\xde\xa1)q\xfc>\xc0\xf3\xf2\x07\x93\xfd\xf8\xbe\x1a\x83\xf5\xc6L\xf5\xf5>\x08\xa0F\x10FK\xf3\xbe\xed@\xf8W\x08\xf4\xf0>\xd3\x94\x07\xab\x07\xcb\xed\xbe\\m\xdf\x04\xc6-\xea>\x08\x00\xa1\xc3\xc7\x00\xe7\xbe\x81\xd3TDl6\xe4>\x0f\x9fU\xf4\xba\xc2\xe1\xbe5\xc1\x80\x0b_6\xdf>\xa8\x13iB\x19m\xdb\xbe\xadJ\xd0Eh\x19\xd8>!\xb8EL\x08-\xd5\xbe\xd9\xd1\x94\xa9p\x9b\xd2>\xa6q\xe9\xff\x9dY\xd0\xbe\x01\xad\xe8\x11\xc6\xbb\xcc>\xb9Y\xd1%~?\xc9\xbeH\xe4\xf6\xfbr/\xc6>\x04\xaa\xccW\x83~\xc3\xbeQ\xf8d\xb9%!\xc1>\x86\x0b/\xdem\x1a\xbe\xbe\x15\x80\xad9\x9ds\xba>@;\xb6\x072>\xb7\xbe\xed8\xd6\xe3jl\xb4>QP\t\x921\xf2\xb1\xbe\x1eQ\x9df\xce\x89\xaf>\x92\xa5\xeftm\xb6\xab\xbemx\xf4\xc9\xd9Y\xa8>\xcc\x07\xfd&\xaae\xa5\xbe\x90\xcc&\xd34\xcd\xa2>\xe7\xc0\xe5TY\x85\xa0\xbe>\xfc\x12v\xa1\x08\x9d>I3\n_\x07\x83\x99\xbe\xbd\xa1vH\xcbj\x96>0\xb3\xd5\xf1\xa8\xb2\x93\xbe\xd5kU\x17\xf8N\x91>\xef\xd4@\xea\xf4j\x8e\xbe\xeb\x05\x8e\xb8_\xba\x8a>"i\x05=_|\x87\xbeP\xc4\x96P\r\xa3\x84>%(Xk3"\x82\xbe\xae\xd7^\x8d,\xde\x7f>\xaf\x96\xf1\xb6\x8f\x00|\xbe\x83\xf1\xb6\xed\xfd\x9ax>\xe5\xd8\xa6k\xe7\x9eu\xbe\xe1\tX\x9c\x80\xffr>\xe8I\tA\x8b\xb1p\xbe\xa2%\x0e|LVm>\x05\xd9\xc1\x83F\xc7i\xbe\x9e>\xc2\x1d\xc3\xa6f>\xcd\xbf(\x8eZ\xe7c\xbe\x88\xa7\xf0_E}a>\tQ\xfb\xccS\xbc^\xbe\xd1a0\xbf\xdf\x01[>r#\xb9\xec2\xbbW\xbe\xfe\xd1\x92\x00B\xdaT>\x95\xa9\x1b\xc5\xb5RR\xbe\x00\xb6p=\xb6\x19P>\x02\xa3"SxKL\xbev\xd4\x95Z\xd0\xdcH>\xe7\xd6\xac\xd6\xbd\xd8E\xbe\xf6\xfc\xed\xf6R2C>Ah\xa2h3\xde@\xbe\x15\x08\xc2F\xc7\xa4=>\x8aW\xd0:<\x0c:\xbeL\x04\xbaa[\xe36>5\x97D"\x99\x1c4\xbe\x9c(\xd6\x84\x0e\xac1>8\x98\xe2Z\x8c\x0e/\xbe\r\xe2\x9a\x0b\x1fJ+>q\xff\xa0\xaf\xae\xfa\'\xbe\xb6\x02i_\n\x12%>\xce\x0eh\xe2\xb9\x83"\xbeMn?:\xc8D >\xf5\xbcqS)\x97\x1c\xbe\xa3\xec!\xddR\x1f\x19>~XO\xfa.\x13\x16\xbe\xf8,\xc8D\xade\x13>\x97\xa7#\x06S\x0b\x11\xbe\x1eL!\x03\x14\xf4\r>eq\xf8k\xeaQ\n\xbe\x0f?F\xbe\x95 \x07>U\x83\xfc$fR\x04\xbel{\xc1\xd1T\xdb\x01>\xf4\x81x\xdc\xa0a\xff\xbdl=\x19\x9d\x1f\x93\xfb=\xb3\xb5BE\xd4:\xf8\xbd{sN\xf7gJ\xf5=\x8c\x14A\x1fA\xb5\xf2\xbd\xa9e\xf0nMp\xf0=\x1e\x19=\xcf\xa4\xe3\xec\xbd\xf2\x98\xf3J\x87b\xe9=\xf9dJt<N\xe6\xbd\xeb\xc5s\xf2\x90\x99\xe3=C\x9b\x8bY\xeb8\xe1\xbd5\xe4\xed\xe14D\xde=\xf0\xf4\xca\x03S\x98\xda\xbd\x9d,:\xe5s^\xd7=\xe4T\x07\x14\xc3\x88\xd4\xbdn\xb5\xa2\x95\x19\x0b\xd2=\x11uL\x9d\x93\xb5\xcf\xbd\xccu-x\xe3\xdc\xcb=\xff\xb6Dt\xa5{\xc8\xbd\x8dd\xedW\\\x83\xc5=I\xff]\xdaL\xe7\xc2\xbd\x9fvd\x0fG\x9c\xc0=\xc1\x98\xfa\xe3\xec0\xbd\xbdh|_\x80o\xa6\xb9=\xad\x1d\x1e\xe7\xe7\x89\xb6\xbd6lIo\xff\xcd\xb3=\xee\xed\x84\xa5\xfdf\xb1\xbdU\xad\xa2\x1a,\x95\xae=Ma@\xf5w\xdf\xaa\xbd\xbb\t\xdf\x8c\xf7\x9c\xa7=\x90\xc8;p\xb1\xbf\xa4\xbd\x0b\xb5\xa0"^;\xa2=\x05\x1f\x01x3\x05\xa0\xbd>\xdd\x80\xa7l\'\x9c=\xcd\t\xb9\x07$\xbd\x98\xbdn\x9cz\x14\xe9\xbc\x95=\x90j\x19v\xde\x19\x93\xbd_\x8f\x1eS\xb6\xc8\x90=\xda\x94%\xb5\x03\x7f\x8d\xbd 
]J^\r\xeb\x89=\xbe?B\xf92\xc6\x86\xbdq\xd1\x96.\xfa\x02\x84=\x12\x12k0\x8b\x95\x81\xbd\xee\x01\xc0\xea\xfb\xe6~=\xf5\xcf#8[\'{\xbd\xf1\x81\xdbo"\xdcw=g\xa7\xa3\xbe2\xf7t\xbd\x1b\xb8\xa1\xce#lr=h)\xb1\x96\x0e0p\xbd\x13j\xe2:\xbdrl=\xef@j\xcfQ\xffh\xbd\xeb\xd1\x8f\xc4\x0f\xf7e=+%\xa1X\xf7Lc\xbdQ+\xd0t\x9c\xf5`=\x01O\xb9k\xeb\xcd]\xbd$]\xa2\xcab0Z=\x9c\xcd\xbfU\x1f\x03W\xbd\x1bU\x98\xa7\x828T=\x0b\x8d\xedC\x95\xc4Q\xbdr\xf9\xa1\x95\xa69O=\x89\xa2\x92\xc9\xfeoK\xbda2\x93M\xf6\x1bH=\x83\xfaZ\x88H/E\xbdQ\xe4\x0b\xf3k\x9dB=A\xc85Z\\[@\xbd\x12\x84\xb7G\xd7\xbe<=9\x1d\x0e\xa00B9\xbd\x18\xf6\x03\x04\xd216=8\xbd\xd2\xeb\x98\x803\xbd\x19er\xb2\xfa"1=\xf1\xe6\x896\xa6\x1d.\xbd2gs\xb0qv*=x<\x0e\xac\xae@\'\xbdLhlU\x9an$=\x17R/-\x1d\xf4!\xbd\t\xb9\xc0d.\x8d\x1f={\xc0\xff\xabd\xb9\x1b\xbd\xfak\x0e\xeat\\\x18=J\xd1\x93Z\xf4g\x15\xbd\xc0\x99\xe4\xec7\xcf\x12=5T?\xf5\x1d\x87\x10\xbd\xa9\x11\xf8\xe8\xbc\x0b\r=\xa0W9S\xc2\x85\t\xbd\xe5D\xf9r1m\x06=itW\x9d\xc4\xb4\x03\xbd\xa8\x07TM\xd2P\x01=&\xec?J6n\xfe\xbc=\xe9\xe5\xff;\xbd\xfa<Z\xe7&\xb0\xe2~\xf7\xbc\xb2\xf4*\xb7B\xa5\xf4< \x02\xd0<$$\xf2\xbcbX\xaf\xa7\x95\xe1\xef<\xb2\xf4:\xe7\x8e\x03\xec\xbc\x04\x92\x0f\x0e\xa0\x9d\xe8<\xc8G\xa9\xc67\xa1\xe5\xbc4D\xd7\x1c\x89\x01\xe3<\xa1\xfd\xb0\x9dT\xb3\xe0\xbc\xad\xec;?pY\xdd<i\xd7t\xc7\x08\xca\xd9\xbc\xec\xd6\xec\xb5/\xa9\xd6<\x02n\xaa\xde{\xe9\xd3\xbc(\x19\x1e\x8a$\x7f\xd1<\xe4\xb7i\xe1\x9d\xbf\xce\xbcK\xf6R\xae\xc3\x04\xcb<\xfdh\x92\x1a\xbd\xbd\xc7\xbc\x85$\xecO}\xdc\xc4<+\xe0\xf2\xc6\xabT\xc2\xbcO\xcb\x17Zo\x1b\xc0<\x92\xe9\x83\x88\x7fN\xbc\xbc\xec{\x1f\x87y\xdf\xb8<E\xa8\'b\x14\xdb\xb5\xbc\xd4Z>\xe7`4\xb3<;R\xab\x8c\x01\xe0\xb0\xbc\xc8\x04\xcdp\xf3\xa7\xad<\xdf\x0bJ\xe0\x05\x0f\xaa\xbc\x00]\xc0u\xce\xe5\xa6<\x1b<#%\xc0\x1e\xa4\xbcz\xb0\xde\xb0\xf2\xad\xa1<\xc6\x0b\x8d<\xdf\x11\x9f\xbc\xf9\x1eQ\xb6\nM\x9b<\x19\xd0s\xa8?\xfd\x97\xbc\xae\xf2\xd3\xa6K\x14\x95<\x8f\xadI#\xb5\x85\x92\xbcY\x8d!\xf3\x85F\x90<\xfa\x91\x97\xa28\x9a\x8c\xbc\r\xb2\x99\'\x03"\x89<iq\xd8\xc6\x8b\x15\x86\xbc\xe2\x11.\xb4\xc0g\x83<\xac\xc7\x94\xfe%\r\x81\xbc\xefq\xb5\xa9H\xf7}<q\xfbO\x86\xbbTz\xbc4m\xc6_\x0f#w<\xc7|\x00\xea\x92Tt\xbc7\xa3\x11\r>\xddq<p(6\xa2\xfcdo\xbc\xe3\xb8\xc2\x17\x13\x96k<\xb3\t\x96\x1bl=h\xbcI\x19\x1eG\xafLe<\xc09\x1f\xadA\xb7b\xbc\xdc\xa6\x1f\xd0\x0fr`< 
\xa6\xc3M\xbc\xe6\\\xbcxZ\xbf\xc6>eY<\xa5j\xd4\x92\x9fPV\xbc\x9f\x00\x86\xef\xa9\x9bS<\x88\xae+3\xc3:Q\xbc\x8d\xa3\xe4\x1brGN<RpQ\xa7+\x9bJ\xbcs\x1d\xe9%\xf4`G<\x0f]\x8e\xaa\xf5\x8aD\xbc\xd3\x18\xc6\xed\x07\rB<V<>_\xf8\xb8?\xbc\xd2<\x14\xd8\xde\xdf;<pZ\x9f:D~8\xbcD\xb6D\xc0\xa9\x855<\xd5\xf5o\xc3R\xe92\xbc\x05\x9cp%\x0e\x9e0<\x16\r\xde\xa7\x0c4-\xbc\xd9\x81\x95@.\xa9)<\xf2\x06NhQ\x8c&\xbc2X\xad\x08\x1e\xd0#<\xba=9m\xdah!\xbc\xec\xea\x8a\xfeq\x98\x1e<\xebM\xa45X\xe2\x1a\xbc\xb5k\x03~~\x9f\x17<\xc8\x97\x89\xe7\xe9\xc1\x14\xbc\xf8\xa3\x14\xa5Q=\x12<\xa7M\xfab\xea\x06\x10\xbc2\xd9\xba\x01p*\x0c<\xb3\xa8\xfb\xd0\xc9\xbf\x08\xbc\x0e\x8bg\xa6<\xbf\x05<\x03\x90\xd8\xc9\xe9\x1b\x03\xbcf\xb8\x1f,\x82\xca\x00<\xdd\x01B\xd8+\x82\xfd\xbboH\xd3z\xd3\xed\xf9;znE\xf3\xa2\xc8\xf6\xbb<\xfc\xd2x\x1e\x05\xf4;\x89\xc1\x1a\xf9l\x97\xf1\xbb\x07\x1b4\x9bJ\xea\xee;\x93\xcb\xcb4C*\xeb\xbbe6\x9c-\xb0\xde\xe7;\xf1\x98\x98/q\xf9\xe4\xbb7\xe5\r\x91\x1cn\xe2;\x9f\x988\x1e\xca1\xe0\xbb\x86\x01\x9d\xae\xc8u\xdc;4\xa8\x14\xb5\xfe\x01\xd9\xbb\x84\r,\x94i\xf9\xd5;\x04\x14\x1c&\x08O\xd3\xbb\x8cj<\x1bm\xf7\xd0;q>\xe0\xfa\x1b\xd1\xcd\xbb\xf4\x0e\x84I03\xca;u\xe1U\xc9\x95\x05\xc7\xbb\x00\x1a\xc6\x9f\xac:\xc4;\xf0[\x98\x08|\xc6\xc1\xbb}7\x14\x07\xfe<\xbf;\xde\xbf{w\xeer\xbb\xbbj&\x14\xcf\x8a\x1e\xb8;3[\x86\xed\x8c1\xb5\xbb\xe4Mz\xf4i\x9f\xb2;\xb5\xfb\xc2\x82\x1c]\xb0\xbb\xd5\xb0}\xea\xea\xc1\xac;\xc1b\xc7\xc5\xe4D\xa9\xbb\x9b\xe2\xffC24\xa6;7\xad\xbft\xaf\x82\xa3\xbb\xafb\xf2u\xd0$\xa1;}\xa95\xe9\xdf \x9e\xbb\x13U\x04]Gy\x9a;\x8c7Vt,C\x97\xbb\xe5\x19\x99\xe2\xcap\x94;%C\\\xdc\t\xf6\x91\xbb\x03\x8a\xf6v\x90\x90\x8f;\xcc\xb7f\xdd]\xbc\x8b\xbb<\x13\xaa\xe1\x11_\x88;0~\xbf5@j\x85\xbb\x87\xa7\xc7r<\xd1\x82;[!\xca\xc1\xe3\x88\x80\xbb\x0e"\xb97\xda\x0e};viz\xad~\x88y\xbb"\x17M\x93\x98ov;\x8fH\x96\xd5\xe0\xb6s\xbb>z\xc3\xb3\xacRq;r\x1erxwqn\xbb\xca\x04p\xb2\x17\xc0j;`3\xa6ce\x81g\xbbz\x80\x9drw\xa7d;\xaf\xd6g\xb9\x14&b\xbb\xee\xf2Z@\xff\xe4_;\x93D\xe3+\x90\x06\\\xbb\x7fw\xed=F\xa0X;\x98\xd3A\x91\x8e\xa3U\xbbT\xf8f\xd4\x9a\x03S;\x9b\xd4A\\*\xb5P\xbbq$\x06\xc2\xb3\\M;wn\xdd4\xf2\xccI\xbb\xc4\xc1\xce\x0c\xcb\xabF;sjel\xd3\xebC\xbb\xaf\xed\x0f!A\x81A;\xb9R\xbb+p\xc3>\xbb\x1c\x03M\xc6;\x08;;\x94\xff\n\x8d\xe5\xc07\xbbi\xa3f\xbb^\xdf4;CI\x93\x13MW2\xbbQ\x03\xa9\xe4\xd5\x1d0;\xa4JH\xe4\xdeR,\xbb\x8d\xba\xfb\x15p\xe3(;*\xe5\x9ee\xa4\xde%\xbb\x0b.\x88\x81\x8a7#;\xb72\xbe\xb1\xc3\xe2 
\xbb9M\xaa\xbd\xa6\xac\x1d;\x86\xed\xd1v\xe8\x12\x1a\xbb\xc0\xd2\x0b\xce\xe4\xe8\x16;\n{E8\x14!\x14\xbb\xd1p\xe5\xe7\x94\xaf\x11;y(\x88h\xee\x13\x0f\xbb\xc7n\x19!\x1fN\x0b;\xb4\n\'\xfd\x9a\xfd\x07\xbb]\x95\x07\x94/\x14\x05;\x9bX\x93\xa1\\\x85\x02\xbb\x99\xc5\xb0o!F\x00;\x82\xf4\x1d\xcf\xa2\x99\xfc\xbaL5"\xc1\xda!\xf9:\xb55\x81\xe1\x00\x16\xf6\xba8\x16B\x9a\xffh\xf3:\xc6]6\x11[\x0f\xf1\xba\x90Y\xa7R\xf8\xfd\xed:jYxq\x02^\xea\xbaI\xdbPo*/\xe7:\'\xed~\x1e\x9cc\xe4\xba\xdcV\xed\xc0"\xef\xe1:U\xbdpq\xf6\x8d\xdf\xba\x93\xd9v\xadU\xc3\xdb:\x8e\xa0\x0c\xd7\xb3m\xd8\xba^:\xc3\xf5f~\xd5:\xc2\x18w`\x96\xe8\xd2\xba\xdf\xe3\'\xd3\x11\xa1\xd0:\xf65S\xa2\\<\xcd\xba\xe7\xfb\x02\xebA\xaf\xc9:ox\rq\xa4\x8c\xc6\xba\x91;\x93\xdeV\xc8\xc3:\x8e\x1c\xf3\xd9\xcaW\xc1\xba\xfet\xfa\xb1vc\xbe:UDC$\xce\x9b\xba\xba\x02}\xaf\x02\xbbI\xb7:[|\x88\x9c\xc5_\xb4\xba\x1b\x16a\xaf\xcf\xd1\xb1:\x93\\\xb9\xa8!*\xaf\xba\xd3\x07=E\x00@\xab:\x97\xf0u2H\xd4\xa7\xba\xd0\x18Z\xf7\xbe\xd7\xa4:6x\xf8\xf8\xa9<\xa2\xbau\xc1\x9f-\x90\xed\x9f:\xaa~\xd0\x97\xa7\xf6\x9b\xba\x8fj\xbe\xbd\xd0\x81\x98:\xe0\xb8J\xe6\xa5~\x95\xba\x1ft\xb7C\xe1\xde\x92:[\xa1d\xd2>\x96\x90\xba\x8d&A\xdd\xa14\x8d:\x94l\xc99P\xc4\x89\xba\x97M*\xb8\xc0\xcb\x86:PW+WU<\x84\xbarN\\\xc6\xd7\x07\x82:\x9e\x89"\x00+ \x80\xba>c\x80\xaf\xeb\xee|:u\xcb\xb3v\xee\x00z\xba\xf2\xec`\x8b\xc7]w:\x9bl\xff~$\xf0t\xbaO\x15\n\x16\x1f\xa8r:f\x86/\xae*}p\xba\xf23\x9a\x1f\xa1\xdcl:@\xaf\xa9\xbd\x07\x00i\xba\xccN=W$pe:\xda\xc6\xc3:\x0e3b\xba\xb5\x9b\x96\x1df\x8c^:\x17\xa3^\xa5\x8dEY\xbai\xd2\xec\xce\x0e\x8bT:\x1f\x03\x0b\xb4\xd4cP\xbaE{a\x17\x99\xb6I:}\x80\x88\x80f\xe6C\xbaqo\x06\xc0\x03w>:\x01\x9a\xde\x1c|\x0c7\xbak8\xc4\x9a\xd2,1:8\xado~~\')\xba\xa1\xdaO\x84\x1fH":\xf9+\xb3\xf31\x8a\x1b\xbaU*(\xf6s\x90\x17:\x8d\\\x8b\xcd[\xde\x18\xba\x80]\x83\x06\xc4[\x1f:\xa7\x88~u\xb8F%\xba\xda\x91\xe2DY\xcc,:\x11S\xa9\xac\xd5\xd32\xba$H\xe1q\xbc\x977:\x0bIm;\xf8j<\xba\x93#\xa4@\x90\x93@:\x9e\x85\xb4ob\xe1B\xba ^-\xb8\x05\x1cE:\x8d\xc2\xad\xed\x8d5G\xba\xca\xaf\x1d\xbf\xc8\x12I:\xd8\xb1n\xcf\x01\x94J\xba\xe6c\xea\x92\xe9\xa1K:\xb4\x13M)\x827L\xba2\x10\xb8\xd0\x08eL:P\xc1\x12\x8d\x0fKL\xba\t\xac\xbd\x89\xd3\x10L:R\xd9\xea\xde\xc9\xdbK\xba;x\x13:\xbe\xccK:\xee\x9f\x05Ko\x01L\xbaO\xd5+t\xcc\x94L:') else: raise NotImplementedError("%d-order Hankel transform not implemented" % order)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_weighted_hash(cls, word):\n\n hash_value = 0\n for char in word:\n hash_value += cls.alpha_lookup[char.lower()]\n return hash_value", "def calc_weight(str,dict):\n for i,c in enumerate(str):\n dict[c] += 10**(len(str)-(i+1))", "def elementary_weight_str(tree,style='python'):\n from nodepy.strmanip import mysimp\n from nodepy.rooted_trees import Dmap_str\n ewstr='dot(b,'+tree.Gprod_str(RKeta_str,Dmap_str)+')'\n ewstr=ewstr.replace('1*','')\n ewstr=mysimp(ewstr)\n if style=='matlab': raise Exception(\"Use elementary_weight_str_matlab() instead.\")\n if style=='fortran': ewstr=python_to_fortran(ewstr)\n return ewstr", "def hamming_weight(num):\n\n return bin(num).count(\"1\");", "def get_weights(self):", "def wer(r, h):\n\n # Split string into list of words\n r = r.split()\n h = h.split()\n\n # Dynamic programming matrix\n d = [[0 for j in range(len(h)+1)] for i in range(len(r)+1)]\n for i in range(len(r) + 1):\n for j in range(len(h) + 1):\n if i == 0:\n d[0][j] = j\n elif j == 0:\n d[i][0] = i\n\n # computation of values in matrix\n for i in range(1, len(r) + 1):\n for j in range(1, len(h) + 1):\n if r[i - 1] == h[j - 1]:\n d[i][j] = d[i - 1][j - 1]\n else:\n substitution = d[i - 1][j - 1] + 1\n insertion = d[i][j - 1] + 1\n deletion = d[i - 1][j] + 1\n d[i][j] = min(substitution, insertion, deletion)\n\n # returning as percentage\n return round(d[len(r)][len(h)] * 100/float(len(r)), 2)", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! 
sequence is too short\")", "def label_smoothing_regularization(self, chars_labels, weight=0.1):\n one_hot_labels = tf.one_hot(\n chars_labels, depth=self.num_char_classes, axis=-1)\n pos_weight = 1.0 - weight\n neg_weight = weight / self.num_char_classes\n return one_hot_labels * pos_weight + neg_weight", "def preprocessing():\n english_dictionary = nltk.corpus.brown.words()\n slang_vocab = pickle.load(open('vocab_pattern_match_with_freq.pkl', 'rb'))\n\n normalize_english_dict = len(english_dictionary)\n normalize_slang_vocab = 0\n for w, n in slang_vocab.items():\n normalize_slang_vocab += n\n\n words = {}\n for w, n in Counter(english_dictionary).items():\n words[w] = n/normalize_english_dict\n \n for w, n in slang_vocab.items():\n if w not in words:\n words[w] = 0.\n words[w] += n/normalize_slang_vocab\n\n words_by_freq = [w for w,_ in sorted(words.items(), key=lambda x: x[1], reverse=True)]\n\n # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\n #words = open(\"words_by_frequency.txt\").read().split()\n wordcost = dict((k, log((i+1)*log(len(words_by_freq)))) for i,k in enumerate(words_by_freq))\n maxword = max(len(x) for x in words_by_freq)\n return wordcost,maxword", "def invert_input(inverter):\n\tnum_lines = 0\n\tnum_found = 0\n\tS = min(get_compressed_size(RAINBOW_TABLES), sum([getsize(table) for table in RAINBOW_TABLES]))\n\twith open(INPUT_FILE, 'r') as f:\n\t\tfor line in f:\n\t\t\tnum_lines += 1\n\t\t\thash = int(''.join(line.split()), 16)\n\t\t\tword = inverter.invert(hash)\n\t\t\tif word != '0':\n\t\t\t\tnum_found += 1\n\t\t\tprint word.upper()\n\t\t\tif IS_DEBUG:\n\t\t\t\tF = ((num_lines * (1<<23))/inverter.get_sha1_calls())\n\t\t\t\tprint \"[+] Chain length: %d\" % CHAIN_LENGTH\n\t\t\t\tprint \"[+] Number of reduce functions: %d\" % NUM_REDUCE_FNS\n\t\t\t\tprint \"[+] Number of tables: %d\" % NUM_TABLES\n\t\t\t\tprint \"[+] Tail hash length: %d bytes\" % TAIL_HASH_LEN\n\t\t\t\tprint \"[+] Size of rainbow table (S): %d bytes\" % S\n\t\t\t\tprint \"[+] Speedup factor (F): %d\" % F\n\t\t\t\tprint \"[+] Grade: %d\" % get_grade(F, S)\n\n\t# Print test results\n\tprint \"The total number of words found is: %d\" % num_found\n\tt = inverter.get_sha1_calls()\n\tC = (num_found/float(num_lines) * 100)\n\tF = ((5000 * 2**23)/inverter.get_sha1_calls())\n\tprint \"[+] Number of SHA1 calls (t): %d\" % t\n\tprint \"[+] Percentage of words correctly inverted (C): %.2f%%\" % C\n\tprint \"[+] Size of rainbow table (S): %d bytes\" % S\n\tprint \"[+] Speedup factor (F): %d\" % F\n\tprint \"[+] Number of void bytes: %d\" % inverter.get_num_of_void_bytes()\n\tprint \"[+] Chain length: %d\" % CHAIN_LENGTH\n\tprint \"[+] Number of reduce functions: %d\" % NUM_REDUCE_FNS\n\tprint \"[+] Number of tables: %d\" % NUM_TABLES\n\tprint \"[+] Tail hash length: %d bytes\" % TAIL_HASH_LEN\n\tprint \"[+] Grade: %d\" % get_grade(F, S)", "def calc_weight(sequence):\r\n return len(sequence) * AVG_WEIGHT", "def entropy(message):\n # Should the import be here or should it be at the top of the page?\n freq_dict = letter_freq(message)\n length_message = len(message)\n bit_entropy = 0\n for occurrences in freq_dict.values():\n frequency = occurrences / length_message\n bit_entropy = bit_entropy - frequency * log2(frequency)\n return bit_entropy", "def train():\n k = len(accepted_chars)\n enc = \"UTF-8\"\n # Assume we have seen 10 of each character pair. This acts as a kind of\n # prior or smoothing factor. 
This way, if we see a character transition\n # live that we've never observed in the past, we won't assume the entire\n # string has 0 probability.\n counts = [[10 for i in xrange(k)] for i in xrange(k)]\n \n bigrams = filter_chars(accepted_chars, ngrams(2, counter(counts)))\n for c in open('big.txt').read().decode(enc): bigrams.send(c)\n \n # Normalize the counts so that they become log probabilities. \n # We use log probabilities rather than straight probabilities to avoid\n # numeric underflow issues with long texts.\n # This contains a justification:\n # http://squarecog.wordpress.com/2009/01/10/dealing-with-underflow-in-joint-probability-calculations/\n for row in counts:\n s = float(sum(row))\n for j in xrange(len(row)):\n row[j] = math.log(row[j] / s)\n\n # Find the probability of generating a few arbitrarily choosen good and\n # bad phrases.\n good_probs = [avg_transition_prob(line, counts) \\\n for line in open('good.txt').read().decode(enc).split('\\n') if line]\n bad_probs = [avg_transition_prob(line, counts) \\\n for line in open('bad.txt').read().decode(enc).split('\\n') if line]\n # Assert that we actually are capable of detecting the junk.\n assert min(good_probs) > max(bad_probs)\n\n # And pick a threshold halfway between the worst good and best bad inputs.\n thresh = (min(good_probs) + max(bad_probs)) / 2\n pickle.dump({'mat': counts, 'thresh': thresh}, open('gib_model.pki', 'wb'))", "def entropy(message):\n n = len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h", "def estimate_entropy(pwlen):\n return pwlen * math.log(len(frozenset(default_charlist)), 2)", "def entropy(message):\n message = letter_freq(message)\n n = sum(message.values())\n h = 0\n for n_i in message.values():\n p_i = n_i / n\n h += -p_i * log2(p_i)\n return h", "def weighted_l1(X, ebunch):\n edge_embeds = np.zeros((len(ebunch), len(X[list(X.keys())[0]])))\n i = 0\n for edge in ebunch:\n edge_embeds[i] = np.abs(X[str(edge[0])] - X[str(edge[1])])\n i += 1\n return edge_embeds", "def lossFun(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n\n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state\n ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars\n ps[t] = np.exp(ys[t]-np.max(ys[t])) / np.sum(np.exp(ys[t]-np.max(ys[t]))) # probabilities for next chars\n loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)\n\n assert_array_equal(van.window_step,t)\n assert_array_equal(van.state[t-1],hs[t-1].T[0])\n assert_array_equal(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]]),hs[t].T[0])\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n assert_array_equal(vantr.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(vantr.outputnet[t].net.elements[1].W.get(),by.T[0])\n\n #\n # #Neg\n # 
assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])\n # assert_array_almost_equal(van.outputnet[t].forward(hs[t].T[0]),ps[t].T[0])\n # assert_array_almost_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]])),ps[t].T[0])\n # assert_array_almost_equal(van.forward(xs[t].T[0]),ps[t].T[0])\n #\n # Cross\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(van.outputnet[t].net.elements[1].W.get(),by.T[0])\n assert_array_equal(van.outputnet[t].forward(hs[t].T[0]),ys[t].T[0])\n assert_array_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]])),ys[t].T[0])\n assert_array_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],van.state[t-1]])),ys[t].T[0])\n assert_array_equal(van.forward(xs[t].T[0]),ys[t].T[0])\n assert_array_equal(soft.forward(ys[t].T[0]),ps[t].T[0])\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n\n for t in reversed(xrange(len(inputs))):\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n\n #\n # #Neg\n # van.backward(negLog.dJdy_gradient(ps[t].T[0],to_one_hot_vect(targets[t],vocab_size)),opt)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].x,hs[t].T[0])\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].dW,dWhy)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].dW,dby.T[0])\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])\n #\n #Cross\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].x,hs[t].T[0])\n assert_array_equal(van.outputnet[t].net.forward(hs[t].T[0]),ys[t].T[0])\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0])),ps[t].T[0])\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0]))-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n\n err = cross.dJdy_gradient(ys[t].T[0],to_one_hot_vect(targets[t],vocab_size))\n\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0]))-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n assert_array_equal(ps[t].T[0]-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n assert_array_equal(err,dy.T[0])\n\n van.backward(err,opt)\n\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get_dW(),dWhy)\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(van.outputnet[t].net.elements[1].W.get_dW(),dby.T[0])\n assert_array_almost_equal(van.outputnet[t].net.elements[1].W.get(),by.T[0])\n #\n\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get_dW(),dWxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get_dW(),dWhh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get_dW(),dbh.T[0])\n 
assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n assert_array_equal(van.dJdh[t],dhnext.T[0])\n\n dhnext = np.dot(Whh.T, dhraw)\n\n opt.update_model()\n trainer.learn_window(vantr,zip(to_hot_vect(inputs,vocab_size),to_hot_vect(targets,vocab_size)),crosstr,opttr)\n\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def getLossWeights(weights_dict, n):\n\n w = torch.ones(2*n+6,)\n w[0] *= weights_dict['r1']\n w[1] *= weights_dict['r2']\n w[2] *= weights_dict['e1_r']\n w[3] *= weights_dict['e1_i']\n w[4] *= weights_dict['e3_r']\n w[5] *= weights_dict['e3_i']\n w[6:6+n] *= weights_dict['e2_r']\n w[6+n:] *= weights_dict['e2_i']\n return w", "def _get_label_weight(opts, data):\n experiments = data[\"exp_names\"].value\n label_mat = numpy.zeros((experiments.size, 7))\n vid_lengths = numpy.zeros((experiments.size,))\n for i in range(experiments.size):\n exp_key = experiments[i]\n exp = data[\"exps\"][exp_key]\n for j in range(6):\n # label_counts[j] += exp[\"org_labels\"].value[:, j].sum()\n label_mat[i, j] = exp[\"org_labels\"].value[:, j].sum()\n # label_counts[-1] +=\\\n # exp[\"org_labels\"].shape[0] - exp[\"org_labels\"].value.sum()\n label_mat[i, -1] =\\\n exp[\"org_labels\"].shape[0] - exp[\"org_labels\"].value.sum()\n\n # vid_lengths[i] = exp[\"hoghof\"].shape[0]\n vid_lengths[i] = exp[\"org_labels\"].shape[0]\n\n # label_counts = label_mat.sum(axis=0)\n label_weight = 1.0 / numpy.mean(label_mat, axis=0)\n # label_weight[-2] = label_weight[-2] * 10\n if opts[\"flags\"].reweight is False:\n label_weight = [5, 5, 5, 5, 5, 5, .01]\n # import pdb; pdb.set_trace()\n return label_weight", "def weighted_l2(X, ebunch):\n edge_embeds = np.zeros((len(ebunch), len(X[list(X.keys())[0]])))\n i = 0\n for edge in ebunch:\n edge_embeds[i] = np.power(X[str(edge[0])] - X[str(edge[1])], 2)\n i += 1\n return edge_embeds", "def weight(self):", "def compute_wl_subtree_kernel(graphs, h):\n for G in graphs:\n for node in G.nodes():\n G.node[node]['label'] = G.degree(node)\n\n start_time = time.time()\n\n labels = {}\n label_lookup = {}\n label_counter = 0\n\n N = len(graphs)\n\n orig_graph_map = {it: {i: defaultdict(lambda: 0) for i in range(N)} for it in range(-1, h)}\n\n # initial labeling\n ind = 0\n for G in graphs:\n labels[ind] = np.zeros(G.number_of_nodes(), dtype=np.int32)\n node2index = {}\n for node in G.nodes():\n node2index[node] = len(node2index)\n\n for node in G.nodes():\n label = G.node[node]['label']\n if not label_lookup.has_key(label):\n label_lookup[label] = len(label_lookup)\n\n labels[ind][node2index[node]] = label_lookup[label]\n orig_graph_map[-1][ind][label] = orig_graph_map[-1][ind].get(label, 0) + 1\n\n ind += 1\n\n compressed_labels = copy.deepcopy(labels)\n\n # WL iterations\n for it in range(h):\n unique_labels_per_h = set()\n label_lookup = {}\n ind = 0\n for G in graphs:\n node2index = {}\n for node in G.nodes():\n node2index[node] = len(node2index)\n\n for node in G.nodes():\n node_label = tuple([labels[ind][node2index[node]]])\n neighbors = G.neighbors(node)\n if len(neighbors) > 0:\n neighbors_label = tuple([labels[ind][node2index[neigh]] for neigh in neighbors])\n node_label = str(node_label) + \"-\" + str(sorted(neighbors_label))\n if 
not label_lookup.has_key(node_label):\n label_lookup[node_label] = len(label_lookup)\n\n compressed_labels[ind][node2index[node]] = label_lookup[node_label]\n orig_graph_map[it][ind][node_label] = orig_graph_map[it][ind].get(node_label, 0) + 1\n\n ind += 1\n\n print \"Number of compressed labels at iteration %s: %s\" % (it, len(label_lookup))\n labels = copy.deepcopy(compressed_labels)\n\n K = np.zeros((N, N))\n\n for it in range(-1, h):\n for i in range(N):\n for j in range(N):\n common_keys = set(orig_graph_map[it][i].keys()) & set(orig_graph_map[it][j].keys())\n K[i][j] += sum([orig_graph_map[it][i].get(k, 0) * orig_graph_map[it][j].get(k, 0) for k in common_keys])\n\n end_time = time.time()\n print \"Total time for WL subtree kernel: \", (end_time - start_time)\n\n return K", "def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]", "def get_english_score(input_bytes):\n character_frequencies = {\n 'a': .08167, 'b': .01492, 'c': .02782, 'd': .04253,\n 'e': .12702, 'f': .02228, 'g': .02015, 'h': .06094,\n 'i': .06094, 'j': .00153, 'k': .00772, 'l': .04025,\n 'm': .02406, 'n': .06749, 'o': .07507, 'p': .01929,\n 'q': .00095, 'r': .05987, 's': .06327, 't': .09056,\n 'u': .02758, 'v': .00978, 'w': .02360, 'x': .00150,\n 'y': .01974, 'z': .00074, ' ': .13000\n }\n\n return sum([character_frequencies.get(chr(byte), 0) for byte in input_bytes.lower()])", "def get_english_score(input_bytes):\n character_frequencies = {\n 'a': .08167, 'b': .01492, 'c': .02782, 'd': .04253,\n 'e': .12702, 'f': .02228, 'g': .02015, 'h': .06094,\n 'i': .06094, 
'j': .00153, 'k': .00772, 'l': .04025,\n 'm': .02406, 'n': .06749, 'o': .07507, 'p': .01929,\n 'q': .00095, 'r': .05987, 's': .06327, 't': .09056,\n 'u': .02758, 'v': .00978, 'w': .02360, 'x': .00150,\n 'y': .01974, 'z': .00074, ' ': .13000\n }\n\n return sum([character_frequencies.get(chr(byte), 0) for byte in input_bytes.lower()])", "def hwt(data):\n sz = len(data)\n i = 0\n res1 = []\n res2 = []\n while i < sz:\n s0 = data[i]\n s1 = data[i+1]\n res1.append((s0+s1)/2.)\n res2.append((s0-s1)/2.)\n i += 2\n return (res1,res2)", "def compute_HMM_weights(trainfile,smoothing):\n tag_trans_counts = most_common.get_tag_trans_counts(trainfile)\n all_tags = tag_trans_counts.keys()\n\n transition_weights = compute_transition_weights(tag_trans_counts, smoothing)\n for tag in all_tags + [END_TAG]:\n transition_weights[(START_TAG, tag, TRANS)] = -np.inf\n transition_weights[(tag, END_TAG, TRANS)] = -np.inf\n counters = most_common.get_tag_word_counts(trainfile)\n nb_weights = naive_bayes.estimate_nb_tagger(counters, smoothing)\n emission_weights = {}\n for nb_weight in nb_weights:\n if not OFFSET in nb_weight:\n emission_weights[(nb_weight[0], nb_weight[1], EMIT)] = nb_weights[nb_weight]\n all_weights = Counter(transition_weights)\n all_weights.update(Counter(emission_weights))\n\n return defaultdict(float, all_weights), all_tags", "def weiner_tf(H, K):\r\n\r\n W = (1 / H) * ((np.conjugate(H) * H) / ((np.conjugate(H) * H) + K))\r\n return W", "def lkruskalwallish(*args):\r\n args = list(args)\r\n n = [0]*len(args)\r\n all = []\r\n n = map(len,args)\r\n for i in range(len(args)):\r\n all = all + args[i]\r\n ranked = rankdata(all)\r\n T = tiecorrect(ranked)\r\n for i in range(len(args)):\r\n args[i] = ranked[0:n[i]]\r\n del ranked[0:n[i]]\r\n rsums = []\r\n for i in range(len(args)):\r\n rsums.append(sum(args[i])**2)\r\n rsums[i] = rsums[i] / float(n[i])\r\n ssbn = sum(rsums)\r\n totaln = sum(n)\r\n h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)\r\n df = len(args) - 1\r\n if T == 0:\r\n raise ValueError, 'All numbers are identical in lkruskalwallish'\r\n h = h / float(T)\r\n return h, chisqprob(h,df)" ]
[ "0.5841104", "0.5627546", "0.56087494", "0.5506234", "0.5505255", "0.5500743", "0.5451423", "0.5432521", "0.54016435", "0.5336379", "0.53301096", "0.53196824", "0.5318167", "0.53105646", "0.527878", "0.5275149", "0.52684206", "0.52534753", "0.52520436", "0.5230744", "0.5225175", "0.52198756", "0.52024436", "0.5201729", "0.51664186", "0.51664186", "0.5157351", "0.5157188", "0.51554734", "0.51523936" ]
0.7021357
0
~30x faster than hankel_points_ascii
def hankel_points(): return N.frombuffer(b'`\rC\x94r\x199=\xb5\x8dn\xc17\xbd;=1h34\x0f\xa8>=\x02\x02X7\xb9\xf0@=\x9cb\xeb\x18\xd2\xb8B=\x92-\xdb\xd3\xe2\xb0D=\xa1H\xcd\xe3\xf6\xddF=\xba\x02\xfa\x97\xa1EI=\xc9\\!\\\x0c\xeeK=\x04^\x1b\x82\x06\xdeN=\x82=;Z\x8b\x0eQ=\x8b\x11l\x1f\xc7\xd9R=\\\x8e\xc3.O\xd5T=[\xc2\\\xe57\x06W=u)\xffb\x1erY=dy9\xec6\x1f\\=\x89\x83\xbc\xcf\\\x14_=\x054\xd8\xfb\x91,a=\x96o\x03*\xf6\xfab=\x12\x9d\xc4\xa7\xfb\xf9d=O\x06N\xc3\xbf.g=\xd6M=~\xe9\x9ei=\xe5\x02\x08\t\xb8Pl=\xee\xfcQ\xc4\x12Ko=\xfa\xb5\x97x\xcdJq=qO\xd2\x9e_\x1cs=7\xd9\xbc\xaf\xe8\x1eu=dp^\xfa\x8eWw=\xb1A\x90s\x03\xccy=ow\xe8J\x90\x82|=\xcfP=\x08)\x82\x7f=\xfd?\x85->i\x81=\xc0K\xad\xe4\x03>\x83=\xa2rQ\xb8\x16D\x85=G\xf2&\x08\xa6\x80\x87=\xb3\x84\xc6\xcdl\xf9\x89=\xd0\x88BK\xc0\xb4\x8c=xn\x08E\xa0\xb9\x8f=<\x19Px\xe4\x87\x91=\x96\x03\x1ec\xe3_\x93=@\xa7\xef3\x86i\x95=\xf7\x95\x1dk\x05\xaa\x97=\x8c\xef\xa2\x18&\'\x9a=\xa1\xf4\x8b\xa4H\xe7\x9c=\xa4\xb7g%y\xf1\x9f=\xc4sL\xb7\xc0\xa6\xa1=\x1aYd\x82\xfe\x81\xa3=A#\xce\x957\x8f\xa5=\xfa\x02\x97\xa2\xad\xd3\xa7=\x17b\xde\xe0/U\xaa=\x9f_J\xf2)\x1a\xad=\xed\x06\x9e*\xda\x14\xb0=\xd1\x8etI\xd3\xc5\xb1=^\xb2w\xabU\xa4\xb3=\xadc\xeeQ+\xb5\xb5=A\x06\xc8.\x9f\xfd\xb7=yt)\xb4\x8a\x83\xba=b4\x15\xd1dM\xbd=\xbcq\xca@)1\xc0=&\xdbi\x8e\x1c\xe5\xc1=s<\x08H\xe9\xc6\xc3=n\x1b\x1e\xdda\xdb\xc5=\xb7\x1c\xc7\x90\xda\'\xc8=(+.!7\xb2\xca=9\x85\x97\xde\xf9\x80\xcd= (Y,\xaaM\xd0=P!v\xe6\x9c\x04\xd2=\xa00\x80\xc2\xb9\xe9\xd3=\xc2\x9a\xf8\xac\xdb\x01\xd6=\x8b\x00\x8eJ`R\xd8=\x02\xae\x91\xb75\xe1\xda=r\xf1\x91\xb9\xe9\xb4\xdd=\xd5\x94\x03E]j\xe0=\x0c\xaa\x8c\xb2T$\xe2=\xf6\x1b\x05\x86\xc7\x0c\xe4=<9\xe87\x99(\xe6=;9\xfb\xde0}\xe8=h\x02\xf6\x07\x87\x10\xeb=\xf3\x8d\xdc\x015\xe9\xed=n\x8f\x1d\xe3B\x87\xf0=\x9chKTDD\xf2=\x03)y\xfe\x120\xf4=)\xc2\'\xf5\x9aO\xf6=O\xae\xd3\xd1L\xa8\xf8=V\xc8\xfb\xa3+@\xfb="\xd1hX\xdc\x1d\xfe=)l\x96_[\xa4\x00>>\'\xfc-ld\x02>\xe3k|\x98\x9cS\x04>\x8a\xe3\xc3\\\xe1v\x06>\xec<\xc4\xa7\xb4\xd3\x08>\xa0\xfaC\x1e$p\x0b>F\x82C_\xe0R\x0e>\xaa\r\xfa\x13\xa7\xc1\x10>\xa9\xb5\x95\xa2\xcc\x84\x12>\x900n\xc1dw\x14>\x8d\x9f\x9c\xe7l\x9e\x16>(Pc\xe6h\xff\x18>0\xb2q\nq\xa0\x1b>9\xac\x96\xb9A\x88\x1e>\x91\xf8qZ&\xdf >\x9e\x19\xbd\x15f\xa5">wKn\xe7k\x9b$>\x8c\xc0f\x0f>\xc6&>(|2\x14j+)>i\xec+\xfd\x12\xd1+>\x98\x93\xab\x0b\x01\xbe.>\xf6h\xc6\x8d\xd9\xfc0>\x99\xc1\xc6\xeb8\xc62>kl^y\xb2\xbf4>\xa0O\xadNU\xee6>\x17\x1c\xa0\xb8\xb8W9>\xa2T\x1f\x8c\n\x02<>X\xb0\xec\xfa\x1e\xf4>>\xcdi_\t\xc1\x1aA>\x94\xb9\xb7\x89E\xe7B>\xe3s\xe3\xe68\xe4D>\xc6\r\xd3 \xb3\x16G>\xfa\xf2\x08\\U\x84I>\xd3\x10\x00NX3L>\xe7\xaa\xe7-\x9c*O>;\xeeE)\xdd8Q>\xf1\xe1FU\x8c\x08S>\x96\xcaf\xa0\xff\x08U>\xa0\xef\x13\x02X?W>Z\xcf\xb9\x87@\xb1Y>c\x91\x8b\xda\xfcd\\>\xc2\\OLya_>\xd5\xec$J.Wa>\x85(\xde\xb4\r*c>s\xbb\x17\x17\x07.e>\xcb\x9b\x86oDhg>\xd71\xf1\xc5z\xdei>*d\x8a\xca\xf8\x96l>\x93\xd4\xfd\xfe\xb6\x98o>\xd3|J\xc9\xb4uq>\xc8\xc3\x9b\x0f\xcaKs>\xf6\xcf\xec\xbcOSu>\xd9\xeb\x1d\xe7x\x91w>\x99\xf6\xe0\xa1\x04\x0cz>\xa0\n\xd2\xb7L\xc9|>\xd7]\xf6\xefU\xd0\x7f>7\xf5\xa8\x04q\x94\x81>3pS\xcd\xc1m\x83>\xe5.\xa5\x04\xdax\x85>\xf3o\xaa\xe7\xf5\xba\x87>\xbd\x01\xb0\xa7\xde9\x8a>]\xd3F=\xf9\xfb\x8c>\x13\xc63e+\x04\x90>\xf7\r\xd8Zc\xb3\x91>\xcf\xaf\x8fV\xf5\x8f\x93>\x7f\xfd\xc9a\xa6\x9e\x95>(\xf5\xdb\xf0\xbb\xe4\x97>\xa8\xee{d\th\x9a>\xc2\xb6\xdd\xf6\xfe.\x9d>\xff\xa4V\x1d] \xa0>\x11\x03\x16+\x8c\xd2\xa1>\xe8\x0b\x93\x14e\xb2\xa3>\x01\xc3\xafH\xb5\xc4\xa5>]\x0eC\x83\xcb\x0e\xa8>U\xc2Zf\x85\x96\xaa>\xfc6\x9e\x81^b\xad>\xa2u)w\xc0<\xb0>\xb6\xb9H\xd5\xeb\xf1\xb1>\x0cYYq\x11\xd5\xb3>\xb9\xcew.\x07\xeb\xb5>\x01\xa0R 
%9\xb8>\x9c\xa0\\<S\xc5\xba>CC\xa4{\x18\x96\xbd>\xab\xa1\n\xcaUY\xc0>g\xe7\xfe\xb9\x82\x11\xc2>4\xfd\x98\xd7\xfa\xf7\xc3>\x7f\xa0\x11\x89\x9c\x11\xc6>vnaJ\xc9c\xc8>\x8a\x84\x8dvs\xf4\xca>\x81\x1e"\x84-\xca\xcd>o_\xf2m\x1dv\xd0>(;q:Q1\xd2>68\xc4\xb2!\x1b\xd4>\xadS<\xcfu8\xd6>/\xaf\xab\x84\xb8\x8e\xd8>\xaa\xfb\xf6\xa5\xe6#\xdb>?Ib;\x9e\xfe\xdd>\xa3\xc0s\xbb\x17\x93\xe0>\xae\x88\x83\xb8WQ\xe2>ln\no\x86>\xe4>\x90\x0c\x88x\x93_\xe6>\x98\x9cTS\xf3\xb9\xe8>i\xe4\xa1\\\xadS\xeb>\xf6n\xc9Bk3\xee>\xf6\xc2\xbd\x0bE\xb0\xf0>\xaa\xf5\xc5\x96\x96q\xf2>\xaduYy)b\xf4>ehW\xfd\xf5\x86\xf6>\xbe\x0ch;z\xe5\xf8>\x84/\x98-\xc8\x83\xfb>\xc6V\xd8<\x95h\xfe>\x8cb\x9c\xb8\xa5\xcd\x00?")v8\x0e\x92\x02?\x8f\xe4^?\x0b\x86\x04?\xde\xef\xe0\xd6\x9d\xae\x06?\xd2\n\xdc\xc2M\x11\t?\x9b\xa4\xe6\xac7\xb4\x0b?\x9e\xd7-\xcd\x1c\x9e\x0e?]\xaey\x1c:\xeb\x10?\xda|\x80\x01\xbf\xb2\x12?\xeec\x89/,\xaa\x14?\'\x8c0\x7f\x8b\xd6\x16?gs\x92pn=\x19?\xc9\xa9\x9eo\xfc\xe4\x1b?\xc0\xcf\x88\x98\x02\xd4\x1e?s\xde^\x92\x02\t!?\xdf0\x81V\xa9\xd3"?\xd1\x02\n\xb9\x8c\xce$?\x8b\xfe(q\xbf\xfe&?\x99\x93Z\xcc\xdci)?w\x0e\xd8\x0b\x17\x16,?\xd5\x1f\xcaDG\n/?\x1el\xf5u\xff&1?,\xa1\xc5\x9c\xcd\xf42?\x9b\x8c\xd5K-\xf34?\xa2Z\x85(:\'7?\x00\xcb\xf2^\x99\x969?C\xd9\xb2\x18\x88G<?k\xa9\xf6x\xeb@??\n,\x88#1EA?n}M:,\x16C?\x9f\xe1\xa5X\x0e\x18E?%\x83\xda!\xfcOG?\x910\n\xb2\xa4\xc3I?$\x19Y.PyL?\xffP9\xdd\xefwO?Mj\x04\xf8\x97cQ?\xf1\x02\xcc\x95\xc57S?\x0bR\xfbP0=U?Q\xa9\x98\xda\x05yW?X:BP\xff\xf0Y?\xb5\xb9\x00\xe6o\xab\\?\x8b\x03\xe5\x1aU\xaf_?e\x08\xfbP4\x82a?\xb18\xa9\x16\x9aYc?>\xfb\x1d\xa7\x93be?\xfa\xce\x0c\xd1W\xa2g?\x1dh0\xc5\xa9\x1ej?\xb8Z\xed\xd9\xe7\xddl?\xa3\xbfv\xdc\x1b\xe7o?.\x9d\xa1\x8c\x06\xa1q?\x9c-\x03%\xaa{s?z\'\x1f\xce8\x88u?AKb\x84\xf2\xcbw?\x06\xf1_\x9d\xa4Lz?\xe8*r\xa5\xb8\x10}?\x18\xd1\xcbf\xa2\x0f\x80?\xd7\x96\xd3\t\x0f\xc0\x81?\x029\xaf)\xf6\x9d\x83?\x10\xb0\xda9 \xae\x85?\xf7Q\xa4t\xd6\xf5\x87?\x1atSf\xf0z\x8a?\xf8\xc5\xf3\xe4\xe2C\x8d?]{\x8fM\xe8+\x90?\xcd^\x13(N\xdf\x91?6=;\x8e~\xc0\x93?\xe8a\xf8^J\xd4\x95?\xaa}\xbf"\x04 \x98?\xc4\xab\x86\xae\x8d\xa9\x9a?\xe6\x15\xea5gw\x9d?\xac%\x8a\xf9_H\xa0?\xb2\x7f\x8bG\xc4\xfe\xa1?p\xec\xee\xbcC\xe3\xa3?\x94d\xed\xb2\xb7\xfa\xa5?z\\\x83\x10|J\xa8?e$p\x05}\xd8\xaa?\xaa7\xe26F\xab\xad?-\xc4X\xc2\te\xb0?T\xcd\x0f\xc9q\x1e\xb2?\xe0\x0f\xcd F\x06\xb4?\xc4\xa3\xfd\xabh!\xb6?\x8e\xff\xa3\xc0>u\xb8?\xca\xf5\x82\xfb\xbe\x07\xbb?\x1bc\x80\x87\x80\xdf\xbd?\xbd\x852\x00\xe6\x81\xc0?\xa6\x8e\x1e\x0eW>\xc2?\x05\xd1\x94%\x86)\xc4?C;=\xc1]H\xc6?^\x8d\xbb\xb6L\xa0\xc8?\xc4\x7f0"T7\xcb?I\xd6\x81\xc8\x16\x14\xce?t\xe3\xe9\x0b\xf5\x9e\xd0?\xd6\xa9\xe1xt^\xd2?L\x05\xc37\x04M\xd4?x\xe5\x91j\x97o\xd6?\xbb\xd6Kw\xa6\xcb\xd8?\xcd)\xea\x0b=g\xdb?8\xc4\xbe\x9b\tI\xde?\x06\xb2\xed>7\xbc\xe0?g\xd2/l\xca~\xe2?\xf2{\x93\xc4\xc0p\xe4?bl\xb4 \x16\x97\xe6?\x9c\xee\xbf\x87L\xf7\xe8?\xb4%#Lz\x97\xeb?\xf0F,\xa4Y~\xee?\xf94J\xf3\xac\xd9\xf0?b\xb9\x8dKY\x9f\xf2?5N\x02:\xbc\x94\xf4?$\x1d2]\xda\xbe\xf6?\xbf\xc4mn?#\xf9?q5Rw\x0c\xc8\xfb?\tU\xde\x85\x07\xb4\xfe?\xcf3\xaa\x83V\xf7\x00@\x9d?/{!\xc0\x02@\xcd1\xcd\x06\xf7\xb8\x04@\x1c>n\x9a\xe4\xe6\x06@\x11\xc3\x97\xb2\x7fO\t@\x07t\xf3"\xf4\xf8\x0b@\xa0\xba\t\xe6\x13\xea\x0e@\x0c\x11XK4\x15\x11@!\xaa\xf8_#\xe1\x12@\xc5\xcdt\x9aq\xdd\x14@\x8f\x87\xa3S5\x0f\x17@\x01nn\xdc\r|\x19@\x9d!\x8a\xe51*\x1c@\xd0\x15\x06k\x7f 
\x1f@.\xe3>\xa6F3!@\xab\xd8\x7f__\x02#@\xa1\x11>e,\x02%@\xeb\x9e\xe5\x04\xcd7\'@\x9d\x07\x12u\xea\xa8)@\xb2r\xa2V\xc6[,@\x9e\xd6O\xbcJW/@\x93\x8f\xeb\xf0\x8dQ1@S~\r\xe0\xd5#3@\xe9\x8e3\xd8\'\'5@\xa6\x94"+\xac`7@\xa05\x94\x06\x16\xd69@\x90b\xd3\x0e\xb2\x8d<@xB\x8a\x82v\x8e?@N\xe7\x8d\x88\npA@U\\\x9eH\x87EC@\r\xd5&edLE@\xb8d$D\xd3\x89G@V\xaa\xf9\x1b\x91\x03J@\xdb\x88\xc0\xa7\xf5\xbfL@+{\x81g\x03\xc6O@\xf1\xc5\xf9\xca\xbc\x8eQ@\t\x7f\xe4\x00tgS@\xb1\xcf\xb1~\xe2qU@\xc2y\x92\xceB\xb3W@\x81\xd0;A\\1Z@x\xf2\x1b\xbc\x91\xf2\\@\x84\x89,\x16\xf2\xfd_@X2\xa8\x16\xa5\xada@\x05}Hq\x9c\x89c@a\'8\x98\xa2\x97e@\xd52\xf3I\xfb\xdcg@\x19zJ\x03x_j@\x9e\xfd\xa7\xe7\x86%m@\xb8\xb5W\x9d!\x1bp@a\x81\xb8\xca\xc3\xccq@r\xb8\xea\x02\x01\xacs@\xb1\xa4\xe8%\xa5\xbdu@\xf2k\xad6\xfd\x06x@\x16\x92\r\xf0\xe4\x8dz@:99\xc7\xd5X}@\xe8\x92.\xc1{7\x80@\xb8z\xf1F\x19\xec\x81@\x9b\xa2\xa4\x1f\xa2\xce\x83@\xd5\x95\xbe\x9c\xea\xe3\x85@5\t\n\x16I1\x88@+\xd1f\x96\xa3\xbc\x8a@\x9cG\xb8\xf8~\x8c\x8d@\xf2\xec\xdc\xcd\x07T\x90@\x91\x7f\xc2\xeb\xa5\x0b\x92@\xaa\x01\n2\x80\xf1\x93@\x9c6\x83rs\n\x96@\xb6\x845j\xdf[\x98@~u3\x86\xb4\xeb\x9a@m\xc4#\x1b\x83\xc0\x9d@2o>\x1b\xc6p\xa0@|\xb3D\x1aj+\xa2@\xb68j\xa5\x9b\x14\xa4@\x03\x1b\xcf\x1d@1\xa6@5\x7fA\xb6\xc0\x86\xa8@v\xfdMP\x18\x1b\xab@\x0e.\x92\xce\xe2\xf4\xad@\'n\xc9\x01\xb7\x8d\xb0@5\'<4fK\xb2@\xf9\x91\xd1\xe5\xf47\xb4@#\x9c\x0b\x16QX\xb6@nS&~\xed\xb1\xb8@\x7f\xe5\x8f\x86\xcfJ\xbb@?\xd23\xb4\x9e)\xbe@\xb1\xf7\x8f\xda\xda\xaa\xc0@\x89\x05\x19\x9c\x9ak\xc2@a\x8b\n`\x8c[\xc4@\xbcGt\xd3\xa6\x7f\xc6@L\xac\xc4Ff\xdd\xc8@\xf1h\xd3\xbb\xdaz\xcb@:\xbeTn\xb7^\xce@6\xe5@\xff1\xc8\xd0@L\xc2\xf8\xb4\x07\x8c\xd2@a%\x9f\x81b\x7f\xd4@=R\x18\xcfA\xa7\xd6@\xd9\x1d\xe7\x95+\t\xd9@\x04F\xf5\x83:\xab\xdb@4\xb2^\xa0-\x94\xde@\xa5\xef(\xca\xbc\xe5\xe0@fK\xa7\xe2\xad\xac\xe2@\x124\xda\xb8w\xa3\xe4@Z\x0b\xdc\x82"\xcf\xe6@\x04\xc1C\xf2=5\xe9@\xe3\x84\xd6s\xef\xdb\xeb@F\x18\xdb\xee\x01\xca\xee@]\xc53\x96{\x03\xf1@\xf2;\xa0\x89\x8d\xcd\xf2@\x9f\xb2\xc8t\xcc\xc7\xf4@,UziI\xf7\xf6@8\xd2}\xe3\x9da\xf9@\xe0A^!\xfa\x0c\xfc@\xd7\xfet\xff4\x00\xff@\x03"\xed\xben!\x01A\x84\x11\x10\x0f\xa7\xee\x02A\x12\x19;%a\xec\x04A\xeb\x1d\x86\xfe\xb6\x1f\x07A\xcaR\'\xf2K\x8e\tA\xc7z{#[>\x0cAn\x16\xfbx\xc76\x0fA:\xe8\x81\xa0\x96?\x11A\x82c\xd5\xd8\xfa\x0f\x13Al\xb4\xc6:6\x11\x15A>\xdck\xbekH\x17AO\xad\xc2\xa7H\xbb\x19Ah\xdf&\x12\x13p\x1cA\x1f\xb3a\x03\xbam\x1fA\\=\xc1\x97\xf3]!A\xb3\x1c\x82M\x891#A.\x01\xc7&L6%A0\x0es&hq\'A\xd3\\\xc4\x8e\x94\xe8)AT\xa5d\x86"\xa2,Ap\xd1\xc4G\r\xa5/A\x17\xa7\x1d\x02\x86|1A\xe4\xb6\\\xd4RS3A2\x08_[\xa3[5A\xbc\xba\xbf\xb4\xac\x9a7A\xf1\x96\x9420\x16:A\xc0^F\x1a\x8a\xd4<A\xca\x1ej\xf0\xc1\xdc?A\x04+\xae=N\x9bAA\xc2xa\xd5WuCA\xef\xbdzK<\x81EA\x03\xf6S\xe89\xc4GA\xe3\xf8\x90\x1f\x1cDJA\xba\xd4\xechJ\x07MA\xc5\x82aTl\nPABp/\xa9L\xbaQA\xe8\xb5C\xb9\x98\x97SA2d\xd0j\x17\xa7UA4h\x11A\x10\xeeWA}7\x0e\xe3XrZA\x90\xe4\x89\x0ed:]AM^\xb7\x0e\xa9&`A\xfe\xe2\x04\xa4\x81\xd9aA\x10\x11o\xe9\x15\xbacA4\xee\xe1-5\xcdeA\x1d\xd7\xba?0\x18hA.\xd2Y\x0b\xe7\xa0jA\x8f`b\xa8\xd7mmAd-\x1e~\x17CpA\x0e\xda9\x8e\xed\xf8qA\x8f\xc0\x08\xd0\xcf\xdcsA2g\xfe\t\x96\xf3uAr\xb2\xf5e\x9aBxA\xe8\xc8\xbb\'\xc7\xcfzA\xff\xf3\xcf\xd4\xa5\xa1}A\xd1v\x16\xfa\xb7_\x80A\x83\xbe\x82\xc8\x90\x18\x82A\xff\xd4\xf0\xd7\xc6\xff\x83Av[Cu:\x1a\x86A\xd3\xa2K6Om\x88A(Ux\xc8\xf9\xfe\x8aA\x87\nC3\xcf\xd5\x8dA\r\xca\xba\xda\x8a|\x90AM5>\xb4k8\x92A+\x82\xc3l\xfb"\x94A\xd7C\x9e\xe6"A\x96A\x86\x1b,4O\x98\x98A\xe9\xa5\xd1~\x7f.\x9bA\xd3\xbaDdT\n\x9eAi\xce\xc0x\x90\x99\xa0A\xe2Jv\xb3~X\xa2AAj\xda\xfamF\xa4A\xc3\xf3\xcd\xd5Oh\xa6A\xfe\xee\xed\xe3\x9a\xc3\xa8A\xba\x9e\t\xddX^
\xabA\xb3\xb4x\t6?\xaeA\x12Tz-\xc9\xb6\xb0A\xfd\xa0\xe1(\xcax\xb2AI\xebM\xef\x1ej\xb4A\xc7\td\xbb\xc1\x8f\xb6A\'\xe6\xd0\xca2\xef\xb8A\xdc\x99cv\x86\x8e\xbbA\x882\x9f\xc5tt\xbeA\xf5f\xd6R5\xd4\xc0Ah\x9e\xe4wN\x99\xc2A\xe2n\xf6\xb7\x0e\x8e\xc4A\x9cb\xc6\x10y\xb7\xc6A{Z\xffn\x17\x1b\xc9A}-&\xdf\x08\xbf\xcbA0\xee\x96<\x11\xaa\xceA\x8acbC\xd5\xf1\xd0A\xe7\xa0\x92\x04\x0c\xba\xd2AZ\xbcm\xc3=\xb2\xd4A\xcd\x8e0Pv\xdf\xd6A\xf0\xd2\x8fWIG\xd9A\x11\xf4\x9c\xac\xe0\xef\xdbA]\x19_\x13\x0c\xe0\xdeA\x86\rKZ\xa9\x0f\xe1A41\xaf3\x03\xdb\xe2A\x0fM\x10\x81\xac\xd6\xe4A\xe3J\xb5\xf4\xb9\x07\xe7A\xb7\xa3\x86\x0c\xc9s\xe9A\xcaW\x1au\x0e!\xecA]Y\x19\xf0e\x16\xefAq\xa8]\xf3\xb1-\xf1A\x1c9\xafj4\xfc\xf2A*\xa3\xfe`[\xfb\xf4A/\xfa?zD0\xf7A\xdb\x90\xd7\x16\x97\xa0\xf9A>a\xf9\xcf\x92R\xfcAi\xc6\x0bz\x1fM\xffA2\x12\tk\xefK\x01B\xc1;\xba\x0f\xa0\x1d\x03B\xb5\xa2\x1e\xd4J \x05B)$\x96]\x16Y\x07B\xbasg\x00\xb4\xcd\tB6\x89\x9fUn\x84\x0cBr\xee\xa2Y9\x84\x0fB\x8a\xdf^\x1ebj\x11B\x00\x90\xab\x89F?\x13B\x14\xed\x1cL{E\x15Br\xf4X\x1c0\x82\x17Bq\xe3\rT \xfb\x19B\xb9\x8d~\x9f\xa1\xb6\x1cB\x82\xdbs8\xb4\xbb\x1fB\x80z\x14k\n\x89!B\xfc\x9c\x13@(a#B\xdb>n;\xedj%B{\xbd\x065\x92\xab\'B$\xe0\x96\x9d\xdc(*BGJ\x16H-\xe9,B\x9d\x1d>\xc1\x90\xf3/B\xcaB\x84\xaf\xe8\xa71B\xdc\x189\x9bE\x833B\x11\xd0P\x15\xa1\x905B\xcc}\xfc&=\xd57B=\x81\xc4i\xe9V:BX\x93\xf6\xea\x11\x1c=B#\xec\xf6\xcf\xe7\x15@B0\xb0\xaeJ\xfd\xc6AB\xbcI\x1a\x04\x9f\xa5CB\xe1\xb6\xcdM\x97\xb6EB\x0chwr1\xffGB\xaa\xa6PFG\x85JB\'\x15\xc1$POMB\xd2\xe9\xce\xc082PB\xeev;\x9cH\xe6QB\xc7Hn\xe44\xc8SB\xb2L\xbaY\xd0\xdcUB\x9em\x96\x98o)XB\xfe\xac\xee\xc1\xf6\xb3ZB\xb75+\x93\xe8\x82]B\x9b\xc8L\x8a\xbbN`B\x17\xaez\x04\xcb\x05bB\x9aG\xa6\xa6\x07\xebcB\xbe\x95\xb9\xaeL\x03fB\x10\xcc[\x1b\xf8ShB\xaa$Ml\xf8\xe2jB8\xfa\xff\xd4\xdb\xb6mB%\xb2/\x84pkpB\x03\xf8e\xe4\x84%rB\xd8\xd7\xee\xb5\x17\x0etB\x1e\xab=\xc3\x0c*vB9\x9d\xae}\xcb~xB1\x8c\x17\xd6L\x12{B\xc0\xef!\x8a*\xeb}B\x0bG\xd1\x06X\x88\x80B\xc2\xac\xa1\x9dvE\x82B\x0151~e1\x84BP\'\x89\x0e\x11Q\x86B\x15j\\C\xea\xa9\x88Bc\r\xf8\x90\xf4A\x8bBW\x17\x8dS\xd5\x1f\x8eB\xc5\xae&kr\xa5\x90B\x9e\x06~\x92\xa0e\x92B\x91\x90\x14l\xf1T\x94BC\x95\xb0\x08Zx\x96Bh\xc0\x1a\xf1T\xd5\x98B\xa8=\x99/\xf0q\x9bBj\xd5X\xd3\xdcT\x9eBm\xa9\xc1\n\xc0\xc2\xa0B\xaeP\xf8%\x03\x86\xa2Bg`\xff\xec\xbbx\xa4B\xea\xe1\x9b*\xe8\x9f\xa6B7\xcb\x88\x0c\x0c\x01\xa9Bm\xe1\xa7E@\xa2\xabB\xb2\xe4\xb9\xacA\x8a\xaeBo\xa3\xd1?A\xe0\xb0B\x7f\x17\xbc\xbb\x9e\xa6\xb2Br\xaf\x18o\xc5\x9c\xb4BS\xd0\x07\xee\xbb\xc7\xb6B\xf9\xed0\x1c\x10-\xb9B\x99\xb2\xd4g\xe5\xd2\xbbBsL\x04\x84\x04\xc0\xbeB\x12\xcb$e\xf6\xfd\xc0B\xd4[$\xb8s\xc7\xc2B\xbcpIa\x0e\xc1\xc4BWp\x87\xcd\xd5\xef\xc6B\xb5b\x8a\xa7aY\xc9B2*\xd6+\xe0\x03\xccBBZ\xad\xfe%\xf6\xceB\xf4\')\xd6\xdf\x1b\xd1B\x7f\xc7<\x80\x82\xe8\xd2B\xb8\xd4=3\x97\xe5\xd4B\xd2\x97\x85D6\x18\xd7B\xe2\xda\xfa6\x01\x86\xd9B\x1dMj(15\xdcB>\x9fM\xc3\xa6,\xdfBn\xb4\xed\xee\xfd9\xe1Be\xe4\xc2y\xcb\t\xe3B\xfe\xa0fU`\n\xe5B\x81^F\xcf\xdd@\xe7B;#\xd8S\xef\xb2\xe9B\x0f|X\xf5\xd8f\xecB\xc7\xf0\xa2y\x87c\xefB\xe2x#\x0cQX\xf1B\x91U\'\x0bO+\xf3BM\x8a\xfa8j/\xf5Bj\x9c\xe8\xea\xcci\xf7B^\xcai\x88,\xe0\xf9B\xa8Fs+\xd8\x98\xfcB\xbel\x92\xca\xc8\x9a\xffB\x06\xa9\x1e\x8b\xd9v\x01C\x85\x12\x8f\x9b\rM\x03C\x03\x91\xf7O\xb5T\x05C\xf8jg\x15\x04\x93\x07Ch\xca\xea_\xb9\r\nC\xcdA\x9ad/\xcb\x0cCR\x81*`k\xd2\x0fC\x1e\xc3\xd7\xc9\x97\x95\x11C\xa6\xa4\xd4\x92\x07o\x13C\xea_$\rBz\x15C\xb3\xa8\x9b\xcd\x83\xbc\x17Cu5\x8bf\x96;\x1aC:\xe1\xbb;\xdf\xfd\x1cC2|\xd2\xf27\x05 
C1\xb1\xec&\x8c\xb4!C\xddf\x89Y=\x91#C|\xad\x11\xe4\x10\xa0%C\xa3\x7f=\x93L\xe6\'C\x11\xe5q)\xc4i*CST\xd7L\xe80-C>\x83\xb4\x83k!0C>\xec\xa1\x01\xb7\xd31Cm\xc7\xf6X\xaf\xb33C\x8d\x9f\x1bI"\xc65Ch\xee\xe5\xe6^\x108C\xa7,\xbe6C\x98:CCf\xfe4Kd=C\xb4\xae\x86\xb9\xd0=@Co\xa1\xe3\xb9\x18\xf3AC\xf9\x8b\x1f\xfb]\xd6CCd1k\xb1v\xecEC\xf8S\x10J\xbb:HC\xdf\x8e\x89\x1d\x14\xc7JC\\aW\x92\x08\x98MC8!\xad\xebgZPCL\xd9F\xb0\xb1\x12RC\xc8\x17\xc1\xaaI\xf9SCV\x9c\xf7\x92\x0e\x13VC*\xfe\x1b?beXC\x13v\xe9m7\xf6ZC\xdb\xf5\x1e\x04!\xcc]C\'\xd4%r1w`C\x00\xa1\nF\x822bCI\xb4T\xd3r\x1cdC\xd0\xc2\x87d\xea9fC\xee\xbaMIT\x90hC\xb8\xef\xf0\xb8\xad%kC\xf5#\xaa*\x95\x00nCa\xa6\x89\xa5-\x94pC\xa15\x19\xdd\x8aRrC\xd5\xdb\x10\xe1\xd9?tC\xe6\x9d\xb3\x9d\navCLl\xd1\xec\x91\xbbxC\xddj\xb2\x90wU{CN)h\xa7e5~C\xf8l\r\xdf\\\xb1\x80C\x861\t\xd8\xcbr\x82C\xbb\x86\xea@\x7fc\x84Ce\xad\xe5\xb6o\x88\x86C/\x9f\xbb\xae\x1b\xe7\x88C\xbbyA\x88\x95\x85\x8bC\xcbq\xe4\x1c\x93j\x8eC\xbc\x05\x83x\xbf\xce\x90C\xbc\xbb\x1e\x9aE\x93\x92C\x99z\x96`c\x87\x94Coj\\)\x1a\xb0\x96C\xf7$\x0b\x15\xf2\x12\x99C\\\x96\xb43\x08\xb6\x9bC\xdb\x8b\xc8.\x1e\xa0\x9eC\xa9kZ\xccU\xec\xa0C|\xb9L\x87\xf8\xb3\xa2C\xf7\x9b\x8a\xae\x86\xab\xa4C\x9b\xbc+o\n\xd8\xa6C\xdd\xaf\xaa\xa6\x15?\xa9C_\xea&(\xd0\xe6\xabC$ \xde\x81\x07\xd6\xaeC?\xcd\xa25 \n\xb1C\xd3\x016\x04\xe5\xd4\xb2CEB\xff\x99\xe9\xcf\xb4C\xb1q>\x03A\x00\xb7C,rr\xeb\x86k\xb9C\xda\x19\xba\xfb\xed\x17\xbcC\xaa\xec\x10\xbcO\x0c\xbfC\xbc\xa4\x0b\x10\x1f(\xc1CN\x93.v\x0b\xf6\xc2C\x1e\x8e\xf0\x92\x8c\xf4\xc4C\xf2\xb7Wa\xbe(\xc7CW\xc0)lF\x98\xc9Ci\x11\x98EbI\xccCz\xc3p\x84\xf7B\xcfCQ\xd2\xe5\xb7RF\xd1C\xdf\xcb<Cl\x17\xd3C\xf8\xc1\x1f\np\x19\xd5C\xff\x9a\x14\x06\x83Q\xd7C\xef\xb5\x88\xb2T\xc5\xd9Cl\xd7\xf4\x9d-{\xdcC\xd0\x8c3\x83\xffy\xdfC:\xb8$\x8a\xbbd\xe1C\xd9\xa2\x1a\xd2\x079\xe3C&\x9d\x14q\x94>\xe5Cc\x83\xedn\x8fz\xe7C\x7f\xdd9I\xb2\xf2\xe9Cu`\x10\x9eP\xad\xecC\xcbL\xb7ah\xb1\xefC\xd3X_\xe4Y\x83\xf1C\x15\xe56\x8a\xdeZ\xf3C;\xb9\x1e:\xfac\xf5C\xb0\xb87\x1a\xe4\xa3\xf7CJ\xdb\xdb\xbb_ \xfaC\xe8f8\xe0\xcb\xdf\xfcC\xa9,\x84\xca2\xe9\xffC\xa2v\xd1$.\xa2\x01DMs\xb6\xd3\xf0|\x03D\xe2\xe9V\xd8\xa1\x89\x05DY\xe6&\x87\x81\xcd\x07D\x07\x1b\x03\x97]N\nD\xe8E\xca\xff\x9f\x12\rD\xd3C\xa7\xb4\xaf\x10\x10DX\xb6\\\xaa8\xc1\x11D\x9a\x82u\x17?\x9f\x13D\x13\x9f\xa0\xbf\x8b\xaf\x15D%\xa3\xce5h\xf7\x17D\x81\x80;h\xac|\x1aDy\xd74\x99\xcdE\x1dD\xb2}|u\xf7, D\xd9\xc2\x89\xd4y\xe0!D3\xdf\x08\xbf\xc9\xc1#D\xbeI\xabd\xb8\xd5%Dc\xfb#\xa7\x98!(D?\x1b\n\xbeL\xab*D\xf8U\xfaIUy-D\x05>\xcb\xfepI0D>s\x89\x03\xf2\xff1D\\1\xbf4\x91\xe43D\xeb\xc2\xf3<(\xfc5D\xc9\xfd\xfe\\\x13L8D \xdd\xee\'?\xda:D\xd7@\xb2\xb07\xad=D\xfd56\xa8\x1cf@D\xdf\xf25\x98\xa1\x1fBD\x91D\xa2\xe3\x95\x07DDM\xb5\xc5\xbe\xdb"FD\x10K\x1c\xda\xd8vHD\x01Tf6\x84\tKD\xbaD\x0bmu\xe1MD\xa1[\xfa\xc9\xfa\x82PDj\xeb\x13\xf4\x88?RD\xf7Px7\xd8*TDW\t=a\xd3IVDU\xa8\x1e\xa2\xe9\xa1XDmf\xebz\x1c9[D\xe6&\xcd\x1f\x0f\x16^D]\xf9\xef\xbc\x0b\xa0`D\x03\xb1Sy\xa8_bD\x0eG\xc5\x9cXNdD\xd3SG\x9c\x0fqfD<\x94\x909F\xcdhDZ\x13\xf9\x87\x08ikD\x18\xb4\xdaj\x05KnDv\xbf\x8b\xdaO\xbdpD')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_points_expansion(self):\n tot_points = 0\n if 'capi' in args.exp:\n be = ['_'] * 8\n be += self.b[ 0: 5]\n be += ['_'] * 2\n be += self.b[ 5:10]\n be += ['_'] * 2\n be += self.b[10:15]\n be += ['_'] * 2\n be += self.b[15:20]\n be += ['_'] * 8\n max_points = 0\n for i in range(8, 34):\n if be[i] == 'U':\n points = 0\n if be[i - 1] == 'P' or be[i - 1] == 'G':\n points += 5\n elif be[i - 1] == 'S':\n points += 3\n elif be[i - 1] == 'U':\n points += 2\n elif be[i - 1] == 'A' or be[i - 1] == 'F' or ord(be[i - 1]) < 54:\n points -= 5\n if be[i + 1] == 'P' or be[i + 1] == 'G':\n points += 5\n elif be[i + 1] == 'S':\n points += 3\n elif be[i + 1] == 'U':\n points += 2\n elif be[i + 1] == 'A' or be[i + 1] == 'F' or ord(be[i + 1]) < 54:\n points -= 5\n if be[i - 7] == 'P' or be[i - 7] == 'G':\n points += 5\n elif be[i - 7] == 'S':\n points += 3\n elif be[i - 7] == 'U':\n points += 2\n elif be[i - 7] == 'A' or be[i - 7] == 'F' or ord(be[i - 7]) < 54:\n points -= 5\n if be[i + 7] == 'P' or be[i + 7] == 'G':\n points += 5\n elif be[i + 7] == 'S':\n points += 3\n elif be[i + 7] == 'U':\n points += 2\n elif be[i + 7] == 'A' or be[i + 7] == 'F' or ord(be[i + 7]) < 54:\n points -= 5\n if points > max_points:\n max_points = points\n tot_points += max_points\n if 'plan' in args.exp:\n nb_b_in_district = [0, 0, 0, 0, 0]\n i_to_district = (0, 0, 1, 2, 2, 0, 0, 1, 2, 2, 3, 3, 1, 4, 4, 3, 3, 1, 4, 4)\n for i in range(20):\n if self.b[i] != '_':\n nb_b_in_district[i_to_district[i]] += 1\n points = len([1 for x in nb_b_in_district if x == 4])\n if points == 5:\n points = 6\n tot_points += points\n if 'fire' in args.exp:\n be = ['_'] * 8\n be += self.b[ 0: 5]\n be += ['_'] * 2\n be += self.b[ 5:10]\n be += ['_'] * 2\n be += self.b[10:15]\n be += ['_'] * 2\n be += self.b[15:20]\n be += ['_'] * 8\n max_points = 0\n for i in range(8, 34):\n if be[i] == 'U':\n points = 0\n if be[i - 1] == 'A' or be[i - 1] == 'F':\n points += 3\n if be[i + 1] == 'A' or be[i + 1] == 'F':\n points += 3\n if be[i - 7] == 'A' or be[i - 7] == 'F':\n points += 3\n if be[i + 7] == 'A' or be[i + 7] == 'F':\n points += 3\n if points > max_points:\n max_points = points\n tot_points += max_points\n return tot_points", "def test_phred_to_ascii(self):\r\n self.assertEqual(phred_to_ascii(0, 120), 'x')\r\n self.assertEqual(phred_to_ascii(1, 119), 'x')", "def printPolyCoeffs(lam) :\n ell = len(lam)\n useFormat = \"2.6e\"\n count = 0\n def printLine(s, count) :\n if lam[count] < 0 :\n s = s + 3 * \" \"\n else :\n s = s + 4 * \" \"\n s = s + \"{0:\" + useFormat + \"}\"\n print(s . 
format(lam[count]))\n count = count + 1\n return count\n if ell >= 1 :\n count = printLine(\"x0y0\", count)\n if ell >= 3 :\n count = printLine(\"x1y0\", count)\n count = printLine(\"x0y1\", count)\n if ell >= 6 :\n count = printLine(\"x2y0\", count)\n count = printLine(\"x1y1\", count)\n count = printLine(\"x0y2\", count)\n if ell >= 10 :\n count = printLine(\"x3y0\", count)\n count = printLine(\"x2y1\", count)\n count = printLine(\"x1y2\", count)\n count = printLine(\"x0y3\", count)\n if ell >= 15 :\n count = printLine(\"x4y0\", count)\n count = printLine(\"x3y1\", count)\n count = printLine(\"x2y2\", count)\n count = printLine(\"x1y3\", count)\n count = printLine(\"x0y4\", count)\n if ell >= 21 :\n count = printLine(\"x5y0\", count)\n count = printLine(\"x4y1\", count)\n count = printLine(\"x3y2\", count)\n count = printLine(\"x2y3\", count)\n count = printLine(\"x1y4\", count)\n count = printLine(\"x0y5\", count)\n if ell >= 28 :\n count = printLine(\"x6y0\", count)\n count = printLine(\"x5y1\", count)\n count = printLine(\"x4y2\", count)\n count = printLine(\"x3y3\", count)\n count = printLine(\"x2y4\", count)\n count = printLine(\"x1y5\", count)\n count = printLine(\"x0y6\", count)\n if ell >= 36 :\n count = printLine(\"x7y0\", count)\n count = printLine(\"x6y1\", count)\n count = printLine(\"x5y2\", count)\n count = printLine(\"x4y3\", count)\n count = printLine(\"x3y4\", count)\n count = printLine(\"x2y5\", count)\n count = printLine(\"x1y6\", count)\n count = printLine(\"x0y7\", count)\n if (ell > 36) or (ell < 1) :\n raise ValueError(\"Polynomial degree less than or equal to 7, please.\")", "def test_ascii_to_phred(self):\r\n self.assertEqual(ascii_to_phred('x', 120), 0)\r\n self.assertEqual(ascii_to_phred('x', 119), 1)", "def calc_points_harbor(self):\n points = 0\n if self.cnt_1 + self.cnt_2 + self.cnt_3 + self.cnt_4 + self.cnt_5 >= 2:\n hor = 0\n for i in range(4):\n j = 0\n while j < 5 and ord(self.b[i * 5 + j]) >= 54:\n j += 1\n if j < 4:\n start = j\n j += 1\n while j < 5 and ord(self.b[i * 5 + j]) < 54:\n j += 1\n length = j - start\n if length > hor:\n hor = length\n vptab_harbor = (0, 0, 3, 7, 12, 18)\n points += vptab_harbor[hor]\n ver = 0\n for j in range(5):\n i = 0\n while i < 4 and ord(self.b[i * 5 + j]) >= 54:\n i += 1\n if i < 3:\n start = i\n i += 1\n while i < 4 and ord(self.b[i * 5 + j]) < 54:\n i += 1\n length = i - start\n if length > ver:\n ver = length\n points += vptab_harbor[ver]\n if 'cust' in args.exp:\n if ver == 4 or hor == 5:\n points += 5\n points += 2 * self.cnt_2 + 3 * self.cnt_3\n return points", "def ConvertTetsToHexes(self):\n\n self.__do_essential_memebers_exist__()\n if self.element_type == \"hex\":\n return\n assert self.element_type == \"tet\"\n if self.IsHighOrder:\n raise ValueError('High order tetrahedral elements cannot be converted to low/high order hexahedrals')\n\n tconv = time()\n\n # SPLIT THE TET INTO 4 QUADS\n\n # FIND MEDIAN OF TETS\n # median = self.Median()\n median = np.sum(self.points[self.elements,:],axis=1)/self.elements.shape[1]\n # FIND EDGE MIDPOINTS OF TETS\n mid01 = np.sum(self.points[self.elements[:,[0,1]],:],axis=1)/2.\n mid02 = np.sum(self.points[self.elements[:,[2,0]],:],axis=1)/2.\n mid03 = np.sum(self.points[self.elements[:,[0,3]],:],axis=1)/2.\n mid12 = np.sum(self.points[self.elements[:,[1,2]],:],axis=1)/2.\n mid13 = np.sum(self.points[self.elements[:,[1,3]],:],axis=1)/2.\n mid23 = np.sum(self.points[self.elements[:,[2,3]],:],axis=1)/2.\n # FIND MEDIAN OF FACES\n med012 = 
np.sum(self.points[self.elements[:,[0,1,2]],:],axis=1)/3.\n med013 = np.sum(self.points[self.elements[:,[0,1,3]],:],axis=1)/3.\n med023 = np.sum(self.points[self.elements[:,[0,2,3]],:],axis=1)/3.\n med123 = np.sum(self.points[self.elements[:,[1,2,3]],:],axis=1)/3.\n\n # # STABLE APPROACH\n # points = np.zeros((1,3))\n # for elem in range(self.nelem):\n # hex0 = np.concatenate((self.points[self.elements[elem,0],:][None,:], mid01[elem,:][None,:],\n # med012[elem,:][None,:], mid02[elem,:][None,:],\n # mid03[elem,:][None,:], med013[elem,:][None,:],\n # median[elem,:][None,:], med023[elem,:][None,:]),axis=0)\n\n # hex1 = np.concatenate((self.points[self.elements[elem,1],:][None,:], mid13[elem,:][None,:],\n # med123[elem,:][None,:], mid12[elem,:][None,:],\n # mid01[elem,:][None,:], med013[elem,:][None,:],\n # median[elem,:][None,:], med012[elem,:][None,:]),axis=0)\n\n # hex2 = np.concatenate((self.points[self.elements[elem,3],:][None,:], mid23[elem,:][None,:],\n # med123[elem,:][None,:], mid13[elem,:][None,:],\n # mid03[elem,:][None,:], med023[elem,:][None,:],\n # median[elem,:][None,:], med013[elem,:][None,:]),axis=0)\n\n # hex3 = np.concatenate((self.points[self.elements[elem,2],:][None,:], mid02[elem,:][None,:],\n # med012[elem,:][None,:], mid12[elem,:][None,:],\n # mid23[elem,:][None,:], med023[elem,:][None,:],\n # median[elem,:][None,:],med123[elem,:][None,:]),axis=0)\n\n # points = np.concatenate((points,hex0,hex1,hex2,hex3))\n # points = points[1:,:]\n\n points = np.zeros((4*self.nelem*8,3))\n points[0::4*8,:] = self.points[self.elements[:,0],:]\n points[1::32,:] = mid01\n points[2::32,:] = med012\n points[3::32,:] = mid02\n points[4::32,:] = mid03\n points[5::32,:] = med013\n points[6::32,:] = median\n points[7::32,:] = med023\n\n points[8::32,:] = self.points[self.elements[:,1],:]\n points[9::32,:] = mid13\n points[10::32,:] = med123\n points[11::32,:] = mid12\n points[12::32,:] = mid01\n points[13::32,:] = med013\n points[14::32,:] = median\n points[15::32,:] = med012\n\n points[16::32,:] = self.points[self.elements[:,3],:]\n points[17::32,:] = mid23\n points[18::32,:] = med123\n points[19::32,:] = mid13\n points[20::32,:] = mid03\n points[21::32,:] = med023\n points[22::32,:] = median\n points[23::32,:] = med013\n\n points[24::32,:] = self.points[self.elements[:,2],:]\n points[25::32,:] = mid02\n points[26::32,:] = med012\n points[27::32,:] = mid12\n points[28::32,:] = mid23\n points[29::32,:] = med023\n points[30::32,:] = median\n points[31::32,:] = med123\n\n # KEEP ZEROFY ON, OTHERWISE YOU GET STRANGE BEHVAIOUR\n Decimals = 10\n rounded_points = points.copy()\n makezero(rounded_points)\n rounded_repoints = np.round(rounded_points,decimals=Decimals)\n points, inv_points = unique2d(rounded_points,order=False,\n consider_sort=False,return_inverse=True)\n\n\n elements = np.arange(points.shape[0])[inv_points].reshape(4*self.nelem,8)\n\n\n self.__reset__()\n\n self.element_type = \"hex\"\n self.elements = elements\n self.points = points\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryFacesHex()\n self.GetBoundaryEdgesHex()\n\n print(\"Tetrahedral to hexahedral mesh conversion took\", time() - tconv, \"seconds\")", "def to_points(self, divisions=100):", "def get_hershey():\n hershey_path = pkg_resources.resource_filename('pymicrofluidics', 'data/hershey.txt')\n hershey_table = {}\n first = True\n with open(hershey_path) as openfileobject:\n for tline in openfileobject:\n if re.search('Ascii',tline):\n if first == False:\n newline = 
hershey_table[asci]['coord'].split('-1,-1,')\n newline = [list(filter(None, x.split(','))) for x in newline if len(x)>0]\n hershey_table[asci]['coord'] = [np.array([[float(y[x]),float(y[x+1])] for x in range(0,len(y)-1,2)])/21 for y in newline]\n if len(hershey_table[asci]['coord'])>0:\n middle = 0.5*(np.max(np.concatenate(hershey_table[asci]['coord'])[:,0])+np.min(np.concatenate(hershey_table[asci]['coord'])[:,0]))\n #middle = float(middle)\n hershey_table[asci]['coord'] = [np.array([[x[0]-middle,x[1]] for x in y]) \n for y in hershey_table[asci]['coord']]\n hershey_table[asci]['width'] = np.max(np.concatenate(hershey_table[asci]['coord'])[:,0])-np.min(np.concatenate(hershey_table[asci]['coord'])[:,0])\n else:\n hershey_table[asci]['width'] = 0.5\n asci = int(re.findall('.*Ascii (\\d+).*',tline)[0])\n width = float(re.findall('\\d+,\\s*(\\d+),.*',tline)[0])\n hershey_table[asci] = {'coord': '', 'width': width}\n first = False\n else:\n newline = tline.rstrip('\\n')\n hershey_table[asci]['coord'] = hershey_table[asci]['coord']+newline\n return hershey_table", "def findChar(self, position, spaceLength ):\n leer=0 ## numeator of empty column\n Queue=[] ##this will help in serching for neighbours of pixels\n PiksList=[] ##list of balck piksels, of with consist the charakter\n length, high = self.getSize()\n \n while (position < length and self.vLineHistogram(position)==0): #serching for a first not empty line, for given position\n position+=1\n leer+=1\n if position == length: ## check if it is Space or it is End of line\n return position, \"Enter\", 0\n elif leer>=spaceLength:\n return position, \"Space\", 0\n else:\n for i in range(0,high): ##extracting all black pixels from this line\n if self.getPixel(position, i)<128:\n Queue.append((position, i))\n PiksList.append((position, i))\n\n while len(Queue)>0:\n Piksel=Queue.pop(0) ##geting firs element from Queue\n neighbourhood=[(Piksel[0]-1, Piksel[1]+1),(Piksel[0]-1, Piksel[1]),(Piksel[0]-1, Piksel[1]-1),(Piksel[0], Piksel[1]+1),(Piksel[0], Piksel[1]-1),(Piksel[0]+1, Piksel[1]+1),(Piksel[0]+1, Piksel[1]),(Piksel[0]+1, Piksel[1]-1)]\n ##to co wyzej to lista współrzędnych sąsiadów Piksela\n\n for neighbour in neighbourhood: ##cheking neighbourhood of each pixel\n if not(neighbour in PiksList) and (neighbour[0] in range(0,length)) and (neighbour[1] in range(0,high)) and self.getPixel(neighbour[0],neighbour[1])==0:\n Queue.append(neighbour)\n PiksList.append(neighbour)\n \n PiksList.sort() ##sorts list with number of column\n\n \n PiksList=self.addHigherPiks(PiksList) ##adds all piksel over finden pixels\n PiksList.sort()\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1] ## geting number of smalest and biggest column in group\n charLength=position2-position1\n if len(PiksList)>5: ##checkin if there are more then 5 piksels in group to eliminate case, when there are single pixels not eliminated by initial fomating\n if charLength<high: ##check if the length of finden group of pixels isn't bigger then length of tile\n newPosition= position1+(charLength/2) ##new position in the center of finden char to eliminate case, when one char is over the second\n Char=CharFrame(high,high) ##create new CrarFrame object\n \n for el in PiksList: ##making all pixels in PiksList black in ChatFrame object and white in self(LineFrame object)\n Char.putPixel(el[0]-position1,el[1])\n self.makeWhite(el[0],el[1])\n \n Char.reScale(30,30) #scaling CharFrame to the ening size\n \n return newPosition, Char, charLength/2\n\n else: ##length of goup of 
pixels is too big\n PiksList, Char = reconChar(PiksList,high) ## finding where to divide group of pixels\n for Piks in PiksList:\n self.makeWhite(Piks[0],Piks[1])\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1] ## geting number of smalest and biggest column in group\n charLength=position2-position1\n newPosition= position1+(charLength/2) ##new position in the center of finden char to eliminate case, when one char is over the second\n return newPosition, Char, charLength/2\n else: ##if there is less then 5 pixels in group\n for el in PiksList: ##making all pixels in PiksList white in self(LineFrame object)\n self.makeWhite(el[0],el[1])\n newPosition= position1+(charLength/2)\n return newPosition, \"None\", charLength/2", "def points_to_index(points, points_dict):\r\n index_locations = ''\r\n for point in points:\r\n index_locations += str(points_dict[point]) + ' '\r\n return index_locations", "def _get_halluc_points(_, halluc_pts):\n if len(halluc_pts) > 0:\n return halluc_pts\n else:\n return halluc_pts", "def publickey_unsafe(sk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n A = scalarmult_B(a)\n return encodepoint(A)", "def decipher_single_char_xored(cipher_text):\n best_score = 0\n p = ''\n key = -1\n for i in range(0,256):\n c = xor_hex(cipher_text,(chr(i) * (len(cipher_text) / 2)).encode('hex'))\n if score(c.decode('hex')) > best_score:\n best_score = score(c.decode('hex'))\n p = c.decode('hex').strip('\\n')\n key = i\n\n return (p, key, best_score)", "def poly_hash(text, p, x):\n hash = 0\n for ch in reversed(text):\n hash = (hash * x + ord(ch)) % p\n\n return hash", "def encode_points(Plist):\n return '[' + ','.join([encode_point(P) for P in Plist]) + ']'", "def codepoint_ords(self):\n pass", "def test_ascii_to_phred33(self):\r\n self.assertEqual(ascii_to_phred33('!'), 0)\r\n self.assertEqual(ascii_to_phred33('?'), 30)", "def englishtest(result, etaoin_shrdlu=[12.02,9.1,8.12,7.68,7.31,6.95,6.28,6.02,5.92,4.32,3.98,2.88]):\n \n a = len(result)\n single = []\n for i in range(12):\n single.append(9999)\n total = 0\n single[0]= result.count(b'e') + result.count(b'E')\n single[1]= result.count(b't') + result.count(b'T')\n single[2]= result.count(b'a') + result.count(b'A')\n single[3]= result.count(b'o') + result.count(b'O')\n single[4]= result.count(b'i') + result.count(b'I')\n single[5]= result.count(b'n') + result.count(b'N')\n single[6]= result.count(b's') + result.count(b'S')\n single[7]= result.count(b'h') + result.count(b'H')\n single[8]= result.count(b'r') + result.count(b'R')\n single[9]= result.count(b'd') + result.count(b'D')\n single[10]= result.count(b'l') + result.count(b'L')\n single[11]= result.count(b'u') + result.count(b'U')\n\n for i in range(12):\n if single[i] == 0:\n single[i] =100\n else:\n single[i] = single[i]/a\n for i in single:\n total = total + i\n \n return total, single", "def GOST34112012H256(msg):\n pi_sharp = [\n 252, 238, 221, 17, 207, 110, 49, 22, 251, 196, 250, 218, 35, 197, 4, 77, 233, 119, 240,\n 219, 147, 46, 153, 186, 23, 54, 241, 187, 20, 205, 95, 193, 249, 24, 101, 90, 226, 92, 239,\n 33, 129, 28, 60, 66, 139, 1, 142, 79, 5, 132, 2, 174, 227, 106, 143, 160, 6, 11, 237, 152, 127,\n 212, 211, 31, 235, 52, 44, 81, 234, 200, 72, 171, 242, 42, 104, 162, 253, 58, 206, 204, 181,\n 112, 14, 86, 8, 12, 118, 18, 191, 114, 19, 71, 156, 183, 93, 135, 21, 161, 150, 41, 16, 123,\n 154, 199, 243, 145, 120, 111, 157, 158, 178, 177, 50, 117, 25, 61, 255, 53, 138, 126, 109,\n 84, 198, 128, 195, 189, 13, 87, 223, 245, 36, 169, 62, 
168, 67, 201, 215, 121, 214, 246, 124,\n 34, 185, 3, 224, 15, 236, 222, 122, 148, 176, 188, 220, 232, 40, 80, 78, 51, 10, 74, 167, 151,\n 96, 115, 30, 0, 98, 68, 26, 184, 56, 130, 100, 159, 38, 65, 173, 69, 70, 146, 39, 94, 85, 47,\n 140, 163, 165, 125, 105, 213, 149, 59, 7, 88, 179, 64, 134, 172, 29, 247, 48, 55, 107, 228,\n 136, 217, 231, 137, 225, 27, 131, 73, 76, 63, 248, 254, 141, 83, 170, 144, 202, 216, 133, 97,\n 32, 113, 103, 164, 45, 43, 9, 91, 203, 155, 37, 208, 190, 229, 108, 82, 89, 166, 116, 210,\n 230, 244, 180, 192, 209, 102, 175, 194, 57, 75, 99, 182\n ]\n\n C = [\n 0xb1085bda1ecadae9ebcb2f81c0657c1f2f6a76432e45d016714eb88d7585c4fc4b7ce09192676901a2422a08a460d31505767436cc744d23dd806559f2a64507,\n 0x6fa3b58aa99d2f1a4fe39d460f70b5d7f3feea720a232b9861d55e0f16b501319ab5176b12d699585cb561c2db0aa7ca55dda21bd7cbcd56e679047021b19bb7,\n 0xf574dcac2bce2fc70a39fc286a3d843506f15e5f529c1f8bf2ea7514b1297b7bd3e20fe490359eb1c1c93a376062db09c2b6f443867adb31991e96f50aba0ab2,\n 0xef1fdfb3e81566d2f948e1a05d71e4dd488e857e335c3c7d9d721cad685e353fa9d72c82ed03d675d8b71333935203be3453eaa193e837f1220cbebc84e3d12e,\n 0x4bea6bacad4747999a3f410c6ca923637f151c1f1686104a359e35d7800fffbdbfcd1747253af5a3dfff00b723271a167a56a27ea9ea63f5601758fd7c6cfe57,\n 0xae4faeae1d3ad3d96fa4c33b7a3039c02d66c4f95142a46c187f9ab49af08ec6cffaa6b71c9ab7b40af21f66c2bec6b6bf71c57236904f35fa68407a46647d6e,\n 0xf4c70e16eeaac5ec51ac86febf240954399ec6c7e6bf87c9d3473e33197a93c90992abc52d822c3706476983284a05043517454ca23c4af38886564d3a14d493,\n 0x9b1f5b424d93c9a703e7aa020c6e41414eb7f8719c36de1e89b4443b4ddbc49af4892bcb929b069069d18d2bd1a5c42f36acc2355951a8d9a47f0dd4bf02e71e,\n 0x378f5a541631229b944c9ad8ec165fde3a7d3a1b258942243cd955b7e00d0984800a440bdbb2ceb17b2b8a9aa6079c540e38dc92cb1f2a607261445183235adb,\n 0xabbedea680056f52382ae548b2e4f3f38941e71cff8a78db1fffe18a1b3361039fe76702af69334b7a1e6c303b7652f43698fad1153bb6c374b4c7fb98459ced,\n 0x7bcd9ed0efc889fb3002c6cd635afe94d8fa6bbbebab076120018021148466798a1d71efea48b9caefbacd1d7d476e98dea2594ac06fd85d6bcaa4cd81f32d1b,\n 0x378ee767f11631bad21380b00449b17acda43c32bcdf1d77f82012d430219f9b5d80ef9d1891cc86e71da4aa88e12852faf417d5d9b21b9948bc924af11bd720,\n ]\n\n tau = [\n 0, 8, 16, 24, 32, 40, 48, 56, 1, 9, 17, 25, 33, 41, 49, 57, 2, 10, 18, 26, 34, 42, 50, 58,\n 3, 11, 19, 27, 35, 43, 51, 59, 4, 12, 20, 28, 36, 44, 52, 60, 5, 13, 21, 29, 37, 45, 53, 61, 6, 14,\n 22, 30, 38, 46, 54, 62, 7, 15, 23, 31, 39, 47, 55, 63\n ]\n\n A = [\n 0x8e20faa72ba0b470, 0x47107ddd9b505a38, 0xad08b0e0c3282d1c, 0xd8045870ef14980e,\n 0x6c022c38f90a4c07, 0x3601161cf205268d, 0x1b8e0b0e798c13c8, 0x83478b07b2468764,\n 0xa011d380818e8f40, 0x5086e740ce47c920, 0x2843fd2067adea10, 0x14aff010bdd87508,\n 0x0ad97808d06cb404, 0x05e23c0468365a02, 0x8c711e02341b2d01, 0x46b60f011a83988e,\n 0x90dab52a387ae76f, 0x486dd4151c3dfdb9, 0x24b86a840e90f0d2, 0x125c354207487869,\n 0x092e94218d243cba, 0x8a174a9ec8121e5d, 0x4585254f64090fa0, 0xaccc9ca9328a8950,\n 0x9d4df05d5f661451, 0xc0a878a0a1330aa6, 0x60543c50de970553, 0x302a1e286fc58ca7,\n 0x18150f14b9ec46dd, 0x0c84890ad27623e0, 0x0642ca05693b9f70, 0x0321658cba93c138,\n 0x86275df09ce8aaa8, 0x439da0784e745554, 0xafc0503c273aa42a, 0xd960281e9d1d5215,\n 0xe230140fc0802984, 0x71180a8960409a42, 0xb60c05ca30204d21, 0x5b068c651810a89e,\n 0x456c34887a3805b9, 0xac361a443d1c8cd2, 0x561b0d22900e4669, 0x2b838811480723ba,\n 0x9bcf4486248d9f5d, 0xc3e9224312c8c1a0, 0xeffa11af0964ee50, 0xf97d86d98a327728,\n 0xe4fa2054a80b329c, 0x727d102a548b194e, 0x39b008152acb8227, 0x9258048415eb419d,\n 
0x492c024284fbaec0, 0xaa16012142f35760, 0x550b8e9e21f7a530, 0xa48b474f9ef5dc18,\n 0x70a6a56e2440598e, 0x3853dc371220a247, 0x1ca76e95091051ad, 0x0edd37c48a08a6d8,\n 0x07e095624504536c, 0x8d70c431ac02a736, 0xc83862965601dd1b, 0x641c314b2b8ee083,\n ]\n\n def mult_b_A(b):\n c = 0\n for i in range(64):\n if b % 2 == 1:\n c = c ^ A[63-i]\n b = b // 2\n return c\n\n def MSB256(val):\n return val // (2**256)\n\n def int512(msg):\n res = 0\n for i in range(len(msg)):\n res += (2**(8 * i)) * msg[-i-1]\n return res\n\n def S(m):\n res = 0\n for i in range(64):\n byte = m // (2 ** (8 * i)) % 256\n res += pi_sharp[byte] * (2 ** (8 * i))\n return res\n\n def P(m):\n res = 0\n for i in range(64):\n byte = m // (2 ** (8 * tau[i])) % 256\n res += byte * (2 ** (8 * i))\n return res\n\n def L(m):\n res = 0\n for i in range(8):\n block = m // (2 ** (64 * i)) % (2**64)\n res += mult_b_A(block) * (2 ** (64 * i))\n return res\n\n def X(K, m):\n return K ^ m\n\n def E(K, m):\n res = X(K, m)\n for i in range(2, 14):\n res = L(P(S(res)))\n K = L(P(S(K ^ C[i - 2])))\n res = X(K, res)\n return res\n\n def g(h, m, N):\n return E(L(P(S(h ^ N))), m) ^ h ^ m\n\n IV = 0\n for i in range(64):\n IV += 2 ** (i * 8)\n h = IV\n N = 0\n Sigma = 0\n\n while len(msg) * 8 >= 512:\n m = int512(msg[-512 // 8:])\n h = g(h, m, N)\n N = (N + 512) % (2**512)\n Sigma = (Sigma + m) % (2**512)\n msg = msg[:-512 // 8]\n\n m = 2**(len(msg)*8) + int512(msg)\n h = g(h, m, N)\n N = (N + len(msg) * 8) % (2**512)\n Sigma = (Sigma + m) % (2**512)\n h = g(h, N, 0)\n h = MSB256(g(h, Sigma, 0))\n\n return h.to_bytes(64, 'big')", "def polytope2str(p, name):\n k = p.A.shape[0]\n l = p.A.shape[1]\n # pik=k\n s = \"idxint \"+name+\"k = \"+str(k)+\";\\n\"\n # pil=l\n s = s+\"idxint \"+name+\"l = \"+str(l)+\";\\n\"\n # piA = [A11,A21,...,Ak1,A12,...,Ak2,...,A1l,...,Akl];\n s = s+\"pfloat \"+name+\"A[] = {\"+matrix2str(-1*p.A)+\"};\\n\"\n # pib = [b1,b2,...,bk];\n s = s+\"pfloat \"+name+\"b[] = {\"+matrix2str(-1*p.b)+\"};\\n\"\n # picenter = [p.chebXc1,...,p.chebXcl];\n s = s+\"pfloat \"+name+\"center[] = {\"+matrix2str(p.chebXc)+\"};\\n\"\n return s", "def _rawprng(self):\n self.p += 1 \n if self.p >= self.o:\n\t\t\tself.p = 0\n t = 1768863 * self.s[self.p] + self.c * 2.3283064365386963e-10\n self.c = int(t) | 0\n self.s[self.p] = t - self.c\n return self.s[self.p]", "def ascii_hist(x):\n fAbs = frecuencia_abs(x)\n for k in sorted(fAbs):\n print(\"{0:5d} {1}\".format(k, \"+\"*fAbs[k]))", "def part2():\r\n my_input = 368078\r\n coords = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]\r\n x = y = dx = 0\r\n dy = -1\r\n grid = {}\r\n\r\n while True:\r\n total = 0\r\n for offset in coords:\r\n ox, oy = offset\r\n if (x+ox, y+oy) in grid:\r\n total += grid[(x+ox, y+oy)]\r\n if total > int(my_input):\r\n return total\r\n if (x, y) == (0, 0):\r\n grid[(0, 0)] = 1\r\n else:\r\n grid[(x, y)] = total\r\n if (x == y) or (x < 0 and x == -y) or (x > 0 and x == 1-y):\r\n dx, dy = -dy, dx\r\n x, y = x+dx, y+dy", "def keypoints_hflip(keypoints, rows, cols):\n keypoints[:, 0] = (cols - 1) - keypoints[:, 0]\n return keypoints", "def test_pds_txt(self):\n lcurve_txt = os.path.join(self.datadir,\n 'lcurve_txt_lc' +\n HEN_FILE_EXTENSION)\n command = '{0} -f 128'.format(lcurve_txt)\n hen.fspec.main(command.split())", "def getNUMS(index=0):\n\n assert index in range(256)\n nums_point = None\n for G in [getG(True), getG(False)]:\n seed = G + chr(index)\n for counter in range(256):\n seed_c = seed + chr(counter)\n hashed_seed = 
hashlib.sha256(seed_c).digest()\n #Every x-coord on the curve has two y-values, encoded\n #in compressed form with 02/03 parity byte. We just\n #choose the former.\n claimed_point = \"\\x02\" + hashed_seed\n try:\n nums_point = secp256k1.PublicKey(claimed_point, raw=True, ctx=ctx)\n return nums_point\n except:\n continue\n assert False, \"It seems inconceivable, doesn't it?\" # pragma: no cover", "def numberOfBoomerangsSlow(self, points):\n\n def is_boomerang(i, j, k):\n dist_a = pow(j[0] - i[0], 2) + pow(j[1] - i[1], 2)\n dist_b = pow(k[0] - i[0], 2) + pow(k[1] - i[1], 2)\n return dist_a == dist_b\n\n total = 0\n for i in points:\n for j in points:\n for k in points:\n if i != j and j != k and is_boomerang(i, j, k):\n total += 1\n return total", "def text_points(points, strs, **kw):\n xs, ys = asarray(points, float).T\n if isinstance(strs, str): #vectorize strs\n strs = [strs] * len(xs)\n for x, y, s in zip(xs, ys, strs):\n if not s: continue\n pylab.text(x, y, s, **kw)", "def crack_vigenere(stats: TextStats, dictionary: Dictionary, n_best: int=3, key_limit: int=10, verbose: bool=False):\n\n output_texts = []\n for key_len in range(2, key_limit):\n if verbose:\n print(key_len)\n # to simplify working with numpy, letters that don't fit in a table are thrown away\n filler_size = stats.N % key_len\n if filler_size:\n text = stats.text[0:-filler_size]\n else:\n text = stats.text\n\n # write the text into a matrix, each row is for one key letter\n m = int(len(text) / key_len)\n f = np.array(list(text))\n cipher_table = np.transpose(np.reshape(f, (m, key_len)))\n candidates = []\n\n for line_id in range(0, key_len):\n line_data = \"\".join(cipher_table[line_id, :])\n\n # try all possible shifts for the line (key letter) and save them\n line_candidates = []\n for shift in range(0, 26):\n shifted_line = rot(line_data, shift)\n shifted_score = compute_score(shifted_line, dictionary, bi=0, tri=0)\n line_candidates.append((shifted_score, shift, shifted_line))\n line_candidates.sort(key=lambda c: c[0], reverse=True)\n\n # only save the best guesses for each line\n candidates.append(line_candidates[0:n_best])\n\n possible_candidate_ids = list(range(0, n_best))\n output_texts_for_key_len = []\n\n # try all possible combinations of key letters in different lines, this will create the original key\n for combination in combinations(possible_candidate_ids, key_len):\n plaintext_table = np.zeros(cipher_table.shape)\n # convert the key to a readable format\n key = \"\"\n for line_id in range(0, key_len):\n key += chr(candidates[line_id][combination[line_id]][1] + 65)\n plaintext_table[line_id] = list(map(ord, list(candidates[line_id][combination[line_id]][2])))\n\n # assemble the lines together to read the main text\n table = np.reshape(np.transpose(plaintext_table), (1, len(text))).tolist()\n table = table[0]\n for i in range(0, len(table)):\n table[i] = chr(int(table[i]))\n text = \"\".join(table)\n\n score = compute_score(text, dictionary)\n output_texts_for_key_len.append((score, text, key, combination))\n\n output_texts_for_key_len.sort(key=lambda c: c[0], reverse=True)\n for i in range(0, n_best):\n if verbose:\n print(output_texts_for_key_len[i])\n output_texts.append(output_texts_for_key_len[i])\n\n # after all key sizes have been tried, select the best plaintext\n output_texts.sort(key=lambda c: c[0], reverse=True)\n return output_texts[0][1], \"vigenere\", output_texts[0][2]", "def letter_prob(c):\n if c == ' ': return 0.1904\n if c == 'e' or c == 'E': return 0.1017\n if c == 't' or c == 'T': 
return 0.0737\n if c == 'a' or c == 'A': return 0.0661\n if c == 'o' or c == 'O': return 0.0610\n if c == 'i' or c == 'I': return 0.0562\n if c == 'n' or c == 'N': return 0.0557\n if c == 'h' or c == 'H': return 0.0542\n if c == 's' or c == 'S': return 0.0508\n if c == 'r' or c == 'R': return 0.0458\n if c == 'd' or c == 'D': return 0.0369\n if c == 'l' or c == 'L': return 0.0325\n if c == 'u' or c == 'U': return 0.0228\n if c == 'm' or c == 'M': return 0.0205\n if c == 'c' or c == 'C': return 0.0192\n if c == 'w' or c == 'W': return 0.0190\n if c == 'f' or c == 'F': return 0.0175\n if c == 'y' or c == 'Y': return 0.0165\n if c == 'g' or c == 'G': return 0.0161\n if c == 'p' or c == 'P': return 0.0131\n if c == 'b' or c == 'B': return 0.0115\n if c == 'v' or c == 'V': return 0.0088\n if c == 'k' or c == 'K': return 0.0066\n if c == 'x' or c == 'X': return 0.0014\n if c == 'j' or c == 'J': return 0.0008\n if c == 'q' or c == 'Q': return 0.0008\n if c == 'z' or c == 'Z': return 0.0005\n return 1.0" ]
[ "0.6047821", "0.5564484", "0.5453595", "0.54502755", "0.5411063", "0.53716034", "0.53273135", "0.51558906", "0.5144419", "0.50915307", "0.5058742", "0.50469184", "0.5027793", "0.50248283", "0.50232935", "0.50198644", "0.5017458", "0.50110656", "0.5005989", "0.49963257", "0.498107", "0.4978686", "0.4978525", "0.4976141", "0.49707994", "0.4956479", "0.49518406", "0.49363604", "0.49346614", "0.49322024" ]
0.73444813
0
0th/1st order Hankel transform of f. F_n(k) = int_0^oo r f(r) J_n(kr) dr, n=0,1. Note that Anderson's implementation includes the r factor in the input function (i.e. g(r) = r f(r)), but this is not the case for this procedure.
def hankelTransform(f, k, order=0): # Get [cached] points and weights if order in hankelTransform.__dict__: p, w = hankelTransform.__dict__[order] else: p = hankel_points() w = hankel_weights(order=order) hankelTransform.__dict__[order] = p, w # Anderson's implementation requires function g to include the r factor def g(r): return r*f(r) # N.dot(w, g(p/k[:,N.newaxis]).T) / k is only valid for 1D arrays return N.dot(w, g(p/k[..., N.newaxis]).swapaxes(-1, -2)) / k
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fisher_z_transform(r):\r\n if abs(r) == 1: # fisher z transform is undefined, have to return nan\r\n return nan\r\n return .5 * log((1. + r) / (1. - r))", "def f(y):\n \n\n k = 1.0\n return y*(1-y)", "def _f(X, g, n):\n if n == 3:\n n = 3.001 # for numerical stability\n hyp2f1_term = hyp2f1((n-1)/2, g/2, n/2, 1/(1+X**2))\n beta_term = beta((n-1)/2, 0.5)\n return 0.5 * beta_term * hyp2f1_term * (1+X**2) ** ((1-n)/2)", "def crank_nicholson(t, h, state, f, kwargs):\n dim = len(state)//2\n K = [ np.zeros_like(state) for i in range(2) ]\n C = [ 0, h, h ]\n \n for i in range(2): \n A = f(t + C[i], state + C[i]*K[i-1], kwargs)\n K[i][dim:] = np.asarray(A)\n K[i][:dim] = state[dim:] + C[i+1] * K[i][dim:]\n \n return state + ( K[0] + K[1] ) * h / 2.0", "def rk2(x,t,h,f):\n\n k1=h*f(x,t)\n k2=h*f(x+k1/2,t+h/2)\n\n return x + k2", "def fisher_trans (r):\r\n #Smooth correlation\r\n r = r - sign(r) * 0.0001\r\n return (1/2) * math.log((1 + r) / (1 - r))", "def eval_hankel_function(pt, n=MAX_N):\n j_0 = 0\n for i in range(n):\n j_0 += (-1)**i * (1 / 4 * e**2)**i / factorial(i)**2\n\n g = 0.57721566490153286\n y_0 = (ln(e / 2) + g) * j_0\n h_n = 0\n for i in range(n):\n h_n += 1 / (i + 1)\n y_0 += (-1)**(i) * h_n * (e**2 / 4)**(i+1) / (factorial(i+1))**2\n y_0 *= 2 / pi\n\n imag_unit = (np.zeros(1, dtype=np.complept128) + 1j)[0]\n h_0 = j_0 + imag_unit * y_0\n return h_0", "def f0(E, fermi, T):\n return 1. / (1. + np.exp((E - fermi) / (k_B * T)))", "def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def lorentzian(f, A, fc):\n return A/(1+(2*np.pi*f/fc)**2)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def F(k0):\r\n assert((not np.any(np.isnan(k0))) and np.all(np.isfinite(k0)) and\r\n np.all(np.isreal(k0))),\\\r\n \"k0 must be real, finite and not NaN\"\r\n assert(len(k0) == 4), \"K must have length 4\"\r\n assert(hasattr(F, '__call__')), \\\r\n \"F must be a callable function\"\r\n k1 = np.array([k0[0], k0[1]])\r\n k2 = np.array([k0[2], k0[3]])\r\n f1 = k1 - np.array([f(t + dt / 3,\r\n qn + (dt / 12) * (5 * k1 - k2), r, e, w)])\r\n f2 = k2 - np.array([f(t + dt,\r\n qn + (dt / 4) * (3 * k1 + k2), r, e, w)])\r\n f3 = np.reshape(np.array([f1, f2]), (4,))\r\n return f3", "def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)", "def stirling(k, r) :\n\n return sum((-1)**(r-i)*binomial(r, i)*i**k for i in range(r+1)) / math.factorial(r)", "def F0(t):\n if (t < 1e-6):\n return 1.0 - t / 3.0\n else:\n return 0.5 * (np.pi / t) ** 0.5 * sp.erf(t ** 0.5)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)", "def fdq1(f, x, h=1e-5):\n return (f(x+h) - f(x))/h\n \n raise NotImplementedError(\"Problem 2 Incomplete\")", "def fn(n, k):\n if n == k: return 1\n if k == 0: return 0\n return ((n-1)*fn(n-1, k) + fn(n-1, k-1)) % 1_000_000_007", "def zfr(x: Tensor, fs: int, N: int = 150, R: int = 3, fc: float = 70.) 
-> Tensor:\n for _ in range(R):\n x = hann_sinc_high_pass(x, N, fs, fc)\n x = x.cumsum(dim=-1)\n x = hann_sinc_high_pass(x, N, fs, fc)\n return x", "def rk4(x,t,h,f):\n\n k1=h*f(x,t)\n k2=h*f(x+k1/2,t+h/2)\n k3=h*f(x+k2/2,t+h/2)\n k4=h*f(x+k3,t+h)\n\n return x + (k1 + 2*k2 + 2*k3 + k4)/6", "def infilCapaHorton(f0, fc, k, t):\n ft = fc + (f0 - fc)*np.exp(-k*t)\n return ft", "def bdq1(f, x, h=1e-5):\n return (f(x)-f(x-h))/h\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def H(n,x):\r\n H_values = [] #a list of sequential H values for different n's up to n=n.\r\n H_values.append(1) #appends H_0.\r\n H_values.append(2*x) #appends H_1.\r\n if n>1:\r\n for i in range(1,n):\r\n H_values.append((2*x*H_values[-1])-(2*i*H_values[-2]))\r\n return H_values[-1]\r\n elif n == 0:\r\n return H_values[0]\r\n else:\r\n return H_values[1]", "def fn(m, k):\n if m == 0 or k == 0: return 0 \n return 1 + fn(m-1, k-1) + fn(m-1, k)", "def weiner_tf(H, K):\r\n\r\n W = (1 / H) * ((np.conjugate(H) * H) / ((np.conjugate(H) * H) + K))\r\n return W", "def make_herm(n,transpose=False):\n #S we need special cases to handle the coefficients less than two, as the\n #S recursion formula works only for n>2. These cases aren't hard though!\n\n #S make the first element equal to 1\n h = np.zeros([n+1,n+1],dtype=np.float64)\n h[0,0] = 1.\n \n #S if the array is large enough, make element_2,2 equal to 2\n if n > 0:\n h[1,1] = 2.\n #S formula seems to work, found a different one on wikipedia. this one from\n #S make_herm.pro, maybe just the same result? need to work them out to \n #S equivalence. this returns correct array up to H_10\n if n > 1:\n for ind in range(2,n+1):\n h[ind,:] = np.roll(h[ind-1,:],1)*2.-2.*float(ind-1)*h[ind-2,:]\n #S if we want the transpose\n if transpose:\n return h.T\n\n #S otherwise just send out the h array\n else:\n return h", "def f1_score_at_k(r, max_rel, k = None):\n p = precision_at_k(r, k)\n r = recall_at_k(r, max_rel, k)\n return 2 * p * r / (p + r)", "def skew_js_fgan_lower_bound(f):\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1/n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))\n return alpha*first_term - (1-alpha)*second_term", "def f(X_,K_):\r\n return max(exp(X_)-K_,0)", "def fn(k, i, j):\n if not (0 <= i < N and 0 <= j < N): return 0\n if k == 0: return 1 \n return 1/8*sum(fn(k-1, i+ii, j+jj) for ii, jj in ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)))" ]
[ "0.64156175", "0.62107056", "0.61803037", "0.6101504", "0.60944474", "0.60662603", "0.60073453", "0.597241", "0.5946434", "0.5942573", "0.5939346", "0.593745", "0.5881655", "0.58690447", "0.58384955", "0.5835517", "0.5816677", "0.5767257", "0.57494926", "0.5738316", "0.5734998", "0.5724104", "0.57144564", "0.57134855", "0.5704528", "0.57029736", "0.5689248", "0.568424", "0.56736964", "0.567121" ]
0.67184377
0
Return a SymPy object representing the mole fraction as a function of site fractions.
def mole_fraction(phase, active_comps, species): result = S.Zero site_ratio_normalization = S.Zero # Calculate normalization factor for idx, sublattice in enumerate(phase.constituents): active = set(sublattice).intersection(set(active_comps)) if 'VA' in active: site_ratio_normalization += phase.sublattices[idx] * \ (1.0 - v.SiteFraction(phase.name, idx, 'VA')) else: site_ratio_normalization += phase.sublattices[idx] site_ratios = [c/site_ratio_normalization for c in phase.sublattices] # Sum up site fraction contributions from each sublattice for idx, sublattice in enumerate(phase.constituents): active = set(sublattice).intersection(set(active_comps)) if species in active: result += site_ratios[idx] * \ v.SiteFraction(phase.name, idx, species) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moleFraction(self, s): \n if type(s) == types.StringType:\n kk = self._contents.speciesIndex(s)\n else:\n kk = s\n x = self.moleFractions()\n return x[kk]", "def __init__ (self,numerator,denominator=1):\n self.debug = False\n if (self.debug): print(f'enter fraction.__init__ with {numerator}, {denominator}')\n sign = int(numerator * denominator / abs(numerator * denominator))\n if (self.debug): print(f'enter sign is {sign}')\n self.value=(sign * abs(numerator),abs(denominator))\n self.simplify()", "def simplify (self):\n if (self.debug): print(f'enter fraction.simplify')\n hcf = find_hcf(self.value[0], self.value[1])\n self.value = (self.value[0] // hcf.product(), self.value[1] // hcf.product())\n return", "def reciprocal(self):\n return Rational(self.denominator, self.numerator)", "def __init__(self):\n BuiltinFunction.__init__(self, \"frac\",\n conversions=dict(sympy='frac'),\n latex_name=r\"\\operatorname{frac}\")", "def moleFraction(self, species):\n k = self.speciesIndex(species)\n return _cantera.phase_molefraction(self._phase_id,k)", "def moleFractions(self):\n y = self.massFractions()\n self._contents.setMassFractions(y)\n return self._contents.moleFractions()", "def inverse( self ):\r\n\t\treturn fraction( self.denominator, self.numerator )", "def equivalent (self, factor):\n if (self.debug): print(f'enter fraction.equivalent {factor}')\n return (self.value[0] * factor, self.value[1]* factor)", "def inverse(self):\n return fraction(self.denom, self.num)", "def get_element_density(mt):\r\n fraction_matrix = zeros(100)\r\n \r\n composition = Composition(mt['pretty_formula'])\r\n \r\n for element in composition:\r\n fraction = composition.get_atomic_fraction(element) # get the atomic fraction.\r\n fraction_matrix[element.Z] = fraction\r\n \r\n return fraction_matrix", "def to_sympy_dict(f):\n return dmp_to_sympy_dict(f.rep, f.lev, f.dom)", "def fluxonium_potential(self):\n return -0.5*(self.Ej * ((1+self.d)*cos(self.phis - 2. * pi * self.phi - 2. * pi * self.phiL) + (1-self.d)*cos(self.phis-2. * pi * self.phiL))) + self.El/2. * (self.phis) ** 2\n #return -0.5*(self.Ej * cos(self.phis - 2. * pi * self.phi) + self.Ej * cos(self.phis)) + self.El/2. 
* (self.phis-self.phiL)** 2", "def massFractions(self):\n nsp = self._contents.nSpecies()\n y = zeros(nsp,'d')\n for k in range(nsp):\n y[k] = self.massFraction(k)\n return y", "def monoms(f):\n return dmp_monoms(f.rep, f.lev, f.dom)", "def make_sympy(self, xml=None):\r\n\r\n if self.the_sympy:\r\n return self.the_sympy\r\n\r\n if xml is None:\t # root\r\n if not self.is_mathml():\r\n return my_sympify(self.expr)\r\n if self.is_presentation_mathml():\r\n cmml = None\r\n try:\r\n cmml = self.cmathml\r\n xml = etree.fromstring(str(cmml))\r\n except Exception, err:\r\n if 'conversion from Presentation MathML to Content MathML was not successful' in cmml:\r\n msg = \"Illegal math expression\"\r\n else:\r\n msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml)\r\n raise Exception(msg)\r\n xml = self.fix_greek_in_mathml(xml)\r\n self.the_sympy = self.make_sympy(xml[0])\r\n else:\r\n xml = etree.fromstring(self.expr)\r\n xml = self.fix_greek_in_mathml(xml)\r\n self.the_sympy = self.make_sympy(xml[0])\r\n return self.the_sympy\r\n\r\n def gettag(expr):\r\n return re.sub('{http://[^}]+}', '', expr.tag)\r\n\r\n # simple math\r\n def op_divide(*args):\r\n if not len(args) == 2:\r\n raise Exception('divide given wrong number of arguments!')\r\n # print \"divide: arg0=%s, arg1=%s\" % (args[0],args[1])\r\n return sympy.Mul(args[0], sympy.Pow(args[1], -1))\r\n\r\n def op_plus(*args):\r\n return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1]\r\n\r\n def op_times(*args):\r\n return reduce(operator.mul, args)\r\n\r\n def op_minus(*args):\r\n if len(args) == 1:\r\n return -args[0]\r\n if not len(args) == 2:\r\n raise Exception('minus given wrong number of arguments!')\r\n #return sympy.Add(args[0],-args[1])\r\n return args[0] - args[1]\r\n\r\n opdict = {\r\n 'plus': op_plus,\r\n 'divide': operator.div, # should this be op_divide?\r\n 'times': op_times,\r\n 'minus': op_minus,\r\n 'root': sympy.sqrt,\r\n 'power': sympy.Pow,\r\n 'sin': sympy.sin,\r\n 'cos': sympy.cos,\r\n 'tan': sympy.tan,\r\n 'cot': sympy.cot,\r\n 'sinh': sympy.sinh,\r\n 'cosh': sympy.cosh,\r\n 'coth': sympy.coth,\r\n 'tanh': sympy.tanh,\r\n 'asin': sympy.asin,\r\n 'acos': sympy.acos,\r\n 'atan': sympy.atan,\r\n 'atan2': sympy.atan2,\r\n 'acot': sympy.acot,\r\n 'asinh': sympy.asinh,\r\n 'acosh': sympy.acosh,\r\n 'atanh': sympy.atanh,\r\n 'acoth': sympy.acoth,\r\n 'exp': sympy.exp,\r\n 'log': sympy.log,\r\n 'ln': sympy.ln,\r\n }\r\n\r\n # simple symbols - TODO is this code used?\r\n nums1dict = {\r\n 'pi': sympy.pi,\r\n }\r\n\r\n def parsePresentationMathMLSymbol(xml):\r\n \"\"\"\r\n Parse <msub>, <msup>, <mi>, and <mn>\r\n \"\"\"\r\n tag = gettag(xml)\r\n if tag == 'mn':\r\n return xml.text\r\n elif tag == 'mi':\r\n return xml.text\r\n elif tag == 'msub':\r\n return '_'.join([parsePresentationMathMLSymbol(y) for y in xml])\r\n elif tag == 'msup':\r\n return '^'.join([parsePresentationMathMLSymbol(y) for y in xml])\r\n raise Exception('[parsePresentationMathMLSymbol] unknown tag %s' % tag)\r\n\r\n # parser tree for Content MathML\r\n tag = gettag(xml)\r\n\r\n # first do compound objects\r\n\r\n if tag == 'apply':\t\t# apply operator\r\n opstr = gettag(xml[0])\r\n if opstr in opdict:\r\n op = opdict[opstr] # pylint: disable=invalid-name\r\n args = [self.make_sympy(expr) for expr in xml[1:]]\r\n try:\r\n res = op(*args)\r\n except Exception, err:\r\n self.args = args # pylint: disable=attribute-defined-outside-init\r\n self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name\r\n raise 
Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args))\r\n return res\r\n else:\r\n raise Exception('[formula]: unknown operator tag %s' % (opstr))\r\n\r\n elif tag == 'list':\t\t# square bracket list\r\n if gettag(xml[0]) == 'matrix':\r\n return self.make_sympy(xml[0])\r\n else:\r\n return [self.make_sympy(expr) for expr in xml]\r\n\r\n elif tag == 'matrix':\r\n return sympy.Matrix([self.make_sympy(expr) for expr in xml])\r\n\r\n elif tag == 'vector':\r\n return [self.make_sympy(expr) for expr in xml]\r\n\r\n # atoms are below\r\n\r\n elif tag == 'cn':\t\t\t# number\r\n return sympy.sympify(xml.text)\r\n # return float(xml.text)\r\n\r\n elif tag == 'ci':\t\t\t# variable (symbol)\r\n if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'):\t # subscript or superscript\r\n usym = parsePresentationMathMLSymbol(xml[0])\r\n sym = sympy.Symbol(str(usym))\r\n else:\r\n usym = unicode(xml.text)\r\n if 'hat' in usym:\r\n sym = my_sympify(usym)\r\n else:\r\n if usym == 'i' and self.options is not None and 'imaginary' in self.options:\t # i = sqrt(-1)\r\n sym = sympy.I\r\n else:\r\n sym = sympy.Symbol(str(usym))\r\n return sym\r\n\r\n else:\t\t\t\t# unknown tag\r\n raise Exception('[formula] unknown tag %s' % tag)", "def massFraction(self, s):\n if type(s) == types.StringType:\n kk = self._contents.speciesIndex(s)\n else:\n kk = s\n return _cantera.reactor_massFraction(self.__reactor_id, kk)", "def to_sympy(self, a):\n return sympy_mpf(a)", "def PGL_repn(rational_function):\n if is_Matrix(rational_function):\n return rational_function\n K = rational_function.parent()\n F = K.base_ring()\n if not K.is_field():\n return matrix(F, 2, [rational_function[1], rational_function[0], 0, 1])\n else:\n f = rational_function.numerator()\n g = rational_function.denominator()\n return matrix(F, 2, [f[1], f[0], g[1], g[0]])", "def get_s( self ):\n\n # initialize scaling factor as unknown variable, assuming it's real and\n # greater than zero\n _s = Symbol( 's', real = True, positive = True )\n\n # solve for scaling factor (first argument is expression set equal to zero)\n s = solve( self.a * _s ** self.n + self.b * _s - 1, _s )\n\n # save result as float\n self.s = float( s[ 0 ] )", "def __mul__(self, frac):\n numerator = self.numerator * frac.numerator\n denominator = self.denominator * frac.denominator\n gcd = math.gcd(numerator, denominator)\n\n return Fraction(int(numerator/gcd), int(denominator/gcd))", "def simplification(self):\n from copy import deepcopy\n\n fsm = deepcopy(self)\n fsm.prepone_output()\n return fsm.quotient(fsm.equivalence_classes())", "def monic(f):\n return f.per(dmp_ground_monic(f.rep, f.lev, f.dom))", "def continued_fraction(a) -> list:\n e = _sympify(a)\n if all(i.is_Rational for i in e.atoms()):\n if e.is_Integer:\n return continued_fraction_periodic(e, 1, 0)\n elif e.is_Rational:\n return continued_fraction_periodic(e.p, e.q, 0)\n elif e.is_Pow and e.exp is S.Half and e.base.is_Integer:\n return continued_fraction_periodic(0, 1, e.base)\n elif e.is_Mul and len(e.args) == 2 and (\n e.args[0].is_Rational and\n e.args[1].is_Pow and\n e.args[1].base.is_Integer and\n e.args[1].exp is S.Half):\n a, b = e.args\n return continued_fraction_periodic(0, a.q, b.base, a.p)\n else:\n # this should not have to work very hard- no\n # simplification, cancel, etc... which should be\n # done by the user. e.g. 
This is a fancy 1 but\n # the user should simplify it first:\n # sqrt(2)*(1 + sqrt(2))/(sqrt(2) + 2)\n p, d = e.expand().as_numer_denom()\n if d.is_Integer:\n if p.is_Rational:\n return continued_fraction_periodic(p, d)\n # look for a + b*c\n # with c = sqrt(s)\n if p.is_Add and len(p.args) == 2:\n a, bc = p.args\n else:\n a = S.Zero\n bc = p\n if a.is_Integer:\n b = S.NaN\n if bc.is_Mul and len(bc.args) == 2:\n b, c = bc.args\n elif bc.is_Pow:\n b = Integer(1)\n c = bc\n if b.is_Integer and (\n c.is_Pow and c.exp is S.Half and\n c.base.is_Integer):\n # (a + b*sqrt(c))/d\n c = c.base\n return continued_fraction_periodic(a, d, c, b)\n raise ValueError(\n 'expecting a rational or quadratic irrational, not %s' % e)", "def _symbolic_system(self):\n system = sym.Matrix(self._symbolic_equations)\n return system.subs(self._symbolic_change_of_vars)", "def pdf(s, x):\r\n x = Basic.sympify(x)\r\n if not isinstance(x, Number):\r\n raise NotImplementedError(\"SymPy does not yet support\"\r\n \"piecewise functions\")\r\n if x < s.a or x > s.b:\r\n return Rational(0)\r\n return 1/(s.b-s.a)", "def to_molar_ratio(massfrac_numerator, massfrac_denominator, numerator_mass, denominator_mass):\n return (massfrac_numerator / numerator_mass) / (massfrac_denominator / denominator_mass)", "def get_symmetric_system(self):\n W = DynamicalSystem(lambda x:1.0-self.f1(1.0-x),\n lambda x:1.0-self.f0(1.0-x))\n W.set_rho(1.0-self.rho)\n return W", "def __mul__ (self,other):\n if (self.debug): print(f'enter fraction.__mul__ with {other}')\n f3 = fraction(self.value[0]*other.value[0],self.value[1]*other.value[1])\n if (self.debug): print(f3, self, other)\n f3.simplify()\n return f3", "def value(G):\n from .printing import unicode_fraction\n\n return unicode_fraction(G._n.numerator, G._n.denominator)" ]
[ "0.5262331", "0.52277535", "0.5183561", "0.5163303", "0.51579064", "0.49737516", "0.49443674", "0.48803365", "0.48623955", "0.48332852", "0.48077378", "0.4806169", "0.48024052", "0.47912028", "0.47703204", "0.4750244", "0.47342396", "0.47308242", "0.47271502", "0.47234315", "0.47030133", "0.46971542", "0.46782878", "0.46745124", "0.46694744", "0.46330208", "0.46233714", "0.46216786", "0.46191624", "0.46041325" ]
0.55855596
0
Generate `n` points of dimension `d`
def generate(self, n, d): self.n = n self.d = d self.X = np.random.rand(n, d) self.Y = np.random.choice([0, 1], size=n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_point_cloud(n:int, d:int = 2, seed=1234) -> np.ndarray:\n initial_seed = np.random.get_state()\n np.random.seed(seed)\n points = np.random.rand(n, d)\n np.random.set_state(initial_seed)\n return points", "def create_random_points(n):\n\n\treturn [(random.randint(0,n),random.randint(0,n)) for i in range(n)]", "def initialization(n, D):\n\n samples = []\n while len(samples) < n:\n # X = np.random.randint(0, D.shape[1], 10*n)\n # Y = np.random.randint(0, D.shape[0], 10*n)\n X = np.random.uniform(0, D.shape[1], 10*n)\n Y = np.random.uniform(0, D.shape[0], 10*n)\n P = np.random.uniform(0, 1, 10*n)\n index = 0\n while index < len(X) and len(samples) < n:\n x, y = X[index], Y[index]\n x_, y_ = int(np.floor(x)), int(np.floor(y))\n if P[index] < D[y_, x_]:\n samples.append([x, y])\n index += 1\n return np.array(samples)", "def generate_points(num_points):\n for i in xrange(0, num_points):\n pass", "def scatter_points(n):\r\n P1 = np.random.randn(int(np.ceil(n/2)), 2) - 4\r\n P2 = 3 * np.random.rand(int(np.ceil(n/4)), 2) - np.array([10, 0])\r\n P3 = np.random.randn(int(np.ceil(n/4)), 2) + 3\r\n \"\"\"\r\n P1=np.floor(P1)\r\n P2=np.floor(P2)\r\n P3=np.floor(P3)\r\n \"\"\"\r\n L = list(np.concatenate((P1,P2,P3), axis=0))\r\n \r\n return L \r\n #return no_dupli(L)\r", "def give_rand_points(n_points, xmin, xmax, ymin, ymax, n_dim=2):\n random_points = np.random.rand(n_points, n_dim)\n random_points[:, 0] = random_points[:, 0]*(xmax-xmin)+xmin\n random_points[:, 1] = random_points[:, 1]*(ymax-ymin)+ymin\n\n return random_points", "def random_points(n, shape):\n n = int(n)\n if n < 0:\n raise ValueError('n must be a positive integer')\n else:\n try:\n d = len(shape)\n if d == 0:\n raise ValueError('shape must contain at least one integer value')\n except TypeError:\n raise ValueError('shape must be array-like')\n idx = np.random.choice(np.prod(shape), size=n, replace=False)\n return np.unravel_index(idx, shape)", "def gen_test_points(n=50, extent=(0,0,100,100), rand_seed=None):\n if rand_seed:\n random.seed(rand_seed)\n return [(random.randint(extent[0], extent[2]), random.randint(extent[1], extent[3]))\n for i in xrange(n)]", "def generatePoints(N, k=2, scale=1, same_quadrant=False):\n if same_quadrant:\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n else:\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def _generate_random_points_in_plane(nvect, dparam, npts, eps=0.0):\n np.random.seed(12345)\n a, b, c = nvect / np.linalg.norm(nvect)\n x, y = np.random.rand(npts), np.random.rand(npts)\n z = (dparam - a * x - b * y) / c\n if eps > 0:\n z += np.random.normal(loc=0., scale=eps, size=npts)\n return np.column_stack((x, y, z))", "def d2xy(n, d):\n x, y, rx, ry, s, t = 0, 0, 0, 0, 0, d\n s = 1\n while s < n:\n rx = 1 & (t // 2)\n ry = 1 & (t ^ rx)\n x, y = rot(s, x, y, rx, ry)\n x += s * rx\n y += s * ry\n t = t // 4\n s *= 2\n return x, y", "def get_random_points(N): \n x1 = np.random.uniform(-1,1,N)\n x2 = np.random.uniform(-1,1,N)\n return (x1,x2)", "def random(cls, n=random.randint(5, 10), d=2, borns=[-1, 1], **kwargs):\n points = [Point.random(d=d, borns=borns) for i in range(n)]\n form = cls(points, **kwargs)\n form.makeSparse()\n return form", "def 
_build_point_grid(n_per_side: int) -> np.ndarray:\n offset = 1 / (2 * n_per_side)\n points_one_side = np.linspace(offset, 1 - offset, n_per_side)\n points_x = np.tile(points_one_side[None, :], (n_per_side, 1))\n points_y = np.tile(points_one_side[:, None], (1, n_per_side))\n points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)\n return points", "def generate(self, n):\n num_variables = len(self.xmin)\n\n # Generate in [0,1] space\n x = np.random.rand(n, num_variables)\n\n # Scale from [0,1] to [self.xmin, self.xmax]\n x_scaled = self.scale_to_new_domain(x, self.xmin, self.xmax)\n if self.use_logger:\n self.logger.info(\"Random design: generated {0} points in {1} dimensions\".format(n, num_variables))\n return x_scaled", "def generate_latent_points(latent_dim, n):\n\n # Sample from uniform distribution\n x_input = np.random.uniform(-1, 1, latent_dim * n)\n\n # Make batch\n x_input = x_input.reshape(n, latent_dim)\n\n return x_input", "def gen_points(lo, hi, N):\n\treturn np.linspace(lo, hi, num=N)\n\t\n\t## a = np.array(range(0, N))\n\t## return lo + (a * (hi-lo)/float(N))", "def create_points(N, M):\n arr = numpy.random.randint(1, N+1, size=(M, 2))\n idx = 0\n coords = []\n points = []\n \n for ele in arr:\n if (ele[0], ele[1]) not in coords:\n idx += 1\n coords.append((ele[0], ele[1]))\n \n while idx < M:\n missed = numpy.random.randint(1, N+1, size=(M-idx, 2))\n for ele in missed:\n if (ele[0], ele[1]) not in coords:\n idx += 1\n coords.append((ele[0], ele[1]))\n\n # creates real points in the plane\n idx = 0\n for coord in coords:\n idx += 1\n points.append(Point(id=idx, x=coord[0], y=coord[1]))\n\n return points", "def get_random_points(n=5, scale=0.8, mindst=None, rec=0):\n mindst = mindst or .7 / n\n a = np.random.rand(n, 2)\n d = np.sqrt(np.sum(np.diff(ccw_sort(a), axis=0), axis=1) ** 2)\n if np.all(d >= mindst) or rec >= 200:\n return a * scale\n else:\n return get_random_points(n=n, scale=scale, mindst=mindst, rec=rec + 1)", "def generate_points(rng: RNG, number_of_points, dimension, scale=1.0) -> np.array:\n\n # points = []\n # for _ in range(self.number_of_points):\n # point = []\n # for _ in range(self.dimension):\n # value = rng.next_float()\n # while value > self.scale:\n # value = rng.next_float()\n # point.append(value)\n # points.append(points)\n # return np.array(points)\n\n # function to generate a point.\n def generate_point(i, j):\n # i and j are required for np.fromfunction, but not used.\n del i, j\n value = rng.next_float()\n while value > scale:\n value = rng.next_float()\n return value\n\n # Then creates the array using said function.\n return np.fromfunction(np.vectorize(generate_point), (number_of_points, dimension))", "def pick_chosen_points(m, n):\r\n return [i * n // m + n // (2 * m) for i in range(m)]", "def uniform_but_one_dataset(n, p):\n elements = []\n for i in range(n):\n elements.append((i, 1))\n elements.append((1, (n**(1.0 / p)) - 1))\n return elements", "def curve_to_arrays(p, n, d):\n x_list = []\n y_list = []\n\n for i in range(0, n, 1):\n x_list.append(calc_value(p.x, 0 + i * d))\n y_list.append(calc_value(p.y, 0 + i * d))\n\n return x_list, y_list", "def _ScatterXUniformlyExtendedRange(self, num_points, lattice_sizes,\n input_dims):\n x = []\n for _ in range(num_points):\n point = [\n np.random.random() * (lattice_sizes + 1.0) - 1.0\n for _ in range(input_dims)\n ]\n x.append(np.asarray(point))\n if input_dims == 1:\n x.sort()\n return x", "def _ScatterXUniformly(self, num_points, lattice_sizes, input_dims):\n x = []\n for _ in 
range(num_points):\n point = [\n np.random.random() * (lattice_sizes - 1.0) for _ in range(input_dims)\n ]\n x.append(np.asarray(point))\n if input_dims == 1:\n x.sort()\n return x", "def generate_H0_data(n, dist_d):\n d = dist_d.shape[0]#number of dimensions\n h = dist_d.shape[1]#number of bins to calculate the frequency distribution along each dimenison\n\n h0_data = np.zeros([n,d])\n #get the min value for each bin;again, range of valid values assumed to be in [0,1]\n bin_base = np.arange(h)/h\n for i in range(d):\n #get bin selections randomly for each point\n bin_random = np.random.choice(h, n, p=dist_d[i,:])\n # h0_data[:,i] = np.reshape(bin_base[bin_random], [-1,1]) + np.random.rand(n,1)/h\n h0_data[:,i] = bin_base[bin_random] + np.random.rand(n)/h\n\n return h0_data", "def to_npoints(self, n):\n segments = self.segments\n Ltotal = self.cumulength()[-1]\n step = Ltotal / float(n-1)\n step_remaining = step\n\n vertices = [self[0].get_vertex()]\n x = 0.0\n pos = self[0]\n seg = next(segments)\n seg_remaining = seg.displacement()\n\n while x < Ltotal-1e-8:\n direction = seg[0].azimuth(seg[1])\n\n if step_remaining <= seg_remaining:\n pos = pos.walk(step_remaining, direction)\n x += step_remaining\n seg_remaining -= step_remaining\n step_remaining = step\n vertices.append(pos.get_vertex())\n seg.vertices[0] = np.array(pos.vertex, dtype=np.float64)\n\n else:\n pos = seg[1]\n x += seg_remaining\n step_remaining -= seg_remaining\n\n seg = next(segments, seg)\n seg_remaining = seg.displacement()\n # except StopIteration as e:\n # if abs(Ltotal-x) > 1e-8: # tolerance for endpoint\n # raise e\n\n if len(vertices) == n-1:\n vertices.append(seg[-1].get_vertex())\n return Multipoint(vertices, crs=self.crs)", "def GeneratePointsImg(self, n, ppa):\n x = np.linspace(0,self.camera.sensorSize,n)+ppa[0]\n y = np.linspace(0,self.camera.sensorSize,n)+ppa[1]\n\n return np.meshgrid(x, y)", "def equidistributed(n: int) -> List[Tuple[float, float]]:\n\n # init\n a = 4 * np.pi / n\n d = np.sqrt(a)\n m_phi = round(np.pi / d)\n d_phi = np.pi / m_phi\n d_varphi = a / d_phi\n grid = []\n\n # conversion radians -> degrees\n r2d = 180.0 / np.pi\n\n # loop latitudinal\n for m in range(0, m_phi):\n lat = np.pi * (m + 0.5) / m_phi\n m_varphi = round(2 * np.pi * np.sin(lat) / d_varphi)\n\n # loop longitudinal\n for n in range(0, m_varphi):\n lon = 2 * np.pi * n / m_varphi\n\n # append to grid\n grid.append((lon * r2d, lat * r2d - 90.0))\n\n # finished\n return grid", "def create_derivative_graph(f, xrange, n):\n plot_points = []\n for x in xrange:\n plot_points.append(nth_derivative(f, x, n))\n return plot_points" ]
[ "0.7702441", "0.6789765", "0.6712974", "0.66599315", "0.65611076", "0.6416333", "0.6307879", "0.6275656", "0.62721384", "0.6263218", "0.6248472", "0.6243516", "0.6242459", "0.6237172", "0.6169062", "0.6159954", "0.6099011", "0.6078545", "0.6072246", "0.6034091", "0.59908116", "0.59740007", "0.5943985", "0.5933512", "0.5917469", "0.5911785", "0.5894746", "0.5873148", "0.5864825", "0.5857631" ]
0.73754287
1
Get the AssetKey associated with this InputDefinition for the given
def get_asset_key(self, context: "InputContext") -> Optional[AssetKey]: if callable(self._asset_key): return self._asset_key(context) else: return self.hardcoded_asset_key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")", "def access_key_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"access_key_id\")", "def file_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"file_key\")", "def file_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"file_key\")", "def tag_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"tag_key\")", "def tag_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"tag_key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")" ]
[ "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6646105", "0.6354836", "0.6335745", "0.6335745", "0.63294554", "0.63294554", "0.6288275", "0.6288275", "0.6288275", "0.6288275", "0.6288275", "0.6288275", "0.6288275", "0.6288275", "0.6288275", "0.6288275", "0.6288275" ]
0.739318
0
Create an input mapping to an input of a child node. In a GraphDefinition, you can use this helper function to construct
def mapping_to( self, node_name: str, input_name: str, fan_in_index: Optional[int] = None ) -> "InputMapping": check.str_param(node_name, "node_name") check.str_param(input_name, "input_name") check.opt_int_param(fan_in_index, "fan_in_index") return InputMapping( graph_input_name=self.name, mapped_node_name=node_name, mapped_node_input_name=input_name, fan_in_index=fan_in_index, graph_input_description=self.description, dagster_type=self.dagster_type, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_graph_from_input(self, input_node):\n raise NotImplementedError", "def map_from_parent_nid(self, layer_id, parent_nids, remap_local=...):\n ...", "def map_input_and_node(cls, onnx_model: onnx.ModelProto):\n\n input2node: Dict[str, List] = dict()\n for node in onnx_model.graph.node:\n for idx, input_name in enumerate(node.input):\n if input_name not in input2node:\n input2node[input_name] = []\n input2node[input_name].append([node, idx])\n return input2node", "def _lookup_input(nodes, name, value, definition):\n # containers\n if isinstance(value, list):\n return [_lookup_input(nodes, name, elem, definition) for elem in value]\n\n if isinstance(value, dict):\n return {k: _lookup_input(nodes, name, v, definition) for k, v in value.items()}\n\n # node reference\n if not isinstance(value, six.string_types):\n raise ValueError(\n \"Invalid definition for node '%s': invalid reference '%s' of type '%s' in inputs\"\n % (name, value, type(value))\n )\n # node not yet discovered yet\n if not value in nodes:\n # Look for it in the definition items:\n for found_name, d in definition.items():\n if value != found_name:\n continue\n # Load the node into nodes\n _process_kwargs(found_name, d, definition, nodes)\n\n break\n\n if not value in nodes:\n raise ValueError(\n \"Invalid definition for node '%s': reference to nonexistent node '%s' in inputs\" % (name, value)\n )\n node = nodes[value]\n\n # copy in debug mode\n if settings[\"DEBUG\"]:\n node = deepcopy(node)\n\n return node", "def _add_input(self, node_entry, idx):\n if node_entry[\"name\"] in self._params:\n self._add_params(node_entry, idx)\n else:\n node_type = node_entry[\"types\"][0]\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(node_type.dtype)]\n input = onnx.helper.make_tensor_value_info(\n node_entry[\"name\"], dtype, shape=get_node_shape(node_type)\n )\n self._mc.add_inputs([input])", "def _update_input_after_create_node(self):\n for node in self._normal_node_map.values():\n for src_node_id, input_attr in dict(node.inputs).items():\n node.delete_inputs(src_node_id)\n if not self._is_node_exist(node_id=src_node_id):\n message = f\"The input node could not be found by node id({src_node_id}) \" \\\n f\"while updating the input of the node({node})\"\n logger.warning(message)\n\n continue\n\n src_node = self._get_normal_node(node_id=src_node_id)\n input_attr['shape'] = src_node.output_shape\n input_attr['data_type'] = src_node.output_data_type\n node.add_inputs(src_name=src_node.name, input_attr=input_attr)", "def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])", "def map_to_parent_nid(self, nid):\n ...", "def node_mapping(self):\n ...", "def create_node(self, name, parent):\n\n try:\n node = self.map[name]\n return node\n except:\n node = Node(name,parent=parent.name)\n parent.children.add(node)\n\n node.parent = parent.name\n\n self.map[name] = node\n\n return node", "def map_to_parent_eid(self, eid):\n ...", "def register_inputs_to_main_model(input_edge_name: str, input_edge_user: NodeStruct):\n nd_parent = input_edge_user.parent_module_struct\n while nd_parent:\n nd_parent.add_inputs_edge(input_edge_name)\n nd_parent = nd_parent.parent_module_struct", "def gen_child(self, g, ng, child):\n with About(child.debug, self.relation):\n self.remap_node((g, child), g, child, ng, ng.apply())", "def _init_graph_parameter(cls, graph, init_inputs, device):\n tensor_map = {}\n # due to https://github.com/onnx/onnx/issues/2417\n # sometimes, input contains all initializer's info\n # sometimes, may not\n all_inputs = OrderedDict()\n for t in graph.input:\n all_inputs[t.name] = t\n # so we refresh the input by the initializer\n for t in graph.initializer:\n all_inputs[t.name] = t\n initializers = {t.name for t in graph.initializer}\n inp_idx = 0\n for name, x in all_inputs.items():\n if name in initializers:\n # if it has initializer, we use its value as the input\n np_tensor = numpy_helper.to_array(x)\n if np_tensor.dtype == \"int64\":\n np_tensor = np_tensor.astype(np.int32)\n # todo, we cannot support scalar tensor\n if np.ndim(np_tensor) == 0:\n np_tensor = np.array(np_tensor, ndmin=1)\n else:\n # if not, means it's a input rather than a inner weight\n # so if the user gives values, we use these values\n # if not, we just use the shape of input gived by onnx to init a random value\n # HOWEVER, the random value may not be correct for some inputs, such as gather which needs indices\n # so if have operators, the user must give inputs\n x_shape = tuple(\n dim.dim_value for dim in x.type.tensor_type.shape.dim)\n if init_inputs is not None:\n np_tensor = init_inputs[inp_idx]\n inp_idx += 1\n else:\n np_tensor = np.random.randn(*x_shape).astype(np.float32)\n tmp_tensor = tensor.from_numpy(np_tensor)\n tmp_tensor.to_device(device)\n # todo, for backward\n tmp_tensor.stores_grad = (name in initializers)\n tensor_map[x.name] = tmp_tensor\n return tensor_map", "def instantiate_inputs(self, args_):\n\n def traversal_function(obj):\n placeholder = input_placeholders.pop(0)\n placeholder.instantiate(obj)\n\n input_placeholders = [\n self.placeholders[input_id] for input_id in self.input_placeholder_ids\n ]\n\n Role.nested_object_traversal(args_, traversal_function, FrameworkTensor)", "def map_inputs(building_name, inputs, mapping):\n\tinput_tags = mapping[building_name][\"Inputs\"]\n\tret = []\n\tfor tag in input_tags:\n\t\tmatching_input = [t for t in inputs if t[0] == tag]\n\t\tret.append(matching_input[0][1])\n\n\treturn ret", "def call(self, inputs):\n return tf.nest.map_structure(lambda l: l(inputs), self._layer_nest)", "def create_inputs(self):\n return {}", "def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node", "def __init__(self, inputs=[]):\n self.inputs = inputs # input_list <- C, Java <- 匈牙利命名法 -> Python 特别不建议\n # self.outputs = outputs # output_list\n self.value = None\n self.outputs = []\n self.gradients = {}\n\n for node in self.inputs:\n node.outputs.append(self) # build a connection relationship", "def add(self, inp, out):\n self.curr_node.input_frequencies[inp] += 1\n if inp not in self.curr_node.children.keys() or out not in self.curr_node.children[inp].keys():\n node = Node(out)\n self.curr_node.children[inp][out] = node\n\n self.curr_node = self.curr_node.children[inp][out]\n self.curr_node.frequency += 1", "def __init__(self, name, graph, plugs=None):\n self.name = name\n self.graph = graph\n self.plugs = plugs or []\n self.graph.inputs[self.name] = self", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n for child in paramInput.subparts:\n tag = child.getName()\n if tag == 'target':\n self.targets = set(child.value)\n elif tag == 'bins':\n self.numBins = child.value", "def createInput(self):\n return _libsbml.Transition_createInput(self)", "def get_parent_child_mapping(child_parent_map):\n parent_children_mapping = defaultdict(list)\n if isinstance(child_parent_map, dict):\n child_parent_map = list(child_parent_map.iteritems())\n for child, parent in child_parent_map:\n parent_children_mapping[parent].append(child)\n return parent_children_mapping", "def _map_inputs(module, wires):\n kwargs = {}\n for terminal in module.terminals:\n if terminal['use'] != \"in\": continue\n\n collect = [w['source'] for w in wires if w['target'][1] == terminal['id']]\n if len(collect) == 0:\n if terminal['required']:\n raise TypeError(\"Missing input for %s.%s\"\n % (module.id, terminal['id']))\n elif terminal['multiple']:\n kwargs[terminal['id']] = collect\n else:\n kwargs[terminal['id']] = None\n elif terminal['multiple']:\n kwargs[terminal['id']] = collect\n elif len(collect) > 1:\n raise TypeError(\"Excess input for %s.%s\"\n % (module.id, terminal['id']))\n else:\n kwargs[terminal['id']] = collect[0]\n return kwargs", "def remap_input(self, current_input: Any, new_input: Any) -> None:\n for i, arg in enumerate(self.args):\n if arg.id_at_location == current_input.id_at_location:\n self.args[i] = new_input\n\n for k, v in self.kwargs.items():\n if v.id_at_location == current_input.id_at_location:\n self.kwargs[k] = new_input", "def generic_visit(self, node, parent_map):\n node_map = []\n parent_map.append((node, node_map))\n \n for field, value in ast.iter_fields(node):\n if isinstance(value, list):\n for item in value:\n if isinstance(item, ast.AST):\n self.visit(item, node_map)\n elif isinstance(value, ast.AST):\n self.visit(value, node_map)", "def make_input_fn(step_output):\n return tf.nn.embedding_lookup(embeddings, step_output.predictions)", "def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n\n name = node_entry[\"name\"]\n pad_data = numpy.asarray(attrs[\"pads\"], dtype=attrs[\"pads\"][0].dtype).astype(numpy.int64)\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(pad_data, name, \"pads\", model_container),\n node_entry[\"input_names\"][1],\n ]\n\n node = onnx.helper.make_node(\n cls.__name__, input_names, node_entry[\"output_names\"], mode=attrs[\"mode\"]\n )\n model_container.add_nodes([node])" ]
[ "0.6538408", "0.635551", "0.6255453", "0.59544104", "0.5894463", "0.58544743", "0.5850743", "0.5818974", "0.57496595", "0.5615324", "0.5543781", "0.5497266", "0.5463415", "0.5447868", "0.54466885", "0.5313355", "0.5294535", "0.52467805", "0.5215256", "0.52111", "0.51674205", "0.516155", "0.51493484", "0.51432747", "0.5133073", "0.5132029", "0.5119421", "0.5114818", "0.5113292", "0.51107895" ]
0.70850724
0
Return a new InputDefinition that merges this one's properties with those inferred from type signature.
def combine_with_inferred(self, inferred: InferredInputProps) -> "InputDefinition": check.invariant( self.name == inferred.name, f"InferredInputProps name {inferred.name} did not align with InputDefinition name" f" {self.name}", ) dagster_type = self._dagster_type if self._type_not_set: dagster_type = _checked_inferred_type(inferred) description = self._description if description is None and inferred.description is not None: description = inferred.description default_value = self._default_value if not self.has_default_value: default_value = inferred.default_value return InputDefinition( name=self.name, dagster_type=dagster_type, description=description, default_value=default_value, metadata=self.metadata, asset_key=self._asset_key, asset_partitions=self._asset_partitions_fn, input_manager_key=self._input_manager_key, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _from_base(cls, _input: Optional[Union[Input, Dict]]) -> Optional[\"InternalInput\"]:\n if _input is None:\n return None\n if isinstance(_input, InternalInput):\n return _input\n if isinstance(_input, Input):\n # do force cast directly as there is no new field added in InternalInput\n # need to change the logic if new field is added\n _input.__class__ = InternalInput\n return _input\n return InternalInput(**_input)", "def create_inputs_for_inference(builder, conf):\r\n\r\n inputs = dict()\r\n\r\n inputs[\"mel_spec_input\"] = builder.addInputTensor(popart.TensorInfo(_get_popart_type(conf.precision),\r\n [conf.samples_per_device,\r\n conf.mel_bands,\r\n conf.max_spectrogram_length]),\r\n \"mel_spec_input\")\r\n\r\n return inputs", "def as_input(self):\n return Input(self.alias, self.eval())", "def build_model_input(cls, name='input'):\n return cls(name, PortDirection.INPUT, type=PortType.MODEL)", "def create_inputs(self):\n return {}", "def getInputSpecification(cls):\n spec = super().getInputSpecification()\n # TODO Entities should use factories to populate their allowable inputs\n # -> Entities themselves don't have inputs (I think)\n return spec", "def getInputSpecification(cls):\n inputSpecification = super(Custom1D, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"workingDir\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"functionType\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"dataFilename\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"functionID\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"variableID\", contentType=InputTypes.StringType))\n\n return inputSpecification", "def _get_inputs(self):\n return self", "def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del self.nodes[key]\n return specification", "def from_input_once_single(cls) -> object:\n return cls(input_single())", "def get_input_schema(cls):\n return dict(properties=dict(folder_path=\"string\"))", "def reconstruct_input_ext(self, model_in):", "def convert_raw_configuration_to_input_instances(self) -> List[Input]:\n\n return [\n self.get_or_create_input_instance_from_raw(key, value)[0]\n for key, value in self.raw_configuration.items()\n ]", "def inputs(self) -> 'Input':\n return self.Input", "def _update_input_type(self):\n pass", "def get_input_spec(self):\r\n return self.input_spec", "def _get_unbatched_input_spec(self):\n if isinstance(self._input_spec, type_spec.BatchableTypeSpec):\n return self._input_spec._unbatch()  # pylint: disable=protected-access\n if isinstance(self._input_spec, resource_variable_ops.VariableSpec):\n return resource_variable_ops.VariableSpec(\n shape=(None if self._input_spec.shape is None\n else self._input_spec.shape[1:]),\n dtype=self._input_spec.dtype,\n trainable=self._input_spec.trainable)\n else:\n raise NotImplementedError(\n f'`{self.value_type.__name__}`s `TypeSpec` is not supported for '\n f'inputs of type {type(self._input_spec)}.')", "def create_input_instances(self) -> dict:\n\n self.create_required_paths()\n return self.get_full_configuration()", "def get_initial_inputs(self) -> Dict[str, ValueType]:\n if self.const_inputs:\n return self.const_inputs.copy() # Clone predefined\n return {} # Nothing set yet", "def _get_schema(self):\n\n schema = ProtocolSchema()\n\n schema.id = self.id\n schema.type = type(self).__name__\n\n for input_path in self.required_inputs:\n\n if not (input_path.start_protocol is None or (input_path.start_protocol == self.id and\n input_path.start_protocol == input_path.last_protocol)):\n\n continue\n\n # Always make sure to only pass a copy of the input. Changing the schema\n # should NOT change the protocol.\n schema.inputs[input_path.full_path] = copy.deepcopy(self.get_value(input_path))\n\n return schema", "def getInputSpecification(cls):\n inputSpecification = super(Normal, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"mean\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"sigma\", contentType=InputTypes.FloatType))\n\n return inputSpecification", "def new_input_config(self, name, config, description=''):\n return self._new_ref_config(\n ConfigRef.INPUT_PARAMS, name, config, description)", "def getInputSpecification(cls):\n inputSpecification = super(Metric, cls).getInputSpecification()\n featuresInput = InputData.parameterInputFactory(\"Features\", contentType=InputTypes.StringListType)\n featuresInput.addParam(\"type\", InputTypes.StringType)\n inputSpecification.addSub(featuresInput)\n targetsInput = InputData.parameterInputFactory(\"Targets\", contentType=InputTypes.StringListType)\n targetsInput.addParam(\"type\", InputTypes.StringType)\n inputSpecification.addSub(targetsInput)\n multiOutputInput = InputData.parameterInputFactory(\"multiOutput\", contentType=InputTypes.StringType)\n inputSpecification.addSub(multiOutputInput)\n multiOutput = InputTypes.makeEnumType('MultiOutput', 'MultiOutputType', ['mean','max','min','raw_values'])\n multiOutputInput = InputData.parameterInputFactory(\"multiOutput\", contentType=multiOutput)\n inputSpecification.addSub(multiOutputInput)\n weightInput = InputData.parameterInputFactory(\"weight\", contentType=InputTypes.FloatListType)\n inputSpecification.addSub(weightInput)\n pivotParameterInput = InputData.parameterInputFactory(\"pivotParameter\", contentType=InputTypes.StringType)\n inputSpecification.addSub(pivotParameterInput)\n metricInput = InputData.parameterInputFactory(\"Metric\", contentType=InputTypes.StringType)\n metricInput.addParam(\"class\", InputTypes.StringType, True)\n metricInput.addParam(\"type\", InputTypes.StringType, True)\n inputSpecification.addSub(metricInput)\n\n return inputSpecification", "def _get_input_type(self):\n return self.__input_type", "def _get_input_type(self):\n return self.__input_type", "def _get_input_type(self):\n return self.__input_type", "def _get_input_type(self):\n return self.__input_type", "def _get_input_type(self):\n return self.__input_type", "def _get_input_type(self):\n return self.__input_type", "def from_input_fn(return_values):\n if isinstance(return_values, dataset_ops.Dataset):\n dataset = return_values\n return _Inputs(dataset=dataset)\n\n features, labels = _Inputs._parse_inputs(return_values)\n return _Inputs(features, labels)" ]
[ "0.5935494", "0.5238198", "0.5198502", "0.5185639", "0.51366514", "0.5023624", "0.5011453", "0.50001436", "0.4978054", "0.49546343", "0.4936571", "0.49176475", "0.4890198", "0.48576808", "0.48501834", "0.4840561", "0.48357502", "0.48231715", "0.4815088", "0.48061097", "0.48039716", "0.48003456", "0.47652423", "0.47393954", "0.47393954", "0.47393954", "0.47393954", "0.47393954", "0.47393954", "0.47269857" ]
0.7089827
0